2 * Copyright (C) 2010 Google, Inc.
4 * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved.
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #include <linux/err.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/platform_device.h>
21 #include <linux/clk.h>
24 #include <linux/of_device.h>
25 #include <linux/of_gpio.h>
26 #include <linux/gpio.h>
27 #include <linux/slab.h>
28 #include <linux/mmc/card.h>
29 #include <linux/mmc/host.h>
30 #include <linux/module.h>
31 #include <linux/mmc/sd.h>
32 #include <linux/regulator/consumer.h>
33 #include <linux/delay.h>
34 #include <linux/pm_runtime.h>
35 #include <linux/tegra_pm_domains.h>
36 #include <linux/dma-mapping.h>
41 #include <linux/debugfs.h>
42 #include <linux/seq_file.h>
43 #include <linux/reboot.h>
44 #include <linux/devfreq.h>
45 #include <linux/clk/tegra.h>
46 #include <linux/tegra-soc.h>
47 #include <linux/tegra-fuse.h>
49 #include <linux/platform_data/mmc-sdhci-tegra.h>
50 #include <mach/pinmux.h>
52 #include "sdhci-pltfm.h"
/*
 * Debug trace macro: pr_info() when MMC debugging is compiled in, a no-op
 * otherwise.
 * NOTE(review): the surrounding #if/#else/#endif guard lines were lost in
 * extraction; without them the two definitions are a macro redefinition.
 * Confirm the exact guard condition against the original source.
 */
#ifdef CONFIG_MMC_DEBUG
#define SDHCI_TEGRA_DBG(stuff...)	pr_info(stuff)
#else
#define SDHCI_TEGRA_DBG(stuff...)	do {} while (0)
#endif
/* Tegra vendor-specific SDMMC register offsets and bit fields */
#define SDHCI_VNDR_CLK_CTRL				0x100
#define SDHCI_VNDR_CLK_CTRL_SDMMC_CLK			0x1
#define SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE	0x8
#define SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE	0x4
#define SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK		0x2
#define SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT		16
#define SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT		24
#define SDHCI_VNDR_CLK_CTRL_SDR50_TUNING		0x20
#define SDHCI_VNDR_CLK_CTRL_INTERNAL_CLK		0x2
#define SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK		0xFF
#define SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK		0x1F

#define SDHCI_VNDR_MISC_CTRL				0x120
#define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT	0x8
#define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT	0x10
#define SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT	0x200
#define SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0		0x20
#define SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT	0x1
#define SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK		0x180
#define SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT	17

/* Preset value registers: SDCLK frequency select fields per timing mode */
#define SDHCI_VNDR_PRESET_VAL0_0			0x1d4
#define SDCLK_FREQ_SEL_HS_SHIFT				20
#define SDCLK_FREQ_SEL_DEFAULT_SHIFT			10

#define SDHCI_VNDR_PRESET_VAL1_0			0x1d8
#define SDCLK_FREQ_SEL_SDR50_SHIFT			20
#define SDCLK_FREQ_SEL_SDR25_SHIFT			10

#define SDHCI_VNDR_PRESET_VAL2_0			0x1dc
#define SDCLK_FREQ_SEL_DDR50_SHIFT			10

#define SDMMC_SDMEMCOMPPADCTRL				0x1E0
#define SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK		0xF
#define SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK	0x80000000

/* Pad auto-calibration control/status */
#define SDMMC_AUTO_CAL_CONFIG				0x1E4
#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START		0x80000000
#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE		0x20000000
#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT	0x8

#define SDMMC_AUTO_CAL_STATUS				0x1EC
#define SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE		0x80000000
#define SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET		24
#define PULLUP_ADJUSTMENT_OFFSET			20

#define SDMMC_VENDOR_ERR_INTR_STATUS_0			0x108

#define SDMMC_IO_SPARE_0				0x1F0
#define SPARE_OUT_3_OFFSET				19

#define SDMMC_VENDOR_IO_TRIM_CNTRL_0			0x1AC
#define SDMMC_VENDOR_IO_TRIM_CNTRL_0_SEL_VREG_MASK	0x4
/* NVIDIA chip-specific quirk flags (soc_data->nvquirks) */
/* Erratum: Version register is invalid in HW */
#define NVQUIRK_FORCE_SDHCI_SPEC_200		BIT(0)
/* Erratum: Enable block gap interrupt detection */
#define NVQUIRK_ENABLE_BLOCK_GAP_DET		BIT(1)
/* Do not enable auto calibration if the platform doesn't support */
#define NVQUIRK_DISABLE_AUTO_CALIBRATION	BIT(2)
/* Set Calibration Offsets */
#define NVQUIRK_SET_CALIBRATION_OFFSETS		BIT(3)
/* Set Drive Strengths */
#define NVQUIRK_SET_DRIVE_STRENGTH		BIT(4)
/* Enable PADPIPE CLKEN */
#define NVQUIRK_ENABLE_PADPIPE_CLKEN		BIT(5)
/* DISABLE SPI_MODE CLKEN */
#define NVQUIRK_DISABLE_SPI_MODE_CLKEN		BIT(6)
/* Set tap delay */
#define NVQUIRK_SET_TAP_DELAY			BIT(7)
/* Set trim delay */
#define NVQUIRK_SET_TRIM_DELAY			BIT(8)
/* Enable SDHOST v3.0 support */
#define NVQUIRK_ENABLE_SD_3_0			BIT(9)
/* Enable SDR50 mode */
#define NVQUIRK_ENABLE_SDR50			BIT(10)
/* Enable SDR104 mode */
#define NVQUIRK_ENABLE_SDR104			BIT(11)
/* Enable DDR50 mode */
#define NVQUIRK_ENABLE_DDR50			BIT(12)
/* Enable Frequency Tuning for SDR50 mode */
#define NVQUIRK_ENABLE_SDR50_TUNING		BIT(13)
/* Enable HS200 mode */
#define NVQUIRK_ENABLE_HS200			BIT(14)
/* Enable Infinite Erase Timeout */
#define NVQUIRK_INFINITE_ERASE_TIMEOUT		BIT(15)
/* No Calibration for sdmmc4 */
#define NVQUIRK_DISABLE_SDMMC4_CALIB		BIT(16)
/* ENABLE FEEDBACK IO CLOCK */
#define NVQUIRK_EN_FEEDBACK_CLK			BIT(17)
/* Disable AUTO CMD23 */
#define NVQUIRK_DISABLE_AUTO_CMD23		BIT(18)
/* Shadow write xfer mode reg and write it alongwith CMD register */
#define NVQUIRK_SHADOW_XFER_MODE_REG		BIT(19)
/* update PAD_E_INPUT_OR_E_PWRD bit */
#define NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD	BIT(20)
/* Set the pipe stages field (NOTE(review): original comment duplicated the
 * shadow-xfer-mode text; presumed copy/paste error) */
#define NVQUIRK_SET_PIPE_STAGES_MASK_0		BIT(21)
#define NVQUIRK_HIGH_FREQ_TAP_PROCEDURE		BIT(22)
/* Disable SDMMC3 external loopback */
#define NVQUIRK_DISABLE_EXTERNAL_LOOPBACK	BIT(23)
/* Select fix tap hole margins */
#define NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS	BIT(24)
/* Enable HS400 mode */
#define NVQUIRK_ENABLE_HS400			BIT(26)
/* Enable AUTO CMD23 */
#define NVQUIRK_ENABLE_AUTO_CMD23		BIT(27)
#define NVQUIRK_SET_SDMEMCOMP_VREF_SEL		BIT(28)
/* Special PAD control register settings are needed for T210 */
#define NVQUIRK_UPDATE_PAD_CNTRL_REG		BIT(29)
#define NVQUIRK_UPDATE_PIN_CNTRL_REG		BIT(30)
/* Use timeout clk for write crc status data timeout counter */
#define NVQUIRK_USE_TMCLK_WR_CRC_TIMEOUT	BIT(31)

/* Second quirk word (soc_data->nvquirks2) */
/* Enable T210 specific SDMMC WAR - sd card voltage switch */
#define NVQUIRK2_CONFIG_PWR_DET			BIT(0)
/* Enable T210 specific SDMMC WAR - Tuning Step Size, Tuning Iterations */
#define NVQUIRK2_UPDATE_HW_TUNING_CONFG		BIT(1)
/* Common subset of quirks for Tegra3 and later sdmmc controllers */
#define TEGRA_SDHCI_NVQUIRKS	(NVQUIRK_ENABLE_PADPIPE_CLKEN | \
		NVQUIRK_DISABLE_SPI_MODE_CLKEN | \
		NVQUIRK_EN_FEEDBACK_CLK | \
		NVQUIRK_SET_TAP_DELAY | \
		NVQUIRK_ENABLE_SDR50_TUNING | \
		NVQUIRK_ENABLE_SDR50 | \
		NVQUIRK_ENABLE_SDR104 | \
		NVQUIRK_SHADOW_XFER_MODE_REG | \
		NVQUIRK_DISABLE_AUTO_CMD23)

#define TEGRA_SDHCI_QUIRKS	(SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | \
		SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
		SDHCI_QUIRK_SINGLE_POWER_WRITE | \
		SDHCI_QUIRK_NO_HISPD_BIT | \
		SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | \
		SDHCI_QUIRK_BROKEN_CARD_DETECTION)

#define TEGRA_SDHCI_QUIRKS2	(SDHCI_QUIRK2_PRESET_VALUE_BROKEN | \
		SDHCI_QUIRK2_NON_STD_VOLTAGE_SWITCHING | \
		SDHCI_QUIRK2_NON_STANDARD_TUNING | \
		SDHCI_QUIRK2_NO_CALC_MAX_DISCARD_TO | \
		SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK)

/* True when the host uses delayed clock gating */
#define IS_QUIRKS2_DELAYED_CLK_GATE(host) \
		(host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE)
/* Interface voltages (uV unless noted) */
#define SDHOST_1V8_OCR_MASK	0x8
#define SDHOST_HIGH_VOLT_MIN	2700000
#define SDHOST_HIGH_VOLT_MAX	3600000
#define SDHOST_HIGH_VOLT_2V8	2800000
#define SDHOST_LOW_VOLT_MIN	1800000
#define SDHOST_LOW_VOLT_MAX	1800000
#define SDHOST_HIGH_VOLT_3V2	3200000
#define SDHOST_HIGH_VOLT_3V3	3300000

/* Clock related definitions (Hz) */
#define MAX_DIVISOR_VALUE	128
#define DEFAULT_SDHOST_FREQ	50000000
#define SDMMC_AHB_MAX_FREQ	150000000
#define SDMMC_EMC_MAX_FREQ	150000000
#define SDMMC_EMC_NOM_VOLT_FREQ	900000000

/* Tuning related definitions */
#define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8	128
#define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4	64
#define MAX_TAP_VALUES		255
#define TUNING_FREQ_COUNT	3
#define TUNING_VOLTAGES_COUNT	3
#define TUNING_RETRIES		1
#define DFS_FREQ_COUNT		2
#define NEG_MAR_CHK_WIN_COUNT	2
#define PRECISION_FOR_ESTIMATE	100000
/* Tuning core voltage requirements */
#define NOMINAL_VCORE_TUN	BIT(0)
#define BOOT_VCORE_TUN		BIT(1)
#define MIN_OVERRIDE_VCORE_TUN	BIT(2)

/* Tap cmd sysfs commands */
#define TAP_CMD_TRIM_DEFAULT_VOLTAGE	1
#define TAP_CMD_TRIM_HIGH_VOLTAGE	2
/*
 * Defines the chip-specific quirks and clock sources. For now, the clock
 * sources used vary only from chip to chip. If the allowed sources vary from
 * platform to platform, then move the clock sources list to platform data.
 * When filling the tuning_freq_list in soc_data, the number of entries should
 * be equal to TUNING_FREQ_COUNT. Depending on the number of DFS frequencies
 * supported, set the desired low, high or max frequencies and set the
 * remaining entries as 0s. The number of entries should always be equal to
 * TUNING_FREQ_COUNT in order to get the right tuning data.
 */
252 struct sdhci_tegra_soc_data {
253 const struct sdhci_pltfm_data *pdata;
255 const char *parent_clk_list[2];
256 unsigned int tuning_freq_list[TUNING_FREQ_COUNT];
258 u8 tap_hole_coeffs_count;
259 u8 tap_hole_margins_count;
260 struct tuning_t2t_coeffs *t2t_coeffs;
261 struct tap_hole_coeffs *tap_hole_coeffs;
262 struct tuning_tap_hole_margins *tap_hole_margins;
/*
 * Operations passed to tegra_sdhci_configure_regulators().
 * NOTE(review): enumerator body was lost in extraction; names reconstructed
 * from the upstream driver — confirm against the original source.
 */
enum tegra_regulator_config_ops {
	CONFIG_REG_EN,
	CONFIG_REG_DIS,
	CONFIG_REG_SET_VOLT,
};
/*
 * Indices into the DFS frequency table (gov_data->freqs[] below uses
 * TUNING_LOW_FREQ/TUNING_HIGH_FREQ).
 * NOTE(review): body lost in extraction; reconstructed from usage.
 */
enum tegra_tuning_freq {
	TUNING_LOW_FREQ,
	TUNING_HIGH_FREQ,
	TUNING_MAX_FREQ,
};
/*
 * Per-instance tap-to-tap (t2t) linear-fit coefficients (slope/intercept)
 * at nominal, max and min core voltage, used to estimate tuning values.
 */
struct tuning_t2t_coeffs {
	const char *dev_id;	/* matched against the host device name */
	int vmax;		/* max core voltage, mV */
	int vmin;		/* min core voltage, mV */
	unsigned int t2t_vnom_slope;
	unsigned int t2t_vnom_int;
	unsigned int t2t_vmax_slope;
	unsigned int t2t_vmax_int;
	unsigned int t2t_vmin_slope;
	unsigned int t2t_vmin_int;
};
/* Designated initializer helper for struct tuning_t2t_coeffs entries */
#define SET_TUNING_COEFFS(_device_id, _vmax, _vmin, _t2t_vnom_slope,	\
	_t2t_vnom_int, _t2t_vmax_slope, _t2t_vmax_int, _t2t_vmin_slope,	\
	_t2t_vmin_int)							\
{									\
	.dev_id = _device_id,						\
	.vmax = _vmax,							\
	.vmin = _vmin,							\
	.t2t_vnom_slope = _t2t_vnom_slope,				\
	.t2t_vnom_int = _t2t_vnom_int,					\
	.t2t_vmax_slope = _t2t_vmax_slope,				\
	.t2t_vmax_int = _t2t_vmax_int,					\
	.t2t_vmin_slope = _t2t_vmin_slope,				\
	.t2t_vmin_int = _t2t_vmin_int,					\
}
305 struct tuning_t2t_coeffs t11x_tuning_coeffs[] = {
306 SET_TUNING_COEFFS("sdhci-tegra.3", 1250, 950, 55, 135434,
307 73, 170493, 243, 455948),
308 SET_TUNING_COEFFS("sdhci-tegra.2", 1250, 950, 50, 129738,
309 73, 168898, 241, 453050),
310 SET_TUNING_COEFFS("sdhci-tegra.0", 1250, 950, 62, 143469,
311 82, 180096, 238, 444285),
314 struct tuning_t2t_coeffs t12x_tuning_coeffs[] = {
315 SET_TUNING_COEFFS("sdhci-tegra.3", 1150, 950, 27, 118295,
316 27, 118295, 48, 188148),
317 SET_TUNING_COEFFS("sdhci-tegra.2", 1150, 950, 29, 124427,
318 29, 124427, 54, 203707),
319 SET_TUNING_COEFFS("sdhci-tegra.0", 1150, 950, 25, 115933,
320 25, 115933, 47, 187224),
/*
 * Per-instance, per-frequency tap-hole linear-fit coefficients at nominal,
 * max and min core voltage.
 */
struct tap_hole_coeffs {
	const char *dev_id;	/* matched against the host device name */
	unsigned int freq_khz;
	unsigned int thole_vnom_slope;
	unsigned int thole_vnom_int;
	unsigned int thole_vmax_slope;
	unsigned int thole_vmax_int;
	unsigned int thole_vmin_slope;
	unsigned int thole_vmin_int;
};
/* Designated initializer helper for struct tap_hole_coeffs entries */
#define SET_TAP_HOLE_COEFFS(_device_id, _freq_khz, _thole_vnom_slope,	\
	_thole_vnom_int, _thole_vmax_slope, _thole_vmax_int,		\
	_thole_vmin_slope, _thole_vmin_int)				\
{									\
	.dev_id = _device_id,						\
	.freq_khz = _freq_khz,						\
	.thole_vnom_slope = _thole_vnom_slope,				\
	.thole_vnom_int = _thole_vnom_int,				\
	.thole_vmax_slope = _thole_vmax_slope,				\
	.thole_vmax_int = _thole_vmax_int,				\
	.thole_vmin_slope = _thole_vmin_slope,				\
	.thole_vmin_int = _thole_vmin_int,				\
}
/*
 * T114 tap-hole coefficient table.
 * NOTE(review): this table is damaged — the continuation line of most
 * SET_TAP_HOLE_COEFFS() entries (orig lines 350/352/354/358/360/362/366/
 * 368/370) and the closing "};" were lost in extraction, and stray line
 * numbers prefix each row. The dropped numeric data cannot be reconstructed
 * from this view; restore it from the original source.
 */
348 struct tap_hole_coeffs t11x_tap_hole_coeffs[] = {
349 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 200000, 765, 102357, 507,
351 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 156000, 1042, 142044, 776,
353 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 136000, 1215, 167702, 905,
355 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 81600, 1925, 284516, 1528,
356 253188, 366, 120001),
357 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 204000, 472, 53312, 318,
359 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 156000, 765, 95512, 526,
361 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 136000, 949, 121887, 656,
363 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 81600, 1901, 259035, 1334,
364 215539, 326, 100986),
365 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 411, 54495, 305,
367 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 156000, 715, 97623, 516,
369 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 136000, 905, 124579, 648,
371 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 81600, 1893, 264746, 1333,
372 221722, 354, 109880),
/*
 * T124 tap-hole coefficient table.
 * NOTE(review): damaged like t11x_tap_hole_coeffs above — several
 * continuation lines (orig 377/385/387/389/395) and the closing "};" were
 * lost in extraction; restore the dropped numeric data from the original
 * source.
 */
375 struct tap_hole_coeffs t12x_tap_hole_coeffs[] = {
376 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 200000, 1037, 106934, 1037,
378 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 136000, 1703, 186307, 1703,
379 186307, 890, 130617),
380 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 100000, 2452, 275601, 2452,
381 275601, 1264, 193957),
382 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 81600, 3090, 351666, 3090,
383 351666, 1583, 247913),
384 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 204000, 468, 36031, 468,
386 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 200000, 468, 36031, 468,
388 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 136000, 1146, 117841, 1146,
390 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 100000, 1879, 206195, 1879,
391 206195, 953, 141341),
392 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 81600, 2504, 281460, 2504,
393 281460, 1262, 194452),
394 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 874, 85243, 874,
396 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 136000, 1554, 167210, 1554,
397 167210, 793, 115672),
398 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 100000, 2290, 255734, 2290,
399 255734, 1164, 178691),
400 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 81600, 2916, 331143, 2916,
401 331143, 1480, 232373),
/* Fixed tap-hole margin per controller instance */
struct tuning_tap_hole_margins {
	const char *dev_id;	/* matched against the host device name */
	unsigned int tap_hole_margin;
};
/* Designated initializer helper for struct tuning_tap_hole_margins entries */
#define SET_TUNING_TAP_HOLE_MARGIN(_device_id, _tap_hole_margin)	\
{									\
	.dev_id = _device_id,						\
	.tap_hole_margin = _tap_hole_margin,				\
}
415 struct tuning_tap_hole_margins t12x_automotive_tap_hole_margins[] = {
416 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.3", 13),
417 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.2", 7),
418 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.0", 10),
/*
 * Core-voltage constraints for tuning at a given frequency.
 * NOTE(review): closing brace restored; confirm no additional members were
 * dropped in extraction (orig line 423).
 */
struct freq_tuning_constraints {
	unsigned int vcore_mask;
};
425 static struct freq_tuning_constraints tuning_vcore_constraints[3] = {
427 .vcore_mask = BOOT_VCORE_TUN,
430 .vcore_mask = BOOT_VCORE_TUN,
433 .vcore_mask = BOOT_VCORE_TUN,
/*
 * Classification of a tap window edge: bounded start/end or a tap hole.
 * NOTE(review): enumerator body was lost in extraction; names reconstructed
 * from the upstream driver — confirm against the original source.
 */
enum tap_win_edge_attr {
	WIN_EDGE_BOUN_START,
	WIN_EDGE_BOUN_END,
	WIN_EDGE_HOLE,
};
/*
 * One valid tap window found during tuning.
 * NOTE(review): this struct is damaged — members on orig lines 449-450
 * (presumably the window start/end taps) and 453-455 (trailing members and
 * the closing "};") were lost in extraction; restore from the original
 * source before building.
 */
448 struct tap_window_data {
451 enum tap_win_edge_attr win_start_attr;
452 enum tap_win_edge_attr win_end_attr;
/*
 * NOTE(review): the following two struct declarations are damaged by
 * extraction. struct tuning_values lost its entire body (orig lines
 * 458-463) and struct tegra_tuning_data lost several members (orig lines
 * 467, 477-479, 481) plus the closing "};". Restore from the original
 * source; the visible members are kept byte-for-byte below.
 */
457 struct tuning_values {
/* Per-frequency tuning state: coefficients, estimated/calculated values
 * and the discovered tap windows. */
465 struct tegra_tuning_data {
466 unsigned int freq_hz;
468 int nom_best_tap_value;
469 struct freq_tuning_constraints constraints;
470 struct tap_hole_coeffs *thole_coeffs;
471 struct tuning_t2t_coeffs *t2t_coeffs;
472 struct tuning_values est_values;
473 struct tuning_values calc_values;
474 struct tap_window_data *tap_data;
475 struct tap_window_data *final_tap_data;
476 u8 num_of_valid_tap_wins;
480 bool is_partial_win_valid;
483 #ifdef CONFIG_MMC_FREQ_SCALING
484 struct freq_gov_params {
486 u8 polling_interval_ms;
487 u8 active_load_threshold;
490 static struct freq_gov_params gov_params[3] = {
492 .idle_mon_cycles = 3,
493 .polling_interval_ms = 50,
494 .active_load_threshold = 25,
497 .idle_mon_cycles = 3,
498 .polling_interval_ms = 50,
499 .active_load_threshold = 25,
502 .idle_mon_cycles = 3,
503 .polling_interval_ms = 50,
504 .active_load_threshold = 25,
509 struct tegra_freq_gov_data {
510 unsigned int curr_active_load;
511 unsigned int avg_active_load;
512 unsigned int act_load_high_threshold;
513 unsigned int max_idle_monitor_cycles;
514 unsigned int curr_freq;
515 unsigned int freqs[DFS_FREQ_COUNT];
516 unsigned int freq_switch_count;
517 bool monitor_idle_load;
/* CRC and timeout error counters exposed through debugfs */
struct sdhci_tegra_sd_stats {
	unsigned int data_crc_count;
	unsigned int cmd_crc_count;
	unsigned int data_to_count;
	unsigned int cmd_to_count;
};
#ifdef CONFIG_DEBUG_FS
/*
 * Debugfs override values for tap/trim delays.
 * NOTE(review): trailing members (orig line 531), the closing "};" and the
 * matching #endif were lost in extraction; the clk_ungated member is
 * reconstructed from the upstream driver — confirm against the original
 * source.
 */
struct dbg_cfg_data {
	unsigned int tap_val;
	unsigned int trim_val;
	bool clk_ungated;
};
#endif
/*
 * NOTE(review): this span is the interior of the per-host private struct
 * (struct sdhci_tegra). Its opening line (orig 534) and a number of members
 * (orig 537, 545, 547, 550, 552, 554, 559, 561, 563, 567, 571, 580, 582,
 * 588, 592-593 — including the clock handles the inline comments refer to,
 * the closing "};" and an #endif) were lost in extraction. Restore from the
 * original source; visible members are kept byte-for-byte below.
 */
535 const struct tegra_sdhci_platform_data *plat;
536 const struct sdhci_tegra_soc_data *soc_data;
538 /* ensure atomic set clock calls */
539 struct mutex set_clock_mutex;
540 struct regulator *vdd_io_reg;
541 struct regulator *vdd_slot_reg;
542 struct regulator *vcore_reg;
543 /* Host controller instance */
544 unsigned int instance;
546 unsigned int vddio_min_uv;
548 unsigned int vddio_max_uv;
549 /* DDR and low speed modes clock */
551 /* HS200, SDR104 modes clock */
553 /* Check if ddr_clk is being used */
555 /* max clk supported by the platform */
556 unsigned int max_clk_limit;
557 /* max ddr clk supported by the platform */
558 unsigned int ddr_clk_limit;
560 bool is_rail_enabled;
562 bool is_sdmmc_emc_clk_on;
564 bool is_sdmmc_sclk_on;
565 struct sdhci_tegra_sd_stats *sd_stat_head;
566 struct notifier_block reboot_notify;
568 bool set_1v8_calib_offsets;
569 int nominal_vcore_mv;
570 int min_vcore_override_mv;
572 /* Tuning related structures and variables */
573 /* Tuning opcode to be used */
574 unsigned int tuning_opcode;
575 /* Tuning packet size */
576 unsigned int tuning_bsize;
577 /* Num of tuning freqs selected */
578 int tuning_freq_count;
579 unsigned int tap_cmd;
581 unsigned int tuning_status;
583 #define TUNING_STATUS_DONE 1
584 #define TUNING_STATUS_RETUNE 2
585 /* Freq tuning information for each sampling clock freq */
586 struct tegra_tuning_data tuning_data[DFS_FREQ_COUNT];
587 struct tegra_freq_gov_data *gov_data;
589 #ifdef CONFIG_DEBUG_FS
590 /* Override debug config data */
591 struct dbg_cfg_data dbg_cfg;
595 static struct clk *pll_c;
596 static struct clk *pll_p;
597 static unsigned long pll_c_rate;
598 static unsigned long pll_p_rate;
599 static bool vcore_overrides_allowed;
600 static bool maintain_boot_voltage;
601 static unsigned int boot_volt_req_refcount;
602 DEFINE_MUTEX(tuning_mutex);
604 static struct tegra_tuning_data *sdhci_tegra_get_tuning_data(
605 struct sdhci_host *sdhci, unsigned int clock);
606 static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
607 unsigned long desired_rate);
608 static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
609 unsigned int tap_delay);
610 static int tegra_sdhci_configure_regulators(struct sdhci_tegra *tegra_host,
611 u8 option, int min_uV, int max_uV);
612 static void sdhci_tegra_set_trim_delay(struct sdhci_host *sdhci,
613 unsigned int trim_delay);
614 static void tegra_sdhci_do_calibration(struct sdhci_host *sdhci,
615 unsigned char signal_voltage);
616 static int get_tuning_tap_hole_margins(struct sdhci_host *sdhci,
617 int t2t_tuning_value);
619 static int show_error_stats_dump(struct seq_file *s, void *data)
621 struct sdhci_host *host = s->private;
622 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
623 struct sdhci_tegra *tegra_host = pltfm_host->priv;
624 struct sdhci_tegra_sd_stats *head;
626 seq_printf(s, "ErrorStatistics:\n");
627 seq_printf(s, "DataCRC\tCmdCRC\tDataTimeout\tCmdTimeout\n");
628 head = tegra_host->sd_stat_head;
630 seq_printf(s, "%d\t%d\t%d\t%d\n", head->data_crc_count,
631 head->cmd_crc_count, head->data_to_count,
636 static int show_dfs_stats_dump(struct seq_file *s, void *data)
638 struct sdhci_host *host = s->private;
639 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
640 struct sdhci_tegra *tegra_host = pltfm_host->priv;
641 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
643 seq_printf(s, "DFS statistics:\n");
645 if (host->mmc->dev_stats != NULL)
646 seq_printf(s, "Polling_period: %d\n",
647 host->mmc->dev_stats->polling_interval);
649 if (gov_data != NULL) {
650 seq_printf(s, "cur_active_load: %d\n",
651 gov_data->curr_active_load);
652 seq_printf(s, "avg_active_load: %d\n",
653 gov_data->avg_active_load);
654 seq_printf(s, "act_load_high_threshold: %d\n",
655 gov_data->act_load_high_threshold);
656 seq_printf(s, "freq_switch_count: %d\n",
657 gov_data->freq_switch_count);
662 static int sdhci_error_stats_dump(struct inode *inode, struct file *file)
664 return single_open(file, show_error_stats_dump, inode->i_private);
667 static int sdhci_dfs_stats_dump(struct inode *inode, struct file *file)
669 return single_open(file, show_dfs_stats_dump, inode->i_private);
673 static const struct file_operations sdhci_host_fops = {
674 .open = sdhci_error_stats_dump,
677 .release = single_release,
680 static const struct file_operations sdhci_host_dfs_fops = {
681 .open = sdhci_dfs_stats_dump,
684 .release = single_release,
687 static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
691 if (unlikely(reg == SDHCI_PRESENT_STATE)) {
692 /* Use wp_gpio here instead? */
693 val = readl(host->ioaddr + reg);
694 return val | SDHCI_WRITE_PROTECT;
696 return readl(host->ioaddr + reg);
699 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
701 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
702 struct sdhci_tegra *tegra_host = pltfm_host->priv;
703 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
705 if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
706 (reg == SDHCI_HOST_VERSION))) {
707 return SDHCI_SPEC_200;
709 return readw(host->ioaddr + reg);
712 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
714 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
715 struct sdhci_tegra *tegra_host = pltfm_host->priv;
716 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
718 /* Seems like we're getting spurious timeout and crc errors, so
719 * disable signalling of them. In case of real errors software
720 * timers should take care of eventually detecting them.
722 if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
723 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
725 writel(val, host->ioaddr + reg);
727 if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
728 (reg == SDHCI_INT_ENABLE))) {
729 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
730 if (val & SDHCI_INT_CARD_INT)
734 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
738 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
740 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
741 struct sdhci_tegra *tegra_host = pltfm_host->priv;
742 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
744 if (soc_data->nvquirks & NVQUIRK_SHADOW_XFER_MODE_REG) {
746 case SDHCI_TRANSFER_MODE:
748 * Postpone this write, we must do it together with a
749 * command write that is down below.
751 pltfm_host->xfer_mode_shadow = val;
754 writel((val << 16) | pltfm_host->xfer_mode_shadow,
755 host->ioaddr + SDHCI_TRANSFER_MODE);
756 pltfm_host->xfer_mode_shadow = 0;
761 writew(val, host->ioaddr + reg);
764 #ifdef CONFIG_MMC_FREQ_SCALING
766 static bool disable_scaling __read_mostly;
767 module_param(disable_scaling, bool, 0644);
/*
 * Dynamic frequency calculation.
 * The active load for the current period and the average active load
 * are calculated at the end of each polling interval.
 *
 * If the current active load is greater than the threshold load, then the
 * frequency is boosted (156 MHz).
 * If the active load is lower than the threshold, then the load is monitored
 * for a max of three cycles before reducing the frequency (82 MHz). If the
 * average active load is lower, then the monitoring cycles are reduced.
 *
 * The active load threshold value for both eMMC and SDIO is set to 25, which
 * is found to give the optimal power and performance. The polling interval is
 * set to 50 msec.
 *
 * The polling interval and active load threshold values can be changed by
 * the user through sysfs.
 */
787 static unsigned long calculate_mmc_target_freq(
788 struct tegra_freq_gov_data *gov_data)
790 unsigned long desired_freq = gov_data->curr_freq;
791 unsigned int type = MMC_TYPE_MMC;
793 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
794 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
795 gov_data->monitor_idle_load = false;
796 gov_data->max_idle_monitor_cycles =
797 gov_params[type].idle_mon_cycles;
799 if (gov_data->monitor_idle_load) {
800 if (!gov_data->max_idle_monitor_cycles) {
801 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
802 gov_data->max_idle_monitor_cycles =
803 gov_params[type].idle_mon_cycles;
805 gov_data->max_idle_monitor_cycles--;
808 gov_data->monitor_idle_load = true;
809 gov_data->max_idle_monitor_cycles *=
810 gov_data->avg_active_load;
811 gov_data->max_idle_monitor_cycles /= 100;
818 static unsigned long calculate_sdio_target_freq(
819 struct tegra_freq_gov_data *gov_data)
821 unsigned long desired_freq = gov_data->curr_freq;
822 unsigned int type = MMC_TYPE_SDIO;
824 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
825 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
826 gov_data->monitor_idle_load = false;
827 gov_data->max_idle_monitor_cycles =
828 gov_params[type].idle_mon_cycles;
830 if (gov_data->monitor_idle_load) {
831 if (!gov_data->max_idle_monitor_cycles) {
832 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
833 gov_data->max_idle_monitor_cycles =
834 gov_params[type].idle_mon_cycles;
836 gov_data->max_idle_monitor_cycles--;
839 gov_data->monitor_idle_load = true;
840 gov_data->max_idle_monitor_cycles *=
841 gov_data->avg_active_load;
842 gov_data->max_idle_monitor_cycles /= 100;
849 static unsigned long calculate_sd_target_freq(
850 struct tegra_freq_gov_data *gov_data)
852 unsigned long desired_freq = gov_data->curr_freq;
853 unsigned int type = MMC_TYPE_SD;
855 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
856 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
857 gov_data->monitor_idle_load = false;
858 gov_data->max_idle_monitor_cycles =
859 gov_params[type].idle_mon_cycles;
861 if (gov_data->monitor_idle_load) {
862 if (!gov_data->max_idle_monitor_cycles) {
863 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
864 gov_data->max_idle_monitor_cycles =
865 gov_params[type].idle_mon_cycles;
867 gov_data->max_idle_monitor_cycles--;
870 gov_data->monitor_idle_load = true;
871 gov_data->max_idle_monitor_cycles *=
872 gov_data->avg_active_load;
873 gov_data->max_idle_monitor_cycles /= 100;
880 static unsigned long sdhci_tegra_get_target_freq(struct sdhci_host *sdhci,
881 struct devfreq_dev_status *dfs_stats)
883 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
884 struct sdhci_tegra *tegra_host = pltfm_host->priv;
885 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
886 unsigned long freq = sdhci->mmc->actual_clock;
889 dev_err(mmc_dev(sdhci->mmc),
890 "No gov data. Continue using current freq %ld", freq);
898 * If clock gating is enabled and clock is currently disabled, then
901 if (!tegra_host->clk_enabled)
904 if (dfs_stats->total_time) {
905 gov_data->curr_active_load = (dfs_stats->busy_time * 100) /
906 dfs_stats->total_time;
908 gov_data->curr_active_load = 0;
911 gov_data->avg_active_load += gov_data->curr_active_load;
912 gov_data->avg_active_load >>= 1;
914 if (sdhci->mmc->card) {
915 if (sdhci->mmc->card->type == MMC_TYPE_SDIO)
916 freq = calculate_sdio_target_freq(gov_data);
917 else if (sdhci->mmc->card->type == MMC_TYPE_MMC)
918 freq = calculate_mmc_target_freq(gov_data);
919 else if (sdhci->mmc->card->type == MMC_TYPE_SD)
920 freq = calculate_sd_target_freq(gov_data);
921 if (gov_data->curr_freq != freq)
922 gov_data->freq_switch_count++;
923 gov_data->curr_freq = freq;
929 static int sdhci_tegra_freq_gov_init(struct sdhci_host *sdhci)
931 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
932 struct sdhci_tegra *tegra_host = pltfm_host->priv;
937 if (!((sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
938 (sdhci->mmc->ios.timing == MMC_TIMING_MMC_HS200))) {
939 dev_info(mmc_dev(sdhci->mmc),
940 "DFS not required for current operating mode\n");
944 if (!tegra_host->gov_data) {
945 tegra_host->gov_data = devm_kzalloc(mmc_dev(sdhci->mmc),
946 sizeof(struct tegra_freq_gov_data), GFP_KERNEL);
947 if (!tegra_host->gov_data) {
948 dev_err(mmc_dev(sdhci->mmc),
949 "Failed to allocate memory for dfs data\n");
954 /* Find the supported frequencies */
955 dev_info(mmc_dev(sdhci->mmc), "DFS supported freqs");
956 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
957 freq = tegra_host->tuning_data[i].freq_hz;
959 * Check the nearest possible clock with pll_c and pll_p as
960 * the clock sources. Choose the higher frequency.
962 tegra_host->gov_data->freqs[i] =
963 get_nearest_clock_freq(pll_c_rate, freq);
964 freq = get_nearest_clock_freq(pll_p_rate, freq);
965 if (freq > tegra_host->gov_data->freqs[i])
966 tegra_host->gov_data->freqs[i] = freq;
967 pr_err("%d,", tegra_host->gov_data->freqs[i]);
970 tegra_host->gov_data->monitor_idle_load = false;
971 tegra_host->gov_data->curr_freq = sdhci->mmc->actual_clock;
972 if (sdhci->mmc->card) {
973 type = sdhci->mmc->card->type;
974 sdhci->mmc->dev_stats->polling_interval =
975 gov_params[type].polling_interval_ms;
976 tegra_host->gov_data->act_load_high_threshold =
977 gov_params[type].active_load_threshold;
978 tegra_host->gov_data->max_idle_monitor_cycles =
979 gov_params[type].idle_mon_cycles;
987 static unsigned int tegra_sdhci_get_cd(struct sdhci_host *sdhci)
989 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
990 struct sdhci_tegra *tegra_host = pltfm_host->priv;
992 return tegra_host->card_present;
995 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
997 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
998 struct sdhci_tegra *tegra_host = pltfm_host->priv;
999 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
1001 if (!gpio_is_valid(plat->wp_gpio))
1004 return gpio_get_value_cansleep(plat->wp_gpio);
1007 static int tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
1011 u32 vndr_ctrl, trim_delay, best_tap_value;
1012 struct tegra_tuning_data *tuning_data;
1013 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1014 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1015 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
1017 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1019 /* Select Bus Speed Mode for host
1020 * For HS200 we need to set UHS_MODE_SEL to SDR104.
1021 * It works as SDR 104 in SD 4-bit mode and HS200 in eMMC 8-bit mode.
1022 * SDR50 mode timing seems to have issues. Programming SDR104
1023 * mode for SDR50 mode for reliable transfers over interface.
1025 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1027 case MMC_TIMING_UHS_SDR12:
1028 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1030 case MMC_TIMING_UHS_SDR25:
1031 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1033 case MMC_TIMING_UHS_SDR50:
1034 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1036 case MMC_TIMING_UHS_SDR104:
1037 case MMC_TIMING_MMC_HS200:
1038 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1040 case MMC_TIMING_UHS_DDR50:
1041 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1045 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1047 if (uhs == MMC_TIMING_UHS_DDR50) {
1048 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1049 clk &= ~(0xFF << SDHCI_DIVIDER_SHIFT);
1050 clk |= 1 << SDHCI_DIVIDER_SHIFT;
1051 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1053 /* Set the ddr mode trim delay if required */
1054 if (plat->ddr_trim_delay != -1) {
1055 trim_delay = plat->ddr_trim_delay;
1056 vndr_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
1057 vndr_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
1058 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1059 vndr_ctrl |= (trim_delay <<
1060 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1061 sdhci_writel(host, vndr_ctrl, SDHCI_VNDR_CLK_CTRL);
1064 /* Set the best tap value based on timing */
1065 if (((uhs == MMC_TIMING_MMC_HS200) ||
1066 (uhs == MMC_TIMING_UHS_SDR104) ||
1067 (uhs == MMC_TIMING_UHS_SDR50)) &&
1068 (tegra_host->tuning_status == TUNING_STATUS_DONE)) {
1069 tuning_data = sdhci_tegra_get_tuning_data(host,
1070 host->mmc->ios.clock);
1071 best_tap_value = (tegra_host->tap_cmd ==
1072 TAP_CMD_TRIM_HIGH_VOLTAGE) ?
1073 tuning_data->nom_best_tap_value :
1074 tuning_data->best_tap_value;
1076 best_tap_value = tegra_host->plat->tap_delay;
1078 vndr_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
1079 vndr_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK <<
1080 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1081 vndr_ctrl |= (best_tap_value <<
1082 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1083 sdhci_writel(host, vndr_ctrl, SDHCI_VNDR_CLK_CTRL);
/*
 * Platform status-notify callback for card-presence changes.
 * Without a platform status() hook, card_present directly gates rescan:
 * insertion re-enables rescan and kicks detection immediately; removal
 * disables rescan. With a status() hook, the live slot state is compared
 * against the cached state and detection runs only on a change, with a
 * 2.5s debounce for removable (non-built-in) cards.
 * NOTE(review): extraction gaps hide some braces/else lines here —
 * confirm control flow against the full file.
 */
1087 static void sdhci_status_notify_cb(int card_present, void *dev_id)
1089 struct sdhci_host *sdhci = (struct sdhci_host *)dev_id;
1090 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1091 struct tegra_sdhci_platform_data *plat;
1092 unsigned int status, oldstat;
1094 pr_debug("%s: card_present %d\n", mmc_hostname(sdhci->mmc),
1097 plat = pdev->dev.platform_data;
1098 if (!plat->mmc_data.status) {
1099 if (card_present == 1) {
/* Card inserted: allow rescans again and trigger detection now. */
1100 sdhci->mmc->rescan_disable = 0;
1101 mmc_detect_change(sdhci->mmc, 0);
1102 } else if (card_present == 0) {
/* Card removed: suppress further rescans until re-insertion. */
1103 sdhci->mmc->detect_change = 0;
1104 sdhci->mmc->rescan_disable = 1;
/* Platform hook path: query live state and compare with cache. */
1109 status = plat->mmc_data.status(mmc_dev(sdhci->mmc));
1111 oldstat = plat->mmc_data.card_present;
1112 plat->mmc_data.card_present = status;
1113 if (status ^ oldstat) {
1114 pr_debug("%s: Slot status change detected (%d -> %d)\n",
1115 mmc_hostname(sdhci->mmc), oldstat, status);
/* Debounce insertions of removable cards; built-in needs no delay. */
1116 if (status && !plat->mmc_data.built_in)
1117 mmc_detect_change(sdhci->mmc, (5 * HZ) / 2);
1119 mmc_detect_change(sdhci->mmc, 0);
/*
 * Threaded IRQ handler for the card-detect GPIO.
 * The CD GPIO is active-low (presence == GPIO reads 0). On insertion the
 * slot regulators are enabled; on removal they are disabled and a retune
 * is forced so tuning reruns for the next card. Finally the SDHCI card
 * tasklet is scheduled to process the change.
 */
1123 static irqreturn_t carddetect_irq(int irq, void *data)
1125 struct sdhci_host *sdhost = (struct sdhci_host *)data;
1126 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
1127 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1128 struct platform_device *pdev = to_platform_device(mmc_dev(sdhost->mmc));
1129 struct tegra_sdhci_platform_data *plat;
1132 plat = pdev->dev.platform_data;
/* Active-low card detect: present when the GPIO reads 0. */
1134 tegra_host->card_present =
1135 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
1137 if (tegra_host->card_present) {
1138 err = tegra_sdhci_configure_regulators(tegra_host,
1139 CONFIG_REG_EN, 0, 0);
1141 dev_err(mmc_dev(sdhost->mmc),
1142 "Failed to enable card regulators %d\n", err);
1144 err = tegra_sdhci_configure_regulators(tegra_host,
1145 CONFIG_REG_DIS, 0 , 0);
1147 dev_err(mmc_dev(sdhost->mmc),
1148 "Failed to disable card regulators %d\n", err);
1150 * Set retune request as tuning should be done next time
1151 * a card is inserted.
1153 tegra_host->tuning_status = TUNING_STATUS_RETUNE;
1154 tegra_host->force_retune = true;
/* Let the SDHCI core process the insertion/removal. */
1157 tasklet_schedule(&sdhost->card_tasklet);
/*
 * Post-reset fixup: reprogram Tegra vendor registers after a full
 * controller reset (SDHCI_RESET_ALL only; partial resets return early).
 * Clears the SD error/frequency-switch statistics, re-applies the
 * SoC-quirk-dependent vendor clock control bits (pad pipe clock, SPI
 * mode clock, feedback clock, tap/trim delays, SDR50 tuning), the
 * vendor misc control bits (SD 3.0, SDR104/SDR50/DDR50 support,
 * infinite erase timeout, pipe stages, external loopback), and finally
 * masks any UHS/HS200 capabilities disabled via platform data.
 */
1161 static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
1165 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1166 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1167 struct tegra_tuning_data *tuning_data;
1168 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1169 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
1170 unsigned int best_tap_value;
/* Vendor state only needs restoring after a full reset. */
1172 if (!(mask & SDHCI_RESET_ALL))
/* Reset accumulated CRC/timeout error statistics. */
1175 if (tegra_host->sd_stat_head != NULL) {
1176 tegra_host->sd_stat_head->data_crc_count = 0;
1177 tegra_host->sd_stat_head->cmd_crc_count = 0;
1178 tegra_host->sd_stat_head->data_to_count = 0;
1179 tegra_host->sd_stat_head->cmd_to_count = 0;
1182 if (tegra_host->gov_data != NULL)
1183 tegra_host->gov_data->freq_switch_count = 0;
/* Rebuild the vendor clock control register per SoC quirks. */
1185 vendor_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
1186 if (soc_data->nvquirks & NVQUIRK_ENABLE_PADPIPE_CLKEN) {
1188 SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE;
1190 if (soc_data->nvquirks & NVQUIRK_DISABLE_SPI_MODE_CLKEN) {
1192 ~SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE;
1194 if (soc_data->nvquirks & NVQUIRK_EN_FEEDBACK_CLK) {
1196 ~SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
1198 vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_INTERNAL_CLK;
/*
 * Re-apply the tuned tap value only if tuning already completed and
 * power was kept across the reset; otherwise fall back to the
 * platform default tap delay.
 */
1201 if (soc_data->nvquirks & NVQUIRK_SET_TAP_DELAY) {
1202 if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
1203 && (host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
1204 tuning_data = sdhci_tegra_get_tuning_data(host,
1205 host->mmc->ios.clock);
1206 best_tap_value = (tegra_host->tap_cmd ==
1207 TAP_CMD_TRIM_HIGH_VOLTAGE) ?
1208 tuning_data->nom_best_tap_value :
1209 tuning_data->best_tap_value;
1211 best_tap_value = tegra_host->plat->tap_delay;
1213 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK <<
1214 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1215 vendor_ctrl |= (best_tap_value <<
1216 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1219 if (soc_data->nvquirks & NVQUIRK_SET_TRIM_DELAY) {
1220 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
1221 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1222 vendor_ctrl |= (plat->trim_delay <<
1223 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1225 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50_TUNING)
1226 vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_SDR50_TUNING;
1227 sdhci_writel(host, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/* Rebuild the vendor misc control register per SoC quirks. */
1229 misc_ctrl = sdhci_readl(host, SDHCI_VNDR_MISC_CTRL);
1230 if (soc_data->nvquirks & NVQUIRK_ENABLE_SD_3_0)
1231 misc_ctrl |= SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0;
1232 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) {
1234 SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT;
1236 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) {
1238 SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT;
1240 /* Enable DDR mode support only for SDMMC4 */
1241 if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) {
1242 if (tegra_host->instance == 3) {
1244 SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT;
1247 if (soc_data->nvquirks & NVQUIRK_INFINITE_ERASE_TIMEOUT) {
1249 SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT;
1251 if (soc_data->nvquirks & NVQUIRK_SET_PIPE_STAGES_MASK_0)
1252 misc_ctrl &= ~SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK;
1254 /* External loopback is valid for sdmmc3 only */
1255 if ((soc_data->nvquirks & NVQUIRK_DISABLE_EXTERNAL_LOOPBACK) &&
1256 (tegra_host->instance == 2)) {
1257 if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
1258 && (host->mmc->pm_flags &
1259 MMC_PM_KEEP_POWER)) {
1261 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
1264 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
1267 sdhci_writel(host, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
1269 if (soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CMD23)
1270 host->flags &= ~SDHCI_AUTO_CMD23;
1272 /* Mask the support for any UHS modes if specified */
1273 if (plat->uhs_mask & MMC_UHS_MASK_SDR104)
1274 host->mmc->caps &= ~MMC_CAP_UHS_SDR104;
1276 if (plat->uhs_mask & MMC_UHS_MASK_DDR50)
1277 host->mmc->caps &= ~MMC_CAP_UHS_DDR50;
1279 if (plat->uhs_mask & MMC_UHS_MASK_SDR50)
1280 host->mmc->caps &= ~MMC_CAP_UHS_SDR50;
1282 if (plat->uhs_mask & MMC_UHS_MASK_SDR25)
1283 host->mmc->caps &= ~MMC_CAP_UHS_SDR25;
1285 if (plat->uhs_mask & MMC_UHS_MASK_SDR12)
1286 host->mmc->caps &= ~MMC_CAP_UHS_SDR12;
/* HS200 can be disabled either at build time or via platform data. */
1288 #ifdef CONFIG_MMC_SDHCI_TEGRA_HS200_DISABLE
1289 host->mmc->caps2 &= ~MMC_CAP2_HS200;
1291 if (plat->uhs_mask & MMC_MASK_HS200)
1292 host->mmc->caps2 &= ~MMC_CAP2_HS200;
/*
 * Program the data bus width in SDHCI_HOST_CONTROL.
 * 8-bit mode is selected only when both the platform declares an 8-bit
 * slot (plat->is_8bit) and the core requests MMC_BUS_WIDTH_8; otherwise
 * 8-bit is cleared and 4-bit is set or cleared per the request
 * (anything else falls back to 1-bit).
 */
1296 static int tegra_sdhci_buswidth(struct sdhci_host *sdhci, int bus_width)
1298 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1299 const struct tegra_sdhci_platform_data *plat;
1302 plat = pdev->dev.platform_data;
1304 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL);
1305 if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
1306 ctrl &= ~SDHCI_CTRL_4BITBUS;
1307 ctrl |= SDHCI_CTRL_8BITBUS;
1309 ctrl &= ~SDHCI_CTRL_8BITBUS;
1310 if (bus_width == MMC_BUS_WIDTH_4)
1311 ctrl |= SDHCI_CTRL_4BITBUS;
1313 ctrl &= ~SDHCI_CTRL_4BITBUS;
1315 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL);
1320 * Calculation of nearest clock frequency for desired rate:
1321 * Get the divisor value, div = p / d_rate
1322 * 1. If it is nearer to ceil(p/d_rate) then increment the div value by 0.5 and
1323 * nearest_rate, i.e. result = p / (div + 0.5) = (p << 1)/((div << 1) + 1).
1324 * 2. If not, result = p / div
1325 * As the nearest clk freq should be <= to desired_rate,
1326 * 3. If result > desired_rate then increment the div by 0.5
1327 * and do, (p << 1)/((div << 1) + 1)
1328 * 4. Else return result
1329 * Here, if conditions 1 & 3 are both satisfied then, to keep track of the div value,
1330 * defined index variable.
/*
 * Compute the closest achievable clock rate <= desired_rate from
 * pll_rate using an integer divisor with optional half steps
 * ((p << 1) / ((div << 1) + 1) models div + 0.5). Divisors are
 * capped at MAX_DIVISOR_VALUE. See the algorithm description in the
 * comment block above this function.
 */
1332 static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
1333 unsigned long desired_rate)
1335 unsigned long result;
1339 div = pll_rate / desired_rate;
1340 if (div > MAX_DIVISOR_VALUE) {
/* Divisor saturated: accept the lowest reachable frequency. */
1341 div = MAX_DIVISOR_VALUE;
1342 result = pll_rate / div;
/* Round to div + 0.5 when the remainder crosses the midpoint. */
1344 if ((pll_rate % desired_rate) >= (desired_rate / 2))
1345 result = (pll_rate << 1) / ((div << 1) + index++);
1347 result = pll_rate / div;
1349 if (desired_rate < result) {
1351 * Trying to get lower clock freq than desired clock,
1352 * by increasing the divisor value by 0.5
1354 result = (pll_rate << 1) / ((div << 1) + index);
/*
 * Select the better parent PLL (pll_p vs pll_c) for the SDMMC clock at
 * the requested rate. Each candidate's nearest achievable frequency is
 * computed; the parent producing the higher rate wins (with a special
 * case for low rates where both candidates overshoot — then the lower
 * of the two is taken). The host clock is bumped to DEFAULT_SDHOST_FREQ
 * around the reparent to keep the rate legal during the switch.
 * Skipped entirely on FPGA platforms.
 */
1361 static void tegra_sdhci_clock_set_parent(struct sdhci_host *host,
1362 unsigned long desired_rate)
1364 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1365 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1366 struct clk *parent_clk;
1367 unsigned long pll_c_freq;
1368 unsigned long pll_p_freq;
1371 #ifdef CONFIG_TEGRA_FPGA_PLATFORM
1375 * Currently pll_p and pll_c are used as clock sources for SDMMC. If clk
1376 * rate is missing for either of them, then no selection is needed and
1377 * the default parent is used.
1379 if (!pll_c_rate || !pll_p_rate)
1382 pll_c_freq = get_nearest_clock_freq(pll_c_rate, desired_rate);
1383 pll_p_freq = get_nearest_clock_freq(pll_p_rate, desired_rate);
1386 * For low freq requests, both the desired rates might be higher than
1387 * the requested clock frequency. In such cases, select the parent
1388 * with the lower frequency rate.
1390 if ((pll_c_freq > desired_rate) && (pll_p_freq > desired_rate)) {
1391 if (pll_p_freq <= pll_c_freq) {
1392 desired_rate = pll_p_freq;
1395 desired_rate = pll_c_freq;
1398 rc = clk_set_rate(pltfm_host->clk, desired_rate);
/* Reparent only when the currently-tracked parent differs. */
1401 if (pll_c_freq > pll_p_freq) {
1402 if (!tegra_host->is_parent_pllc) {
1404 tegra_host->is_parent_pllc = true;
/* Safe intermediate rate while switching parents. */
1405 clk_set_rate(pltfm_host->clk, DEFAULT_SDHOST_FREQ);
1408 } else if (tegra_host->is_parent_pllc) {
1410 tegra_host->is_parent_pllc = false;
1414 rc = clk_set_parent(pltfm_host->clk, parent_clk);
1416 pr_err("%s: failed to set pll parent clock %d\n",
1417 mmc_hostname(host->mmc), rc);
/*
 * Translate the card clock request into a controller clock rate and
 * apply it. DDR50 requires the controller clock at double the card
 * clock (optionally capped by ddr_clk_limit); SDR50 runs at the first
 * tuning frequency; the result is clamped to max_clk_limit and the
 * clock framework's maximum. After setting the rate, sdhci->max_clk is
 * refreshed from the actual achieved rate (fixed at 26 MHz on FPGA).
 * With MMC_FREQ_SCALING, a previously-tuned tap value is re-applied
 * for the new frequency.
 */
1420 static void tegra_sdhci_set_clk_rate(struct sdhci_host *sdhci,
1423 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1424 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1425 unsigned int clk_rate;
1426 #ifdef CONFIG_MMC_FREQ_SCALING
1427 unsigned int tap_value;
1428 struct tegra_tuning_data *tuning_data;
1431 if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_DDR50) {
1433 * In ddr mode, tegra sdmmc controller clock frequency
1434 * should be double the card clock frequency.
1436 if (tegra_host->ddr_clk_limit)
1437 clk_rate = tegra_host->ddr_clk_limit * 2;
1439 clk_rate = clock * 2;
/* SDR50 always runs at the first entry of the tuning freq list. */
1444 if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR50)
1445 clk_rate = tegra_host->soc_data->tuning_freq_list[0];
/* Clamp to the platform limit, then to the clock framework max. */
1447 if (tegra_host->max_clk_limit &&
1448 (clk_rate > tegra_host->max_clk_limit))
1449 clk_rate = tegra_host->max_clk_limit;
1451 if (clk_rate > clk_get_max_rate(pltfm_host->clk))
1452 clk_rate = clk_get_max_rate(pltfm_host->clk);
1454 tegra_sdhci_clock_set_parent(sdhci, clk_rate);
1455 clk_set_rate(pltfm_host->clk, clk_rate);
1456 sdhci->max_clk = clk_get_rate(pltfm_host->clk);
1458 /* FPGA supports 26MHz of clock for SDMMC. */
1459 if (tegra_platform_is_fpga())
1460 sdhci->max_clk = 26000000;
1462 #ifdef CONFIG_MMC_FREQ_SCALING
1463 /* Set the tap delay if tuning is done and dfs is enabled */
1464 if (sdhci->mmc->df &&
1465 (tegra_host->tuning_status == TUNING_STATUS_DONE)) {
1466 tuning_data = sdhci_tegra_get_tuning_data(sdhci, clock);
1467 tap_value = (tegra_host->tap_cmd == TAP_CMD_TRIM_HIGH_VOLTAGE) ?
1468 tuning_data->nom_best_tap_value :
1469 tuning_data->best_tap_value;
1470 sdhci_tegra_set_tap_delay(sdhci, tap_value);
/*
 * Gate/ungate the SDMMC clocks, serialized by set_clock_mutex.
 * Enable path (clock != 0, clocks currently off): take a runtime-PM
 * reference, enable the module clock, set the SDMMC clock enable bit in
 * the vendor clock control register, program the rate, and enable the
 * optional EMC and SCLK helper clocks. Disable path (clock == 0, clocks
 * currently on): the reverse order, dropping the runtime-PM reference
 * last. The is_clk_on flag mirrors clk_enabled for the SDHCI core.
 */
1475 static void tegra_sdhci_set_clock(struct sdhci_host *sdhci, unsigned int clock)
1477 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1478 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1479 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1483 mutex_lock(&tegra_host->set_clock_mutex);
1484 pr_debug("%s %s %u enabled=%u\n", __func__,
1485 mmc_hostname(sdhci->mmc), clock, tegra_host->clk_enabled);
1487 if (!tegra_host->clk_enabled) {
/* Keep the device powered while its clocks are running. */
1488 pm_runtime_get_sync(&pdev->dev);
1489 ret = clk_prepare_enable(pltfm_host->clk);
1491 dev_err(mmc_dev(sdhci->mmc),
1492 "clock enable is failed, ret: %d\n", ret);
1495 tegra_host->clk_enabled = true;
1496 sdhci->is_clk_on = tegra_host->clk_enabled;
1497 ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
1498 ctrl |= SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
1499 sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
1501 tegra_sdhci_set_clk_rate(sdhci, clock);
/* Optional bandwidth helper clocks, enabled at most once. */
1503 if (tegra_host->emc_clk && (!tegra_host->is_sdmmc_emc_clk_on)) {
1504 ret = clk_prepare_enable(tegra_host->emc_clk);
1506 dev_err(mmc_dev(sdhci->mmc),
1507 "clock enable is failed, ret: %d\n", ret);
1510 tegra_host->is_sdmmc_emc_clk_on = true;
1512 if (tegra_host->sclk && (!tegra_host->is_sdmmc_sclk_on)) {
1513 ret = clk_prepare_enable(tegra_host->sclk);
1515 dev_err(mmc_dev(sdhci->mmc),
1516 "clock enable is failed, ret: %d\n", ret);
1519 tegra_host->is_sdmmc_sclk_on = true;
1521 } else if (!clock && tegra_host->clk_enabled) {
/* Disable path: tear down in reverse order of enable. */
1522 if (tegra_host->emc_clk && tegra_host->is_sdmmc_emc_clk_on) {
1523 clk_disable_unprepare(tegra_host->emc_clk);
1524 tegra_host->is_sdmmc_emc_clk_on = false;
1526 if (tegra_host->sclk && tegra_host->is_sdmmc_sclk_on) {
1527 clk_disable_unprepare(tegra_host->sclk);
1528 tegra_host->is_sdmmc_sclk_on = false;
1530 ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
1531 ctrl &= ~SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
1532 sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
1533 clk_disable_unprepare(pltfm_host->clk);
1534 tegra_host->clk_enabled = false;
1535 sdhci->is_clk_on = tegra_host->clk_enabled;
1536 pm_runtime_put_sync(&pdev->dev);
1538 mutex_unlock(&tegra_host->set_clock_mutex);
/*
 * Run the pad auto-calibration sequence for the current signal voltage.
 * Skipped on SDMMC4 or entirely when the respective quirks are set.
 * Sequence: configure SDMEMCOMPPADCTRL (VREF select, optional
 * E_INPUT/E_PWRD), start auto-cal in SDMMC_AUTO_CAL_CONFIG with
 * voltage-specific pull-up/pull-down offsets from platform data, poll
 * until the ACTIVE bit clears (bounded by `timeout`), then undo the
 * E_INPUT/E_PWRD override. On SoCs with the SET_DRIVE_STRENGTH quirk,
 * auto-cal is disabled afterwards and the calibrated pull-down code is
 * copied into the pinmux drive registers (pull-up derived by a fixed
 * adjustment, clamped below TEGRA_MAX_PULL).
 */
1541 static void tegra_sdhci_do_calibration(struct sdhci_host *sdhci,
1542 unsigned char signal_voltage)
1545 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1546 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1547 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1548 unsigned int timeout = 10;
1549 unsigned int calib_offsets = 0;
1551 /* No Calibration for sdmmc4 */
1552 if (unlikely(soc_data->nvquirks & NVQUIRK_DISABLE_SDMMC4_CALIB) &&
1553 (tegra_host->instance == 3))
1556 if (unlikely(soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CALIBRATION))
1559 val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
1560 val &= ~SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK;
1561 if (soc_data->nvquirks & NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD)
1562 val |= SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
1564 sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
1566 /* Enable Auto Calibration*/
1567 val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
1568 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
1569 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
1570 if (unlikely(soc_data->nvquirks & NVQUIRK_SET_CALIBRATION_OFFSETS)) {
/* Offsets are packed per voltage: PD in bits 8-15, PU in bits 0-7. */
1571 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1572 calib_offsets = tegra_host->plat->calib_3v3_offsets;
1573 else if (signal_voltage == MMC_SIGNAL_VOLTAGE_180)
1574 calib_offsets = tegra_host->plat->calib_1v8_offsets;
1575 if (calib_offsets) {
1576 /* Program Auto cal PD offset(bits 8:14) */
1578 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
1579 val |= (((calib_offsets >> 8) & 0xFF) <<
1580 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
1581 /* Program Auto cal PU offset(bits 0:6) */
1583 val |= (calib_offsets & 0xFF);
1586 sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
1588 /* Wait until the calibration is done */
1590 if (!(sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) &
1591 SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE))
/* Poll timed out: calibration never finished. */
1599 dev_err(mmc_dev(sdhci->mmc), "Auto calibration failed\n");
/* Drop the temporary E_INPUT/E_PWRD pad override. */
1601 if (soc_data->nvquirks & NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD) {
1602 val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
1603 val &= ~SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
1604 sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
1607 if (unlikely(soc_data->nvquirks & NVQUIRK_SET_DRIVE_STRENGTH)) {
1608 unsigned int pulldown_code;
1609 unsigned int pullup_code;
1613 /* Disable Auto calibration */
1614 val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
1615 val &= ~SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
1616 sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
1618 pg = tegra_drive_get_pingroup(mmc_dev(sdhci->mmc));
1620 /* Get the pull down codes from auto cal status reg */
1622 sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) >>
1623 SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET);
1624 /* Set the pull down in the pinmux reg */
1625 err = tegra_drive_pinmux_set_pull_down(pg,
1628 dev_err(mmc_dev(sdhci->mmc),
1629 "Failed to set pulldown codes %d err %d\n",
1630 pulldown_code, err);
1632 /* Calculate the pull up codes */
1633 pullup_code = pulldown_code + PULLUP_ADJUSTMENT_OFFSET;
1634 if (pullup_code >= TEGRA_MAX_PULL)
1635 pullup_code = TEGRA_MAX_PULL - 1;
1636 /* Set the pull up code in the pinmux reg */
1637 err = tegra_drive_pinmux_set_pull_up(pg, pullup_code);
1639 dev_err(mmc_dev(sdhci->mmc),
1640 "Failed to set pullup codes %d err %d\n",
/*
 * Switch the I/O signalling voltage between 3.3V and 1.8V.
 * Sets/clears SDHCI_CTRL_VDD_180 in HOST_CONTROL2 and then moves the
 * vddio rail via the regulator helper. A failed switch to 1.8V is
 * reverted back to the 3.3V range so the slot stays usable.
 */
1646 static int tegra_sdhci_signal_voltage_switch(struct sdhci_host *sdhci,
1647 unsigned int signal_voltage)
1649 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1650 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1651 unsigned int min_uV = tegra_host->vddio_min_uv;
1652 unsigned int max_uV = tegra_host->vddio_max_uv;
1653 unsigned int rc = 0;
1657 ctrl = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
1658 if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1659 ctrl |= SDHCI_CTRL_VDD_180;
1660 min_uV = SDHOST_LOW_VOLT_MIN;
1661 max_uV = SDHOST_LOW_VOLT_MAX;
1662 } else if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1663 if (ctrl & SDHCI_CTRL_VDD_180)
1664 ctrl &= ~SDHCI_CTRL_VDD_180;
1667 /* Check if the slot can support the required voltage */
1668 if (min_uV > tegra_host->vddio_max_uv)
1671 /* Set/clear the 1.8V signalling */
1672 sdhci_writew(sdhci, ctrl, SDHCI_HOST_CONTROL2);
1674 /* Switch the I/O rail voltage */
1675 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_SET_VOLT,
1677 if (rc && (signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
/* 1.8V switch failed; fall back to the 3.3V window. */
1678 dev_err(mmc_dev(sdhci->mmc),
1679 "setting 1.8V failed %d. Revert to 3.3V\n", rc);
1680 rc = tegra_sdhci_configure_regulators(tegra_host,
1681 CONFIG_REG_SET_VOLT, SDHOST_HIGH_VOLT_MIN,
1682 SDHOST_HIGH_VOLT_MAX);
/*
 * Single entry point for slot/IO regulator management.
 * option selects the action: CONFIG_REG_EN enables the slot and IO
 * rails (idempotent via is_rail_enabled), CONFIG_REG_DIS disables
 * them in reverse order, CONFIG_REG_SET_VOLT changes the IO rail
 * voltage to [min_uV, max_uV]. Regulator pointers may be NULL on
 * boards without the corresponding rail; those steps are skipped.
 * NOTE(review): only the last regulator call's return code survives
 * in rc — earlier failures on the same path are overwritten.
 */
1688 static int tegra_sdhci_configure_regulators(struct sdhci_tegra *tegra_host,
1689 u8 option, int min_uV, int max_uV)
1695 if (!tegra_host->is_rail_enabled) {
1696 if (tegra_host->vdd_slot_reg)
1697 rc = regulator_enable(tegra_host->vdd_slot_reg);
1698 if (tegra_host->vdd_io_reg)
1699 rc = regulator_enable(tegra_host->vdd_io_reg);
1700 tegra_host->is_rail_enabled = true;
1703 case CONFIG_REG_DIS:
1704 if (tegra_host->is_rail_enabled) {
1705 if (tegra_host->vdd_io_reg)
1706 rc = regulator_disable(tegra_host->vdd_io_reg);
1707 if (tegra_host->vdd_slot_reg)
1708 rc = regulator_disable(
1709 tegra_host->vdd_slot_reg);
1710 tegra_host->is_rail_enabled = false;
1713 case CONFIG_REG_SET_VOLT:
1714 if (tegra_host->vdd_io_reg)
1715 rc = regulator_set_voltage(tegra_host->vdd_io_reg,
1719 pr_err("Invalid argument passed to reg config %d\n", option);
/*
 * Issue a software reset for the units in `mask` and poll (up to
 * 100 ms) for the hardware to clear the reset bits, logging an error
 * on timeout. Vendor register state is restored afterwards via
 * tegra_sdhci_reset_exit().
 */
1725 static void tegra_sdhci_reset(struct sdhci_host *sdhci, u8 mask)
1727 unsigned long timeout;
1729 sdhci_writeb(sdhci, mask, SDHCI_SOFTWARE_RESET);
1731 /* Wait max 100 ms */
1734 /* hw clears the bit when it's done */
1735 while (sdhci_readb(sdhci, SDHCI_SOFTWARE_RESET) & mask) {
1737 dev_err(mmc_dev(sdhci->mmc), "Reset 0x%x never"
1738 "completed.\n", (int)mask);
/* Re-apply Tegra vendor register setup lost by the reset. */
1745 tegra_sdhci_reset_exit(sdhci, mask);
/*
 * Program a new tap (input sampling) delay into the vendor clock
 * control register. Values above MAX_TAP_VALUES are reported via
 * dev_err. The card clock is gated around the update so the tap
 * change takes effect cleanly, then re-enabled.
 */
1748 static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
1749 unsigned int tap_delay)
1754 /* Max tap delay value is 255 */
1755 if (tap_delay > MAX_TAP_VALUES) {
1756 dev_err(mmc_dev(sdhci->mmc),
1757 "Valid tap range (0-255). Setting tap value %d\n",
/* Stop the card clock while the tap value is changed. */
1763 clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
1764 clk &= ~SDHCI_CLOCK_CARD_EN;
1765 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
1767 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
1768 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK <<
1769 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1770 vendor_ctrl |= (tap_delay << SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1771 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/* Restore the card clock. */
1773 clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
1774 clk |= SDHCI_CLOCK_CARD_EN;
1775 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
/*
 * Program a new trim (output) delay into the vendor clock control
 * register via a read-modify-write of the TRIM_VALUE field.
 */
1779 static void sdhci_tegra_set_trim_delay(struct sdhci_host *sdhci,
1780 unsigned int trim_delay)
1784 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
1785 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
1786 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1787 vendor_ctrl |= (trim_delay << SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1788 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/*
 * Accumulate per-host error statistics from an interrupt status word:
 * data/command CRC errors and data/command timeouts are counted in the
 * sd_stat_head structure (exposed for debug/statistics reporting).
 */
1791 static int sdhci_tegra_sd_error_stats(struct sdhci_host *host, u32 int_status)
1793 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1794 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1795 struct sdhci_tegra_sd_stats *head = tegra_host->sd_stat_head;
1797 if (int_status & SDHCI_INT_DATA_CRC)
1798 head->data_crc_count++;
1799 if (int_status & SDHCI_INT_CRC)
1800 head->cmd_crc_count++;
1801 if (int_status & SDHCI_INT_TIMEOUT)
1802 head->cmd_to_count++;
1803 if (int_status & SDHCI_INT_DATA_TIMEOUT)
1804 head->data_to_count++;
/*
 * Return the tuning_data entry matching the given clock.
 * With a single tuning frequency there is only one entry. Otherwise
 * the SoC's tuning frequency list is scanned for the lowest supported
 * frequency and clocks at or below it map to entry [0], anything
 * higher to entry [1].
 */
1808 static struct tegra_tuning_data *sdhci_tegra_get_tuning_data(
1809 struct sdhci_host *sdhci, unsigned int clock)
1811 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1812 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1813 struct tegra_tuning_data *tuning_data;
1814 unsigned int low_freq;
1817 if (tegra_host->tuning_freq_count == 1) {
1818 tuning_data = &tegra_host->tuning_data[0];
1822 /* Get the lowest supported freq */
1823 for (i = 0; i < TUNING_FREQ_COUNT; ++i) {
1824 low_freq = tegra_host->soc_data->tuning_freq_list[i];
1829 if (clock <= low_freq)
1830 tuning_data = &tegra_host->tuning_data[0];
1832 tuning_data = &tegra_host->tuning_data[1];
/*
 * Derive the calculated tuning values (T2T, unit interval, first tap
 * hole) at a given core voltage `vmin`.
 * T2T at vmin is taken directly from the estimated values when vmin
 * coincides with the tuning data's vmin or the boot voltage; otherwise
 * it is linearly interpolated between those two endpoints (the math
 * works in reciprocal space, scaled by 1000, to keep integer
 * precision). The vmin tap hole is derived from the hole coefficients
 * and likewise interpolated for intermediate voltages. Finally the
 * partial-window start (window 0) is rescaled for the vmin boundary
 * and all estimated/calculated values are logged.
 */
1838 static void calculate_vmin_values(struct sdhci_host *sdhci,
1839 struct tegra_tuning_data *tuning_data, int vmin, int boot_mv)
1841 struct tuning_values *est_values = &tuning_data->est_values;
1842 struct tuning_values *calc_values = &tuning_data->calc_values;
1843 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
1844 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
1845 int vmin_slope, vmin_int, temp_calc_vmin;
1846 int t2t_vmax, t2t_vmin;
1847 int vmax_thole, vmin_thole;
1850 * If current vmin is equal to vmin or vmax of tuning data, use the
1851 * previously calculated estimated T2T values directly. Note that the
1852 * estimated T2T_vmax is not at Vmax specified in tuning data. It is
1853 * the T2T at the boot or max voltage for the current SKU. Hence,
1854 * boot_mv is used in place of t2t_coeffs->vmax.
1856 if (vmin == t2t_coeffs->vmin) {
1857 t2t_vmin = est_values->t2t_vmin;
1858 } else if (vmin == boot_mv) {
1859 t2t_vmin = est_values->t2t_vmax;
1862 * For any intermediate voltage between boot voltage and vmin
1863 * of tuning data, calculate the slope and intercept from the
1864 * t2t at boot_mv and vmin and calculate the actual values.
1866 t2t_vmax = 1000 / est_values->t2t_vmax;
1867 t2t_vmin = 1000 / est_values->t2t_vmin;
1868 vmin_slope = ((t2t_vmax - t2t_vmin) * 1000) /
1869 (boot_mv - t2t_coeffs->vmin);
1870 vmin_int = (t2t_vmax * 1000 - (vmin_slope * boot_mv)) / 1000;
1871 t2t_vmin = (vmin_slope * vmin) / 1000 + vmin_int;
/* Convert back out of reciprocal space. */
1872 t2t_vmin = (1000 / t2t_vmin);
/* Scale T2T_vmin by the measured/estimated T2T_vmax ratio. */
1875 calc_values->t2t_vmin = (t2t_vmin * calc_values->t2t_vmax) /
1876 est_values->t2t_vmax;
/* Unit interval at vmin, derived from the tuning frequency. */
1878 calc_values->ui_vmin = (1000000 / (tuning_data->freq_hz / 1000000)) /
1879 calc_values->t2t_vmin;
1881 /* Calculate the vmin tap hole at vmin of tuning data */
1882 temp_calc_vmin = (est_values->t2t_vmin * calc_values->t2t_vmax) /
1883 est_values->t2t_vmax;
1884 vmin_thole = (thole_coeffs->thole_vmin_int -
1885 (thole_coeffs->thole_vmin_slope * temp_calc_vmin)) /
1887 vmax_thole = calc_values->vmax_thole;
1889 if (vmin == t2t_coeffs->vmin) {
1890 calc_values->vmin_thole = vmin_thole;
1891 } else if (vmin == boot_mv) {
1892 calc_values->vmin_thole = vmax_thole;
1895 * Interpolate the tap hole for any intermediate voltage.
1896 * Calculate the slope and intercept from the available data
1897 * and use them to calculate the actual values.
1899 vmin_slope = ((vmax_thole - vmin_thole) * 1000) /
1900 (boot_mv - t2t_coeffs->vmin);
1901 vmin_int = (vmax_thole * 1000 - (vmin_slope * boot_mv)) / 1000;
1902 calc_values->vmin_thole = (vmin_slope * vmin) / 1000 + vmin_int;
1905 /* Adjust the partial win start for Vmin boundary */
1906 if (tuning_data->is_partial_win_valid)
1907 tuning_data->final_tap_data[0].win_start =
1908 (tuning_data->final_tap_data[0].win_start *
1909 tuning_data->calc_values.t2t_vmax) /
1910 tuning_data->calc_values.t2t_vmin;
1912 pr_info("**********Tuning values*********\n");
1913 pr_info("**estimated values**\n");
1914 pr_info("T2T_Vmax %d, T2T_Vmin %d, 1'st_hole_Vmax %d, UI_Vmax %d\n",
1915 est_values->t2t_vmax, est_values->t2t_vmin,
1916 est_values->vmax_thole, est_values->ui);
1917 pr_info("**Calculated values**\n");
1918 pr_info("T2T_Vmax %d, 1'st_hole_Vmax %d, UI_Vmax %d\n",
1919 calc_values->t2t_vmax, calc_values->vmax_thole,
1921 pr_info("T2T_Vmin %d, 1'st_hole_Vmin %d, UI_Vmin %d\n",
1922 calc_values->t2t_vmin, calc_values->vmin_thole,
1923 calc_values->ui_vmin);
1924 pr_info("***********************************\n");
/*
 * Shrink a tuning window's START edge inward to add safety margin.
 * Boundary edges are moved right by one T2T period (vmin or vmax
 * reciprocal); hole edges by 7% of the hole position plus the
 * hole-specific margin. The result is clamped to MAX_TAP_VALUES.
 */
1927 static int slide_window_start(struct sdhci_host *sdhci,
1928 struct tegra_tuning_data *tuning_data,
1929 int tap_value, enum tap_win_edge_attr edge_attr, int tap_hole)
1933 if (edge_attr == WIN_EDGE_BOUN_START) {
1935 tap_value += (1000 / tuning_data->calc_values.t2t_vmin);
1937 tap_value += (1000 / tuning_data->calc_values.t2t_vmax);
1938 } else if (edge_attr == WIN_EDGE_HOLE) {
1939 if (tap_hole >= 0) {
1940 tap_margin = get_tuning_tap_hole_margins(sdhci,
1941 tuning_data->calc_values.t2t_vmax);
/* 7% of the hole position plus the SoC hole margin. */
1942 tap_value += ((7 * tap_hole) / 100) + tap_margin;
1946 if (tap_value > MAX_TAP_VALUES)
1947 tap_value = MAX_TAP_VALUES;
/*
 * Shrink a tuning window's END edge inward to add safety margin —
 * the mirror of slide_window_start(). Boundary ends are first
 * rescaled by the t2t_vmax/t2t_vmin ratio then pulled left by one
 * vmin T2T period; hole ends are set from the hole position minus
 * 7% plus the hole margin.
 */
1952 static int slide_window_end(struct sdhci_host *sdhci,
1953 struct tegra_tuning_data *tuning_data,
1954 int tap_value, enum tap_win_edge_attr edge_attr, int tap_hole)
1958 if (edge_attr == WIN_EDGE_BOUN_END) {
1959 tap_value = (tap_value * tuning_data->calc_values.t2t_vmax) /
1960 tuning_data->calc_values.t2t_vmin;
1961 tap_value -= (1000 / tuning_data->calc_values.t2t_vmin);
1962 } else if (edge_attr == WIN_EDGE_HOLE) {
1963 if (tap_hole >= 0) {
1964 tap_value = tap_hole;
1965 tap_margin = get_tuning_tap_hole_margins(sdhci,
1966 tuning_data->calc_values.t2t_vmin);
1968 tap_value -= ((7 * tap_hole) / 100) + tap_margin;
/*
 * Apply margin adjustments to every valid tap window in
 * temp_tap_data. For edges marked as tap holes, the hole position is
 * projected to the current voltage corner first (vmax for starts,
 * vmin for ends) using the calculated hole base plus (hole_pos - 1)
 * unit intervals, then slide_window_start()/slide_window_end() shrink
 * the window. The final windows are printed for debugging.
 */
1973 static int adjust_window_boundaries(struct sdhci_host *sdhci,
1974 struct tegra_tuning_data *tuning_data,
1975 struct tap_window_data *temp_tap_data)
1977 struct tap_window_data *tap_data;
1978 int vmin_tap_hole = -1;
1979 int vmax_tap_hole = -1;
1982 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
1983 tap_data = &temp_tap_data[i];
1984 /* Update with next hole if first hole is taken care of */
1985 if (tap_data->win_start_attr == WIN_EDGE_HOLE)
1986 vmax_tap_hole = tuning_data->calc_values.vmax_thole +
1987 (tap_data->hole_pos - 1) *
1988 tuning_data->calc_values.ui;
1989 tap_data->win_start = slide_window_start(sdhci, tuning_data,
1990 tap_data->win_start, tap_data->win_start_attr,
1993 /* Update with next hole if first hole is taken care of */
1994 if (tap_data->win_end_attr == WIN_EDGE_HOLE)
1995 vmin_tap_hole = tuning_data->calc_values.vmin_thole +
1996 (tap_data->hole_pos - 1) *
1997 tuning_data->calc_values.ui_vmin;
1998 tap_data->win_end = slide_window_end(sdhci, tuning_data,
1999 tap_data->win_end, tap_data->win_end_attr,
2003 pr_info("***********final tuning windows**********\n");
2004 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2005 tap_data = &temp_tap_data[i];
2006 pr_info("win[%d]: %d - %d\n", i, tap_data->win_start,
2009 pr_info("********************************\n");
/*
 * Pick the best tap value from the adjusted tap windows.
 * The widest window wins (a valid partial first window is scored as
 * min(width, 2 * win_end)). If no window is open, log an error. In
 * the partial-window case the tap sits at win_end - width/2; otherwise
 * it is placed inside the window, weighted toward win_start by the
 * t2t_vmin / (t2t_vmin + t2t_vmax) ratio.
 * Returns the chosen tap value (negative when no window opens —
 * NOTE(review): the no-window return path is hidden by extraction
 * gaps; confirm against the full file).
 */
2013 static int find_best_tap_value(struct tegra_tuning_data *tuning_data,
2014 struct tap_window_data *temp_tap_data, int vmin)
2016 struct tap_window_data *tap_data;
2017 u8 i = 0, sel_win = 0;
2018 int pref_win = 0, curr_win_size = 0;
2019 int best_tap_value = 0;
2021 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2022 tap_data = &temp_tap_data[i];
2023 if (!i && tuning_data->is_partial_win_valid) {
2024 pref_win = tap_data->win_end - tap_data->win_start;
2025 if ((tap_data->win_end * 2) < pref_win)
2026 pref_win = tap_data->win_end * 2;
2029 curr_win_size = tap_data->win_end - tap_data->win_start;
2030 if ((curr_win_size > 0) && (curr_win_size > pref_win)) {
2031 pref_win = curr_win_size;
2037 if (pref_win <= 0) {
2038 pr_err("No window opening for %d vmin\n", vmin);
2042 tap_data = &temp_tap_data[sel_win];
2043 if (!sel_win && tuning_data->is_partial_win_valid) {
2045 best_tap_value = tap_data->win_end - (pref_win / 2);
2046 if (best_tap_value < 0)
2049 best_tap_value = tap_data->win_start +
2050 ((tap_data->win_end - tap_data->win_start) *
2051 tuning_data->calc_values.t2t_vmin) /
2052 (tuning_data->calc_values.t2t_vmin +
2053 tuning_data->calc_values.t2t_vmax);
2056 pr_info("best tap win - (%d-%d), best tap value %d\n",
2057 tap_data->win_start, tap_data->win_end, best_tap_value);
2058 return best_tap_value;
/*
 * Iteratively search for a tap value with an open window, starting at
 * the DVFS-predicted vmin for the tuning frequency (falling back to
 * the boot vcore) and raising vmin until a window opens or the boot
 * vcore is exceeded (hard failure — temp_tap_data freed, error out).
 * Each iteration recomputes the vmin-dependent tuning values, copies
 * the final tap windows into a scratch buffer, adjusts the window
 * boundaries, and asks find_best_tap_value() for a candidate. On
 * success the result becomes both best and nominal best tap value,
 * and if vmin changed the DVFS fmax-at-vmin constraint is updated
 * (EPERM/ENOSYS mean overrides are disabled — warn and continue).
 * The scratch buffer is freed on exit.
 */
2061 static int sdhci_tegra_calculate_best_tap(struct sdhci_host *sdhci,
2062 struct tegra_tuning_data *tuning_data)
2064 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2065 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2066 struct tap_window_data *temp_tap_data = NULL;
2067 int vmin, curr_vmin, best_tap_value = 0;
2070 curr_vmin = tegra_dvfs_predict_millivolts(pltfm_host->clk,
2071 tuning_data->freq_hz);
2073 curr_vmin = tegra_host->boot_vcore_mv;
2077 SDHCI_TEGRA_DBG("%s: checking for win opening with vmin %d\n",
2078 mmc_hostname(sdhci->mmc), vmin);
2079 if ((best_tap_value < 0) &&
2080 (vmin > tegra_host->boot_vcore_mv)) {
/* Exhausted the vcore range without finding a window. */
2081 dev_err(mmc_dev(sdhci->mmc),
2082 "No best tap for any vcore range\n");
2083 kfree(temp_tap_data);
2084 temp_tap_data = NULL;
2088 calculate_vmin_values(sdhci, tuning_data, vmin,
2089 tegra_host->boot_vcore_mv);
/* Lazily allocate the scratch copy of the tap windows. */
2091 if (temp_tap_data == NULL) {
2092 temp_tap_data = kzalloc(sizeof(struct tap_window_data) *
2093 tuning_data->num_of_valid_tap_wins, GFP_KERNEL);
2094 if (IS_ERR_OR_NULL(temp_tap_data)) {
2095 dev_err(mmc_dev(sdhci->mmc),
2096 "No memory for final tap value calculation\n");
2101 memcpy(temp_tap_data, tuning_data->final_tap_data,
2102 sizeof(struct tap_window_data) *
2103 tuning_data->num_of_valid_tap_wins);
2105 adjust_window_boundaries(sdhci, tuning_data, temp_tap_data);
2107 best_tap_value = find_best_tap_value(tuning_data,
2108 temp_tap_data, vmin);
/* No window at this vmin: raise vmin and retry. */
2110 if (best_tap_value < 0)
2112 } while (best_tap_value < 0);
2114 tuning_data->best_tap_value = best_tap_value;
2115 tuning_data->nom_best_tap_value = best_tap_value;
2118 * Set the new vmin if there is any change. If dvfs overrides are
2119 * disabled, then print the error message but continue execution
2120 * rather than disabling tuning altogether.
2122 if ((tuning_data->best_tap_value >= 0) && (curr_vmin != vmin)) {
2123 err = tegra_dvfs_set_fmax_at_vmin(pltfm_host->clk,
2124 tuning_data->freq_hz, vmin);
2125 if ((err == -EPERM) || (err == -ENOSYS)) {
2127 * tegra_dvfs_set_fmax_at_vmin: will return EPERM or
2128 * ENOSYS, when DVFS override is not enabled, continue
2129 * tuning with default core voltage.
2132 "dvfs overrides disabled. Vmin not updated\n");
2136 kfree(temp_tap_data);
/*
 * Issue one tuning command (CMD19/CMD21, chosen earlier via
 * tegra_host->tuning_opcode) and poll for completion.
 * Success is a DATA_AVAIL interrupt status without DATA_CRC;
 * on failure the DATA and CMD lines are reset.
 * NOTE(review): several original lines are elided in this view
 * (variable declarations, timeout decrement, return statements);
 * comments describe only the visible code.
 */
2140 static int sdhci_tegra_issue_tuning_cmd(struct sdhci_host *sdhci)
2142 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2143 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2147 unsigned int timeout = 10;
/* Wait until the controller releases both CMD and DAT inhibit bits */
2151 mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
2152 while (sdhci_readl(sdhci, SDHCI_PRESENT_STATE) & mask) {
/*
 * NOTE(review): the split string literal lacks a separating space and
 * logs as "neverreleased" — candidate fix once full context is visible.
 */
2154 dev_err(mmc_dev(sdhci->mmc), "Controller never"
2155 "released inhibit bit(s).\n");
/* Start with tuned-clock deselected, then request tuning execution */
2163 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2164 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2165 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
2167 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2168 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2169 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
2172 * In response to CMD19, the card sends 64 bytes of tuning
2173 * block to the Host Controller. So we set the block size
2175 * In response to CMD21, the card sends 128 bytes of tuning
2176 * block for MMC_BUS_WIDTH_8 and 64 bytes for MMC_BUS_WIDTH_4
2177 * to the Host Controller. So we set the block size to 64 here.
/* tuning_bsize was chosen from the current bus width by the caller */
2179 sdhci_writew(sdhci, SDHCI_MAKE_BLKSZ(7, tegra_host->tuning_bsize),
2182 sdhci_writeb(sdhci, 0xE, SDHCI_TIMEOUT_CONTROL);
2184 sdhci_writew(sdhci, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2186 sdhci_writel(sdhci, 0x0, SDHCI_ARGUMENT);
2188 /* Set the cmd flags */
2189 flags = SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_DATA;
2190 /* Issue the command */
2191 sdhci_writew(sdhci, SDHCI_MAKE_CMD(
2192 tegra_host->tuning_opcode, flags), SDHCI_COMMAND);
/* Poll the raw interrupt status, then ack whatever was seen */
2198 intstatus = sdhci_readl(sdhci, SDHCI_INT_STATUS);
2200 sdhci_writel(sdhci, intstatus, SDHCI_INT_STATUS);
/* Tuning block received cleanly: data available and no CRC error */
2205 if ((intstatus & SDHCI_INT_DATA_AVAIL) &&
2206 !(intstatus & SDHCI_INT_DATA_CRC)) {
2208 sdhci->tuning_done = 1;
/* On failure, reset the DATA then CMD circuits before retrying/returning */
2210 tegra_sdhci_reset(sdhci, SDHCI_RESET_DATA);
2211 tegra_sdhci_reset(sdhci, SDHCI_RESET_CMD);
2215 if (sdhci->tuning_done) {
2216 sdhci->tuning_done = 0;
/* Tuning succeeded only if EXEC cleared and TUNED_CLK latched */
2217 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2218 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING) &&
2219 (ctrl & SDHCI_CTRL_TUNED_CLK))
/*
 * Walk tap values starting at @starting_tap, issuing a tuning command
 * at each tap, until the pass/fail state flips relative to
 * @expect_failure (or MAX_TAP_VALUES is exceeded). Used to locate
 * window start (expect_failure=true) and window end (expect_failure=false).
 * NOTE(review): retry-decrement and return lines are elided in this view.
 */
2229 static int sdhci_tegra_scan_tap_values(struct sdhci_host *sdhci,
2230 unsigned int starting_tap, bool expect_failure)
2232 unsigned int tap_value = starting_tap;
2234 unsigned int retry = TUNING_RETRIES;
2237 /* Set the tap delay */
2238 sdhci_tegra_set_tap_delay(sdhci, tap_value);
2240 /* Run frequency tuning */
2241 err = sdhci_tegra_issue_tuning_cmd(sdhci);
/* Fresh retry budget each time the result matches the expectation */
2246 retry = TUNING_RETRIES;
/* Stop scanning once the result no longer matches the expectation */
2247 if ((expect_failure && !err) ||
2248 (!expect_failure && err))
2252 } while (tap_value <= MAX_TAP_VALUES);
/*
 * Derive the measured ("actual") tuning values for @voltage_mv from the
 * per-device characterization coefficients: T2T at Vmax, then the first
 * tap-hole positions at Vmax/Vmin, linearly interpolated for any
 * voltage between the characterized vmin and vmax corners.
 * NOTE(review): divisor lines and the closing of some expressions are
 * elided in this view; the formulas in the original comments apply.
 */
2257 static int calculate_actual_tuning_values(int speedo,
2258 struct tegra_tuning_data *tuning_data, int voltage_mv)
2260 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
2261 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
2262 struct tuning_values *calc_values = &tuning_data->calc_values;
2264 int vmax_thole, vmin_thole;
2266 /* T2T_Vmax = (1000000/freq_MHz)/Calc_UI */
2267 calc_values->t2t_vmax = (1000000 / (tuning_data->freq_hz / 1000000)) /
2271 * Interpolate the tap hole.
2272 * Vmax_1'st_hole = (Calc_T2T_Vmax*(-thole_slope)+thole_tint.
2274 vmax_thole = (thole_coeffs->thole_vmax_int -
2275 (thole_coeffs->thole_vmax_slope * calc_values->t2t_vmax)) /
2277 vmin_thole = (thole_coeffs->thole_vmin_int -
2278 (thole_coeffs->thole_vmin_slope * calc_values->t2t_vmax)) /
/* Exact corners need no interpolation; otherwise interpolate linearly */
2280 if (voltage_mv == t2t_coeffs->vmin) {
2281 calc_values->vmax_thole = vmin_thole;
2282 } else if (voltage_mv == t2t_coeffs->vmax) {
2283 calc_values->vmax_thole = vmax_thole;
2285 slope = (vmax_thole - vmin_thole) /
2286 (t2t_coeffs->vmax - t2t_coeffs->vmin);
/* NOTE(review): 1250 here looks like a hard-coded vmax in mV — confirm */
2287 inpt = ((vmax_thole * 1000) - (slope * 1250)) / 1000;
2288 calc_values->vmax_thole = slope * voltage_mv + inpt;
2295 * All coeffs are filled up in the table after multiplying by 1000. So, all
2296 * calculations should have a divide by 1000 at the end.
/*
 * Estimate tuning values from speedo and characterization coefficients
 * (no measurement): T2T at vmax/vmin, the unit interval (UI), and the
 * first tap-hole positions, interpolating for intermediate voltages.
 * All coefficients are stored pre-multiplied by 1000, hence the /1000s.
 */
2298 static int calculate_estimated_tuning_values(int speedo,
2299 struct tegra_tuning_data *tuning_data, int voltage_mv)
2301 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
2302 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
2303 struct tuning_values *est_values = &tuning_data->est_values;
2305 int vmax_t2t, vmin_t2t;
2306 int vmax_thole, vmin_thole;
2308 /* Est_T2T_Vmax = (speedo*(-t2t_slope)+t2t_int */
2309 vmax_t2t = (t2t_coeffs->t2t_vmax_int - (speedo *
2310 t2t_coeffs->t2t_vmax_slope)) / 1000;
2311 vmin_t2t = (t2t_coeffs->t2t_vmin_int - (speedo *
2312 t2t_coeffs->t2t_vmin_slope)) / 1000;
2313 est_values->t2t_vmin = vmin_t2t;
2315 if (voltage_mv == t2t_coeffs->vmin) {
2316 est_values->t2t_vmax = vmin_t2t;
2317 } else if (voltage_mv == t2t_coeffs->vmax) {
2318 est_values->t2t_vmax = vmax_t2t;
/* Work in 1/T2T space: 1/T2T is linear in voltage (see comment below) */
2320 vmax_t2t = PRECISION_FOR_ESTIMATE / vmax_t2t;
2321 vmin_t2t = PRECISION_FOR_ESTIMATE / vmin_t2t;
2323 * For any intermediate voltage between 0.95V and max vcore,
2324 * calculate the slope and intercept from the T2T and tap hole
2325 * values of 0.95V and max vcore and use them to calculate the
2326 * actual values. 1/T2T is a linear function of voltage.
2328 slope = ((vmax_t2t - vmin_t2t) * PRECISION_FOR_ESTIMATE) /
2329 (t2t_coeffs->vmax - t2t_coeffs->vmin);
2330 inpt = (vmax_t2t * PRECISION_FOR_ESTIMATE -
2331 (slope * t2t_coeffs->vmax)) / PRECISION_FOR_ESTIMATE;
2332 est_values->t2t_vmax = ((slope * voltage_mv) /
2333 PRECISION_FOR_ESTIMATE + inpt);
/* Convert back from 1/T2T to T2T */
2334 est_values->t2t_vmax = (PRECISION_FOR_ESTIMATE /
2335 est_values->t2t_vmax);
2338 /* Est_UI = (1000000/freq_MHz)/Est_T2T_Vmax */
2339 est_values->ui = (1000000 / (thole_coeffs->freq_khz / 1000)) /
2340 est_values->t2t_vmax;
2343 * Est_1'st_hole = (Est_T2T_Vmax*(-thole_slope)) + thole_int.
2345 vmax_thole = (thole_coeffs->thole_vmax_int -
2346 (thole_coeffs->thole_vmax_slope * est_values->t2t_vmax)) / 1000;
2347 vmin_thole = (thole_coeffs->thole_vmin_int -
2348 (thole_coeffs->thole_vmin_slope * est_values->t2t_vmax)) / 1000;
2350 if (voltage_mv == t2t_coeffs->vmin) {
2351 est_values->vmax_thole = vmin_thole;
2352 } else if (voltage_mv == t2t_coeffs->vmax) {
2353 est_values->vmax_thole = vmax_thole;
2356 * For any intermediate voltage between 0.95V and max vcore,
2357 * calculate the slope and intercept from the t2t and tap hole
2358 * values of 0.95V and max vcore and use them to calculate the
2359 * actual values. Tap hole is a linear function of voltage.
2361 slope = ((vmax_thole - vmin_thole) * PRECISION_FOR_ESTIMATE) /
2362 (t2t_coeffs->vmax - t2t_coeffs->vmin);
2363 inpt = (vmax_thole * PRECISION_FOR_ESTIMATE -
2364 (slope * t2t_coeffs->vmax)) / PRECISION_FOR_ESTIMATE;
2365 est_values->vmax_thole = (slope * voltage_mv) /
2366 PRECISION_FOR_ESTIMATE + inpt;
2368 est_values->vmin_thole = vmin_thole;
2374 * Insert the calculated holes and get the final tap windows
2375 * with the boundaries and holes set.
/*
 * Split the raw tap windows around the calculated tap holes, producing
 * tuning_data->final_tap_data: each hole (first at calc_values->vmax_thole,
 * subsequent at +UI steps) that lands inside a window splits the window
 * into pieces with hole-edge attributes.
 * NOTE(review): loop head, index increments and attribute-constant lines
 * are elided in this view; the visible cases are documented below.
 */
2377 static int adjust_holes_in_tap_windows(struct sdhci_host *sdhci,
2378 struct tegra_tuning_data *tuning_data)
2380 struct tap_window_data *tap_data;
2381 struct tap_window_data *final_tap_data;
2382 struct tuning_values *calc_values = &tuning_data->calc_values;
2383 int tap_hole, size = 0;
2384 u8 i = 0, j = 0, num_of_wins, hole_pos = 0;
/* 42 entries: fixed worst-case window count for the final array */
2386 tuning_data->final_tap_data =
2387 devm_kzalloc(mmc_dev(sdhci->mmc),
2388 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
2389 if (IS_ERR_OR_NULL(tuning_data->final_tap_data)) {
2390 dev_err(mmc_dev(sdhci->mmc), "No mem for final tap wins\n");
2394 num_of_wins = tuning_data->num_of_valid_tap_wins;
2395 tap_hole = calc_values->vmax_thole;
2398 tap_data = &tuning_data->tap_data[i];
2399 final_tap_data = &tuning_data->final_tap_data[j];
/* Hole before this window: advance hole by one UI and re-check */
2400 if (tap_hole < tap_data->win_start) {
2401 tap_hole += calc_values->ui;
/* Hole past this window: copy the window through unchanged */
2404 } else if (tap_hole > tap_data->win_end) {
2405 memcpy(final_tap_data, tap_data,
2406 sizeof(struct tap_window_data));
/* Hole inside this window: split it around the hole */
2411 } else if ((tap_hole >= tap_data->win_start) &&
2412 (tap_hole <= tap_data->win_end)) {
2413 size = tap_data->win_end - tap_data->win_start;
2416 &tuning_data->final_tap_data[j];
2417 if (tap_hole == tap_data->win_start) {
2418 final_tap_data->win_start =
2420 final_tap_data->win_start_attr =
2422 final_tap_data->hole_pos = hole_pos;
2423 tap_hole += calc_values->ui;
2426 final_tap_data->win_start =
2427 tap_data->win_start;
2428 final_tap_data->win_start_attr =
2429 WIN_EDGE_BOUN_START;
2431 if (tap_hole <= tap_data->win_end) {
/* Window piece ends just before the hole; remainder starts at the hole */
2432 final_tap_data->win_end = tap_hole - 1;
2433 final_tap_data->win_end_attr =
2435 final_tap_data->hole_pos = hole_pos;
2436 tap_data->win_start = tap_hole;
2437 } else if (tap_hole > tap_data->win_end) {
2438 final_tap_data->win_end =
2440 final_tap_data->win_end_attr =
2442 tap_data->win_start =
2445 size = tap_data->win_end - tap_data->win_start;
2451 } while (num_of_wins > 0);
2453 /* Update the num of valid wins count after tap holes insertion */
2454 tuning_data->num_of_valid_tap_wins = j;
2456 pr_info("********tuning windows after inserting holes*****\n");
2457 pr_info("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
2458 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2459 final_tap_data = &tuning_data->final_tap_data[i];
2460 pr_info("win[%d]:%d(%d) - %d(%d)\n", i,
2461 final_tap_data->win_start,
2462 final_tap_data->win_start_attr,
2463 final_tap_data->win_end, final_tap_data->win_end_attr);
2465 pr_info("***********************************************\n");
2471 * Insert the boundaries from negative margin calculations into the windows
/*
 * Insert boundary points (derived from the negative-margin calculation,
 * starting at boun_end modulo the calculated UI and stepping by UI) into
 * the tap windows, splitting any window a boundary lands inside. The
 * result is written back to tuning_data->tap_data.
 * NOTE(review): loop heads, index increments and some attribute lines are
 * elided in this view; comments cover the visible branches only.
 */
2474 static int insert_boundaries_in_tap_windows(struct sdhci_host *sdhci,
2475 struct tegra_tuning_data *tuning_data, u8 boun_end)
2477 struct tap_window_data *tap_data;
2478 struct tap_window_data *new_tap_data;
2479 struct tap_window_data *temp_tap_data;
2480 struct tuning_values *calc_values = &tuning_data->calc_values;
2482 u8 i = 0, j = 0, num_of_wins;
2483 bool get_next_boun = false;
/* Scratch array sized for the fixed worst case of 42 windows */
2485 temp_tap_data = devm_kzalloc(mmc_dev(sdhci->mmc),
2486 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
2487 if (IS_ERR_OR_NULL(temp_tap_data)) {
2488 dev_err(mmc_dev(sdhci->mmc), "No mem for final tap wins\n");
2492 num_of_wins = tuning_data->num_of_valid_tap_wins;
/* First boundary is boun_end folded into the first UI period */
2493 curr_boun = boun_end % calc_values->ui;
2495 if (get_next_boun) {
2496 curr_boun += calc_values->ui;
2498 * If the boun_end exceeds the intial boundary end,
2499 * just copy remaining windows and return.
/* Push the boundary out of range so remaining windows copy through */
2501 if (curr_boun >= boun_end)
2502 curr_boun += MAX_TAP_VALUES;
2505 tap_data = &tuning_data->tap_data[i];
2506 new_tap_data = &temp_tap_data[j];
/* Boundary before this window: fetch the next boundary */
2507 if (curr_boun <= tap_data->win_start) {
2508 get_next_boun = true;
/* Boundary past this window: copy the window through unchanged */
2510 } else if (curr_boun >= tap_data->win_end) {
2511 memcpy(new_tap_data, tap_data,
2512 sizeof(struct tap_window_data));
2516 get_next_boun = false;
/* Boundary inside this window: split into before / at / after pieces */
2518 } else if ((curr_boun >= tap_data->win_start) &&
2519 (curr_boun <= tap_data->win_end)) {
2520 new_tap_data->win_start = tap_data->win_start;
2521 new_tap_data->win_start_attr =
2522 tap_data->win_start_attr;
2523 new_tap_data->win_end = curr_boun - 1;
2524 new_tap_data->win_end_attr =
2525 tap_data->win_end_attr;
/* Single-tap window marking the boundary itself */
2527 new_tap_data = &temp_tap_data[j];
2528 new_tap_data->win_start = curr_boun;
2529 new_tap_data->win_end = curr_boun;
2530 new_tap_data->win_start_attr =
2531 WIN_EDGE_BOUN_START;
2532 new_tap_data->win_end_attr =
/* Remainder of the original window after the boundary */
2535 new_tap_data = &temp_tap_data[j];
2536 new_tap_data->win_start = curr_boun + 1;
2537 new_tap_data->win_start_attr = WIN_EDGE_BOUN_START;
2538 new_tap_data->win_end = tap_data->win_end;
2539 new_tap_data->win_end_attr =
2540 tap_data->win_end_attr;
2544 get_next_boun = true;
2546 } while (num_of_wins > 0);
2548 /* Update the num of valid wins count after tap holes insertion */
2549 tuning_data->num_of_valid_tap_wins = j;
/* Commit the rebuilt windows back over the working array */
2551 memcpy(tuning_data->tap_data, temp_tap_data,
2552 j * sizeof(struct tap_window_data));
2553 SDHCI_TEGRA_DBG("***tuning windows after inserting boundaries***\n");
2554 SDHCI_TEGRA_DBG("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
2555 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2556 new_tap_data = &tuning_data->tap_data[i];
2557 SDHCI_TEGRA_DBG("win[%d]:%d(%d) - %d(%d)\n", i,
2558 new_tap_data->win_start,
2559 new_tap_data->win_start_attr,
2560 new_tap_data->win_end, new_tap_data->win_end_attr);
2562 SDHCI_TEGRA_DBG("***********************************************\n");
2568 * Scan for all tap values and get all passing tap windows.
/*
 * Scan all tap values (0..MAX_TAP_VALUES) to collect passing tap windows,
 * validate the per-window unit intervals (UIs), compute the averaged
 * calculated UI, handle a valid partial first window via negative-margin
 * math, then insert boundaries and tap holes into the window list.
 * NOTE(review): many original lines are elided in this view (loop heads,
 * increments, early returns); comments describe only visible code.
 */
2570 static int sdhci_tegra_get_tap_window_data(struct sdhci_host *sdhci,
2571 struct tegra_tuning_data *tuning_data)
2573 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2574 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2575 struct tap_window_data *tap_data;
2576 struct tuning_ui tuning_ui[10];
2577 int err = 0, partial_win_start = 0, temp_margin = 0;
2578 unsigned int tap_value, calc_ui = 0;
2579 u8 prev_boundary_end = 0, num_of_wins = 0;
2580 u8 num_of_uis = 0, valid_num_uis = 0;
2581 u8 ref_ui, first_valid_full_win = 0;
2582 u8 boun_end = 0, next_boun_end = 0;
2584 bool valid_ui_found = false;
2587 * Assume there are a max of 10 windows and allocate tap window
2588 * structures for the same. If there are more windows, the array
2589 * size can be adjusted later using realloc.
/* NOTE(review): comment says 10 but 42 entries are allocated — confirm */
2591 tuning_data->tap_data = devm_kzalloc(mmc_dev(sdhci->mmc),
2592 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
2593 if (IS_ERR_OR_NULL(tuning_data->tap_data)) {
2594 dev_err(mmc_dev(sdhci->mmc), "No memory for tap data\n");
/* Hold the host lock across the whole scan of tap values */
2598 spin_lock(&sdhci->lock);
2601 tap_data = &tuning_data->tap_data[num_of_wins];
2602 /* Get the window start */
/* expect_failure=true: scan until a tap starts passing */
2603 tap_value = sdhci_tegra_scan_tap_values(sdhci, tap_value, true);
2604 tap_data->win_start = min_t(u8, tap_value, MAX_TAP_VALUES);
2606 if (tap_value >= MAX_TAP_VALUES) {
2607 /* If it's first iteration, then all taps failed */
2609 dev_err(mmc_dev(sdhci->mmc),
2610 "All tap values(0-255) failed\n");
2611 spin_unlock(&sdhci->lock);
2614 /* All windows obtained */
2619 /* Get the window end */
/* expect_failure=false: scan until a tap starts failing again */
2620 tap_value = sdhci_tegra_scan_tap_values(sdhci,
2622 tap_data->win_end = min_t(u8, (tap_value - 1), MAX_TAP_VALUES);
2623 tap_data->win_size = tap_data->win_end - tap_data->win_start;
2627 * If the size of window is more than 4 taps wide, then it is a
2628 * valid window. If tap value 0 has passed, then a partial
2629 * window exists. Mark all the window edges as boundary edges.
2631 if (tap_data->win_size > 4) {
2632 if (tap_data->win_start == 0)
2633 tuning_data->is_partial_win_valid = true;
2634 tap_data->win_start_attr = WIN_EDGE_BOUN_START;
2635 tap_data->win_end_attr = WIN_EDGE_BOUN_END;
2637 /* Invalid window as size is less than 5 taps */
2638 SDHCI_TEGRA_DBG("Invalid tuning win (%d-%d) ignored\n",
2639 tap_data->win_start, tap_data->win_end);
2643 /* Ignore first and last partial UIs */
/* UI = distance between consecutive boundary ends */
2644 if (tap_data->win_end_attr == WIN_EDGE_BOUN_END) {
2645 tuning_ui[num_of_uis].ui = tap_data->win_end -
2647 tuning_ui[num_of_uis].is_valid_ui = true;
2649 prev_boundary_end = tap_data->win_end;
2652 } while (tap_value < MAX_TAP_VALUES);
2653 spin_unlock(&sdhci->lock);
2655 tuning_data->num_of_valid_tap_wins = num_of_wins;
2656 valid_num_uis = num_of_uis;
2658 /* Print info of all tap windows */
2659 pr_info("**********Auto tuning windows*************\n");
2660 pr_info("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
2661 for (j = 0; j < tuning_data->num_of_valid_tap_wins; j++) {
2662 tap_data = &tuning_data->tap_data[j];
2663 pr_info("win[%d]: %d(%d) - %d(%d)\n",
2664 j, tap_data->win_start, tap_data->win_start_attr,
2665 tap_data->win_end, tap_data->win_end_attr);
2667 pr_info("***************************************\n");
2669 /* Mark the first last partial UIs as invalid */
2670 tuning_ui[0].is_valid_ui = false;
2671 tuning_ui[num_of_uis - 1].is_valid_ui = false;
2674 /* Discredit all uis at either end with size less than 30% of est ui */
2675 ref_ui = (30 * tuning_data->est_values.ui) / 100;
2676 for (j = 0; j < num_of_uis; j++) {
2677 if (tuning_ui[j].is_valid_ui) {
2678 tuning_ui[j].is_valid_ui = false;
2681 if (tuning_ui[j].ui > ref_ui)
/* Same discredit pass, walking from the tail end */
2685 for (j = num_of_uis; j > 0; j--) {
2686 if (tuning_ui[j - 1].ui < ref_ui) {
2687 if (tuning_ui[j - 1].is_valid_ui) {
2688 tuning_ui[j - 1].is_valid_ui = false;
2695 /* Calculate 0.75*est_UI */
2696 ref_ui = (75 * tuning_data->est_values.ui) / 100;
2699 * Check for valid UIs and discredit invalid UIs. A UI is considered
2700 * valid if it's greater than (0.75*est_UI). If an invalid UI is found,
2701 * also discredit the smaller of the two adjacent windows.
2703 for (j = 1; j < (num_of_uis - 1); j++) {
2704 if (tuning_ui[j].ui > ref_ui && tuning_ui[j].is_valid_ui) {
2705 tuning_ui[j].is_valid_ui = true;
2707 if (tuning_ui[j].is_valid_ui) {
2708 tuning_ui[j].is_valid_ui = false;
/* Also discredit the smaller (or already-invalid) neighbor */
2711 if (!tuning_ui[j + 1].is_valid_ui ||
2712 !tuning_ui[j - 1].is_valid_ui) {
2713 if (tuning_ui[j - 1].is_valid_ui) {
2714 tuning_ui[j - 1].is_valid_ui = false;
2716 } else if (tuning_ui[j + 1].is_valid_ui) {
2717 tuning_ui[j + 1].is_valid_ui = false;
2722 if (tuning_ui[j - 1].ui > tuning_ui[j + 1].ui)
2723 tuning_ui[j + 1].is_valid_ui = false;
2725 tuning_ui[j - 1].is_valid_ui = false;
2731 /* Calculate the cumulative UI if there are valid UIs left */
2732 if (valid_num_uis) {
2733 for (j = 0; j < num_of_uis; j++)
2734 if (tuning_ui[j].is_valid_ui) {
2735 calc_ui += tuning_ui[j].ui;
2736 if (!first_valid_full_win)
2737 first_valid_full_win = j;
/* Averaged measured UI; fall back to the estimated UI otherwise */
2742 tuning_data->calc_values.ui = (calc_ui / valid_num_uis);
2743 valid_ui_found = true;
2745 tuning_data->calc_values.ui = tuning_data->est_values.ui;
2746 valid_ui_found = false;
2749 SDHCI_TEGRA_DBG("****Tuning UIs***********\n");
2750 for (j = 0; j < num_of_uis; j++)
2751 SDHCI_TEGRA_DBG("Tuning UI[%d] : %d, Is valid[%d]\n",
2752 j, tuning_ui[j].ui, tuning_ui[j].is_valid_ui);
2753 SDHCI_TEGRA_DBG("*************************\n");
2755 /* Get the calculated tuning values */
2756 err = calculate_actual_tuning_values(tegra_host->speedo, tuning_data,
2757 tegra_host->boot_vcore_mv);
2760 * Calculate negative margin if partial win is valid. There are two
2762 * Case 1: If Avg_UI is found, then keep subtracting avg_ui from start
2763 * of first valid full window until a value <=0 is obtained.
2764 * Case 2: If Avg_UI is not found, subtract avg_ui from all boundary
2765 * starts until a value <=0 is found.
2767 if (tuning_data->is_partial_win_valid && (num_of_wins > 1)) {
2768 if (valid_ui_found) {
2770 tuning_data->tap_data[first_valid_full_win].win_start;
2771 boun_end = partial_win_start;
2772 partial_win_start %= tuning_data->calc_values.ui;
2773 partial_win_start -= tuning_data->calc_values.ui;
2775 for (j = 0; j < NEG_MAR_CHK_WIN_COUNT; j++) {
2777 tuning_data->tap_data[j + 1].win_start;
2779 boun_end = temp_margin;
2780 else if (!next_boun_end)
2781 next_boun_end = temp_margin;
2782 temp_margin %= tuning_data->calc_values.ui;
2783 temp_margin -= tuning_data->calc_values.ui;
2784 if (!partial_win_start ||
2785 (temp_margin > partial_win_start))
2786 partial_win_start = temp_margin;
/* A non-positive start marks the partial window's negative margin */
2789 if (partial_win_start <= 0)
2790 tuning_data->tap_data[0].win_start = partial_win_start;
2794 insert_boundaries_in_tap_windows(sdhci, tuning_data, boun_end);
2796 insert_boundaries_in_tap_windows(sdhci, tuning_data, next_boun_end);
2798 /* Insert calculated holes into the windows */
2799 err = adjust_holes_in_tap_windows(sdhci, tuning_data);
/*
 * Debug helper: log the number of tuning frequencies and each entry's
 * frequency and frequency band. No side effects beyond logging.
 */
2804 static void sdhci_tegra_dump_tuning_constraints(struct sdhci_host *sdhci)
2806 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2807 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2808 struct tegra_tuning_data *tuning_data;
2811 SDHCI_TEGRA_DBG("%s: Num of tuning frequencies%d\n",
2812 mmc_hostname(sdhci->mmc), tegra_host->tuning_freq_count);
2813 for (i = 0; i < tegra_host->tuning_freq_count; ++i) {
2814 tuning_data = &tegra_host->tuning_data[i];
2815 SDHCI_TEGRA_DBG("%s: Tuning freq[%d]: %d, freq band %d\n",
2816 mmc_hostname(sdhci->mmc), i,
2817 tuning_data->freq_hz, tuning_data->freq_band);
/*
 * Map a tuning-voltage selector (taken from *mask — the selection and
 * mask-update logic is elided in this view) to the corresponding vcore
 * millivolt value; falls back to the boot vcore for unknown selectors.
 */
2821 static unsigned int get_tuning_voltage(struct sdhci_tegra *tegra_host, u8 *mask)
2828 case NOMINAL_VCORE_TUN:
2829 return tegra_host->nominal_vcore_mv;
2830 case BOOT_VCORE_TUN:
2831 return tegra_host->boot_vcore_mv;
2832 case MIN_OVERRIDE_VCORE_TUN:
2833 return tegra_host->min_vcore_override_mv;
/* Default: boot vcore */
2836 return tegra_host->boot_vcore_mv;
/*
 * Return the index of the first entry in the SoC tuning frequency list
 * that is >= the host's max clock; TUNING_MAX_FREQ if none matches.
 */
2839 static u8 sdhci_tegra_get_freq_point(struct sdhci_host *sdhci)
2841 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2842 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2843 const unsigned int *freq_list;
2847 curr_clock = sdhci->max_clk;
2848 freq_list = tegra_host->soc_data->tuning_freq_list;
/* First band whose ceiling covers the current max clock */
2850 for (i = 0; i < TUNING_FREQ_COUNT; ++i)
2851 if (curr_clock <= freq_list[i])
2854 return TUNING_MAX_FREQ;
/*
 * Return the tap-hole margin for this controller instance: a fixed
 * per-device value from soc_data when the SELECT_FIXED_TAP_HOLE_MARGINS
 * quirk provides one, otherwise a value computed from the T2T tuning
 * value. NOTE(review): the tap_hole iterator advance and the tail of the
 * computed-margin expression are elided in this view.
 */
2857 static int get_tuning_tap_hole_margins(struct sdhci_host *sdhci,
2858 int t2t_tuning_value)
2860 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2861 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2862 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2863 struct tuning_tap_hole_margins *tap_hole;
/* Prefer a fixed margin matched by device name when the quirk is set */
2868 if (soc_data->nvquirks & NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS &&
2869 soc_data->tap_hole_margins) {
2870 tap_hole = soc_data->tap_hole_margins;
2871 dev_id = dev_name(mmc_dev(sdhci->mmc));
2872 for (i = 0; i < soc_data->tap_hole_margins_count; i++) {
2873 if (!strcmp(dev_id, tap_hole->dev_id))
2874 return tap_hole->tap_hole_margin;
2878 dev_info(mmc_dev(sdhci->mmc),
2879 "Tap hole margins missing\n");
2880 /* if no margin are available calculate tap margin */
2881 tap_margin = (((2 * (450 / t2t_tuning_value)) +
2888 * The frequency tuning algorithm tries to calculate the tap-to-tap delay
2889 * UI and estimate holes using equations and predetermined coefficients from
2890 * the characterization data. The algorithm will not work without this data.
/*
 * Look up T2T and tap-hole characterization coefficients for every
 * supported tuning frequency, matching by controller device name (and,
 * for tap-hole data, by frequency in kHz). Missing data is an error:
 * the tuning algorithm cannot run without it.
 * NOTE(review): iterator advances, break statements and error returns
 * are elided in this view.
 */
2892 static int find_tuning_coeffs_data(struct sdhci_host *sdhci,
2893 bool force_retuning)
2895 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2896 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2897 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2898 struct tegra_tuning_data *tuning_data;
2899 struct tuning_t2t_coeffs *t2t_coeffs;
2900 struct tap_hole_coeffs *thole_coeffs;
2902 unsigned int freq_khz;
2904 bool coeffs_set = false;
2906 dev_id = dev_name(mmc_dev(sdhci->mmc));
2907 /* Find the coeffs data for all supported frequencies */
2908 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
2909 tuning_data = &tegra_host->tuning_data[i];
2911 /* Skip if T2T coeffs are already found */
2912 if (tuning_data->t2t_coeffs == NULL || force_retuning) {
2913 t2t_coeffs = soc_data->t2t_coeffs;
/* Match the T2T coefficient table entry by device name */
2914 for (j = 0; j < soc_data->t2t_coeffs_count; j++) {
2915 if (!strcmp(dev_id, t2t_coeffs->dev_id)) {
2916 tuning_data->t2t_coeffs = t2t_coeffs;
2918 dev_info(mmc_dev(sdhci->mmc),
2919 "Found T2T coeffs data\n");
2925 dev_err(mmc_dev(sdhci->mmc),
2926 "T2T coeffs data missing\n");
2927 tuning_data->t2t_coeffs = NULL;
2933 /* Skip if tap hole coeffs are already found */
2934 if (tuning_data->thole_coeffs == NULL || force_retuning) {
2935 thole_coeffs = soc_data->tap_hole_coeffs;
2936 freq_khz = tuning_data->freq_hz / 1000;
/* Tap-hole coeffs must match both device name and frequency */
2937 for (j = 0; j < soc_data->tap_hole_coeffs_count; j++) {
2938 if (!strcmp(dev_id, thole_coeffs->dev_id) &&
2939 (freq_khz == thole_coeffs->freq_khz)) {
2940 tuning_data->thole_coeffs =
2943 dev_info(mmc_dev(sdhci->mmc),
2944 "%dMHz tap hole coeffs found\n",
2952 dev_err(mmc_dev(sdhci->mmc),
2953 "%dMHz Tap hole coeffs data missing\n",
2955 tuning_data->thole_coeffs = NULL;
2965 * Determines the numbers of frequencies required and then fills up the tuning
2966 * constraints for each of the frequencies. The data of lower frequency is
2967 * filled first and then the higher frequency data. Max supported frequencies
/*
 * Decide how many tuning frequencies are needed (more when DFS frequency
 * scaling is enabled and not in SDR50) and fill tegra_host->tuning_data
 * with frequency, band and vcore-mask constraints, lower frequencies
 * first. Returns the frequency count (negative count is treated as an
 * error by the caller).
 * NOTE(review): switch-case labels, fallthroughs and the return are
 * elided in this view.
 */
2970 static int setup_freq_constraints(struct sdhci_host *sdhci,
2971 const unsigned int *freq_list)
2973 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2974 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2975 struct tegra_tuning_data *tuning_data;
/* DFS needs multiple tuning frequencies, except in SDR50 */
2979 if ((sdhci->mmc->ios.timing != MMC_TIMING_UHS_SDR50) &&
2980 (sdhci->mmc->caps2 & MMC_CAP2_FREQ_SCALING))
2981 freq_count = DFS_FREQ_COUNT;
2985 freq_band = sdhci_tegra_get_freq_point(sdhci);
2986 /* Fill up the req frequencies */
2987 switch (freq_count) {
/* Single-frequency case: tune only at the max clock */
2989 tuning_data = &tegra_host->tuning_data[0];
2990 tuning_data->freq_hz = sdhci->max_clk;
2991 tuning_data->freq_band = freq_band;
2992 tuning_data->constraints.vcore_mask =
2993 tuning_vcore_constraints[freq_band].vcore_mask;
2994 tuning_data->nr_voltages =
2995 hweight32(tuning_data->constraints.vcore_mask);
/* Multi-frequency case: slot [1] holds the max clock... */
2998 tuning_data = &tegra_host->tuning_data[1];
2999 tuning_data->freq_hz = sdhci->max_clk;
3000 tuning_data->freq_band = freq_band;
3001 tuning_data->constraints.vcore_mask =
3002 tuning_vcore_constraints[freq_band].vcore_mask;
3003 tuning_data->nr_voltages =
3004 hweight32(tuning_data->constraints.vcore_mask);
/* ...and slot [0] the highest lower-band frequency from the list */
3006 tuning_data = &tegra_host->tuning_data[0];
3007 for (i = (freq_band - 1); i >= 0; i--) {
3010 tuning_data->freq_hz = freq_list[i];
3011 tuning_data->freq_band = i;
3012 tuning_data->nr_voltages = 1;
3013 tuning_data->constraints.vcore_mask =
3014 tuning_vcore_constraints[i].vcore_mask;
3015 tuning_data->nr_voltages =
3016 hweight32(tuning_data->constraints.vcore_mask);
3020 dev_err(mmc_dev(sdhci->mmc), "Unsupported freq count\n");
3028 * Get the supported frequencies and other tuning related constraints for each
3029 * frequency. The supported frequencies should be determined from the list of
3030 * frequencies in the soc data and also consider the platform clock limits as
3031 * well as any DFS related restrictions.
/*
 * Populate (or refresh, when force_retuning) the tuning frequency
 * constraints and the per-frequency characterization coefficients,
 * then dump them for debugging. A non-zero tuning_freq_count means the
 * constraints were already set up and are reused.
 */
3033 static int sdhci_tegra_get_tuning_constraints(struct sdhci_host *sdhci,
3034 bool force_retuning)
3036 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3037 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3038 const unsigned int *freq_list;
3041 /* A valid freq count means freq constraints are already set up */
3042 if (!tegra_host->tuning_freq_count || force_retuning) {
3043 freq_list = tegra_host->soc_data->tuning_freq_list;
3044 tegra_host->tuning_freq_count =
3045 setup_freq_constraints(sdhci, freq_list);
3046 if (tegra_host->tuning_freq_count < 0) {
3047 dev_err(mmc_dev(sdhci->mmc),
3048 "Invalid tuning freq count\n");
/* Coefficients are required for every supported frequency */
3053 err = find_tuning_coeffs_data(sdhci, force_retuning);
3057 sdhci_tegra_dump_tuning_constraints(sdhci);
3063 * During boot, only boot voltage for vcore can be set. Check if the current
3064 * voltage is allowed to be used. Nominal and min override voltages can be
3065 * set once boot is done. This will be notified through late subsys init call.
/*
 * Apply a vcore override voltage for tuning via DVFS. Rejects non-boot
 * voltages until overrides are allowed (post-boot); boosts the EMC clock
 * when tuning at nominal vcore. If DVFS overrides are disabled (-EPERM /
 * -ENOSYS), logs and continues at the default core voltage rather than
 * failing tuning. NOTE(review): some returns/else-branches are elided
 * in this view.
 */
3067 static int sdhci_tegra_set_tuning_voltage(struct sdhci_host *sdhci,
3068 unsigned int voltage)
3070 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3071 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3073 bool nom_emc_freq_set = false;
/* Only the boot vcore may be used before overrides are allowed */
3075 if (voltage && (voltage != tegra_host->boot_vcore_mv) &&
3076 !vcore_overrides_allowed) {
3077 SDHCI_TEGRA_DBG("%s: Override vcore %dmv not allowed\n",
3078 mmc_hostname(sdhci->mmc), voltage);
3082 SDHCI_TEGRA_DBG("%s: Setting vcore override %d\n",
3083 mmc_hostname(sdhci->mmc), voltage);
3085 * First clear any previous dvfs override settings. If dvfs overrides
3086 * are disabled, then print the error message but continue execution
3087 * rather than failing tuning altogether.
3089 err = tegra_dvfs_override_core_voltage(pltfm_host->clk, 0);
3090 if ((err == -EPERM) || (err == -ENOSYS)) {
3092 * tegra_dvfs_override_core_voltage will return EPERM or ENOSYS,
3093 * when DVFS override is not enabled. Continue tuning
3094 * with default core voltage
3096 SDHCI_TEGRA_DBG("dvfs overrides disabled. Nothing to clear\n");
3102 /* EMC clock freq boost might be required for nominal core voltage */
3103 if ((voltage == tegra_host->nominal_vcore_mv) &&
3104 tegra_host->plat->en_nominal_vcore_tuning &&
3105 tegra_host->emc_clk) {
3106 err = clk_set_rate(tegra_host->emc_clk,
3107 SDMMC_EMC_NOM_VOLT_FREQ);
3109 dev_err(mmc_dev(sdhci->mmc),
3110 "Failed to set emc nom clk freq %d\n", err);
3112 nom_emc_freq_set = true;
3116 * If dvfs overrides are disabled, then print the error message but
3117 * continue tuning execution rather than failing tuning altogether.
3119 err = tegra_dvfs_override_core_voltage(pltfm_host->clk, voltage);
3120 if ((err == -EPERM) || (err == -ENOSYS)) {
3122 * tegra_dvfs_override_core_voltage will return EPERM or ENOSYS,
3123 * when DVFS override is not enabled. Continue tuning
3124 * with default core voltage
3126 SDHCI_TEGRA_DBG("dvfs overrides disabled. No overrides set\n");
3129 dev_err(mmc_dev(sdhci->mmc),
3130 "failed to set vcore override %dmv\n", voltage);
3132 /* Revert emc clock to normal freq */
3133 if (nom_emc_freq_set) {
3134 err = clk_set_rate(tegra_host->emc_clk, SDMMC_EMC_MAX_FREQ);
3136 dev_err(mmc_dev(sdhci->mmc),
3137 "Failed to revert emc nom clk freq %d\n", err);
/*
 * Run tuning for one frequency entry: for each voltage selected from the
 * constraint mask, apply the vcore override and collect the tap-window
 * data at that voltage.
 */
3143 static int sdhci_tegra_run_tuning(struct sdhci_host *sdhci,
3144 struct tegra_tuning_data *tuning_data)
3146 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3147 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3150 u8 i, vcore_mask = 0;
3152 vcore_mask = tuning_data->constraints.vcore_mask;
3153 for (i = 0; i < tuning_data->nr_voltages; i++) {
/* get_tuning_voltage consumes selectors from vcore_mask */
3154 voltage = get_tuning_voltage(tegra_host, &vcore_mask);
3155 err = sdhci_tegra_set_tuning_voltage(sdhci, voltage);
3157 dev_err(mmc_dev(sdhci->mmc),
3158 "Unable to set override voltage.\n");
3162 /* Get the tuning window info */
3163 SDHCI_TEGRA_DBG("Getting tuning windows...\n");
3164 err = sdhci_tegra_get_tap_window_data(sdhci, tuning_data);
3166 dev_err(mmc_dev(sdhci->mmc),
3167 "Failed to get tap win %d\n", err);
3170 SDHCI_TEGRA_DBG("%s: %d tuning window data obtained\n",
3171 mmc_hostname(sdhci->mmc), tuning_data->freq_hz);
/*
 * Sanity-check the stored best tap value for the current max clock,
 * program it, and confirm it still passes by re-issuing the tuning
 * command once.
 */
3176 static int sdhci_tegra_verify_best_tap(struct sdhci_host *sdhci)
3178 struct tegra_tuning_data *tuning_data;
3181 tuning_data = sdhci_tegra_get_tuning_data(sdhci, sdhci->max_clk);
/* Reject out-of-range best tap values before programming them */
3182 if ((tuning_data->best_tap_value < 0) ||
3183 (tuning_data->best_tap_value > MAX_TAP_VALUES)) {
3184 dev_err(mmc_dev(sdhci->mmc),
3185 "Trying to verify invalid best tap value\n");
3188 dev_info(mmc_dev(sdhci->mmc),
3189 "%s: tuning freq %dhz, best tap %d\n",
3190 __func__, tuning_data->freq_hz,
3191 tuning_data->best_tap_value);
3194 /* Set the best tap value */
3195 sdhci_tegra_set_tap_delay(sdhci, tuning_data->best_tap_value);
3197 /* Run tuning after setting the best tap value */
3198 err = sdhci_tegra_issue_tuning_cmd(sdhci);
3200 dev_err(mmc_dev(sdhci->mmc),
3201 "%dMHz best tap value verification failed %d\n",
3202 tuning_data->freq_hz, err);
3206 static int sdhci_tegra_execute_tuning(struct sdhci_host *sdhci, u32 opcode)
3208 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3209 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3210 struct tegra_tuning_data *tuning_data;
3211 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
3216 u8 i, set_retuning = 0;
3217 bool force_retuning = false;
3220 /* Tuning is valid only in SDR104 and SDR50 modes */
3221 ctrl_2 = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
3222 if (!(((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
3223 (((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
3224 (sdhci->flags & SDHCI_SDR50_NEEDS_TUNING))))
3227 /* Tuning should be done only for MMC_BUS_WIDTH_8 and MMC_BUS_WIDTH_4 */
3228 if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
3229 tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8;
3230 else if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
3231 tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4;
3235 SDHCI_TEGRA_DBG("%s: Starting freq tuning\n", mmc_hostname(sdhci->mmc));
3236 enable_lb_clk = (soc_data->nvquirks &
3237 NVQUIRK_DISABLE_EXTERNAL_LOOPBACK) &&
3238 (tegra_host->instance == 2);
3239 if (enable_lb_clk) {
3240 misc_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_MISC_CTRL);
3242 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
3243 sdhci_writel(sdhci, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
3245 mutex_lock(&tuning_mutex);
3247 /* Set the tuning command to be used */
3248 tegra_host->tuning_opcode = opcode;
3251 * Disable all interrupts signalling.Enable interrupt status
3252 * detection for buffer read ready and data crc. We use
3253 * polling for tuning as it involves less overhead.
3255 ier = sdhci_readl(sdhci, SDHCI_INT_ENABLE);
3256 sdhci_writel(sdhci, 0, SDHCI_SIGNAL_ENABLE);
3257 sdhci_writel(sdhci, SDHCI_INT_DATA_AVAIL |
3258 SDHCI_INT_DATA_CRC, SDHCI_INT_ENABLE);
3261 * If tuning is already done and retune request is not set, then skip
3262 * best tap value calculation and use the old best tap value. If the
3263 * previous best tap value verification failed, force retuning.
3265 if (tegra_host->tuning_status == TUNING_STATUS_DONE) {
3266 err = sdhci_tegra_verify_best_tap(sdhci);
3268 dev_err(mmc_dev(sdhci->mmc),
3269 "Prev best tap failed. Re-running tuning\n");
3270 force_retuning = true;
3276 if (tegra_host->force_retune == true) {
3277 force_retuning = true;
3278 tegra_host->force_retune = false;
3281 tegra_host->tuning_status = 0;
3282 err = sdhci_tegra_get_tuning_constraints(sdhci, force_retuning);
3284 dev_err(mmc_dev(sdhci->mmc),
3285 "Failed to get tuning constraints\n");
3289 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
3290 tuning_data = &tegra_host->tuning_data[i];
3291 if (tuning_data->tuning_done && !force_retuning)
3294 SDHCI_TEGRA_DBG("%s: Setting tuning freq%d\n",
3295 mmc_hostname(sdhci->mmc), tuning_data->freq_hz);
3296 tegra_sdhci_set_clock(sdhci, tuning_data->freq_hz);
3298 SDHCI_TEGRA_DBG("%s: Calculating estimated tuning values\n",
3299 mmc_hostname(sdhci->mmc));
3300 err = calculate_estimated_tuning_values(tegra_host->speedo,
3301 tuning_data, tegra_host->boot_vcore_mv);
3305 SDHCI_TEGRA_DBG("Running tuning...\n");
3306 err = sdhci_tegra_run_tuning(sdhci, tuning_data);
3310 SDHCI_TEGRA_DBG("calculating best tap value\n");
3311 err = sdhci_tegra_calculate_best_tap(sdhci, tuning_data);
3315 err = sdhci_tegra_verify_best_tap(sdhci);
3316 if (!err && !set_retuning) {
3317 tuning_data->tuning_done = true;
3318 tegra_host->tuning_status |= TUNING_STATUS_DONE;
3320 tegra_host->tuning_status |= TUNING_STATUS_RETUNE;
3324 /* Release any override core voltages set */
3325 sdhci_tegra_set_tuning_voltage(sdhci, 0);
3327 /* Enable interrupts. Enable full range for core voltage */
3328 sdhci_writel(sdhci, ier, SDHCI_INT_ENABLE);
3329 sdhci_writel(sdhci, ier, SDHCI_SIGNAL_ENABLE);
3330 mutex_unlock(&tuning_mutex);
3332 SDHCI_TEGRA_DBG("%s: Freq tuning done\n", mmc_hostname(sdhci->mmc));
3333 if (enable_lb_clk) {
3334 misc_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_MISC_CTRL);
3336 /* Tuning is failed and card will try to enumerate in
3337 * Legacy High Speed mode. So, Enable External Loopback
3341 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
3344 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
3346 sdhci_writel(sdhci, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
/*
 * Late-initcall hook: once the system has fully booted, permit the tuning
 * code to override the core voltage and stop pinning the boot voltage.
 */
3351 static int __init sdhci_tegra_enable_vcore_override_tuning(void)
3353 vcore_overrides_allowed = true;
3354 maintain_boot_voltage = false;
3357 late_initcall(sdhci_tegra_enable_vcore_override_tuning);
/*
 * tegra_sdhci_suspend() - host suspend handler (tegra_sdhci_ops.suspend).
 *
 * Gates the controller clock, disables the power rails when a card is
 * present, and arms the card-detect GPIO as a wakeup source unless the
 * platform marks it wakeup-incapable.
 */
3359 static int tegra_sdhci_suspend(struct sdhci_host *sdhci)
3361 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3362 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3364 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
3365 const struct tegra_sdhci_platform_data *plat;
3366 unsigned int cd_irq;
3368 tegra_sdhci_set_clock(sdhci, 0);
3370 /* Disable the power rails if any */
3371 if (tegra_host->card_present) {
3372 err = tegra_sdhci_configure_regulators(tegra_host,
3373 CONFIG_REG_DIS, 0, 0);
3375 dev_err(mmc_dev(sdhci->mmc),
3376 "Regulators disable in suspend failed %d\n", err);
3378 plat = pdev->dev.platform_data;
3379 if (plat && gpio_is_valid(plat->cd_gpio)) {
3380 if (!plat->cd_wakeup_incapable) {
3381 /* Enable wake irq at end of suspend */
3382 cd_irq = gpio_to_irq(plat->cd_gpio);
3383 err = enable_irq_wake(cd_irq);
3385 dev_err(mmc_dev(sdhci->mmc),
3386 "SD card wake-up event registration for irq=%d failed with error: %d\n",
/*
 * tegra_sdhci_resume() - host resume handler (tegra_sdhci_ops.resume).
 *
 * Mirrors tegra_sdhci_suspend(): disables the card-detect wake irq,
 * re-samples card presence (cd gpio is active-low here), restarts the clock
 * at the 400 kHz identification frequency, re-enables the power rails and
 * signal voltage, resets the controller when MMC_PM_KEEP_POWER is set, and
 * finally re-runs pad calibration.
 */
3393 static int tegra_sdhci_resume(struct sdhci_host *sdhci)
3395 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3396 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3397 struct platform_device *pdev;
3398 struct tegra_sdhci_platform_data *plat;
3399 unsigned int signal_voltage = 0;
3401 unsigned int cd_irq;
3403 pdev = to_platform_device(mmc_dev(sdhci->mmc));
3404 plat = pdev->dev.platform_data;
3406 if (plat && gpio_is_valid(plat->cd_gpio)) {
3407 /* disable wake capability at start of resume */
3408 if (!plat->cd_wakeup_incapable) {
3409 cd_irq = gpio_to_irq(plat->cd_gpio);
3410 disable_irq_wake(cd_irq);
/* Card-detect GPIO is active-low: 0 means a card is inserted. */
3412 tegra_host->card_present =
3413 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
3416 /* Setting the min identification clock of freq 400KHz */
3417 tegra_sdhci_set_clock(sdhci, 400000);
3419 /* Enable the power rails if any */
3420 if (tegra_host->card_present) {
3421 err = tegra_sdhci_configure_regulators(tegra_host,
3422 CONFIG_REG_EN, 0, 0);
3424 dev_err(mmc_dev(sdhci->mmc),
3425 "Regulators enable in resume failed %d\n", err);
3428 if (tegra_host->vdd_io_reg) {
/* Pick 1.8V signalling when the OCR mask allows it, else 3.3V. */
3429 if (plat && (plat->mmc_data.ocr_mask &
3430 SDHOST_1V8_OCR_MASK))
3431 signal_voltage = MMC_SIGNAL_VOLTAGE_180;
3433 signal_voltage = MMC_SIGNAL_VOLTAGE_330;
3434 tegra_sdhci_signal_voltage_switch(sdhci,
3439 /* Reset the controller and power on if MMC_KEEP_POWER flag is set*/
3440 if (sdhci->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3441 tegra_sdhci_reset(sdhci, SDHCI_RESET_ALL);
3442 sdhci_writeb(sdhci, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
3445 tegra_sdhci_do_calibration(sdhci, signal_voltage);
/*
 * tegra_sdhci_post_resume() - platform_resume hook.
 *
 * After the core resume path has run, gate the controller clock again if no
 * card/device is actually present but the clock was left enabled.
 */
3451 static void tegra_sdhci_post_resume(struct sdhci_host *sdhci)
3453 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3454 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3456 /* Turn OFF the clocks if the device is not present */
3457 if ((!tegra_host->card_present || !sdhci->mmc->card) &&
3458 tegra_host->clk_enabled)
3459 tegra_sdhci_set_clock(sdhci, 0);
3463 * For tegra specific tuning, core voltage has to be fixed at different
3464 * voltages to get the tap values. Fixing the core voltage during tuning for one
3465 * device might affect transfers of other SDMMC devices. Check if tuning mutex
3466 * is locked before starting a data transfer. The new tuning procedure might
3467 * take at max 1.5s for completion for a single run. Taking DFS into count,
3468 * setting the max timeout for tuning mutex check a 3 secs. Since tuning is
3469 * run only during boot or the first time device is inserted, there wouldn't
3470 * be any delays in cmd/xfer execution once devices enumeration is done.
/*
 * Spin until the global tuning_mutex is free, logging an error if the wait
 * exceeds the timeout budget.  (Loop body with the delay/decrement is not
 * visible in this chunk.)
 */
3472 static void tegra_sdhci_get_bus(struct sdhci_host *sdhci)
3474 unsigned int timeout = 300;
3476 while (mutex_is_locked(&tuning_mutex)) {
3480 dev_err(mmc_dev(sdhci->mmc),
3481 "Tuning mutex locked for long time\n");
3488 * The host/device can be powered off before the retuning request is handled in
3489 * case of SDIDO being off if Wifi is turned off, sd card removal etc. In such
3490 * cases, cancel the pending tuning timer and remove any core voltage
3491 * constraints that are set earlier.
3493 static void tegra_sdhci_power_off(struct sdhci_host *sdhci, u8 power_mode)
3495 int retuning_req_set = 0;
/* A retune is pending if the timer is armed or the retune flag is set. */
3497 retuning_req_set = (timer_pending(&sdhci->tuning_timer) ||
3498 (sdhci->flags & SDHCI_NEEDS_RETUNING));
3500 if (retuning_req_set) {
3501 del_timer_sync(&sdhci->tuning_timer);
/* Drop one boot-voltage hold; release the override when none remain. */
3503 if (boot_volt_req_refcount)
3504 --boot_volt_req_refcount;
3506 if (!boot_volt_req_refcount) {
3507 sdhci_tegra_set_tuning_voltage(sdhci, 0);
3508 SDHCI_TEGRA_DBG("%s: Release override as host is off\n",
3509 mmc_hostname(sdhci->mmc));
/* debugfs getter: report the DFS polling interval, if stats exist. */
3514 static int show_polling_period(void *data, u64 *value)
3516 struct sdhci_host *host = (struct sdhci_host *)data;
3518 if (host->mmc->dev_stats != NULL)
3519 *value = host->mmc->dev_stats->polling_interval;
/* debugfs setter: update the DFS polling interval (clamped to max 1s). */
3524 static int set_polling_period(void *data, u64 value)
3526 struct sdhci_host *host = (struct sdhci_host *)data;
3528 if (host->mmc->dev_stats != NULL) {
3529 /* Limiting the maximum polling period to 1 sec */
3532 host->mmc->dev_stats->polling_interval = value;
/* debugfs getter: report the DFS governor's active-load high threshold. */
3537 static int show_active_load_high_threshold(void *data, u64 *value)
3539 struct sdhci_host *host = (struct sdhci_host *)data;
3540 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3541 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3542 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
3544 if (gov_data != NULL)
3545 *value = gov_data->act_load_high_threshold;
/* debugfs setter: update the governor threshold (a percentage, max 100). */
3550 static int set_active_load_high_threshold(void *data, u64 value)
3552 struct sdhci_host *host = (struct sdhci_host *)data;
3553 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3554 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3555 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
3557 if (gov_data != NULL) {
3558 /* Maximum threshold load percentage is 100.*/
3561 gov_data->act_load_high_threshold = value;
/* debugfs getter: report whether clock gating is currently disabled. */
3567 static int show_disableclkgating_value(void *data, u64 *value)
3569 struct sdhci_host *host = (struct sdhci_host *)data;
3571 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3572 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3573 if (tegra_host != NULL)
3574 *value = tegra_host->dbg_cfg.clk_ungated;
/*
 * debugfs setter: enable/disable MMC clock gating for this host by toggling
 * MMC_CAP2_CLOCK_GATING and re-applying the current ios settings.
 */
3579 static int set_disableclkgating_value(void *data, u64 value)
3581 struct sdhci_host *host = (struct sdhci_host *)data;
3583 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3584 if (pltfm_host != NULL) {
3585 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3586 /* Set the CAPS2 register to reflect
3587 * the clk gating value
3589 if (tegra_host != NULL) {
3591 host->mmc->ops->set_ios(host->mmc,
3593 tegra_host->dbg_cfg.clk_ungated = true;
3595 ~MMC_CAP2_CLOCK_GATING;
3597 tegra_host->dbg_cfg.clk_ungated = false;
3599 MMC_CAP2_CLOCK_GATING;
/*
 * debugfs setter: override the trim delay.  Requires clock gating to be
 * disabled first (and the clock running) so the register write sticks.
 */
3607 static int set_trim_override_value(void *data, u64 value)
3609 struct sdhci_host *host = (struct sdhci_host *)data;
3611 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3612 if (pltfm_host != NULL) {
3613 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3614 if (tegra_host != NULL) {
3615 /* Make sure clock gating is disabled */
3616 if ((tegra_host->dbg_cfg.clk_ungated) &&
3617 (tegra_host->clk_enabled)) {
3618 sdhci_tegra_set_trim_delay(host, value);
3619 tegra_host->dbg_cfg.trim_val =
3622 pr_info("%s: Disable clock gating before setting value\n",
3623 mmc_hostname(host->mmc));
/* debugfs getter: report the last trim delay set via the override. */
3631 static int show_trim_override_value(void *data, u64 *value)
3633 struct sdhci_host *host = (struct sdhci_host *)data;
3635 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3636 if (pltfm_host != NULL) {
3637 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3638 if (tegra_host != NULL)
3639 *value = tegra_host->dbg_cfg.trim_val;
/* debugfs getter: report the last tap delay set via the override. */
3645 static int show_tap_override_value(void *data, u64 *value)
3647 struct sdhci_host *host = (struct sdhci_host *)data;
3649 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3650 if (pltfm_host != NULL) {
3651 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3652 if (tegra_host != NULL)
3653 *value = tegra_host->dbg_cfg.tap_val;
/*
 * debugfs setter: override the tap delay.  Like the trim override, this
 * requires clock gating disabled and the clock running.
 */
3659 static int set_tap_override_value(void *data, u64 value)
3661 struct sdhci_host *host = (struct sdhci_host *)data;
3663 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3664 if (pltfm_host != NULL) {
3665 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3666 if (tegra_host != NULL) {
3667 /* Make sure clock gating is disabled */
3668 if ((tegra_host->dbg_cfg.clk_ungated) &&
3669 (tegra_host->clk_enabled)) {
3670 sdhci_tegra_set_tap_delay(host, value);
3671 tegra_host->dbg_cfg.tap_val = value;
3673 pr_info("%s: Disable clock gating before setting value\n",
3674 mmc_hostname(host->mmc));
/* u64 debugfs file operations wrapping the show/set accessors above. */
3681 DEFINE_SIMPLE_ATTRIBUTE(sdhci_polling_period_fops, show_polling_period,
3682 set_polling_period, "%llu\n");
3683 DEFINE_SIMPLE_ATTRIBUTE(sdhci_active_load_high_threshold_fops,
3684 show_active_load_high_threshold,
3685 set_active_load_high_threshold, "%llu\n");
3686 DEFINE_SIMPLE_ATTRIBUTE(sdhci_disable_clkgating_fops,
3687 show_disableclkgating_value,
3688 set_disableclkgating_value, "%llu\n");
3689 DEFINE_SIMPLE_ATTRIBUTE(sdhci_override_trim_data_fops,
3690 show_trim_override_value,
3691 set_trim_override_value, "%llu\n");
3692 DEFINE_SIMPLE_ATTRIBUTE(sdhci_override_tap_data_fops,
3693 show_tap_override_value,
3694 set_tap_override_value, "%llu\n");
/*
 * sdhci_tegra_error_stats_debugfs() - build the per-host debugfs tree.
 *
 * Creates <dev-name>/error_stats, dfs_stats_dir/{dfs_stats,polling_period,
 * active_load_high_threshold}, override_data/{clk_gate_disabled,tap_value,
 * trim_value} and, when delayed clock gating is enabled, clk_gate_tmout_ticks.
 * On any failure the source line is recorded in saved_line and the whole
 * tree is torn down (error path at the bottom).
 */
3696 static void sdhci_tegra_error_stats_debugfs(struct sdhci_host *host)
3698 struct dentry *root = host->debugfs_root;
3699 struct dentry *dfs_root;
3700 unsigned saved_line;
3703 root = debugfs_create_dir(dev_name(mmc_dev(host->mmc)), NULL);
3704 if (IS_ERR_OR_NULL(root)) {
3705 saved_line = __LINE__;
3708 host->debugfs_root = root;
3711 dfs_root = debugfs_create_dir("dfs_stats_dir", root);
3712 if (IS_ERR_OR_NULL(dfs_root)) {
3713 saved_line = __LINE__;
3717 if (!debugfs_create_file("error_stats", S_IRUSR, root, host,
3718 &sdhci_host_fops)) {
3719 saved_line = __LINE__;
3722 if (!debugfs_create_file("dfs_stats", S_IRUSR, dfs_root, host,
3723 &sdhci_host_dfs_fops)) {
3724 saved_line = __LINE__;
3727 if (!debugfs_create_file("polling_period", 0644, dfs_root, (void *)host,
3728 &sdhci_polling_period_fops)) {
3729 saved_line = __LINE__;
3732 if (!debugfs_create_file("active_load_high_threshold", 0644,
3733 dfs_root, (void *)host,
3734 &sdhci_active_load_high_threshold_fops)) {
3735 saved_line = __LINE__;
/* dfs_root is reused for the second sub-directory. */
3739 dfs_root = debugfs_create_dir("override_data", root);
3740 if (IS_ERR_OR_NULL(dfs_root)) {
3741 saved_line = __LINE__;
3745 if (!debugfs_create_file("clk_gate_disabled", 0644,
3746 dfs_root, (void *)host,
3747 &sdhci_disable_clkgating_fops)) {
3748 saved_line = __LINE__;
3752 if (!debugfs_create_file("tap_value", 0644,
3753 dfs_root, (void *)host,
3754 &sdhci_override_tap_data_fops)) {
3755 saved_line = __LINE__;
3759 if (!debugfs_create_file("trim_value", 0644,
3760 dfs_root, (void *)host,
3761 &sdhci_override_trim_data_fops)) {
3762 saved_line = __LINE__;
3765 if (IS_QUIRKS2_DELAYED_CLK_GATE(host)) {
3766 host->clk_gate_tmout_ticks = -1;
3767 if (!debugfs_create_u32("clk_gate_tmout_ticks",
3769 root, (u32 *)&host->clk_gate_tmout_ticks)) {
3770 saved_line = __LINE__;
/* Error path: remove everything created so far and log the failing line. */
3778 debugfs_remove_recursive(root);
3779 host->debugfs_root = NULL;
3781 pr_err("%s %s: Failed to initialize debugfs functionality at line=%d\n", __func__,
3782 mmc_hostname(host->mmc), saved_line);
/*
 * sdhci_handle_boost_mode_tap() - sysfs 'cmd_state' store handler.
 *
 * Parses a tap command (1 = default-voltage tap, 2 = high-voltage tap) and
 * programs the corresponding best tap value for the current tuning data.
 * Only meaningful for UHS SD / HS200 eMMC cards; waits for in-flight data
 * transfers to drain and temporarily ungates the host clock if needed.
 */
3786 static ssize_t sdhci_handle_boost_mode_tap(struct device *dev,
3787 struct device_attribute *attr, const char *buf, size_t count)
3790 struct mmc_card *card;
3791 char *p = (char *)buf;
3792 struct sdhci_host *host = dev_get_drvdata(dev);
3793 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3794 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3795 struct tegra_tuning_data *tuning_data;
3798 bool clk_set_for_tap_prog = false;
3800 tap_cmd = memparse(p, &p);
3802 card = host->mmc->card;
3806 /* if not uhs -- no tuning and no tap value to set */
3807 if (!mmc_sd_card_uhs(card) && !mmc_card_hs200(card))
3810 /* if no change in tap value -- just exit */
3811 if (tap_cmd == tegra_host->tap_cmd)
3814 if ((tap_cmd != TAP_CMD_TRIM_DEFAULT_VOLTAGE) &&
3815 (tap_cmd != TAP_CMD_TRIM_HIGH_VOLTAGE)) {
3816 pr_info("echo 1 > cmd_state # to set normal voltage\n");
3817 pr_info("echo 2 > cmd_state # to set high voltage\n");
3821 tegra_host->tap_cmd = tap_cmd;
3822 tuning_data = sdhci_tegra_get_tuning_data(host, host->max_clk);
3823 /* Check if host clock is enabled */
3824 if (!tegra_host->clk_enabled) {
3825 /* Nothing to do if the host is not powered ON */
3826 if (host->mmc->ios.power_mode != MMC_POWER_ON)
/* Temporarily ungate the clock so the tap register can be written. */
3829 tegra_sdhci_set_clock(host, host->mmc->ios.clock);
3830 clk_set_for_tap_prog = true;
3834 /* Wait for any on-going data transfers */
3835 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
3836 while (present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) {
3841 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
3844 spin_lock(&host->lock);
3846 case TAP_CMD_TRIM_DEFAULT_VOLTAGE:
3847 /* set tap value for voltage range 1.1 to 1.25 */
3848 sdhci_tegra_set_tap_delay(host, tuning_data->best_tap_value);
3851 case TAP_CMD_TRIM_HIGH_VOLTAGE:
3852 /* set tap value for voltage range 1.25 to 1.39 */
3853 sdhci_tegra_set_tap_delay(host,
3854 tuning_data->nom_best_tap_value);
3857 spin_unlock(&host->lock);
/* Re-gate the clock if it was enabled just for programming the tap. */
3858 if (clk_set_for_tap_prog) {
3859 tegra_sdhci_set_clock(host, 0);
3860 clk_set_for_tap_prog = false;
/* sysfs 'cmd_state' show handler: report the currently selected tap_cmd. */
3865 static ssize_t sdhci_show_turbo_mode(struct device *dev,
3866 struct device_attribute *attr, char *buf)
3868 struct sdhci_host *host = dev_get_drvdata(dev);
3869 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3870 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3872 return sprintf(buf, "%d\n", tegra_host->tap_cmd);
/* /sys/.../cmd_state: read = show_turbo_mode, write = handle_boost_mode_tap */
3875 static DEVICE_ATTR(cmd_state, 0644, sdhci_show_turbo_mode,
3876 sdhci_handle_boost_mode_tap);
/*
 * tegra_sdhci_reboot_notify() - reboot notifier callback.
 *
 * Disables the host's regulators on shutdown/reboot so the rails are not
 * left powered across the restart.
 */
3878 static int tegra_sdhci_reboot_notify(struct notifier_block *nb,
3879 unsigned long event, void *data)
3881 struct sdhci_tegra *tegra_host =
3882 container_of(nb, struct sdhci_tegra, reboot_notify);
3888 err = tegra_sdhci_configure_regulators(tegra_host,
3889 CONFIG_REG_DIS, 0, 0);
3891 pr_err("Disable regulator in reboot notify failed %d\n",
/*
 * tegra_sdhci_ios_config_enter() - hook run before set_ios processing.
 *
 * Ensures the controller clock is running (registers are inaccessible
 * otherwise) and switches between the SDR and DDR source clocks when the
 * timing mode crosses the DDR50 boundary, restoring the previous frequency
 * afterwards.
 */
3898 void tegra_sdhci_ios_config_enter(struct sdhci_host *sdhci, struct mmc_ios *ios)
3900 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3901 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3902 struct clk *new_mode_clk;
3903 bool change_clk = false;
3906 * Tegra sdmmc controllers require clock to be enabled for any register
3907 * access. Set the minimum controller clock if no clock is requested.
3909 if (!sdhci->clock && !ios->clock) {
3910 tegra_sdhci_set_clock(sdhci, sdhci->mmc->f_min);
3911 sdhci->clock = sdhci->mmc->f_min;
3912 } else if (ios->clock && (ios->clock != sdhci->clock)) {
3913 tegra_sdhci_set_clock(sdhci, ios->clock);
3917 * Check for DDR50 mode setting and set ddr_clk if not already
3918 * done. Return if only one clock option is available.
3920 if (!tegra_host->ddr_clk || !tegra_host->sdr_clk) {
3923 if ((ios->timing == MMC_TIMING_UHS_DDR50) &&
3924 !tegra_host->is_ddr_clk_set) {
3926 new_mode_clk = tegra_host->ddr_clk;
3927 } else if ((ios->timing != MMC_TIMING_UHS_DDR50) &&
3928 tegra_host->is_ddr_clk_set) {
3930 new_mode_clk = tegra_host->sdr_clk;
/* Swap the pltfm clock with the controller clock gated, then re-enable. */
3934 tegra_sdhci_set_clock(sdhci, 0);
3935 pltfm_host->clk = new_mode_clk;
3936 /* Restore the previous frequency */
3937 tegra_sdhci_set_clock(sdhci, sdhci->max_clk);
3938 tegra_host->is_ddr_clk_set =
3939 !tegra_host->is_ddr_clk_set;
/*
 * tegra_sdhci_ios_config_exit() - hook run after set_ios processing.
 *
 * Handles pending retuning bookkeeping on power-off, then gates the
 * controller clock once all register accesses are complete.
 */
3944 void tegra_sdhci_ios_config_exit(struct sdhci_host *sdhci, struct mmc_ios *ios)
3947 * Do any required handling for retuning requests before powering off
3950 if (ios->power_mode == MMC_POWER_OFF)
3951 tegra_sdhci_power_off(sdhci, ios->power_mode);
3954 * In case of power off, turn off controller clock now as all the
3955 * required register accesses are already done.
3957 if (!ios->clock && !sdhci->mmc->skip_host_clkgate)
3958 tegra_sdhci_set_clock(sdhci, 0);
/*
 * tegra_sdhci_get_drive_strength() - select the card drive strength.
 *
 * Ignores the negotiated host/card capabilities and always returns the
 * platform-data default drive type.
 */
3961 static int tegra_sdhci_get_drive_strength(struct sdhci_host *sdhci,
3962 unsigned int max_dtr, int host_drv, int card_drv)
3964 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3965 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3966 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
3968 return plat->default_drv_type;
/* Tegra implementation of the sdhci core's host operations. */
3971 static const struct sdhci_ops tegra_sdhci_ops = {
3972 .get_ro = tegra_sdhci_get_ro,
3973 .get_cd = tegra_sdhci_get_cd,
3974 .read_l = tegra_sdhci_readl,
3975 .read_w = tegra_sdhci_readw,
3976 .write_l = tegra_sdhci_writel,
3977 .write_w = tegra_sdhci_writew,
3978 .platform_bus_width = tegra_sdhci_buswidth,
3979 .set_clock = tegra_sdhci_set_clock,
3980 .suspend = tegra_sdhci_suspend,
3981 .resume = tegra_sdhci_resume,
3982 .platform_resume = tegra_sdhci_post_resume,
3983 .platform_reset_exit = tegra_sdhci_reset_exit,
3984 .platform_get_bus = tegra_sdhci_get_bus,
3985 .platform_ios_config_enter = tegra_sdhci_ios_config_enter,
3986 .platform_ios_config_exit = tegra_sdhci_ios_config_exit,
3987 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
3988 .switch_signal_voltage = tegra_sdhci_signal_voltage_switch,
3989 .switch_signal_voltage_exit = tegra_sdhci_do_calibration,
3990 .execute_freq_tuning = sdhci_tegra_execute_tuning,
3991 .sd_error_stats = sdhci_tegra_sd_error_stats,
3992 #ifdef CONFIG_MMC_FREQ_SCALING
3993 .dfs_gov_init = sdhci_tegra_freq_gov_init,
3994 .dfs_gov_get_target_freq = sdhci_tegra_get_target_freq,
3996 .get_drive_strength = tegra_sdhci_get_drive_strength,
/* Platform data (quirks + ops) for Tegra114-class controllers. */
3999 static struct sdhci_pltfm_data sdhci_tegra11_pdata = {
4000 .quirks = TEGRA_SDHCI_QUIRKS,
4001 .quirks2 = TEGRA_SDHCI_QUIRKS2,
4002 .ops = &tegra_sdhci_ops,
/* SoC-specific data for Tegra114: NV quirks, parent clocks, tuning tables. */
4005 static struct sdhci_tegra_soc_data soc_data_tegra11 = {
4006 .pdata = &sdhci_tegra11_pdata,
4007 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
4008 NVQUIRK_SET_DRIVE_STRENGTH |
4009 NVQUIRK_SET_TRIM_DELAY |
4010 NVQUIRK_ENABLE_DDR50 |
4011 NVQUIRK_ENABLE_HS200 |
4012 NVQUIRK_INFINITE_ERASE_TIMEOUT |
4013 NVQUIRK_DISABLE_EXTERNAL_LOOPBACK |
4014 NVQUIRK_DISABLE_SDMMC4_CALIB,
4015 .parent_clk_list = {"pll_p", "pll_c"},
4016 .tuning_freq_list = {81600000, 156000000, 200000000},
4017 .t2t_coeffs = t11x_tuning_coeffs,
4018 .t2t_coeffs_count = 3,
4019 .tap_hole_coeffs = t11x_tap_hole_coeffs,
4020 .tap_hole_coeffs_count = 12,
/* Platform data for Tegra124-class controllers; adds 64-bit DMA quirks. */
4023 static struct sdhci_pltfm_data sdhci_tegra12_pdata = {
4024 .quirks = TEGRA_SDHCI_QUIRKS,
4025 .quirks2 = TEGRA_SDHCI_QUIRKS2 |
4026 SDHCI_QUIRK2_HOST_OFF_CARD_ON |
4027 SDHCI_QUIRK2_SUPPORT_64BIT_DMA |
4028 SDHCI_QUIRK2_USE_64BIT_ADDR,
4029 .ops = &tegra_sdhci_ops,
/*
 * SoC-specific data for Tegra124.  Note: the probe path patches this struct
 * in place for automotive-speedo parts (different tuning coefficients).
 */
4032 static struct sdhci_tegra_soc_data soc_data_tegra12 = {
4033 .pdata = &sdhci_tegra12_pdata,
4034 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
4035 NVQUIRK_SET_TRIM_DELAY |
4036 NVQUIRK_ENABLE_DDR50 |
4037 NVQUIRK_ENABLE_HS200 |
4038 NVQUIRK_INFINITE_ERASE_TIMEOUT |
4039 NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD |
4040 NVQUIRK_HIGH_FREQ_TAP_PROCEDURE |
4041 NVQUIRK_SET_CALIBRATION_OFFSETS |
4042 NVQUIRK_DISABLE_EXTERNAL_LOOPBACK,
4043 .parent_clk_list = {"pll_p", "pll_c"},
4044 .tuning_freq_list = {81600000, 136000000, 200000000},
4045 .t2t_coeffs = t12x_tuning_coeffs,
4046 .t2t_coeffs_count = 3,
4047 .tap_hole_coeffs = t12x_tap_hole_coeffs,
4048 .tap_hole_coeffs_count = 13,
4051 static const struct of_device_id sdhci_tegra_dt_match[] = {
4052 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra12 },
4053 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra11 },
4056 MODULE_DEVICE_TABLE(of, sdhci_dt_ids);
/*
 * sdhci_tegra_dt_parse_pdata() - build platform data from the device tree.
 *
 * Allocates a tegra_sdhci_platform_data with devm and fills it from DT
 * properties: cd/wp/power GPIOs, bus width, tap/trim delays, clock limits,
 * uhs_mask, built-in flag, and an OCR mask selected from "mmc-ocr-mask".
 * Returns the populated platform data (NULL paths are elided in this view).
 */
4058 static struct tegra_sdhci_platform_data *sdhci_tegra_dt_parse_pdata(
4059 struct platform_device *pdev)
4062 struct tegra_sdhci_platform_data *plat;
4063 struct device_node *np = pdev->dev.of_node;
4069 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
4071 dev_err(&pdev->dev, "Can't allocate platform data\n");
4075 plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
4076 plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
4077 plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
4079 if (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
/* Optional tuning/clock properties; leave defaults when absent. */
4083 of_property_read_u32(np, "tap-delay", &plat->tap_delay);
4084 of_property_read_u32(np, "trim-delay", &plat->trim_delay);
4085 of_property_read_u32(np, "ddr-clk-limit", &plat->ddr_clk_limit);
4086 of_property_read_u32(np, "max-clk-limit", &plat->max_clk_limit);
4088 of_property_read_u32(np, "uhs_mask", &plat->uhs_mask);
4090 if (of_find_property(np, "built-in", NULL))
4091 plat->mmc_data.built_in = 1;
/* Map the DT "mmc-ocr-mask" selector onto a concrete OCR voltage mask. */
4093 if (!of_property_read_u32(np, "mmc-ocr-mask", &val)) {
4095 plat->mmc_data.ocr_mask = MMC_OCR_1V8_MASK;
4097 plat->mmc_data.ocr_mask = MMC_OCR_2V8_MASK;
4099 plat->mmc_data.ocr_mask = MMC_OCR_3V2_MASK;
4101 plat->mmc_data.ocr_mask = MMC_OCR_3V3_MASK;
4106 static int sdhci_tegra_probe(struct platform_device *pdev)
4108 const struct of_device_id *match;
4109 const struct sdhci_tegra_soc_data *soc_data;
4110 struct sdhci_host *host;
4111 struct sdhci_pltfm_host *pltfm_host;
4112 struct tegra_sdhci_platform_data *plat;
4113 struct sdhci_tegra *tegra_host;
4114 unsigned int low_freq;
4118 match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
4120 soc_data = match->data;
4122 /* Use id tables and remove the following chip defines */
4123 #if defined(CONFIG_ARCH_TEGRA_11x_SOC)
4124 soc_data = &soc_data_tegra11;
4126 soc_data = &soc_data_tegra12;
4130 host = sdhci_pltfm_init(pdev, soc_data->pdata);
4132 /* sdio delayed clock gate quirk in sdhci_host used */
4133 host->quirks2 |= SDHCI_QUIRK2_DELAYED_CLK_GATE;
4136 return PTR_ERR(host);
4138 pltfm_host = sdhci_priv(host);
4140 plat = pdev->dev.platform_data;
4143 plat = sdhci_tegra_dt_parse_pdata(pdev);
4146 dev_err(mmc_dev(host->mmc), "missing platform data\n");
4151 /* FIXME: This is for until dma-mask binding is supported in DT.
4152 * Set coherent_dma_mask for each Tegra SKUs.
4153 * If dma_mask is NULL, set it to coherent_dma_mask. */
4154 if (soc_data == &soc_data_tegra11)
4155 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
4157 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
4159 if (!pdev->dev.dma_mask)
4160 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
4162 tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
4164 dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
4169 tegra_host->plat = plat;
4170 pdev->dev.platform_data = plat;
4172 tegra_host->sd_stat_head = devm_kzalloc(&pdev->dev,
4173 sizeof(struct sdhci_tegra_sd_stats), GFP_KERNEL);
4174 if (!tegra_host->sd_stat_head) {
4175 dev_err(mmc_dev(host->mmc), "failed to allocate sd_stat_head\n");
4180 tegra_host->soc_data = soc_data;
4181 pltfm_host->priv = tegra_host;
4183 for (i = 0; i < ARRAY_SIZE(soc_data->parent_clk_list); i++) {
4184 if (!soc_data->parent_clk_list[i])
4186 if (!strcmp(soc_data->parent_clk_list[i], "pll_c")) {
4187 pll_c = clk_get_sys(NULL, "pll_c");
4188 if (IS_ERR(pll_c)) {
4189 rc = PTR_ERR(pll_c);
4190 dev_err(mmc_dev(host->mmc),
4191 "clk error in getting pll_c: %d\n", rc);
4193 pll_c_rate = clk_get_rate(pll_c);
4196 if (!strcmp(soc_data->parent_clk_list[i], "pll_p")) {
4197 pll_p = clk_get_sys(NULL, "pll_p");
4198 if (IS_ERR(pll_p)) {
4199 rc = PTR_ERR(pll_p);
4200 dev_err(mmc_dev(host->mmc),
4201 "clk error in getting pll_p: %d\n", rc);
4203 pll_p_rate = clk_get_rate(pll_p);
4207 #ifdef CONFIG_MMC_EMBEDDED_SDIO
4208 if (plat->mmc_data.embedded_sdio)
4209 mmc_set_embedded_sdio_data(host->mmc,
4210 &plat->mmc_data.embedded_sdio->cis,
4211 &plat->mmc_data.embedded_sdio->cccr,
4212 plat->mmc_data.embedded_sdio->funcs,
4213 plat->mmc_data.embedded_sdio->num_funcs);
4216 if (gpio_is_valid(plat->power_gpio)) {
4217 rc = gpio_request(plat->power_gpio, "sdhci_power");
4219 dev_err(mmc_dev(host->mmc),
4220 "failed to allocate power gpio\n");
4223 gpio_direction_output(plat->power_gpio, 1);
4226 if (gpio_is_valid(plat->cd_gpio)) {
4227 rc = gpio_request(plat->cd_gpio, "sdhci_cd");
4229 dev_err(mmc_dev(host->mmc),
4230 "failed to allocate cd gpio\n");
4233 gpio_direction_input(plat->cd_gpio);
4235 tegra_host->card_present =
4236 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
4238 } else if (plat->mmc_data.register_status_notify) {
4239 plat->mmc_data.register_status_notify(sdhci_status_notify_cb, host);
4242 if (plat->mmc_data.status) {
4243 plat->mmc_data.card_present = plat->mmc_data.status(mmc_dev(host->mmc));
4246 if (gpio_is_valid(plat->wp_gpio)) {
4247 rc = gpio_request(plat->wp_gpio, "sdhci_wp");
4249 dev_err(mmc_dev(host->mmc),
4250 "failed to allocate wp gpio\n");
4253 gpio_direction_input(plat->wp_gpio);
4257 * If there is no card detect gpio, assume that the
4258 * card is always present.
4260 if (!gpio_is_valid(plat->cd_gpio))
4261 tegra_host->card_present = 1;
4263 if (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK) {
4264 tegra_host->vddio_min_uv = SDHOST_LOW_VOLT_MIN;
4265 tegra_host->vddio_max_uv = SDHOST_LOW_VOLT_MAX;
4266 } else if (plat->mmc_data.ocr_mask & MMC_OCR_2V8_MASK) {
4267 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_2V8;
4268 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
4269 } else if (plat->mmc_data.ocr_mask & MMC_OCR_3V2_MASK) {
4270 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_3V2;
4271 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
4272 } else if (plat->mmc_data.ocr_mask & MMC_OCR_3V3_MASK) {
4273 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_3V3;
4274 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
4277 * Set the minV and maxV to default
4278 * voltage range of 2.7V - 3.6V
4280 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_MIN;
4281 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
4284 tegra_host->vdd_io_reg = regulator_get(mmc_dev(host->mmc),
4286 if (IS_ERR_OR_NULL(tegra_host->vdd_io_reg)) {
4287 dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
4288 "Assuming vddio_sdmmc is not required.\n",
4289 "vddio_sdmmc", PTR_ERR(tegra_host->vdd_io_reg));
4290 tegra_host->vdd_io_reg = NULL;
4292 rc = tegra_sdhci_configure_regulators(tegra_host,
4293 CONFIG_REG_SET_VOLT,
4294 tegra_host->vddio_min_uv,
4295 tegra_host->vddio_max_uv);
4297 dev_err(mmc_dev(host->mmc),
4298 "Init volt(%duV-%duV) setting failed %d\n",
4299 tegra_host->vddio_min_uv,
4300 tegra_host->vddio_max_uv, rc);
4301 regulator_put(tegra_host->vdd_io_reg);
4302 tegra_host->vdd_io_reg = NULL;
4306 tegra_host->vdd_slot_reg = regulator_get(mmc_dev(host->mmc),
4308 if (IS_ERR_OR_NULL(tegra_host->vdd_slot_reg)) {
4309 dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
4310 " Assuming vddio_sd_slot is not required.\n",
4311 "vddio_sd_slot", PTR_ERR(tegra_host->vdd_slot_reg));
4312 tegra_host->vdd_slot_reg = NULL;
4315 if (tegra_host->card_present) {
4316 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_EN,
4319 dev_err(mmc_dev(host->mmc),
4320 "Enable regulators failed in probe %d\n", rc);
4325 tegra_pd_add_device(&pdev->dev);
4326 pm_runtime_enable(&pdev->dev);
4328 /* Get the ddr clock */
4329 tegra_host->ddr_clk = clk_get(mmc_dev(host->mmc), "ddr");
4330 if (IS_ERR(tegra_host->ddr_clk)) {
4331 dev_err(mmc_dev(host->mmc), "ddr clk err\n");
4332 tegra_host->ddr_clk = NULL;
4335 /* Get high speed clock */
4336 tegra_host->sdr_clk = clk_get(mmc_dev(host->mmc), NULL);
4337 if (IS_ERR(tegra_host->sdr_clk)) {
4338 dev_err(mmc_dev(host->mmc), "sdr clk err\n");
4339 tegra_host->sdr_clk = NULL;
4340 /* If both ddr and sdr clks are missing, then fail probe */
4341 if (!tegra_host->ddr_clk && !tegra_host->sdr_clk) {
4342 dev_err(mmc_dev(host->mmc),
4343 "Failed to get ddr and sdr clks\n");
4349 if (tegra_host->sdr_clk) {
4350 pltfm_host->clk = tegra_host->sdr_clk;
4351 tegra_host->is_ddr_clk_set = false;
4353 pltfm_host->clk = tegra_host->ddr_clk;
4354 tegra_host->is_ddr_clk_set = true;
4357 if (clk_get_parent(pltfm_host->clk) == pll_c)
4358 tegra_host->is_parent_pllc = true;
4360 pm_runtime_get_sync(&pdev->dev);
4361 rc = clk_prepare_enable(pltfm_host->clk);
4365 tegra_host->emc_clk = devm_clk_get(mmc_dev(host->mmc), "emc");
4366 if (IS_ERR_OR_NULL(tegra_host->emc_clk)) {
4367 dev_err(mmc_dev(host->mmc), "Can't get emc clk\n");
4368 tegra_host->emc_clk = NULL;
4370 clk_set_rate(tegra_host->emc_clk, SDMMC_EMC_MAX_FREQ);
4373 tegra_host->sclk = devm_clk_get(mmc_dev(host->mmc), "sclk");
4374 if (IS_ERR_OR_NULL(tegra_host->sclk)) {
4375 dev_err(mmc_dev(host->mmc), "Can't get sclk clock\n");
4376 tegra_host->sclk = NULL;
4378 clk_set_rate(tegra_host->sclk, SDMMC_AHB_MAX_FREQ);
4380 pltfm_host->priv = tegra_host;
4381 tegra_host->clk_enabled = true;
4382 host->is_clk_on = tegra_host->clk_enabled;
4383 mutex_init(&tegra_host->set_clock_mutex);
4385 tegra_host->max_clk_limit = plat->max_clk_limit;
4386 tegra_host->ddr_clk_limit = plat->ddr_clk_limit;
4387 tegra_host->instance = pdev->id;
4388 tegra_host->tap_cmd = TAP_CMD_TRIM_DEFAULT_VOLTAGE;
4389 tegra_host->speedo = plat->cpu_speedo;
4390 dev_info(mmc_dev(host->mmc), "Speedo value %d\n", tegra_host->speedo);
4392 /* update t2t and tap_hole for automotive speedo */
4393 if (tegra_is_soc_automotive_speedo() &&
4394 (soc_data == &soc_data_tegra12)) {
4395 soc_data_tegra12.t2t_coeffs = t12x_automotive_tuning_coeffs;
4396 soc_data_tegra12.t2t_coeffs_count =
4397 ARRAY_SIZE(t12x_automotive_tuning_coeffs);
4398 soc_data_tegra12.tap_hole_coeffs =
4399 t12x_automotive_tap_hole_coeffs;
4400 soc_data_tegra12.tap_hole_coeffs_count =
4401 ARRAY_SIZE(t12x_automotive_tap_hole_coeffs);
4402 /* For automotive SDR50 mode POR frequency is 99Mhz */
4403 soc_data_tegra12.tuning_freq_list[0] = 99000000;
4404 soc_data_tegra12.nvquirks |=
4405 NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS;
4406 soc_data_tegra12.tap_hole_margins =
4407 t12x_automotive_tap_hole_margins;
4408 soc_data_tegra12.tap_hole_margins_count =
4409 ARRAY_SIZE(t12x_automotive_tap_hole_margins);
4411 host->mmc->pm_caps |= plat->pm_caps;
4412 host->mmc->pm_flags |= plat->pm_flags;
4414 host->mmc->caps |= MMC_CAP_ERASE;
4415 /* enable 1/8V DDR capable */
4416 host->mmc->caps |= MMC_CAP_1_8V_DDR;
4418 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
4419 host->mmc->caps |= MMC_CAP_SDIO_IRQ;
4420 host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
4421 if (plat->mmc_data.built_in) {
4422 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4424 host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
4426 /* disable access to boot partitions */
4427 host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
4429 #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) && !defined(CONFIG_ARCH_TEGRA_3x_SOC)
4430 if (soc_data->nvquirks & NVQUIRK_ENABLE_HS200)
4431 host->mmc->caps2 |= MMC_CAP2_HS200;
4432 #ifdef CONFIG_TEGRA_FPGA_PLATFORM
4433 /* Enable HS200 mode */
4434 host->mmc->caps2 |= MMC_CAP2_HS200;
4436 host->mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
4437 host->mmc->caps |= MMC_CAP_CMD23;
4438 host->mmc->caps2 |= MMC_CAP2_PACKED_CMD;
4443 * Enable dyamic frequency scaling support only if the platform clock
4444 * limit is higher than the lowest supported frequency by tuning.
4446 for (i = 0; i < TUNING_FREQ_COUNT; i++) {
4447 low_freq = soc_data->tuning_freq_list[i];
4451 if (plat->en_freq_scaling && (plat->max_clk_limit > low_freq))
4452 host->mmc->caps2 |= MMC_CAP2_FREQ_SCALING;
4454 if (!plat->disable_clock_gate)
4455 host->mmc->caps2 |= MMC_CAP2_CLOCK_GATING;
4457 if (plat->nominal_vcore_mv)
4458 tegra_host->nominal_vcore_mv = plat->nominal_vcore_mv;
4459 if (plat->min_vcore_override_mv)
4460 tegra_host->min_vcore_override_mv = plat->min_vcore_override_mv;
4461 if (plat->boot_vcore_mv)
4462 tegra_host->boot_vcore_mv = plat->boot_vcore_mv;
4463 dev_info(mmc_dev(host->mmc),
4464 "Tuning constraints: nom_mv %d, boot_mv %d, min_or_mv %d\n",
4465 tegra_host->nominal_vcore_mv, tegra_host->boot_vcore_mv,
4466 tegra_host->min_vcore_override_mv);
4469 * If nominal voltage is equal to boot voltage, there is no need for
4470 * nominal voltage tuning.
4472 if (plat->nominal_vcore_mv <= plat->boot_vcore_mv)
4473 plat->en_nominal_vcore_tuning = false;
4475 INIT_DELAYED_WORK(&host->delayed_clk_gate_wrk, delayed_clk_gate_cb);
4476 rc = sdhci_add_host(host);
4480 if (gpio_is_valid(plat->cd_gpio)) {
4481 rc = request_threaded_irq(gpio_to_irq(plat->cd_gpio), NULL,
4483 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
4484 mmc_hostname(host->mmc), host);
4486 dev_err(mmc_dev(host->mmc), "request irq error\n");
4487 goto err_cd_irq_req;
4490 sdhci_tegra_error_stats_debugfs(host);
4491 device_create_file(&pdev->dev, &dev_attr_cmd_state);
4493 /* Enable async suspend/resume to reduce LP0 latency */
4494 device_enable_async_suspend(&pdev->dev);
4496 if (plat->power_off_rail) {
4497 tegra_host->reboot_notify.notifier_call =
4498 tegra_sdhci_reboot_notify;
4499 register_reboot_notifier(&tegra_host->reboot_notify);
4501 #ifdef CONFIG_DEBUG_FS
4502 tegra_host->dbg_cfg.tap_val =
4504 tegra_host->dbg_cfg.trim_val =
4505 plat->ddr_trim_delay;
4506 tegra_host->dbg_cfg.clk_ungated =
4507 plat->disable_clock_gate;
4512 if (gpio_is_valid(plat->cd_gpio))
4513 gpio_free(plat->cd_gpio);
4515 if (tegra_host->is_ddr_clk_set)
4516 clk_disable_unprepare(tegra_host->ddr_clk);
4518 clk_disable_unprepare(tegra_host->sdr_clk);
4519 pm_runtime_put_sync(&pdev->dev);
4521 if (tegra_host->ddr_clk)
4522 clk_put(tegra_host->ddr_clk);
4523 if (tegra_host->sdr_clk)
4524 clk_put(tegra_host->sdr_clk);
4526 if (gpio_is_valid(plat->wp_gpio))
4527 gpio_free(plat->wp_gpio);
4529 if (gpio_is_valid(plat->cd_gpio))
4530 free_irq(gpio_to_irq(plat->cd_gpio), host);
4532 if (gpio_is_valid(plat->power_gpio))
4533 gpio_free(plat->power_gpio);
4536 sdhci_pltfm_free(pdev);
/*
 * sdhci_tegra_remove() - platform driver .remove hook.
 *
 * Tears down everything sdhci_tegra_probe() set up: unregisters the
 * SDHCI host, disables and releases the regulators, frees the
 * card-detect, write-protect and power GPIOs (plus the card-detect
 * IRQ), drops the clock references and the runtime-PM count, and
 * finally frees the sdhci-pltfm host state.
 */
4540 static int sdhci_tegra_remove(struct platform_device *pdev)
4542 struct sdhci_host *host = platform_get_drvdata(pdev);
4543 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4544 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4545 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
/* An all-ones readback of the interrupt status register is taken to
 * mean the controller is unreachable; pass "dead" to
 * sdhci_remove_host() so it skips touching the hardware. */
4546 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
4549 sdhci_remove_host(host, dead);
/* Best-effort regulator disable; failure is only logged. */
4551 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_DIS, 0, 0);
4553 dev_err(mmc_dev(host->mmc),
4554 "Regulator disable in remove failed %d\n", rc);
/* Drop the regulator references taken at probe time, if present. */
4556 if (tegra_host->vdd_slot_reg)
4557 regulator_put(tegra_host->vdd_slot_reg);
4558 if (tegra_host->vdd_io_reg)
4559 regulator_put(tegra_host->vdd_io_reg);
4561 if (gpio_is_valid(plat->wp_gpio))
4562 gpio_free(plat->wp_gpio);
/* Release the card-detect IRQ before freeing its GPIO. */
4564 if (gpio_is_valid(plat->cd_gpio)) {
4565 free_irq(gpio_to_irq(plat->cd_gpio), host);
4566 gpio_free(plat->cd_gpio);
4569 if (gpio_is_valid(plat->power_gpio))
4570 gpio_free(plat->power_gpio);
/* If the interface clock was left running, stop whichever of the
 * DDR/SDR clocks is currently selected and balance the runtime-PM
 * reference taken during probe. */
4572 if (tegra_host->clk_enabled) {
4573 if (tegra_host->is_ddr_clk_set)
4574 clk_disable_unprepare(tegra_host->ddr_clk);
/* presumably the else branch disabling the SDR clock — confirm in full file */
4576 clk_disable_unprepare(tegra_host->sdr_clk);
4577 pm_runtime_put_sync(&pdev->dev);
/* Drop the clk_get() references obtained in probe. */
4580 if (tegra_host->ddr_clk)
4581 clk_put(tegra_host->ddr_clk);
4582 if (tegra_host->sdr_clk)
4583 clk_put(tegra_host->sdr_clk);
/* EMC/AHB scaling clocks are disabled only if their "on" flags say
 * they were left enabled. */
4585 if (tegra_host->emc_clk && tegra_host->is_sdmmc_emc_clk_on)
4586 clk_disable_unprepare(tegra_host->emc_clk);
4587 if (tegra_host->sclk && tegra_host->is_sdmmc_sclk_on)
4588 clk_disable_unprepare(tegra_host->sclk);
/* The reboot notifier is only registered when the platform asks for
 * rail power-off on reboot; unregister it symmetrically. */
4589 if (plat->power_off_rail)
4590 unregister_reboot_notifier(&tegra_host->reboot_notify);
4592 sdhci_pltfm_free(pdev);
/*
 * Platform-bus glue: binds by driver name or via the device-tree match
 * table and wires up the probe/remove hooks plus the generic
 * sdhci-pltfm power-management callbacks.
 */
4597 static struct platform_driver sdhci_tegra_driver = {
4599 .name = "sdhci-tegra",
4600 .owner = THIS_MODULE,
/* DT compatible-string table; defined earlier in this file. */
4601 .of_match_table = sdhci_tegra_dt_match,
/* Standard sdhci-pltfm suspend/resume ops. */
4602 .pm = SDHCI_PLTFM_PMOPS,
4604 .probe = sdhci_tegra_probe,
4605 .remove = sdhci_tegra_remove,
/* Generates the module init/exit boilerplate that registers and
 * unregisters the platform driver above. */
4608 module_platform_driver(sdhci_tegra_driver);
4610 MODULE_DESCRIPTION("SDHCI driver for Tegra");
4611 MODULE_AUTHOR("Google, Inc.");
4612 MODULE_LICENSE("GPL v2");