2 * drivers/platform/tegra/tegra_cl_dvfs.c
4 * Copyright (c) 2012-2015 NVIDIA Corporation. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 #include <linux/kernel.h>
20 #include <linux/spinlock.h>
21 #include <linux/delay.h>
22 #include <linux/err.h>
24 #include <linux/clk.h>
25 #include <linux/interrupt.h>
26 #include <linux/suspend.h>
27 #include <linux/debugfs.h>
28 #include <linux/seq_file.h>
29 #include <linux/uaccess.h>
30 #include <linux/module.h>
31 #include <linux/platform_device.h>
32 #include <linux/of_platform.h>
33 #include <linux/gpio.h>
34 #include <linux/of_gpio.h>
35 #include <linux/regulator/consumer.h>
36 #include <linux/regulator/tegra-dfll-bypass-regulator.h>
37 #include <linux/tegra-soc.h>
38 #include <linux/pinctrl/pinconf-tegra.h>
40 #include <mach/irqs.h>
42 #include <linux/platform/tegra/tegra_cl_dvfs.h>
43 #include <linux/platform/tegra/clock.h>
44 #include <linux/platform/tegra/dvfs.h>
46 #include "tegra_simon.h"
50 #define CL_DVFS_CTRL 0x00
51 #define CL_DVFS_CONFIG 0x04
52 #define CL_DVFS_CONFIG_DIV_MASK 0xff
54 #define CL_DVFS_PARAMS 0x08
55 #define CL_DVFS_PARAMS_CG_SCALE (0x1 << 24)
56 #define CL_DVFS_PARAMS_FORCE_MODE_SHIFT 22
57 #define CL_DVFS_PARAMS_FORCE_MODE_MASK (0x3 << CL_DVFS_PARAMS_FORCE_MODE_SHIFT)
58 #define CL_DVFS_PARAMS_CF_PARAM_SHIFT 16
59 #define CL_DVFS_PARAMS_CF_PARAM_MASK (0x3f << CL_DVFS_PARAMS_CF_PARAM_SHIFT)
60 #define CL_DVFS_PARAMS_CI_PARAM_SHIFT 8
61 #define CL_DVFS_PARAMS_CI_PARAM_MASK (0x7 << CL_DVFS_PARAMS_CI_PARAM_SHIFT)
62 #define CL_DVFS_PARAMS_CG_PARAM_SHIFT 0
63 #define CL_DVFS_PARAMS_CG_PARAM_MASK (0xff << CL_DVFS_PARAMS_CG_PARAM_SHIFT)
65 #define CL_DVFS_TUNE0 0x0c
66 #define CL_DVFS_TUNE1 0x10
68 #define CL_DVFS_FREQ_REQ 0x14
69 #define CL_DVFS_FREQ_REQ_FORCE_ENABLE (0x1 << 28)
70 #define CL_DVFS_FREQ_REQ_FORCE_SHIFT 16
71 #define CL_DVFS_FREQ_REQ_FORCE_MASK (0xfff << CL_DVFS_FREQ_REQ_FORCE_SHIFT)
72 #define FORCE_MAX 2047
73 #define FORCE_MIN -2048
74 #define CL_DVFS_FREQ_REQ_SCALE_SHIFT 8
75 #define CL_DVFS_FREQ_REQ_SCALE_MASK (0xff << CL_DVFS_FREQ_REQ_SCALE_SHIFT)
77 #define CL_DVFS_FREQ_REQ_FREQ_VALID (0x1 << 7)
78 #define CL_DVFS_FREQ_REQ_FREQ_SHIFT 0
79 #define CL_DVFS_FREQ_REQ_FREQ_MASK (0x7f << CL_DVFS_FREQ_REQ_FREQ_SHIFT)
82 #define CL_DVFS_SCALE_RAMP 0x18
84 #define CL_DVFS_DROOP_CTRL 0x1c
85 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT 16
86 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK \
87 (0xff << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT)
88 #define CL_DVFS_DROOP_CTRL_CUT_SHIFT 8
89 #define CL_DVFS_DROOP_CTRL_CUT_MASK (0xf << CL_DVFS_DROOP_CTRL_CUT_SHIFT)
90 #define CL_DVFS_DROOP_CTRL_RAMP_SHIFT 0
91 #define CL_DVFS_DROOP_CTRL_RAMP_MASK (0xff << CL_DVFS_DROOP_CTRL_RAMP_SHIFT)
93 #define CL_DVFS_OUTPUT_CFG 0x20
94 #define CL_DVFS_OUTPUT_CFG_I2C_ENABLE (0x1 << 30)
95 #define CL_DVFS_OUTPUT_CFG_SAFE_SHIFT 24
96 #define CL_DVFS_OUTPUT_CFG_SAFE_MASK \
97 (OUT_MASK << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT)
98 #define CL_DVFS_OUTPUT_CFG_MAX_SHIFT 16
99 #define CL_DVFS_OUTPUT_CFG_MAX_MASK \
100 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MAX_SHIFT)
101 #define CL_DVFS_OUTPUT_CFG_MIN_SHIFT 8
102 #define CL_DVFS_OUTPUT_CFG_MIN_MASK \
103 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MIN_SHIFT)
104 #define CL_DVFS_OUTPUT_CFG_PWM_DELTA (0x1 << 7)
105 #define CL_DVFS_OUTPUT_CFG_PWM_ENABLE (0x1 << 6)
106 #define CL_DVFS_OUTPUT_CFG_PWM_DIV_SHIFT 0
107 #define CL_DVFS_OUTPUT_CFG_PWM_DIV_MASK \
108 (OUT_MASK << CL_DVFS_OUTPUT_CFG_PWM_DIV_SHIFT)
110 #define CL_DVFS_OUTPUT_FORCE 0x24
111 #define CL_DVFS_OUTPUT_FORCE_ENABLE (0x1 << 6)
112 #define CL_DVFS_OUTPUT_FORCE_VALUE_SHIFT 0
113 #define CL_DVFS_OUTPUT_FORCE_VALUE_MASK \
114 (OUT_MASK << CL_DVFS_OUTPUT_FORCE_VALUE_SHIFT)
116 #define CL_DVFS_MONITOR_CTRL 0x28
117 #define CL_DVFS_MONITOR_CTRL_DISABLE 0
118 #define CL_DVFS_MONITOR_CTRL_OUT 5
119 #define CL_DVFS_MONITOR_CTRL_FREQ 6
120 #define CL_DVFS_MONITOR_DATA 0x2c
121 #define CL_DVFS_MONITOR_DATA_NEW (0x1 << 16)
122 #define CL_DVFS_MONITOR_DATA_MASK 0xFFFF
124 #define CL_DVFS_I2C_CFG 0x40
125 #define CL_DVFS_I2C_CFG_ARB_ENABLE (0x1 << 20)
126 #define CL_DVFS_I2C_CFG_HS_CODE_SHIFT 16
127 #define CL_DVFS_I2C_CFG_HS_CODE_MASK (0x7 << CL_DVFS_I2C_CFG_HS_CODE_SHIFT)
128 #define CL_DVFS_I2C_CFG_PACKET_ENABLE (0x1 << 15)
129 #define CL_DVFS_I2C_CFG_SIZE_SHIFT 12
130 #define CL_DVFS_I2C_CFG_SIZE_MASK (0x7 << CL_DVFS_I2C_CFG_SIZE_SHIFT)
131 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_10 (0x1 << 10)
132 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT 0
133 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_MASK \
134 (0x3ff << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT)
136 #define CL_DVFS_I2C_VDD_REG_ADDR 0x44
137 #define CL_DVFS_I2C_STS 0x48
138 #define CL_DVFS_I2C_STS_I2C_LAST_SHIFT 1
139 #define CL_DVFS_I2C_STS_I2C_REQ_PENDING 0x1
141 #define CL_DVFS_INTR_STS 0x5c
142 #define CL_DVFS_INTR_EN 0x60
143 #define CL_DVFS_INTR_MIN_MASK 0x1
144 #define CL_DVFS_INTR_MAX_MASK 0x2
146 #define CL_DVFS_CC4_HVC 0x74
147 #define CL_DVFS_CC4_HVC_CTRL_SHIFT 0
148 #define CL_DVFS_CC4_HVC_CTRL_MASK (0x3 << CL_DVFS_CC4_HVC_CTRL_SHIFT)
149 #define CL_DVFS_CC4_HVC_FORCE_VAL_SHIFT 2
150 #define CL_DVFS_CC4_HVC_FORCE_VAL_MASK \
151 (OUT_MASK << CL_DVFS_CC4_HVC_FORCE_VAL_SHIFT)
152 #define CL_DVFS_CC4_HVC_FORCE_EN (0x1 << 8)
154 #define CL_DVFS_I2C_CNTRL 0x100
155 #define CL_DVFS_I2C_CLK_DIVISOR 0x16c
156 #define CL_DVFS_I2C_CLK_DIVISOR_MASK 0xffff
157 #define CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT 16
158 #define CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT 0
160 #define CL_DVFS_OUTPUT_LUT 0x200
162 #define CL_DVFS_APERTURE 0x400
164 #define IS_I2C_OFFS(offs) \
165 ((((offs) >= CL_DVFS_I2C_CFG) && ((offs) <= CL_DVFS_INTR_EN)) || \
166 ((offs) >= CL_DVFS_I2C_CNTRL))
168 #define CL_DVFS_CALIBR_TIME 40000
169 #define CL_DVFS_OUTPUT_PENDING_TIMEOUT 1000
170 #define CL_DVFS_OUTPUT_RAMP_DELAY 100
171 #define CL_DVFS_TUNE_HIGH_DELAY 2000
173 #define CL_DVFS_TUNE_HIGH_MARGIN_MV 20
174 #define CL_DVFS_CAP_GUARD_BAND_STEPS 2
/*
 * Control-loop mode of the CL-DVFS (DFLL) block. Values above DISABLED are
 * written to hardware as (mode - 1) -- see set_mode() below.
 * NOTE(review): this extract drops interior lines (the fused original line
 * numbers jump), so enum/struct closing braces and several members are not
 * visible here.
 */
176 enum tegra_cl_dvfs_ctrl_mode {
177 TEGRA_CL_DVFS_UNINITIALIZED = 0,
178 TEGRA_CL_DVFS_DISABLED = 1,
179 TEGRA_CL_DVFS_OPEN_LOOP = 2,
180 TEGRA_CL_DVFS_CLOSED_LOOP = 3,
184 * enum tegra_cl_dvfs_tune_state - state of the voltage-regime switching code
185 * @TEGRA_CL_DVFS_TUNE_LOW: DFLL is in the low-voltage range (or open-loop mode)
186 * @TEGRA_CL_DVFS_TUNE_HIGH_REQUEST: waiting for DFLL I2C output to reach high
187 * @TEGRA_CL_DVFS_TUNE_HIGH_REQUEST_2: waiting for PMIC to react to DFLL output
188 * @TEGRA_CL_DVFS_TUNE_HIGH: DFLL in the high-voltage range
190 * These are software states, not hardware states.
192 enum tegra_cl_dvfs_tune_state {
193 TEGRA_CL_DVFS_TUNE_LOW = 0,
194 TEGRA_CL_DVFS_TUNE_HIGH_REQUEST,
195 TEGRA_CL_DVFS_TUNE_HIGH_REQUEST_2,
196 TEGRA_CL_DVFS_TUNE_HIGH,
/* Pending DFLL rate request (body not visible in this extract). */
199 struct dfll_rate_req {
/*
 * Published voltage limits; vmin/vmax are protected by seqcounts so readers
 * can sample them without taking the clock lock (see set_output_limits()).
 */
207 struct voltage_limits {
210 seqcount_t vmin_seqcnt;
211 seqcount_t vmax_seqcnt;
/*
 * Main driver state: platform data, clocks, thermal cooling devices,
 * voltage mapping tables, tuning/calibration state and timers.
 */
215 struct tegra_cl_dvfs {
218 struct tegra_cl_dvfs_platform_data *p_data;
220 struct dvfs *safe_dvfs;
221 struct thermal_cooling_device *vmax_cdev;
222 struct thermal_cooling_device *vmin_cdev;
223 struct work_struct init_cdev_work;
228 struct clk *dfll_clk;
229 unsigned long ref_rate;
230 unsigned long i2c_rate;
232 /* output voltage mapping:
233 * legacy dvfs table index -to- cl_dvfs output LUT index
234 * cl_dvfs output LUT index -to- PMU value/voltage pair ptr
236 u8 clk_dvfs_map[MAX_DVFS_FREQS];
237 struct voltage_reg_map *out_map[MAX_CL_DVFS_VOLTAGES];
240 u8 rail_relations_out_min;
245 u8 tune_high_out_start;
246 u8 tune_high_out_min;
247 unsigned long tune_high_dvco_rate_min;
248 unsigned long tune_high_target_rate_min;
251 u8 thermal_out_caps[MAX_THERMAL_LIMITS];
252 u8 thermal_out_floors[MAX_THERMAL_LIMITS+1];
253 int thermal_mv_floors[MAX_THERMAL_LIMITS];
255 int therm_floors_num;
256 unsigned long dvco_rate_floors[MAX_THERMAL_LIMITS+1];
257 unsigned long dvco_rate_min;
259 struct voltage_limits v_limits;
263 u32 suspended_force_out;
266 struct dfll_rate_req last_req;
267 enum tegra_cl_dvfs_tune_state tune_state;
268 enum tegra_cl_dvfs_ctrl_mode mode;
270 struct hrtimer tune_timer;
275 struct timer_list calibration_timer;
276 unsigned long calibration_delay;
277 ktime_t last_calibration;
278 unsigned long calibration_range_min;
279 unsigned long calibration_range_max;
281 struct notifier_block simon_grade_nb;
284 struct tegra_cl_dvfs_soc_match_data {
288 /* Conversion macros (different scales for frequency request, and monitored
289 rate is not a typo) */
/* Frequency request granularity: ref_rate/2 Hz per LSB of the request. */
290 #define RATE_STEP(cld) ((cld)->ref_rate / 2)
291 #define GET_REQUEST_FREQ(rate, ref_rate) ((rate) / ((ref_rate) / 2))
292 #define GET_REQUEST_RATE(freq, ref_rate) ((freq) * ((ref_rate) / 2))
/* Monitor counts at ref_rate/4 Hz per LSB -- intentionally a different scale. */
293 #define GET_MONITORED_RATE(freq, ref_rate) ((freq) * ((ref_rate) / 4))
294 #define GET_DROOP_FREQ(rate, ref_rate) ((rate) / ((ref_rate) / 4))
295 #define ROUND_MIN_RATE(rate, ref_rate) \
296 (DIV_ROUND_UP(rate, (ref_rate) / 2) * ((ref_rate) / 2))
297 #define GET_DIV(ref_rate, out_rate, scale) \
298 DIV_ROUND_UP((ref_rate), (out_rate) * (scale))
/* Control-loop sample period in microseconds. */
299 #define GET_SAMPLE_PERIOD(cld) \
300 DIV_ROUND_UP(1000000, (cld)->p_data->cfg_param->sample_rate)
/* Human-readable names for enum tegra_cl_dvfs_ctrl_mode (debug/sysfs use). */
302 static const char *mode_name[] = {
303 [TEGRA_CL_DVFS_UNINITIALIZED] = "uninitialized",
304 [TEGRA_CL_DVFS_DISABLED] = "disabled",
305 [TEGRA_CL_DVFS_OPEN_LOOP] = "open_loop",
306 [TEGRA_CL_DVFS_CLOSED_LOOP] = "closed_loop",
310 * In some h/w configurations CL-DVFS module registers have two different
311 * address bases: one for I2C control/status registers, and one for all other
312 * registers. Registers accessors are separated below accordingly just by
313 * comparing register offset with start of I2C section - CL_DVFS_I2C_CFG. One
314 * special case is CL_DVFS_OUTPUT_CFG register: when I2C controls are separated
315 * I2C_ENABLE bit of this register is accessed from I2C base, and all other bits
316 * are accessed from the main base.
/*
 * NOTE(review): interior lines of these helpers (braces, some statements)
 * are missing from this extract -- the fused original line numbers jump.
 */
/* Read a register from the I2C-control address base. */
318 static inline u32 cl_dvfs_i2c_readl(struct tegra_cl_dvfs *cld, u32 offs)
320 return __raw_readl(cld->cl_i2c_base + offs);
/* Write a register in the I2C-control address base. */
322 static inline void cl_dvfs_i2c_writel(struct tegra_cl_dvfs *cld,
325 __raw_writel(val, cld->cl_i2c_base + offs);
/* Dummy read of I2C_CFG to flush posted writes on the I2C base. */
327 static inline void cl_dvfs_i2c_wmb(struct tegra_cl_dvfs *cld)
329 cl_dvfs_i2c_readl(cld, CL_DVFS_I2C_CFG);
/* Generic read: route to the I2C base for offsets in the I2C section. */
333 static inline u32 cl_dvfs_readl(struct tegra_cl_dvfs *cld, u32 offs)
335 if (IS_I2C_OFFS(offs))
336 return cl_dvfs_i2c_readl(cld, offs);
337 return __raw_readl((void *)cld->cl_base + offs);
/* Generic write: route to the I2C base for offsets in the I2C section. */
339 static inline void cl_dvfs_writel(struct tegra_cl_dvfs *cld, u32 val, u32 offs)
341 if (IS_I2C_OFFS(offs)) {
342 cl_dvfs_i2c_writel(cld, val, offs);
345 __raw_writel(val, (void *)cld->cl_base + offs);
/* Dummy read of CTRL to flush posted writes on the main base. */
347 static inline void cl_dvfs_wmb(struct tegra_cl_dvfs *cld)
349 cl_dvfs_readl(cld, CL_DVFS_CTRL);
/* Select monitor source (OUT or FREQ); a settle delay follows the write. */
353 static inline void switch_monitor(struct tegra_cl_dvfs *cld, u32 selector)
355 /* delay to make sure selector has switched */
356 cl_dvfs_writel(cld, selector, CL_DVFS_MONITOR_CTRL);
/* Clear FREQ_VALID so the current frequency request is ignored by h/w. */
361 static inline void invalidate_request(struct tegra_cl_dvfs *cld)
363 u32 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
364 val &= ~CL_DVFS_FREQ_REQ_FREQ_VALID;
365 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
/* Program the output clock-skipper scale field of the frequency request. */
369 static inline void set_request_scale(struct tegra_cl_dvfs *cld, u8 scale)
371 u32 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
372 val &= ~CL_DVFS_FREQ_REQ_SCALE_MASK;
373 val |= scale << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
374 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
/*
 * Set the forced output value while preserving the FORCE_ENABLE bit;
 * returns a read-back of the register (also flushes the posted write).
 */
378 static inline u32 output_force_set_val(struct tegra_cl_dvfs *cld, u8 out_val)
380 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
381 val = (val & CL_DVFS_OUTPUT_FORCE_ENABLE) | (out_val & OUT_MASK);
382 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_FORCE);
383 return cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
/* Enable forced output; @val is the caller-prepared OUTPUT_FORCE content. */
386 static inline void output_force_enable(struct tegra_cl_dvfs *cld, u32 val)
388 val |= CL_DVFS_OUTPUT_FORCE_ENABLE;
389 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_FORCE);
/* Disable forced output, returning control to the closed loop. */
393 static inline void output_force_disable(struct tegra_cl_dvfs *cld)
395 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
396 val &= ~CL_DVFS_OUTPUT_FORCE_ENABLE;
397 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_FORCE);
402 * Reading monitor data concurrently with the update may render intermediate
403 * (neither "old" nor "new") values. Synchronization with the "rising edge"
404 * of DATA_NEW makes it very unlikely, but still possible. Use simple filter:
405 * compare 2 consecutive readings for data consistency within 2 LSb range.
406 * Return error otherwise. On the platform that does not allow to use DATA_NEW
407 * at all check for consistency of consecutive reads is the only protection.
/* Returns 0 and leaves a consistent sample in *data, non-zero on failure. */
409 static int filter_monitor_data(struct tegra_cl_dvfs *cld, u32 *data)
411 u32 val = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
412 CL_DVFS_MONITOR_DATA_MASK;
413 *data &= CL_DVFS_MONITOR_DATA_MASK;
414 if (abs(*data - val) <= 2)
/* First comparison failed: take one more reading and retry the filter. */
417 *data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
418 CL_DVFS_MONITOR_DATA_MASK;
419 if (abs(*data - val) <= 2)
/*
 * Spin until DATA_NEW is observed (unless the platform cannot use DATA_NEW);
 * the mode check lets the loop exit if the module gets disabled meanwhile.
 */
425 static inline void wait_data_new(struct tegra_cl_dvfs *cld, u32 *data)
427 cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA); /* clear data new */
428 if (!(cld->p_data->flags & TEGRA_CL_DVFS_DATA_NEW_NO_USE)) {
430 *data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
431 } while (!(*data & CL_DVFS_MONITOR_DATA_NEW) &&
432 (cld->mode > TEGRA_CL_DVFS_DISABLED));
/* Switch monitor to OUT and return the raw monitored output value. */
436 static inline u32 get_last_output(struct tegra_cl_dvfs *cld)
438 switch_monitor(cld, CL_DVFS_MONITOR_CTRL_OUT);
439 return cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
440 CL_DVFS_MONITOR_DATA_MASK;
443 /* out monitored before forced value applied - return the latter if enabled */
444 static inline u32 cl_dvfs_get_output(struct tegra_cl_dvfs *cld)
446 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
447 if (val & CL_DVFS_OUTPUT_FORCE_ENABLE)
448 return val & OUT_MASK;
/* Otherwise read a filtered monitor sample of the actual output. */
450 switch_monitor(cld, CL_DVFS_MONITOR_CTRL_OUT);
451 wait_data_new(cld, &val);
452 return filter_monitor_data(cld, &val) ? : val;
/* True when the PMU interface is I2C (as opposed to PWM). */
455 static inline bool is_i2c(struct tegra_cl_dvfs *cld)
457 return cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C;
/* Lowest output value: LUT index 0 for I2C, first register value for PWM. */
460 static inline u8 get_output_bottom(struct tegra_cl_dvfs *cld)
462 return is_i2c(cld) ? 0 : cld->out_map[0]->reg_value;
/* Highest output value: last LUT index for I2C, last register value for PWM. */
465 static inline u8 get_output_top(struct tegra_cl_dvfs *cld)
467 return is_i2c(cld) ? cld->num_voltages - 1 :
468 cld->out_map[cld->num_voltages - 1]->reg_value;
/* Convert an output value to millivolts via the appropriate voltage map. */
471 static inline int get_mv(struct tegra_cl_dvfs *cld, u32 out_val)
473 return is_i2c(cld) ? cld->out_map[out_val]->reg_uV / 1000 :
474 cld->p_data->vdd_map[out_val].reg_uV / 1000;
/* True when the last value sent to the PMIC is at/above the current Vmin. */
477 static inline bool is_vmin_delivered(struct tegra_cl_dvfs *cld)
480 u32 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
481 val = (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK;
482 return val >= cld->lut_min;
484 /* PWM cannot be stalled */
/*
 * Set/clear tristate on the PWM output pingroup via the Tegra pinctrl driver.
 * Returns an error and logs if the pinctrl device is absent or the config
 * call fails. NOTE(review): some lines of this function are missing from
 * this extract (fused original line numbers jump).
 */
488 static int tegra_pinctrl_set_tristate(struct tegra_cl_dvfs_platform_data *d,
489 int group_sel, int tristate)
492 unsigned long config = TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_TRISTATE,
494 if (!d->u.pmu_pwm.pinctrl_dev) {
495 pr_err("%s(): ERROR: No Tegra pincontrol driver\n", __func__);
499 ret = pinctrl_set_config_for_group_sel_any_context(
500 d->u.pmu_pwm.pinctrl_dev, group_sel, config);
502 pr_err("%s(): ERROR: pinconfig for pin group %d failed: %d\n",
503 __func__, group_sel, ret);
/*
 * Enable the voltage output interface: sets I2C_ENABLE for the I2C PMU
 * interface, or drives the buffer GPIO / un-tristates the pingroup and sets
 * PWM_ENABLE for the PWM interface variants.
 */
507 static int output_enable(struct tegra_cl_dvfs *cld)
510 u32 val = cl_dvfs_i2c_readl(cld, CL_DVFS_OUTPUT_CFG);
511 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
512 cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
513 cl_dvfs_i2c_wmb(cld);
/* PWM path below (branch structure partially missing from this extract). */
515 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
516 struct tegra_cl_dvfs_platform_data *d = cld->p_data;
517 if (d->u.pmu_pwm.pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_BUFFER) {
518 int gpio = d->u.pmu_pwm.out_gpio;
519 int v = d->u.pmu_pwm.out_enable_high ? 1 : 0;
520 __gpio_set_value(gpio, v);
524 if (d->u.pmu_pwm.pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_DIRECT) {
525 int pg = d->u.pmu_pwm.pwm_pingroup;
526 tegra_pinctrl_set_tristate(d, pg, TEGRA_PIN_DISABLE);
530 val |= CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
531 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/*
 * Disable the PWM voltage output: mirror of the PWM path in output_enable()
 * (buffer GPIO driven to disable level / pingroup tristated), then clear
 * PWM_ENABLE.
 */
538 static int output_disable_pwm(struct tegra_cl_dvfs *cld)
541 struct tegra_cl_dvfs_platform_data *d = cld->p_data;
543 if (d->u.pmu_pwm.pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_BUFFER) {
544 int gpio = d->u.pmu_pwm.out_gpio;
545 int v = d->u.pmu_pwm.out_enable_high ? 0 : 1;
546 __gpio_set_value(gpio, v);
550 if (d->u.pmu_pwm.pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_DIRECT) {
551 int pg = d->u.pmu_pwm.pwm_pingroup;
552 tegra_pinctrl_set_tristate(d, pg, TEGRA_PIN_ENABLE);
556 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
557 val &= ~CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
558 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/*
 * Wait for in-flight I2C transactions to complete, then disable the I2C
 * output. If the pending status never clears within the timeout, disable
 * anyway and report an error. Used when h/w requires a quiet bus before
 * disable (TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET).
 */
563 static noinline int output_flush_disable(struct tegra_cl_dvfs *cld)
567 u32 val = cl_dvfs_i2c_readl(cld, CL_DVFS_OUTPUT_CFG);
569 /* Flush transactions in flight, and then disable */
570 for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
571 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
/* Double-read of pending status to avoid racing a transaction start. */
573 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
574 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
575 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
576 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
577 cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/* Re-check after disabling: a request may have slipped in. */
579 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
580 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
581 return 0; /* no pending rqst */
583 /* Re-enable, continue wait */
584 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
585 cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
591 /* I2C request is still pending - disable, anyway, but report error */
592 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
593 cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
594 cl_dvfs_i2c_wmb(cld);
/*
 * Opposite order to output_flush_disable(): disable the I2C output first,
 * then wait (with timeout) for any in-flight transaction to drain.
 */
598 static noinline int output_disable_flush(struct tegra_cl_dvfs *cld)
602 u32 val = cl_dvfs_i2c_readl(cld, CL_DVFS_OUTPUT_CFG);
604 /* Disable output interface right away */
605 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
606 cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
607 cl_dvfs_i2c_wmb(cld);
609 /* Flush possible transaction in flight */
610 for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
611 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
613 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
614 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
615 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
620 /* I2C request is still pending - report error */
/*
 * Output shutdown step taken BEFORE switching to open-loop mode.
 * PWM: disabled here unless an external idle controller owns the output.
 * I2C: disabled here only when quiet-before-disable is NOT required.
 */
624 static inline int output_disable_ol_prepare(struct tegra_cl_dvfs *cld)
626 /* PWM output control */
629 * Keep PWM running in open loop mode. External idle controller
630 * would take care of switching PWM output off/on if override
633 if (cld->p_data->flags & TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE)
635 return output_disable_pwm(cld);
639 * If cl-dvfs h/w does not require output to be quiet before disable,
640 * s/w can stop I2C communications at any time (including operations
641 * in closed loop mode), and I2C bus integrity is guaranteed even in
642 * case of flush timeout.
644 if (!(cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET)) {
645 int ret = output_disable_flush(cld);
647 pr_debug("cl_dvfs: I2C pending timeout ol_prepare\n");
/*
 * Output shutdown step taken AFTER the switch to open-loop mode:
 * handles the I2C quiet-before-disable case (flush, then disable).
 */
653 static inline int output_disable_post_ol(struct tegra_cl_dvfs *cld)
655 /* PWM output control */
660 * If cl-dvfs h/w requires output to be quiet before disable, s/w
661 * should stop I2C communications only after the switch to open loop
662 * mode, and I2C bus integrity is not guaranteed in case of flush
665 if (cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET) {
666 int ret = output_flush_disable(cld);
668 pr_err("cl_dvfs: I2C pending timeout post_ol\n");
/*
 * Program the h/w control mode; register encoding is (mode - 1).
 * With idle override, the CC4 override mode tracks the active mode but is
 * capped at open loop, with the override force value enabled.
 */
674 static inline void set_mode(struct tegra_cl_dvfs *cld,
675 enum tegra_cl_dvfs_ctrl_mode mode)
678 cl_dvfs_writel(cld, mode - 1, CL_DVFS_CTRL);
680 if (cld->p_data->flags & TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE) {
681 /* Override mode follows active mode up to open loop */
682 u32 val = cl_dvfs_readl(cld, CL_DVFS_CC4_HVC);
683 val &= ~(CL_DVFS_CC4_HVC_CTRL_MASK | CL_DVFS_CC4_HVC_FORCE_EN);
684 if (mode >= TEGRA_CL_DVFS_OPEN_LOOP) {
685 val |= (TEGRA_CL_DVFS_OPEN_LOOP - 1);
686 val |= CL_DVFS_CC4_HVC_FORCE_EN;
688 cl_dvfs_writel(cld, val, CL_DVFS_CC4_HVC);
/*
 * Output cap: the lower of the active thermal cap and the request cap
 * (when @req is provided). Starts from the absolute top output value.
 */
693 static inline u8 get_output_cap(struct tegra_cl_dvfs *cld,
694 struct dfll_rate_req *req)
696 u32 thermal_cap = get_output_top(cld);
698 if (cld->therm_cap_idx && (cld->therm_cap_idx <= cld->therm_caps_num))
699 thermal_cap = cld->thermal_out_caps[cld->therm_cap_idx - 1];
700 if (req && (req->cap < thermal_cap))
/*
 * Output floor: the maximum of the tuning-range minimum (high-range tuning
 * raises it), the thermal floor for the current trip point, and the
 * rail-relations minimum imposed by dependent rails.
 */
705 static inline u8 get_output_min(struct tegra_cl_dvfs *cld)
707 u32 tune_min = get_output_bottom(cld);
708 u32 thermal_min = tune_min;
710 tune_min = cld->tune_state == TEGRA_CL_DVFS_TUNE_LOW ?
711 tune_min : cld->tune_high_out_min;
713 if (cld->therm_floor_idx < cld->therm_floors_num)
714 thermal_min = cld->thermal_out_floors[cld->therm_floor_idx];
716 /* return max of all the possible output min settings */
717 return max_t(u8, max(tune_min, thermal_min),
718 cld->rail_relations_out_min);
/*
 * Fill the output LUT: entries at/below lut_min are clamped to the lut_min
 * voltage, entries at/above lut_max to the lut_max voltage, with the real
 * mapping in between. Ends with an I2C write barrier.
 */
721 static inline void _load_lut(struct tegra_cl_dvfs *cld)
726 val = cld->out_map[cld->lut_min]->reg_value;
727 for (i = 0; i <= cld->lut_min; i++)
728 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
730 for (; i < cld->lut_max; i++) {
731 val = cld->out_map[i]->reg_value;
732 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
735 val = cld->out_map[cld->lut_max]->reg_value;
736 for (; i < cld->num_voltages; i++)
737 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
739 cl_dvfs_i2c_wmb(cld);
/*
 * Reload the LUT, temporarily disabling the I2C output around the update
 * when the h/w does not require a quiet bus (so the LUT is never read by
 * h/w mid-update). Output is re-enabled afterwards.
 */
742 static void cl_dvfs_load_lut(struct tegra_cl_dvfs *cld)
744 u32 val = cl_dvfs_i2c_readl(cld, CL_DVFS_OUTPUT_CFG);
745 bool disable_out_for_load =
746 !(cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET) &&
747 (val & CL_DVFS_OUTPUT_CFG_I2C_ENABLE);
749 if (disable_out_for_load) {
750 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
751 cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
752 cl_dvfs_i2c_wmb(cld);
753 udelay(2); /* 2us (big margin) window for disable propagation */
758 if (disable_out_for_load) {
759 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
760 cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
761 cl_dvfs_i2c_wmb(cld);
/* Record a new s/w tune state (with a debug trace of the transition). */
765 #define set_tune_state(cld, state) \
767 cld->tune_state = state; \
768 pr_debug("%s: set tune state %d\n", __func__, state); \
/* Switch DFLL tuning to the low-voltage range. */
771 static inline void tune_low(struct tegra_cl_dvfs *cld)
773 /* a must order: 1st tune dfll low, then tune trimmers low */
774 cl_dvfs_writel(cld, cld->tune0_low, CL_DVFS_TUNE0);
776 if (cld->safe_dvfs->dfll_data.tune_trimmers)
777 cld->safe_dvfs->dfll_data.tune_trimmers(false);
/* Switch DFLL tuning to the high-voltage range (reverse ordering). */
780 static inline void tune_high(struct tegra_cl_dvfs *cld)
782 /* a must order: 1st tune trimmers high, then tune dfll high */
783 if (cld->safe_dvfs->dfll_data.tune_trimmers)
784 cld->safe_dvfs->dfll_data.tune_trimmers(true);
785 cl_dvfs_writel(cld, cld->tune0_high, CL_DVFS_TUNE0);
/*
 * Target tune range for @rate: HIGH once the rate reaches the high-tune
 * threshold, unless tuning must stay low at cold (therm_floor_idx == 0
 * means the coldest trip range here).
 */
789 static inline int cl_tune_target(struct tegra_cl_dvfs *cld, unsigned long rate)
791 bool tune_low_at_cold = cld->safe_dvfs->dfll_data.tune0_low_at_cold;
793 if ((rate >= cld->tune_high_target_rate_min) &&
794 (!tune_low_at_cold || cld->therm_floor_idx))
795 return TEGRA_CL_DVFS_TUNE_HIGH;
796 return TEGRA_CL_DVFS_TUNE_LOW;
/*
 * Apply new output (voltage) limits: publishes vmin/vmax in mV under the
 * respective seqcounts, updates the h/w min/max fields (dynamic OUTPUT_CFG
 * configs) or reloads the LUT, and keeps the CC4 idle-override force value
 * in sync with the new Vmin. No-op while limits are clamped externally.
 */
799 static void set_output_limits(struct tegra_cl_dvfs *cld, u8 out_min, u8 out_max)
801 seqcount_t *vmin_seqcnt = NULL;
802 seqcount_t *vmax_seqcnt = NULL;
804 if (cld->v_limits.clamped)
807 if ((cld->lut_min != out_min) || (cld->lut_max != out_max)) {
808 /* limits update tracking start */
809 if (cld->lut_min != out_min) {
810 vmin_seqcnt = &cld->v_limits.vmin_seqcnt;
811 write_seqcount_begin(vmin_seqcnt);
812 cld->v_limits.vmin = get_mv(cld, out_min);
814 if (cld->lut_max != out_max) {
815 vmax_seqcnt = &cld->v_limits.vmax_seqcnt;
816 write_seqcount_begin(vmax_seqcnt);
817 cld->v_limits.vmax = get_mv(cld, out_max);
820 cld->lut_min = out_min;
821 cld->lut_max = out_max;
822 if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
823 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
824 val &= ~(CL_DVFS_OUTPUT_CFG_MAX_MASK |
825 CL_DVFS_OUTPUT_CFG_MIN_MASK);
826 val |= out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT;
827 val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
828 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
830 cl_dvfs_load_lut(cld);
834 (cld->p_data->flags & TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE)) {
835 /* Override mode force value follows active mode Vmin */
836 u32 val = cl_dvfs_readl(cld, CL_DVFS_CC4_HVC);
837 val &= ~CL_DVFS_CC4_HVC_FORCE_VAL_MASK;
838 val |= out_min << CL_DVFS_CC4_HVC_FORCE_VAL_SHIFT;
839 cl_dvfs_writel(cld, val, CL_DVFS_CC4_HVC);
843 /* limits update tracking end */
845 write_seqcount_end(vmin_seqcnt);
847 write_seqcount_end(vmax_seqcnt);
849 pr_debug("cl_dvfs limits_mV [%d : %d]\n",
850 cld->v_limits.vmin, cld->v_limits.vmax);
854 static void cl_dvfs_set_force_out_min(struct tegra_cl_dvfs *cld);
/*
 * Closed-loop configuration for a new rate request @req: drives the tune
 * state machine (low<->high transitions, with hrtimer-based completion of
 * the high-tune sequence), then computes and applies new output limits.
 * NOTE(review): several interior lines (braces, break statements, part of
 * the limit arithmetic) are missing from this extract.
 */
855 static void set_cl_config(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
857 bool sample_tune_out_last = false;
858 u8 cap_gb = CL_DVFS_CAP_GUARD_BAND_STEPS;
860 u8 out_cap = get_output_cap(cld, req);
861 struct dvfs_rail *rail = cld->safe_dvfs->dvfs_rail;
863 switch (cld->tune_state) {
864 case TEGRA_CL_DVFS_TUNE_LOW:
865 if (cl_tune_target(cld, req->rate) > TEGRA_CL_DVFS_TUNE_LOW) {
866 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH_REQUEST);
867 hrtimer_start(&cld->tune_timer, cld->tune_delay,
869 cl_dvfs_set_force_out_min(cld);
870 sample_tune_out_last = true;
874 case TEGRA_CL_DVFS_TUNE_HIGH:
875 case TEGRA_CL_DVFS_TUNE_HIGH_REQUEST:
876 case TEGRA_CL_DVFS_TUNE_HIGH_REQUEST_2:
877 if (cl_tune_target(cld, req->rate) == TEGRA_CL_DVFS_TUNE_LOW) {
878 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
880 cl_dvfs_set_force_out_min(cld);
888 * Criteria to select new request and output boundaries. Listed in
889 * the order of priorities to resolve conflicts (if any).
891 * 1) out_min is at/above minimum voltage level for current temperature
893 * 2) out_max is at/above PMIC guard-band forced minimum
894 * 3) new request has at least one step room for regulation: request +/-1
895 * within [out_min, out_max] interval
896 * 4) new request is at least CL_DVFS_CAP_GUARD_BAND_STEPS below out_max
897 * 5) - if no other rail depends on DFLL rail, out_max is at/above
898 * minimax level to provide better convergence accuracy for rates
899 * close to tuning range boundaries
900 * - if some other rail depends on DFLL rail, out_max should match
901 * voltage from safe dvfs table used by s/w DVFS on other rails to
902 * resolve dependencies
904 out_min = get_output_min(cld);
905 if (out_cap > (out_min + cap_gb)) {
906 req->output = out_cap - cap_gb;
909 req->output = out_min + 1;
910 out_max = req->output + 1;
913 if (req->output == cld->safe_output) {
915 out_max = max(out_max, (u8)(req->output + 1));
918 if (list_empty(&rail->relationships_to))
919 out_max = max(out_max, cld->minimax_output);
921 out_max = max(out_max, cld->force_out_min);
923 set_output_limits(cld, out_min, out_max);
925 /* Must be sampled after new out_min is set */
926 if (sample_tune_out_last && is_i2c(cld)) {
927 u32 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
929 (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK;
/*
 * Prepare for open-loop operation: unclamp limits, drop to low-range tuning,
 * refresh the output floor, and force 1:1 output scaling with the frequency
 * force disabled (unless scaling in open loop is explicitly allowed).
 */
933 static void set_ol_config(struct tegra_cl_dvfs *cld)
935 u32 val, out_min, out_max;
937 /* always unclamp and restore limits before open loop */
938 if (cld->v_limits.clamped) {
939 cld->v_limits.clamped = false;
940 set_cl_config(cld, &cld->last_req);
942 out_min = cld->lut_min;
943 out_max = cld->lut_max;
945 /* always tune low (safe) in open loop */
946 if (cld->tune_state != TEGRA_CL_DVFS_TUNE_LOW) {
947 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
950 out_min = get_output_min(cld);
952 set_output_limits(cld, out_min, out_max);
954 /* 1:1 scaling in open loop */
955 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
956 if (!(cld->p_data->flags & TEGRA_CL_DVFS_SCALE_IN_OPEN_LOOP))
957 val |= (SCALE_MAX - 1) << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
958 val &= ~CL_DVFS_FREQ_REQ_FORCE_ENABLE;
959 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
/*
 * hrtimer callback driving the low->high tuning sequence under the DFLL
 * clock lock: HIGH_REQUEST waits (re-arming the timer) until the PMIC has
 * seen the raised minimum output, then HIGH_REQUEST_2 ramps and finally
 * commits TUNE_HIGH. NOTE(review): some interior lines are missing from
 * this extract.
 */
962 static enum hrtimer_restart tune_timer_cb(struct hrtimer *timer)
965 u32 val, out_min, out_last;
966 struct tegra_cl_dvfs *cld =
967 container_of(timer, struct tegra_cl_dvfs, tune_timer);
969 clk_lock_save(cld->dfll_clk, &flags);
971 if (cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH_REQUEST) {
972 out_min = cld->lut_min;
973 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
974 out_last = is_i2c(cld) ?
975 (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK :
976 out_min; /* no way to stall PWM: out_last >= out_min */
979 * Update high tune settings if both last I2C value and minimum
980 * output are above high range output threshold, provided I2C
981 * transaction that might be in flight when minimum output was
982 * set has been completed. The latter condition is true if no
983 * transaction is pending or I2C last value has changed since
984 * minimum limit was set.
986 * Since PWM mode never has pending indicator set, high tune
987 * settings are updated always.
989 if (!(val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) ||
990 (cld->tune_out_last != out_last)) {
991 cld->tune_out_last = cld->num_voltages;
994 if ((cld->tune_out_last == cld->num_voltages) &&
995 (out_last >= cld->tune_high_out_min) &&
996 (out_min >= cld->tune_high_out_min)) {
997 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH_REQUEST_2);
998 hrtimer_start(&cld->tune_timer, cld->tune_ramp,
/* Conditions not met yet: poll again after tune_delay. */
1001 hrtimer_start(&cld->tune_timer, cld->tune_delay,
1004 } else if (cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH_REQUEST_2) {
1005 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH);
1008 clk_unlock_restore(cld->dfll_clk, &flags);
1010 return HRTIMER_NORESTART;
/*
 * (Re)arm the DVCO calibration timer and clear any forced output left
 * over from a previous calibration pass.
 */
1013 static inline void calibration_timer_update(struct tegra_cl_dvfs *cld)
1016 * Forced output must be disabled in closed loop mode outside of
1017 * calibration. It may be temporarily enabled during calibration;
1018 * use timer update to clean up.
1020 output_force_disable(cld);
1022 if (cld->calibration_delay)
1023 mod_timer(&cld->calibration_timer,
1024 jiffies + cld->calibration_delay + 1);
/*
 * cl_dvfs_calibrate - calibrate minimum DVCO rate against measured output.
 *
 * Runs only in closed-loop mode, only when the last request engaged the
 * clock skipper (request rate at/below dvco_rate_min), and only after the
 * configured calibration interval since the last attempt. On any transient
 * condition (tuning transition in flight, forced output enabled, pending
 * I2C transaction, inconsistent monitor data) calibration is deferred via
 * calibration_timer_update(). The result is stored in cld->dvco_rate_min,
 * clamped to [calibration_range_min, calibration_range_max].
 *
 * NOTE(review): several original lines (declarations of val/data/rate/now,
 * early returns, closing braces) are missing from this extraction — restore
 * them from the pristine source before building.
 */
1027 static void cl_dvfs_calibrate(struct tegra_cl_dvfs *cld)
1032 unsigned long step = RATE_STEP(cld);
1033 unsigned long rate_min = cld->dvco_rate_min;
1034 u8 out_min = get_output_min(cld);
/* Calibration disabled entirely when no interval is configured */
1036 if (!cld->calibration_delay)
1039 * Enter calibration procedure only if
1040 * - closed loop operations
1041 * - last request engaged clock skipper
1042 * - at least specified time after the last calibration attempt
1044 if ((cld->mode != TEGRA_CL_DVFS_CLOSED_LOOP) ||
1045 (cld->last_req.rate > rate_min))
/* Rate-limit calibration attempts to one per calibration_delay */
1049 if (ktime_us_delta(now, cld->last_calibration) <
1050 jiffies_to_usecs(cld->calibration_delay))
1052 cld->last_calibration = now;
1054 /* Defer calibration if in the middle of tuning transition */
1055 if ((cld->tune_state > TEGRA_CL_DVFS_TUNE_LOW) &&
1056 (cld->tune_state < TEGRA_CL_DVFS_TUNE_HIGH)) {
1057 calibration_timer_update(cld);
1061 /* Defer calibration if forced output was left enabled */
1062 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
1063 if (val & CL_DVFS_OUTPUT_FORCE_ENABLE) {
1064 calibration_timer_update(cld);
1069 * Check if we need to force minimum output during calibration.
1071 * Considerations for selecting TEGRA_CL_DVFS_CALIBRATE_FORCE_VMIN.
1072 * - if there is no voltage enforcement underneath this driver, no need
1073 * to select defer option.
1075 * - if SoC has internal pm controller that controls voltage while CPU
1076 * cluster is idle, and restores force_val on idle exit, the following
1077 * trade-offs applied:
1079 * a) force: DVCO calibration is accurate, but calibration time is
1080 * increased by 2 sample periods and target module maybe under-clocked
1082 * b) don't force: calibration results depend on whether flag
1083 * TEGRA_CL_DVFS_DEFER_FORCE_CALIBRATE is set -- see description below.
1085 if (cld->p_data->flags & TEGRA_CL_DVFS_CALIBRATE_FORCE_VMIN) {
1086 int delay = 2 * GET_SAMPLE_PERIOD(cld);
1087 val = output_force_set_val(cld, out_min);
1088 output_force_enable(cld, val);
1092 /* Synchronize with sample period, and get rate measurements */
1093 switch_monitor(cld, CL_DVFS_MONITOR_CTRL_FREQ);
1095 if (cld->p_data->flags & TEGRA_CL_DVFS_DATA_NEW_NO_USE) {
1096 /* Cannot use DATA_NEW synch - get data after one full sample
1097 period (with 10us margin) */
1098 int delay = GET_SAMPLE_PERIOD(cld) + 10;
/* Double read: first call syncs to a fresh sample, second reads it */
1101 wait_data_new(cld, &data);
1102 wait_data_new(cld, &data);
1104 /* Defer calibration if data reading is not consistent */
1105 if (filter_monitor_data(cld, &data)) {
1106 calibration_timer_update(cld);
1110 /* Get output (voltage) measurements */
1112 /* Defer calibration if I2C transaction is pending */
1113 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
1114 if (val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) {
1115 calibration_timer_update(cld);
1118 val = (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK;
1119 } else if (cld->p_data->flags & TEGRA_CL_DVFS_CALIBRATE_FORCE_VMIN) {
1120 /* Use forced value (cannot read it back from PWM interface) */
1123 /* Get last output (there is no such thing as pending PWM) */
1124 val = get_last_output(cld);
1126 /* Defer calibration if data reading is not consistent */
1127 if (filter_monitor_data(cld, &val)) {
1128 calibration_timer_update(cld);
1133 if (cld->p_data->flags & TEGRA_CL_DVFS_CALIBRATE_FORCE_VMIN) {
1134 /* Defer calibration if forced and read outputs do not match */
1135 if (val != out_min) {
1136 calibration_timer_update(cld);
1139 output_force_disable(cld);
1143 * Check if we need to defer calibration when voltage is matching
1144 * request force_val.
1146 * Considerations for selecting TEGRA_CL_DVFS_DEFER_FORCE_CALIBRATE.
1147 * - if there is no voltage enforcement underneath this driver, no need
1148 * to select defer option.
1150 * - if SoC has internal pm controller that controls voltage while CPU
1151 * cluster is idle, and restores force_val on idle exit, the following
1152 * trade-offs applied:
1154 * a) defer: DVCO minimum maybe slightly over-estimated, all frequencies
1155 * below DVCO minimum are skipped-to accurately, but voltage at low
1156 * frequencies would fluctuate between Vmin and Vmin + 1 LUT/PWM step.
1157 * b) don't defer: DVCO minimum rate is underestimated, maybe down to
1158 * calibration_range_min, respectively actual frequencies below DVCO
1159 * minimum are configured higher than requested, but voltage at low
1160 * frequencies is saturated at Vmin.
1162 if ((val == cld->last_req.output) &&
1163 (cld->p_data->flags & TEGRA_CL_DVFS_DEFER_FORCE_CALIBRATE)) {
1164 calibration_timer_update(cld);
1168 /* Adjust minimum rate */
1169 rate = GET_MONITORED_RATE(data, cld->ref_rate);
/* Step dvco_rate_min down/up by one RATE_STEP toward the measurement;
   the adjustment statements themselves are on lines missing here */
1170 if ((val > out_min) || (rate < (rate_min - step)))
1172 else if (rate > (cld->dvco_rate_min + step))
/* Cache per-condition results so re-entering the same thermal/tuning
   state does not require a fresh calibration cycle */
1175 if ((cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH) &&
1176 (cld->tune_high_out_min == out_min)) {
1177 cld->tune_high_dvco_rate_min = rate_min;
1180 if (cld->thermal_out_floors[cld->therm_floor_idx] == out_min)
1181 cld->dvco_rate_floors[cld->therm_floor_idx] = rate_min;
1185 cld->dvco_rate_min = clamp(rate_min,
1186 cld->calibration_range_min, cld->calibration_range_max);
1187 calibration_timer_update(cld);
1188 pr_debug("%s: calibrated dvco_rate_min %lu (%lu)\n",
1189 __func__, cld->dvco_rate_min, rate_min);
1192 static void calibration_timer_cb(unsigned long data)
1194 unsigned long flags, rate_min;
1195 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
1197 pr_debug("%s\n", __func__);
1199 clk_lock_save(cld->dfll_clk, &flags);
1200 rate_min = cld->dvco_rate_min;
1201 cl_dvfs_calibrate(cld);
1202 if (rate_min != cld->dvco_rate_min) {
1203 tegra_cl_dvfs_request_rate(cld,
1204 tegra_cl_dvfs_request_get(cld));
1206 clk_unlock_restore(cld->dfll_clk, &flags);
/*
 * set_request - program a new frequency/scale request into the DFLL.
 *
 * Computes the force value (offset from safe_output, scaled by cg gain),
 * applies the force-output floor when the request moves down, writes the
 * request word, and only afterwards sets FORCE_ENABLE, using a read-back
 * of the same register plus a 1us delay as an ordering fence.
 *
 * NOTE(review): declarations of val/f and the opening brace are on lines
 * missing from this extraction.
 */
1209 static void set_request(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
1212 int force_val = req->output - cld->safe_output;
1213 int coef = 128; /* FIXME: cld->p_data->cfg_param->cg_scale? */;
1215 /* If going down apply force output floor */
1216 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
1217 f = (val & CL_DVFS_FREQ_REQ_FREQ_MASK) >> CL_DVFS_FREQ_REQ_FREQ_SHIFT;
1218 if ((!(val & CL_DVFS_FREQ_REQ_FREQ_VALID) || (f > req->freq)) &&
1219 (cld->force_out_min > req->output))
1220 force_val = cld->force_out_min - cld->safe_output;
/* Scale force value by loop gain so the forced step lands on target */
1222 force_val = force_val * coef / cld->p_data->cfg_param->cg;
1223 force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
1226 * 1st set new frequency request and force values, then set force enable
1227 * bit (if not set already). Use same CL_DVFS_FREQ_REQ register read
1228 * (not other cl_dvfs register) plus explicit delay as a fence.
1230 val &= CL_DVFS_FREQ_REQ_FORCE_ENABLE;
1231 val |= req->freq << CL_DVFS_FREQ_REQ_FREQ_SHIFT;
1232 val |= req->scale << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
1233 val |= ((u32)force_val << CL_DVFS_FREQ_REQ_FORCE_SHIFT) &
1234 CL_DVFS_FREQ_REQ_FORCE_MASK;
1235 val |= CL_DVFS_FREQ_REQ_FREQ_VALID;
1236 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
/* Read-back acts as the fence before enabling the force output */
1238 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
1240 if (!(val & CL_DVFS_FREQ_REQ_FORCE_ENABLE)) {
1241 udelay(1); /* 1us (big margin) window for force value settle */
1242 val |= CL_DVFS_FREQ_REQ_FORCE_ENABLE;
1243 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
1248 static u8 find_mv_out_cap(struct tegra_cl_dvfs *cld, int mv)
1253 for (cap = 0; cap < cld->num_voltages; cap++) {
1254 uv = cld->out_map[cap]->reg_uV;
1255 if (uv / 1000 >= mv)
1256 return is_i2c(cld) ? cap : cld->out_map[cap]->reg_value;
1258 return get_output_top(cld); /* maximum possible output */
1261 static u8 find_mv_out_floor(struct tegra_cl_dvfs *cld, int mv)
1266 for (floor = 0; floor < cld->num_voltages; floor++) {
1267 uv = cld->out_map[floor]->reg_uV;
1268 if (uv / 1000 > mv) {
1269 if (!floor) /* minimum possible output */
1270 return get_output_bottom(cld);
1274 return is_i2c(cld) ? floor - 1 : cld->out_map[floor - 1]->reg_value;
1277 static int find_safe_output(
1278 struct tegra_cl_dvfs *cld, unsigned long rate, u8 *safe_output)
1281 int n = cld->safe_dvfs->num_freqs;
1282 unsigned long *freqs = cld->safe_dvfs->freqs;
1284 for (i = 0; i < n; i++) {
1285 if (freqs[i] >= rate) {
1286 *safe_output = cld->clk_dvfs_map[i];
1293 /* Return rate with predicted voltage closest/below or equal out_min */
1294 static unsigned long get_dvco_rate_below(struct tegra_cl_dvfs *cld, u8 out_min)
/*
 * Scan characterized frequencies until the predicted output exceeds
 * out_min, then return a rate from the table.
 * NOTE(review): lines 1300-1302 are missing here — presumably the loop
 * `break;` and an index step-back so the returned entry is at/below
 * out_min as the header comment states; confirm against pristine source.
 */
1298 for (i = 0; i < cld->safe_dvfs->num_freqs; i++) {
1299 if (cld->clk_dvfs_map[i] > out_min)
1303 return cld->safe_dvfs->freqs[i];
1306 /* Return rate with predicted voltage closest/above out_min */
1307 static unsigned long get_dvco_rate_above(struct tegra_cl_dvfs *cld, u8 out_min)
1311 for (i = 0; i < cld->safe_dvfs->num_freqs; i++) {
1312 if (cld->clk_dvfs_map[i] > out_min)
1313 return cld->safe_dvfs->freqs[i];
1315 return cld->safe_dvfs->freqs[i-1];
/*
 * cl_dvfs_set_dvco_rate_min - (re)compute the minimum DVCO rate and the
 * calibration window around it for the current thermal floor and tuning
 * target, rounding the result to the request unit (ref_rate/2).
 * The window is skewed upward because the initial estimate is known to be
 * under-estimated: -8 rate steps below, +24 above, clipped to the
 * characterized frequency range.
 */
1318 static void cl_dvfs_set_dvco_rate_min(struct tegra_cl_dvfs *cld,
1319 struct dfll_rate_req *req)
1321 unsigned long tune_high_range_min = 0;
/* Prefer a previously calibrated floor for this thermal index */
1322 unsigned long rate = cld->dvco_rate_floors[cld->therm_floor_idx];
/* NOTE(review): a guard (likely `if (!rate) {`) on missing line 1323
   should make these fallbacks apply only when no calibrated floor exists */
1324 rate = cld->safe_dvfs->dfll_data.out_rate_min;
1325 if (cld->therm_floor_idx < cld->therm_floors_num)
1326 rate = get_dvco_rate_below(cld,
1327 cld->thermal_out_floors[cld->therm_floor_idx]);
/* High-tune target raises both the floor and the calibration range min */
1330 if (cl_tune_target(cld, req->rate) > TEGRA_CL_DVFS_TUNE_LOW) {
1331 rate = max(rate, cld->tune_high_dvco_rate_min);
1332 tune_high_range_min = cld->tune_high_target_rate_min;
1335 /* round minimum rate to request unit (ref_rate/2) boundary */
1336 cld->dvco_rate_min = ROUND_MIN_RATE(rate, cld->ref_rate);
1337 pr_debug("%s: calibrated dvco_rate_min %lu\n",
1338 __func__, cld->dvco_rate_min);
1340 /* dvco min rate is under-estimated - skewed range up */
1341 cld->calibration_range_min = cld->dvco_rate_min - 8 * RATE_STEP(cld);
1342 if (cld->calibration_range_min < tune_high_range_min)
1343 cld->calibration_range_min = tune_high_range_min;
1344 if (cld->calibration_range_min < cld->safe_dvfs->freqs[0])
1345 cld->calibration_range_min = cld->safe_dvfs->freqs[0];
1346 cld->calibration_range_max = cld->dvco_rate_min + 24 * RATE_STEP(cld);
1347 rate = cld->safe_dvfs->freqs[cld->safe_dvfs->num_freqs - 1];
1348 if (cld->calibration_range_max > rate)
1349 cld->calibration_range_max = rate;
/*
 * cl_dvfs_set_force_out_min - derive the forced-output floor from the
 * PMU undershoot guard-band: the floor output delivers at least
 * (current minimum output voltage + pmu_undershoot_gb mV). With no
 * guard-band configured, the floor is simply the bottom output.
 */
1352 static void cl_dvfs_set_force_out_min(struct tegra_cl_dvfs *cld)
1355 int force_mv_min = cld->p_data->pmu_undershoot_gb;
1357 if (!force_mv_min) {
1358 cld->force_out_min = get_output_bottom(cld);
/* Undershoot guard-band conflicts with rail dependency-to slack */
1362 WARN_ONCE(!list_empty(&cld->safe_dvfs->dvfs_rail->relationships_to),
1363 "%s: PMIC undershoot must fit DFLL rail dependency-to slack",
1366 force_out_min = get_output_min(cld);
1367 force_mv_min += get_mv(cld, force_out_min);
1368 force_out_min = find_mv_out_cap(cld, force_mv_min);
/* NOTE(review): statement on missing line 1370 (presumably bumping
   force_out_min past safe_output) — confirm against pristine source */
1369 if (force_out_min == cld->safe_output)
1371 cld->force_out_min = force_out_min;
/*
 * find_vdd_map_entry - look up the PMU voltage map entry for @mV.
 * Scans the platform vdd_map (voltages rounded down to 1 mV). With
 * @exact set, only an entry matching @mV exactly is returned; otherwise
 * the first entry at/above the request qualifies. Returns NULL (on a
 * line missing from this extraction) when nothing matches.
 */
1374 static struct voltage_reg_map *find_vdd_map_entry(
1375 struct tegra_cl_dvfs *cld, int mV, bool exact)
1377 int i, uninitialized_var(reg_mV);
1379 for (i = 0; i < cld->p_data->vdd_map_size; i++) {
1380 /* round down to 1mV */
1381 reg_mV = cld->p_data->vdd_map[i].reg_uV / 1000;
/* Loop exit condition is on lines missing from this extraction */
1386 if (i < cld->p_data->vdd_map_size) {
1387 if (!exact || (mV == reg_mV))
1388 return &cld->p_data->vdd_map[i];
/*
 * cl_dvfs_init_maps - build the output voltage map (out_map) and the
 * frequency-to-output map (clk_dvfs_map) from the characterized DVFS
 * millivolt ladder. Intermediate voltages between min_millivolts and
 * v_max are interpolated so the LUT is populated with distinct PMU map
 * entries; each characterized frequency records the index (I2C) or
 * register value (PWM) of its required output.
 */
1393 static void cl_dvfs_init_maps(struct tegra_cl_dvfs *cld)
1395 int i, j, v, v_max, n;
1396 const int *millivolts;
1397 struct voltage_reg_map *m;
1399 BUILD_BUG_ON(MAX_CL_DVFS_VOLTAGES > OUT_MASK + 1);
1401 n = cld->safe_dvfs->num_freqs;
1402 BUG_ON(n >= MAX_CL_DVFS_VOLTAGES);
1404 millivolts = cld->safe_dvfs->dfll_millivolts;
1405 v_max = millivolts[n - 1];
1407 v = cld->safe_dvfs->dfll_data.min_millivolts;
1408 BUG_ON(v > millivolts[0]);
/* Entry 0 must match the rail minimum exactly */
1410 cld->out_map[0] = find_vdd_map_entry(cld, v, true);
1411 BUG_ON(!cld->out_map[0]);
1413 for (i = 0, j = 1; i < n; i++) {
/* Spread remaining LUT slots evenly across the remaining voltage span */
1415 v += max(1, (v_max - v) / (MAX_CL_DVFS_VOLTAGES - j));
1416 if (v >= millivolts[i])
1419 m = find_vdd_map_entry(cld, v, false);
/* Skip duplicates so each LUT slot maps a distinct PMU value */
1421 if (m != cld->out_map[j - 1])
1422 cld->out_map[j++] = m;
1425 v = (j == MAX_CL_DVFS_VOLTAGES - 1) ? v_max : millivolts[i];
1426 m = find_vdd_map_entry(cld, v, true);
1428 if (m != cld->out_map[j - 1])
1429 cld->out_map[j++] = m;
/* I2C path stores LUT index; PWM path stores raw register value */
1431 cld->clk_dvfs_map[i] = j - 1;
1433 cld->clk_dvfs_map[i] = cld->out_map[j - 1]->reg_value;
1434 BUG_ON(cld->clk_dvfs_map[i] > OUT_MASK + 1);
1440 cld->num_voltages = j;
1442 /* hit Vmax before last freq was mapped: map the rest to max output */
1443 for (j = i++; i < n; i++)
1444 cld->clk_dvfs_map[i] = cld->clk_dvfs_map[j];
/*
 * cl_dvfs_init_tuning_thresholds - derive the high-tuning output
 * thresholds (tune_high_out_min/start) and corresponding minimum DVCO
 * rates from the characterized tune_high_min_millivolts. Defaults
 * effectively disable high-range tuning until a valid threshold inside
 * the operating range is found.
 */
1447 static void cl_dvfs_init_tuning_thresholds(struct tegra_cl_dvfs *cld)
1452 * Convert high tuning voltage threshold into output LUT index, and
1453 * add necessary margin. If voltage threshold is outside operating
1454 * range set it at maximum output level to effectively disable tuning
1455 * parameters adjustment.
1457 cld->tune_high_out_min = get_output_top(cld);
1458 cld->tune_high_out_start = cld->tune_high_out_min;
1459 cld->tune_high_dvco_rate_min = ULONG_MAX;
1460 cld->tune_high_target_rate_min = ULONG_MAX;
1462 mv = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
1463 if (mv >= cld->safe_dvfs->dfll_data.min_millivolts) {
/* Use per-platform margin when provided, else the driver default */
1464 int margin = cld->safe_dvfs->dfll_data.tune_high_margin_mv ? :
1465 CL_DVFS_TUNE_HIGH_MARGIN_MV;
1466 u8 out_min = find_mv_out_cap(cld, mv);
1467 u8 out_start = find_mv_out_cap(cld, mv + margin);
/* Guarantee out_start is strictly above out_min */
1468 out_start = max(out_start, (u8)(out_min + 1));
1469 if (out_start < get_output_top(cld)) {
1470 cld->tune_high_out_min = out_min;
1471 cld->tune_high_out_start = out_start;
1472 if (cld->minimax_output <= out_start)
1473 cld->minimax_output = out_start + 1;
1474 cld->tune_high_dvco_rate_min =
1475 get_dvco_rate_above(cld, out_start + 1);
1476 cld->tune_high_target_rate_min =
1477 get_dvco_rate_above(cld, out_min);
/*
 * cl_dvfs_init_hot_output_cap - convert the rail's high-temperature
 * voltage caps (mV) into output values (thermal_out_caps). No-op when
 * the rail defines no thermal caps; warns when the matching vmax cooling
 * device is absent.
 */
1482 static void cl_dvfs_init_hot_output_cap(struct tegra_cl_dvfs *cld)
1485 if (!cld->safe_dvfs->dvfs_rail->therm_mv_caps ||
1486 !cld->safe_dvfs->dvfs_rail->therm_mv_caps_num)
1489 if (!cld->safe_dvfs->dvfs_rail->vmax_cdev)
1490 WARN(1, "%s: missing dfll cap cooling device\n",
1491 cld->safe_dvfs->dvfs_rail->reg_id);
1493 * Convert monotonically decreasing thermal caps at high temperature
1494 * into output LUT indexes; make sure there is a room for regulation
1495 * below minimum thermal cap.
1497 cld->therm_caps_num = cld->safe_dvfs->dvfs_rail->therm_mv_caps_num;
1498 for (i = 0; i < cld->therm_caps_num; i++) {
1499 cld->thermal_out_caps[i] = find_mv_out_floor(
1500 cld, cld->safe_dvfs->dvfs_rail->therm_mv_caps[i]);
/* Lowest (hottest) cap must still leave room above minimax output */
1502 BUG_ON(cld->thermal_out_caps[cld->therm_caps_num - 1] <
1503 cld->minimax_output);
/*
 * cl_dvfs_convert_cold_output_floor - convert the rail's low-temperature
 * voltage floors (mV, plus @offset for all but the coldest entry) into
 * output values, and record the resulting floor voltages. Publishes the
 * DFLL-specific floors back to the rail when it has none of its own.
 */
1506 static void cl_dvfs_convert_cold_output_floor(struct tegra_cl_dvfs *cld,
1510 struct dvfs_rail *rail = cld->safe_dvfs->dvfs_rail;
1513 * Convert monotonically decreasing thermal floors at low temperature
1514 * into output LUT indexes; make sure there is a room for regulation
1515 * above maximum thermal floor. The latter is also exempt from offset
1518 cld->therm_floors_num = rail->therm_mv_floors_num;
1519 for (i = 0; i < cld->therm_floors_num; i++) {
/* Coldest floor (i == 0) is exempt from the offset */
1520 int mv = rail->therm_mv_floors[i] + (i ? offset : 0);
1521 u8 out = cld->thermal_out_floors[i] = find_mv_out_cap(cld, mv);
1522 cld->thermal_mv_floors[i] = get_mv(cld, out);
/* Highest floor must leave regulation room below the top output */
1524 BUG_ON(cld->thermal_out_floors[0] + 1 >= get_output_top(cld));
1525 if (!rail->therm_mv_dfll_floors) {
1527 rail->therm_mv_dfll_floors = cld->thermal_mv_floors;
/*
 * cl_dvfs_init_cold_output_floor - install low-temperature output floors
 * with the most conservative (zero) offset, and raise minimax_output
 * above the coldest floor. No-op when the rail defines no thermal
 * floors; warns when the matching vmin cooling device is absent.
 */
1531 static void cl_dvfs_init_cold_output_floor(struct tegra_cl_dvfs *cld)
1533 if (!cld->safe_dvfs->dvfs_rail->therm_mv_floors ||
1534 !cld->safe_dvfs->dvfs_rail->therm_mv_floors_num)
1537 if (!cld->safe_dvfs->dvfs_rail->vmin_cdev)
1538 WARN(1, "%s: missing dfll floor cooling device\n",
1539 cld->safe_dvfs->dvfs_rail->reg_id);
1541 /* Most conservative offset 0 always safe */
1542 cl_dvfs_convert_cold_output_floor(cld, 0);
1544 if (cld->minimax_output <= cld->thermal_out_floors[0])
1545 cld->minimax_output = cld->thermal_out_floors[0] + 1;
/*
 * cl_dvfs_init_output_thresholds - establish all output thresholds in
 * dependency order: tuning thresholds, cold floors, safe output, then
 * hot caps (which require minimax_output to be final).
 */
1548 static void cl_dvfs_init_output_thresholds(struct tegra_cl_dvfs *cld)
1550 cld->minimax_output = 0;
1551 cl_dvfs_init_tuning_thresholds(cld);
1552 cl_dvfs_init_cold_output_floor(cld);
1554 /* Append minimum output to thermal floors */
1555 cld->thermal_out_floors[cld->therm_floors_num] = get_output_bottom(cld);
1557 /* make sure safe output is safe at any temperature */
1558 cld->safe_output = max(cld->thermal_out_floors[0],
1559 (u8)(get_output_bottom(cld) + 1));
1560 if (cld->minimax_output <= cld->safe_output)
1561 cld->minimax_output = cld->safe_output + 1;
1563 /* init caps after minimax output is determined */
1564 cl_dvfs_init_hot_output_cap(cld);
/*
 * cl_dvfs_init_pwm_if - configure the PWM output interface: period
 * divider from ref_rate, optional delta mode, and per-board enablement
 * (native PWM_ENABLE, gpio-buffered 1-wire, or tristated direct 1-wire).
 */
1567 static void cl_dvfs_init_pwm_if(struct tegra_cl_dvfs *cld)
1570 struct tegra_cl_dvfs_platform_data *p_data = cld->p_data;
1571 bool delta_mode = p_data->u.pmu_pwm.delta_mode;
1572 int pg = p_data->u.pmu_pwm.pwm_pingroup;
1573 int pcg = p_data->u.pmu_pwm.pwm_clk_pingroup;
1575 div = GET_DIV(cld->ref_rate, p_data->u.pmu_pwm.pwm_rate, 1);
1577 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
1578 val |= delta_mode ? CL_DVFS_OUTPUT_CFG_PWM_DELTA : 0;
1579 val |= (div << CL_DVFS_OUTPUT_CFG_PWM_DIV_SHIFT) &
1580 CL_DVFS_OUTPUT_CFG_PWM_DIV_MASK;
1583 * Different ways to enable/disable PWM depending on board design:
1584 * a) Use native CL-DVFS output PWM_ENABLE control (2WIRE bus)
1585 * b) Use gpio control of external buffer (1WIRE bus with buffer)
1586 * c) Use tristate PWM pingroup control (1WIRE bus with direct connect)
1587 * in cases (b) and (c) keep CL-DVFS native control always enabled
1590 switch (p_data->u.pmu_pwm.pwm_bus) {
1591 case TEGRA_CL_DVFS_PWM_1WIRE_BUFFER:
1592 tegra_pinctrl_set_tristate(p_data, pg, TEGRA_PIN_DISABLE);
1593 val |= CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
1596 case TEGRA_CL_DVFS_PWM_1WIRE_DIRECT:
/* Direct connect idles tristated; native control stays enabled */
1597 tegra_pinctrl_set_tristate(p_data, pg, TEGRA_PIN_ENABLE);
1598 val |= CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
1601 case TEGRA_CL_DVFS_PWM_2WIRE:
1602 tegra_pinctrl_set_tristate(p_data, pg, TEGRA_PIN_DISABLE);
1603 tegra_pinctrl_set_tristate(p_data, pcg, TEGRA_PIN_DISABLE);
1610 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/*
 * cl_dvfs_init_i2c_if - configure the I2C output interface: slave
 * address (7/10-bit), optional HS-mode master code, packet/arbitration
 * enables, target vdd register, and FS/HS clock divisors derived from
 * the I2C controller clock.
 */
1614 static void cl_dvfs_init_i2c_if(struct tegra_cl_dvfs *cld)
1617 struct tegra_cl_dvfs_platform_data *p_data = cld->p_data;
1618 bool hs_mode = p_data->u.pmu_i2c.hs_rate;
1620 /* PMU slave address, vdd register offset, and transfer mode */
1621 val = p_data->u.pmu_i2c.slave_addr << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT;
1622 if (p_data->u.pmu_i2c.addr_10)
1623 val |= CL_DVFS_I2C_CFG_SLAVE_ADDR_10;
1625 val |= p_data->u.pmu_i2c.hs_master_code <<
1626 CL_DVFS_I2C_CFG_HS_CODE_SHIFT;
1627 val |= CL_DVFS_I2C_CFG_PACKET_ENABLE;
1629 val |= CL_DVFS_I2C_CFG_SIZE_MASK;
1630 val |= CL_DVFS_I2C_CFG_ARB_ENABLE;
1631 cl_dvfs_writel(cld, val, CL_DVFS_I2C_CFG);
1632 cl_dvfs_writel(cld, p_data->u.pmu_i2c.reg, CL_DVFS_I2C_VDD_REG_ADDR);
/* Fast-speed divisor (divider-by-8 rounding) */
1635 val = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.fs_rate, 8);
1636 BUG_ON(!val || (val > CL_DVFS_I2C_CLK_DIVISOR_MASK));
1637 val = (val - 1) << CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT;
/* High-speed divisor (divider-by-12 rounding); default when unused */
1639 div = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.hs_rate, 12);
1640 BUG_ON(!div || (div > CL_DVFS_I2C_CLK_DIVISOR_MASK));
1642 div = 2; /* default hs divisor just in case */
1644 val |= (div - 1) << CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT;
1645 cl_dvfs_writel(cld, val, CL_DVFS_I2C_CLK_DIVISOR);
1646 cl_dvfs_i2c_wmb(cld);
/*
 * cl_dvfs_init_out_if - initialize the output (voltage) interface in
 * disabled state: reset tuning/thermal indexes, compute output and LUT
 * limits (directly in h/w when DYN_OUTPUT_CFG is supported, otherwise
 * indirectly via LUT contents), clear force/interrupt state, load the
 * LUT, and finally configure the I2C or PWM transport.
 */
1649 static void cl_dvfs_init_out_if(struct tegra_cl_dvfs *cld)
1651 u32 val, out_min, out_max;
1654 * Disable output, and set safe voltage and output limits;
1655 * disable and clear limit interrupts.
1657 cld->tune_state = TEGRA_CL_DVFS_TUNE_LOW;
1658 cld->therm_cap_idx = cld->therm_caps_num;
1659 cld->therm_floor_idx = 0;
1660 cl_dvfs_set_dvco_rate_min(cld, &cld->last_req);
1661 cl_dvfs_set_force_out_min(cld);
1663 if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
1665 * If h/w supports dynamic change of output register, limit
1666 * LUT index range using cl_dvfs h/w controls, and load full
1667 * range LUT table once.
1669 out_min = get_output_min(cld);
1670 out_max = get_output_cap(cld, NULL);
1671 cld->lut_min = get_output_bottom(cld);
1672 cld->lut_max = get_output_top(cld);
1674 /* LUT available only for I2C, no dynamic config WAR for PWM */
1675 BUG_ON(!is_i2c(cld));
1678 * Allow the entire range of LUT indexes, but limit output
1679 * voltage in LUT mapping (this "indirect" application of limits
1680 * is used, because h/w does not support dynamic change of index
1681 * limits, but dynamic reload of LUT is fine).
1683 out_min = get_output_bottom(cld);
1684 out_max = get_output_top(cld);
1685 cld->lut_min = get_output_min(cld);
1686 cld->lut_max = get_output_cap(cld, NULL);
1690 * Disable output interface. If configuration and I2C address spaces
1691 * are separated, output enable/disable control and output limits are
1692 * in different apertures and output must be disabled 1st to avoid
1693 * spurious I2C transaction. If configuration and I2C address spaces
1694 * are combined output enable/disable control and output limits are
1695 * in the same register, and it is safe to just clear it.
1697 cl_dvfs_i2c_writel(cld, 0, CL_DVFS_OUTPUT_CFG);
1698 cl_dvfs_i2c_wmb(cld);
1700 val = (cld->safe_output << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT) |
1701 (out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT) |
1702 (out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT);
1703 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/* Idle-override h/w additionally latches the forced idle output value */
1704 if (cld->p_data->flags & TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE) {
1705 val = out_min << CL_DVFS_CC4_HVC_FORCE_VAL_SHIFT;
1706 cl_dvfs_writel(cld, val, CL_DVFS_CC4_HVC);
1710 cl_dvfs_writel(cld, 0, CL_DVFS_OUTPUT_FORCE);
1711 cl_dvfs_writel(cld, 0, CL_DVFS_INTR_EN);
1712 cl_dvfs_writel(cld, CL_DVFS_INTR_MAX_MASK | CL_DVFS_INTR_MIN_MASK,
1715 /* fill in LUT table */
1717 cl_dvfs_load_lut(cld);
1719 if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
1720 /* dynamic update of output register allowed - no need to reload
1721 lut - use lut limits as output register setting shadow */
1722 cld->lut_min = out_min;
1723 cld->lut_max = out_max;
1725 cld->v_limits.vmin = get_mv(cld, cld->lut_min);
1726 cld->v_limits.vmax = get_mv(cld, cld->lut_max);
1728 /* configure transport */
1730 cl_dvfs_init_i2c_if(cld);
1732 cl_dvfs_init_pwm_if(cld);
/*
 * cl_dvfs_init_cntrl_logic - program the control loop in disabled mode:
 * sample-rate divider, PI loop parameters (force mode, cf/ci/cg gains,
 * optional cg scaling), low-range tuning registers, droop and scale
 * skippers, and a cleared frequency request (only the scale bits of the
 * current request word are preserved). Monitor is left selecting
 * frequency.
 */
1735 static void cl_dvfs_init_cntrl_logic(struct tegra_cl_dvfs *cld)
1738 struct tegra_cl_dvfs_cfg_param *param = cld->p_data->cfg_param;
1740 /* configure mode, control loop parameters, DFLL tuning */
1741 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1743 val = GET_DIV(cld->ref_rate, param->sample_rate, 32);
1744 BUG_ON(val > CL_DVFS_CONFIG_DIV_MASK);
1745 cl_dvfs_writel(cld, val, CL_DVFS_CONFIG);
1747 val = (param->force_mode << CL_DVFS_PARAMS_FORCE_MODE_SHIFT) |
1748 (param->cf << CL_DVFS_PARAMS_CF_PARAM_SHIFT) |
1749 (param->ci << CL_DVFS_PARAMS_CI_PARAM_SHIFT) |
1750 ((u8)param->cg << CL_DVFS_PARAMS_CG_PARAM_SHIFT) |
1751 (param->cg_scale ? CL_DVFS_PARAMS_CG_SCALE : 0);
1752 cl_dvfs_writel(cld, val, CL_DVFS_PARAMS);
/* Start in the low tuning range */
1754 cl_dvfs_writel(cld, cld->tune0_low, CL_DVFS_TUNE0);
1755 cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune1, CL_DVFS_TUNE1);
1757 if (cld->safe_dvfs->dfll_data.tune_trimmers)
1758 cld->safe_dvfs->dfll_data.tune_trimmers(false);
1760 /* configure droop (skipper 1) and scale (skipper 2) */
1761 val = GET_DROOP_FREQ(cld->safe_dvfs->dfll_data.droop_rate_min,
1762 cld->ref_rate) << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT;
1763 BUG_ON(val > CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK);
1764 val |= (param->droop_cut_value << CL_DVFS_DROOP_CTRL_CUT_SHIFT);
1765 val |= (param->droop_restore_ramp << CL_DVFS_DROOP_CTRL_RAMP_SHIFT);
1766 cl_dvfs_writel(cld, val, CL_DVFS_DROOP_CTRL);
/* Preserve only the scale field; reset the rest of the last request */
1768 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ) &
1769 CL_DVFS_FREQ_REQ_SCALE_MASK;
1770 cld->last_req.scale = val >> CL_DVFS_FREQ_REQ_SCALE_SHIFT;
1771 cld->last_req.cap = 0;
1772 cld->last_req.freq = 0;
1773 cld->last_req.output = 0;
1774 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
1775 cl_dvfs_writel(cld, param->scale_out_ramp, CL_DVFS_SCALE_RAMP);
1777 /* select frequency for monitoring */
1778 cl_dvfs_writel(cld, CL_DVFS_MONITOR_CTRL_FREQ, CL_DVFS_MONITOR_CTRL);
/*
 * cl_dvfs_enable_clocks - enable all module clocks (interface, reference
 * and register clocks). Counterpart of cl_dvfs_disable_clocks().
 * NOTE(review): the line guarding the i2c_clk enable (presumably an
 * is_i2c() check, as in cl_dvfs_disable_clocks) and the return are
 * missing from this extraction — confirm against pristine source.
 */
1782 static int cl_dvfs_enable_clocks(struct tegra_cl_dvfs *cld)
1785 clk_enable(cld->i2c_clk);
1787 clk_enable(cld->ref_clk);
1788 clk_enable(cld->soc_clk);
/*
 * cl_dvfs_disable_clocks - disable all module clocks enabled by
 * cl_dvfs_enable_clocks().
 * NOTE(review): the line guarding the i2c_clk disable (presumably an
 * is_i2c() check) is missing from this extraction.
 */
1792 static void cl_dvfs_disable_clocks(struct tegra_cl_dvfs *cld)
1795 clk_disable(cld->i2c_clk);
1797 clk_disable(cld->ref_clk);
1798 clk_disable(cld->soc_clk);
/*
 * sync_tune_state - infer the s/w tuning state from the TUNE0 register
 * value programmed by the bootloader: match against the known low/high
 * settings, or report failure (error return is on a line missing from
 * this extraction).
 */
1801 static int sync_tune_state(struct tegra_cl_dvfs *cld)
1803 u32 val = cl_dvfs_readl(cld, CL_DVFS_TUNE0);
1804 if (cld->tune0_low == val)
1805 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
1806 else if (cld->tune0_high == val)
1807 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH);
1809 pr_err("\n %s: Failed to sync cl_dvfs tune state\n", __func__);
1816 * When bootloader enables cl_dvfs, then this function
1817 * can be used to set cl_dvfs sw state to be in sync with
/*
 * Reconstructs last_req (scale, rate, freq, output, cap) from the boot
 * rate configured by the bootloader, marks the mode closed-loop, and
 * synchronizes the tuning state from h/w. Fails when no boot rate is
 * configured, no safe output exists for the rate, or the tune state
 * cannot be matched.
 */
1820 static int cl_dvfs_sync(struct tegra_cl_dvfs *cld)
1824 unsigned long int rate;
1825 unsigned long int dfll_boot_req_khz =
1826 cld->safe_dvfs->dfll_data.dfll_boot_khz;
1828 if (!dfll_boot_req_khz) {
1829 pr_err("%s: Failed to sync DFLL boot rate\n", __func__);
/* Preserve the scale field the bootloader left in the request word */
1835 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ) &
1836 CL_DVFS_FREQ_REQ_SCALE_MASK;
1837 cld->last_req.scale = val >> CL_DVFS_FREQ_REQ_SCALE_SHIFT;
1838 cld->last_req.rate = dfll_boot_req_khz * 1000;
1839 cld->last_req.freq = GET_REQUEST_FREQ(cld->last_req.rate,
/* Round-trip through the request unit to get the effective rate */
1841 val = cld->last_req.freq;
1842 rate = GET_REQUEST_RATE(val, cld->ref_rate);
1843 if (find_safe_output(cld, rate, &(cld->last_req.output))) {
1844 pr_err("%s: Failed to find safe output for rate %lu\n",
1848 cld->last_req.cap = cld->last_req.output;
1849 cld->mode = TEGRA_CL_DVFS_CLOSED_LOOP;
1850 status = sync_tune_state(cld);
/*
 * is_cl_dvfs_closed_loop - report whether h/w is already running closed
 * loop (e.g. set up by the bootloader). The CTRL register encodes mode
 * minus one, hence the +1 before comparing.
 */
1856 static bool is_cl_dvfs_closed_loop(struct tegra_cl_dvfs *cld)
1859 mode = cl_dvfs_readl(cld, CL_DVFS_CTRL) + 1;
1860 if (mode == TEGRA_CL_DVFS_CLOSED_LOOP)
/*
 * cl_dvfs_init - one-time driver initialization: bring up the PMU
 * interface clock (I2C) or gpio/pinmux (PWM), enable module clocks,
 * set up tuning and calibration timers, build voltage maps and output
 * thresholds, initialize the output interface, and either sync with a
 * bootloader-enabled closed loop or configure disabled-mode controls.
 */
1865 static int cl_dvfs_init(struct tegra_cl_dvfs *cld)
1867 int ret, gpio, flags;
1869 /* Enable output interface clock */
1870 if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C) {
1871 ret = clk_enable(cld->i2c_clk);
1873 pr_err("%s: Failed to enable %s\n",
1874 __func__, cld->i2c_clk->name);
1877 cld->i2c_rate = clk_get_rate(cld->i2c_clk);
1878 } else if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_PWM) {
1879 int pwm_bus = cld->p_data->u.pmu_pwm.pwm_bus;
1880 if (pwm_bus > TEGRA_CL_DVFS_PWM_1WIRE_DIRECT) {
1881 /* FIXME: PWM 2-wire support */
1882 pr_err("%s: not supported PWM 2-wire bus\n", __func__);
1884 } else if (pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_BUFFER) {
/* gpio idles with output buffer disabled (polarity-dependent) */
1885 gpio = cld->p_data->u.pmu_pwm.out_gpio;
1886 flags = cld->p_data->u.pmu_pwm.out_enable_high ?
1887 GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH;
1888 if (gpio_request_one(gpio, flags, "cl_dvfs_pwm")) {
1889 pr_err("%s: Failed to request pwm gpio %d\n",
1895 pr_err("%s: unknown PMU interface\n", __func__);
1899 /* Enable module clocks, release control logic reset */
1900 ret = clk_enable(cld->ref_clk);
1902 pr_err("%s: Failed to enable %s\n",
1903 __func__, cld->ref_clk->name);
1906 ret = clk_enable(cld->soc_clk);
1908 pr_err("%s: Failed to enable %s\n",
/* NOTE(review): soc_clk enable failure reports ref_clk's name — looks
   like a copy/paste bug; should be cld->soc_clk->name */
1909 __func__, cld->ref_clk->name);
1912 cld->ref_rate = clk_get_rate(cld->ref_clk);
1913 BUG_ON(!cld->ref_rate);
1915 /* init tuning timer */
1916 hrtimer_init(&cld->tune_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1917 cld->tune_timer.function = tune_timer_cb;
1918 cld->tune_delay = ktime_set(0, CL_DVFS_TUNE_HIGH_DELAY * 1000);
1919 if (!cld->p_data->tune_ramp_delay)
1920 cld->p_data->tune_ramp_delay = CL_DVFS_OUTPUT_RAMP_DELAY;
1921 cld->tune_ramp = ktime_set(0, cld->p_data->tune_ramp_delay * 1000);
1923 /* init forced output resume delay */
1924 if (!cld->p_data->resume_ramp_delay)
1925 cld->p_data->resume_ramp_delay = CL_DVFS_OUTPUT_RAMP_DELAY;
1927 /* init calibration timer */
1928 init_timer_deferrable(&cld->calibration_timer);
1929 cld->calibration_timer.function = calibration_timer_cb;
1930 cld->calibration_timer.data = (unsigned long)cld;
1931 cld->calibration_delay = usecs_to_jiffies(CL_DVFS_CALIBR_TIME);
1933 /* Init tune0 settings */
1934 cld->tune0_low = cld->safe_dvfs->dfll_data.tune0;
1935 cld->tune0_high = cld->safe_dvfs->dfll_data.tune0_high_mv;
1937 /* Get ready output voltage mapping */
1938 cl_dvfs_init_maps(cld);
1940 /* Setup output range thresholds */
1941 cl_dvfs_init_output_thresholds(cld);
1943 /* Setup PMU interface */
1944 cl_dvfs_init_out_if(cld);
/* Bootloader may have left the loop closed — sync s/w state with h/w */
1946 if (is_cl_dvfs_closed_loop(cld)) {
1947 ret = cl_dvfs_sync(cld);
1952 * Configure control registers in disabled mode
1953 * and disable clocks
1955 cl_dvfs_init_cntrl_logic(cld);
1956 cl_dvfs_disable_clocks(cld);
1959 /* Set target clock cl_dvfs data */
1960 tegra_dfll_set_cl_dvfs_data(cld->dfll_clk, cld);
1965 * Re-initialize and enable target device clock in open loop mode. Called
1966 * directly from SoC clock resume syscore operation. Closed loop will be
1967 * re-entered in platform syscore ops as well.
1969 void tegra_cl_dvfs_resume(struct tegra_cl_dvfs *cld)
/* Snapshot pre-suspend mode/request: the init path below resets both */
1971 enum tegra_cl_dvfs_ctrl_mode mode = cld->mode;
1972 struct dfll_rate_req req = cld->last_req;
1974 cl_dvfs_enable_clocks(cld);
1976 /* Setup PMU interface, and configure controls in disabled mode */
1977 cl_dvfs_init_out_if(cld);
1978 cl_dvfs_init_cntrl_logic(cld);
1980 /* Restore force output */
1981 cl_dvfs_writel(cld, cld->suspended_force_out, CL_DVFS_OUTPUT_FORCE);
1983 cl_dvfs_disable_clocks(cld);
1985 /* Restore last request and mode */
1986 cld->last_req = req;
/* Resume at most in open loop; closed loop is re-locked by syscore ops */
1987 if (mode != TEGRA_CL_DVFS_DISABLED) {
1988 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1989 if (WARN(mode > TEGRA_CL_DVFS_OPEN_LOOP,
1990 "DFLL was left locked in suspend\n"))
1994 /* Re-enable bypass output if it was forced before suspend */
1995 if ((cld->p_data->u.pmu_pwm.dfll_bypass_dev) &&
1996 (cld->suspended_force_out & CL_DVFS_OUTPUT_FORCE_ENABLE)) {
1997 if (!cld->safe_dvfs->dfll_data.is_bypass_down ||
1998 !cld->safe_dvfs->dfll_data.is_bypass_down()) {
/* Let the forced output settle before continuing */
2001 udelay(cld->p_data->resume_ramp_delay);
2006 #ifdef CONFIG_THERMAL
2007 /* cl_dvfs cap cooling device */
2008 static int tegra_cl_dvfs_get_vmax_cdev_max_state(
2009 struct thermal_cooling_device *cdev, unsigned long *max_state)
2011 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
2012 *max_state = cld->therm_caps_num;
2016 static int tegra_cl_dvfs_get_vmax_cdev_cur_state(
2017 struct thermal_cooling_device *cdev, unsigned long *cur_state)
2019 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
2020 *cur_state = cld->therm_cap_idx;
/*
 * Vmax cooling device: apply a new thermal cap index under the DFLL
 * clock lock, and re-apply the current rate request in closed-loop mode
 * so the new cap takes effect immediately.
 */
2024 static int tegra_cl_dvfs_set_vmax_cdev_state(
2025 struct thermal_cooling_device *cdev, unsigned long cur_state)
2027 unsigned long flags;
2028 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
2030 clk_lock_save(cld->dfll_clk, &flags);
2032 if (cld->therm_cap_idx != cur_state) {
2033 cld->therm_cap_idx = cur_state;
2034 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
2035 tegra_cl_dvfs_request_rate(cld,
2036 tegra_cl_dvfs_request_get(cld));
2039 clk_unlock_restore(cld->dfll_clk, &flags);
/* Cooling device ops: DFLL output voltage cap at high temperature */
2043 static struct thermal_cooling_device_ops tegra_cl_dvfs_vmax_cool_ops = {
2044 .get_max_state = tegra_cl_dvfs_get_vmax_cdev_max_state,
2045 .get_cur_state = tegra_cl_dvfs_get_vmax_cdev_cur_state,
2046 .set_cur_state = tegra_cl_dvfs_set_vmax_cdev_state,
2049 /* cl_dvfs vmin cooling device */
2050 static int tegra_cl_dvfs_get_vmin_cdev_max_state(
2051 struct thermal_cooling_device *cdev, unsigned long *max_state)
2053 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
2054 *max_state = cld->therm_floors_num;
2058 static int tegra_cl_dvfs_get_vmin_cdev_cur_state(
2059 struct thermal_cooling_device *cdev, unsigned long *cur_state)
2061 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
2062 *cur_state = cld->therm_floor_idx;
/*
 * Vmin cooling device: apply a new thermal floor index under the DFLL
 * clock lock — recompute dvco_rate_min and the forced-output floor, and
 * in closed-loop mode re-apply the rate request, then delay two sample
 * periods so the new Vmin delivery is guaranteed to have started.
 */
2066 static int tegra_cl_dvfs_set_vmin_cdev_state(
2067 struct thermal_cooling_device *cdev, unsigned long cur_state)
2069 unsigned long flags;
2070 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
2072 clk_lock_save(cld->dfll_clk, &flags);
2074 if (cld->therm_floor_idx != cur_state) {
2075 cld->therm_floor_idx = cur_state;
2076 cl_dvfs_set_dvco_rate_min(cld, &cld->last_req);
2077 cl_dvfs_set_force_out_min(cld);
2078 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
2079 tegra_cl_dvfs_request_rate(cld,
2080 tegra_cl_dvfs_request_get(cld));
2081 /* Delay to make sure new Vmin delivery started */
2082 udelay(2 * GET_SAMPLE_PERIOD(cld));
2085 clk_unlock_restore(cld->dfll_clk, &flags);
/* Cooling device ops: DFLL output voltage floor at low temperature */
2089 static struct thermal_cooling_device_ops tegra_cl_dvfs_vmin_cool_ops = {
2090 .get_max_state = tegra_cl_dvfs_get_vmin_cdev_max_state,
2091 .get_cur_state = tegra_cl_dvfs_get_vmin_cdev_cur_state,
2092 .set_cur_state = tegra_cl_dvfs_set_vmin_cdev_state,
/*
 * tegra_cl_dvfs_init_cdev - deferred-work registration of the vmin and
 * vmax cooling devices, preferring device-tree registration (with a
 * "<type>_dfll" name) when a DT node is available. Registration failures
 * are only reported: the driver already starts at worst-case temperature
 * limits, so it remains safe without the cooling devices.
 */
2095 static void tegra_cl_dvfs_init_cdev(struct work_struct *work)
2098 char dt_type[THERMAL_NAME_LENGTH];
2099 struct device_node *dn;
2100 struct tegra_cl_dvfs *cld = container_of(
2101 work, struct tegra_cl_dvfs, init_cdev_work);
2103 /* just report error - initialized at WC temperature, anyway */
2104 if (cld->safe_dvfs->dvfs_rail->vmin_cdev) {
2105 type = cld->safe_dvfs->dvfs_rail->vmin_cdev->cdev_type;
2106 snprintf(dt_type, sizeof(dt_type), "%s_dfll", type);
2107 dn = cld->safe_dvfs->dvfs_rail->vmin_cdev->cdev_dn;
2108 cld->vmin_cdev = dn ?
2109 thermal_of_cooling_device_register(dn, dt_type,
2110 (void *)cld, &tegra_cl_dvfs_vmin_cool_ops) :
2111 thermal_cooling_device_register(type,
2112 (void *)cld, &tegra_cl_dvfs_vmin_cool_ops);
/* A device with no bound thermal instances is useless — drop it */
2114 if (IS_ERR_OR_NULL(cld->vmin_cdev) ||
2115 list_empty(&cld->vmin_cdev->thermal_instances)) {
2116 cld->vmin_cdev = NULL;
2117 pr_err("%s: tegra cooling device %s failed to register\n",
2121 pr_info("%s: %s cooling device registered\n", __func__, type);
2124 if (cld->safe_dvfs->dvfs_rail->vmax_cdev) {
2125 type = cld->safe_dvfs->dvfs_rail->vmax_cdev->cdev_type;
2126 snprintf(dt_type, sizeof(dt_type), "%s_dfll", type);
2127 dn = cld->safe_dvfs->dvfs_rail->vmax_cdev->cdev_dn;
2128 cld->vmax_cdev = dn ?
2129 thermal_of_cooling_device_register(dn, dt_type,
2130 (void *)cld, &tegra_cl_dvfs_vmax_cool_ops) :
2131 thermal_cooling_device_register(type,
2132 (void *)cld, &tegra_cl_dvfs_vmax_cool_ops);
2134 if (IS_ERR_OR_NULL(cld->vmax_cdev) ||
2135 list_empty(&cld->vmax_cdev->thermal_instances)) {
2136 cld->vmax_cdev = NULL;
2137 pr_err("%s: tegra cooling device %s failed to register\n",
2141 pr_info("%s: %s cooling device registered\n", __func__, type);
2146 #ifdef CONFIG_PM_SLEEP
2148 * cl_dvfs controls clock/voltage to other devices, including CPU. Therefore,
2149 * cl_dvfs driver pm suspend callback does not stop cl-dvfs operations. It is
2150  * only used to enforce cold/hot voltage limit, since temperature may change in
2151  * suspend without waking up. The correct temperature zone after suspend will
2152 * be updated via cl_dvfs cooling device interface during resume of temperature
2155 static int tegra_cl_dvfs_suspend_cl(struct device *dev)
2157 unsigned long flags;
2158 struct tegra_cl_dvfs *cld = dev_get_drvdata(dev);
2160 clk_lock_save(cld->dfll_clk, &flags);
2162 cld->vmax_cdev->updated = false;
2163 cld->therm_cap_idx = cld->therm_caps_num;
2165 cld->vmin_cdev->updated = false;
2166 cld->therm_floor_idx = 0;
2167 cl_dvfs_set_dvco_rate_min(cld, &cld->last_req);
2168 cl_dvfs_set_force_out_min(cld);
2169 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
2170 set_cl_config(cld, &cld->last_req);
2171 set_request(cld, &cld->last_req);
2172 /* Delay to make sure new Vmin delivery started */
2173 udelay(2 * GET_SAMPLE_PERIOD(cld));
2175 cld->suspended_force_out = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
2176 clk_unlock_restore(cld->dfll_clk, &flags);
2178 pr_debug("%s: closed loop thermal control suspended\n", __func__);
2183 static const struct dev_pm_ops tegra_cl_dvfs_pm_ops = {
2184 .suspend_noirq = tegra_cl_dvfs_suspend_cl,
2189 * These dfll bypass APIs provide direct access to force output register.
2190 * Set operation always updates force value, but applies it only in open loop,
2191 * or disabled mode. Get operation returns force value back if it is applied,
2192 * and return monitored output, otherwise. Hence, get value matches real output
2195 static int tegra_cl_dvfs_force_output(void *data, unsigned int out_sel)
2198 unsigned long flags;
2199 struct tegra_cl_dvfs *cld = data;
2201 if (out_sel > OUT_MASK)
2204 clk_lock_save(cld->dfll_clk, &flags);
2206 val = output_force_set_val(cld, out_sel);
2207 if ((cld->mode < TEGRA_CL_DVFS_CLOSED_LOOP) &&
2208 !(val & CL_DVFS_OUTPUT_FORCE_ENABLE)) {
2209 output_force_enable(cld, val);
2210 /* enable output only if bypass h/w is alive */
2211 if (!cld->safe_dvfs->dfll_data.is_bypass_down ||
2212 !cld->safe_dvfs->dfll_data.is_bypass_down())
2216 clk_unlock_restore(cld->dfll_clk, &flags);
2220 static int tegra_cl_dvfs_get_output(void *data)
2223 unsigned long flags;
2224 struct tegra_cl_dvfs *cld = data;
2226 clk_lock_save(cld->dfll_clk, &flags);
2227 val = cl_dvfs_get_output(cld);
2228 clk_unlock_restore(cld->dfll_clk, &flags);
2232 static void cl_dvfs_init_pwm_bypass(struct tegra_cl_dvfs *cld,
2233 struct platform_device *byp_dev)
2235 struct tegra_dfll_bypass_platform_data *p_data =
2236 byp_dev->dev.platform_data;
2238 int vinit = cld->p_data->u.pmu_pwm.init_uV;
2239 int vmin = cld->p_data->u.pmu_pwm.min_uV;
2240 int vstep = cld->p_data->u.pmu_pwm.step_uV;
2242 /* Sync initial voltage and setup bypass callbacks */
2243 if ((vinit >= vmin) && vstep) {
2244 unsigned int vsel = DIV_ROUND_UP((vinit - vmin), vstep);
2245 tegra_cl_dvfs_force_output(cld, vsel);
2248 p_data->set_bypass_sel = tegra_cl_dvfs_force_output;
2249 p_data->get_bypass_sel = tegra_cl_dvfs_get_output;
2250 p_data->dfll_data = cld;
2255 * The Silicon Monitor (SiMon) notification provides grade information on
2256  * the DFLL controlled rail. The respective minimum voltage offset is applied
2257  * to thermal floors profile. SiMon offsets are negative, the higher the grade
2258 * the lower the floor. In addition SiMon grade may affect tuning settings: more
2259 * aggressive settings may be used at grades above zero.
2261 static void update_simon_tuning(struct tegra_cl_dvfs *cld, unsigned long grade)
2264 struct dvfs_dfll_data *dfll_data = &cld->safe_dvfs->dfll_data;
2265 u32 mask = dfll_data->tune0_simon_mask;
2272 * - switch to settings for low voltage tuning range at current grade
2273 * - update both low/high voltage range settings to match new grade
2274 * notification (note that same toggle mask is applied to settings
2275 * in both low and high voltage ranges).
2276 * - switch to settings for low voltage tuning range at new grade
2277 * - switch to settings for high voltage range at new grade if tuning
2282 pr_debug("tune0: 0x%x\n", cl_dvfs_readl(cld, CL_DVFS_TUNE0));
2284 cld->tune0_low = dfll_data->tune0 ^ (grade ? mask : 0);
2285 cld->tune0_high = dfll_data->tune0_high_mv ^ (grade ? mask : 0);
2289 pr_debug("tune0: 0x%x\n", cl_dvfs_readl(cld, CL_DVFS_TUNE0));
2291 if (cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH) {
2293 pr_debug("tune0: 0x%x\n", cl_dvfs_readl(cld, CL_DVFS_TUNE0));
2297 static int cl_dvfs_simon_grade_notify_cb(struct notifier_block *nb,
2298 unsigned long grade, void *v)
2300 unsigned long flags;
2301 int i, simon_offset;
2302 int curr_domain = (int)((long)v);
2303 struct tegra_cl_dvfs *cld = container_of(
2304 nb, struct tegra_cl_dvfs, simon_grade_nb);
2305 struct dvfs_rail *rail = cld->safe_dvfs->dvfs_rail;
2307 if (!cld->therm_floors_num || (curr_domain != rail->simon_domain))
2310 if (grade >= rail->simon_vmin_offs_num)
2311 grade = rail->simon_vmin_offs_num - 1;
2312 simon_offset = rail->simon_vmin_offsets[grade];
2313 BUG_ON(simon_offset > 0);
2315 clk_lock_save(cld->dfll_clk, &flags);
2317 /* Update tuning based on SiMon grade */
2318 update_simon_tuning(cld, grade);
2320 /* Convert new floors and invalidate minimum rates */
2321 cl_dvfs_convert_cold_output_floor(cld, simon_offset);
2322 for (i = 0; i < cld->therm_floors_num; i++)
2323 cld->dvco_rate_floors[i] = 0;
2325 cl_dvfs_set_dvco_rate_min(cld, &cld->last_req);
2326 cl_dvfs_set_force_out_min(cld);
2327 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
2328 tegra_cl_dvfs_request_rate(cld,
2329 tegra_cl_dvfs_request_get(cld));
2332 clk_unlock_restore(cld->dfll_clk, &flags);
2334 pr_info("tegra_dvfs: set %s simon grade %lu\n", rail->reg_id, grade);
2339 static void tegra_cl_dvfs_register_simon_notifier(struct tegra_cl_dvfs *cld)
2341 struct dvfs_rail *rail = cld->safe_dvfs->dvfs_rail;
2343 /* Stay at default if no simon offsets */
2344 if (!rail->simon_vmin_offsets)
2347 cld->simon_grade_nb.notifier_call = cl_dvfs_simon_grade_notify_cb;
2349 if (tegra_register_simon_notifier(&cld->simon_grade_nb)) {
2350 pr_err("tegra_dvfs: failed to register %s simon notifier\n",
2355 pr_info("tegra_dvfs: registered %s simon notifier\n", rail->reg_id);
2360 * Two mechanisms to build vdd_map dynamically:
2362 * 1. Use regulator interface to match voltage selector to voltage level,
2363 * and platform data coefficients to convert selector to register values.
2364 * Applied when vdd supply with I2C inteface and internal voltage selection
2365 * register is connected.
2367 * 2. Directly map PWM duty cycle selector to voltage level using platform data
2368 * coefficients. Applied when vdd supply driven by PWM data output is connected.
2370 static int build_regulator_vdd_map(struct tegra_cl_dvfs_platform_data *p_data,
2371 struct regulator *reg, struct voltage_reg_map **p_vdd_map)
2375 struct voltage_reg_map *vdd_map;
2380 n = regulator_count_voltages(reg);
2384 vdd_map = kzalloc(sizeof(*vdd_map) * n, GFP_KERNEL);
2388 for (i = 0, sel = 0; sel < n; sel++) {
2389 int v = regulator_list_voltage(reg, sel);
2391 vdd_map[i].reg_uV = v;
2392 vdd_map[i].reg_value = sel * p_data->u.pmu_i2c.sel_mul +
2393 p_data->u.pmu_i2c.sel_offs;
2398 p_data->vdd_map_size = i;
2399 p_data->vdd_map = vdd_map;
2400 *p_vdd_map = vdd_map;
2401 return i ? 0 : -EINVAL;
2404 static int build_direct_vdd_map(struct tegra_cl_dvfs_platform_data *p_data,
2405 struct voltage_reg_map **p_vdd_map)
2408 struct voltage_reg_map *vdd_map =
2409 kzalloc(sizeof(*vdd_map) * MAX_CL_DVFS_VOLTAGES, GFP_KERNEL);
2414 for (i = 0; i < MAX_CL_DVFS_VOLTAGES; i++) {
2415 vdd_map[i].reg_uV = i * p_data->u.pmu_pwm.step_uV +
2416 p_data->u.pmu_pwm.min_uV;
2417 vdd_map[i].reg_value = i;
2420 p_data->vdd_map_size = i;
2421 p_data->vdd_map = vdd_map;
2422 *p_vdd_map = vdd_map;
2426 /* cl_dvfs comaptibility tables */
2427 static struct tegra_cl_dvfs_soc_match_data t132_data = {
2428 .flags = TEGRA_CL_DVFS_DEFER_FORCE_CALIBRATE,
2431 static struct tegra_cl_dvfs_soc_match_data t210_data = {
2432 .flags = TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE |
2433 TEGRA_CL_DVFS_SCALE_IN_OPEN_LOOP,
2436 static struct of_device_id tegra_cl_dvfs_of_match[] = {
2437 { .compatible = "nvidia,tegra114-dfll", },
2438 { .compatible = "nvidia,tegra124-dfll", },
2439 { .compatible = "nvidia,tegra132-dfll", .data = &t132_data, },
2440 { .compatible = "nvidia,tegra148-dfll", },
2441 { .compatible = "nvidia,tegra210-dfll", .data = &t210_data, },
2445 /* cl_dvfs dt parsing */
/*
 * DT parsing helpers. All expand in a function scope that has `pdev` and,
 * for the mandatory variants, an `err_out` label.
 *
 * NOTE(review): the do/while(0) wrappers, line continuations, and goto
 * error lines were dropped in extraction; restored from the upstream
 * driver - verify against the original file.
 */
#define OF_READ_U32_OPT(node, name, var)				       \
do {									       \
	u32 val;							       \
	if (!of_property_read_u32((node), #name, &val)) {		       \
		(var) = val;						       \
		dev_dbg(&pdev->dev, "DT: " #name " = %u\n", val);	       \
	}								       \
} while (0)

#define OF_READ_U32(node, name, var)					       \
do {									       \
	u32 val;							       \
	if (of_property_read_u32((node), #name, &val)) {		       \
		dev_err(&pdev->dev, "missing " #name " in DT data\n");	       \
		goto err_out;						       \
	}								       \
	(var) = val;							       \
	dev_dbg(&pdev->dev, "DT: " #name " = %u\n", val);		       \
} while (0)

#define OF_GET_GPIO(node, name, pin, flags)				       \
do {									       \
	(pin) = of_get_named_gpio_flags((node), #name, 0, &(flags));	       \
	if ((pin) < 0) {						       \
		dev_err(&pdev->dev, "missing " #name " in DT data\n");	       \
		goto err_out;						       \
	}								       \
	dev_dbg(&pdev->dev, "DT: " #name " = %u\n", (pin));		       \
} while (0)

#define OF_READ_BOOL(node, name, var)					       \
do {									       \
	(var) = of_property_read_bool((node), #name);			       \
	dev_dbg(&pdev->dev, "DT: " #name " = %s\n", (var) ? "true" : "false"); \
} while (0)

/* Index of the period cell in a "pwms" specifier */
#define TEGRA_DFLL_OF_PWM_PERIOD_CELL 1
2486 static int dt_parse_pwm_regulator(struct platform_device *pdev,
2487 struct device_node *r_dn, struct tegra_cl_dvfs_platform_data *p_data)
2490 int min_uV, max_uV, step_uV, init_uV;
2491 struct of_phandle_args args;
2492 struct platform_device *rdev = of_find_device_by_node(r_dn);
2494 if (of_parse_phandle_with_args(r_dn, "pwms", "#pwm-cells", 0, &args)) {
2495 dev_err(&pdev->dev, "DT: failed to parse pwms property\n");
2498 of_node_put(args.np);
2500 if (args.args_count <= TEGRA_DFLL_OF_PWM_PERIOD_CELL) {
2501 dev_err(&pdev->dev, "DT: low #pwm-cells %d\n", args.args_count);
2505 /* convert pwm period in ns to cl_dvfs pwm clock rate in Hz */
2506 val = args.args[TEGRA_DFLL_OF_PWM_PERIOD_CELL];
2507 val = (NSEC_PER_SEC / val) * (MAX_CL_DVFS_VOLTAGES - 1);
2508 p_data->u.pmu_pwm.pwm_rate = val;
2509 dev_dbg(&pdev->dev, "DT: pwm-rate: %lu\n", val);
2511 /* voltage boundaries and step */
2512 OF_READ_U32(r_dn, regulator-min-microvolt, min_uV);
2513 OF_READ_U32(r_dn, regulator-max-microvolt, max_uV);
2514 OF_READ_U32(r_dn, regulator-init-microvolt, init_uV);
2516 step_uV = (max_uV - min_uV) / (MAX_CL_DVFS_VOLTAGES - 1);
2518 dev_err(&pdev->dev, "DT: invalid pwm step %d\n", step_uV);
2521 if ((max_uV - min_uV) % (MAX_CL_DVFS_VOLTAGES - 1))
2522 dev_warn(&pdev->dev,
2523 "DT: pwm range [%d...%d] is not aligned on %d steps\n",
2524 min_uV, max_uV, MAX_CL_DVFS_VOLTAGES - 1);
2526 p_data->u.pmu_pwm.min_uV = min_uV;
2527 p_data->u.pmu_pwm.step_uV = step_uV;
2528 p_data->u.pmu_pwm.init_uV = init_uV;
2531 * For pwm regulator access from the regulator driver, without
2532 * interference with closed loop operations, cl_dvfs provides
2533 * dfll bypass callbacks in device platform data
2535 if (rdev && rdev->dev.platform_data)
2536 p_data->u.pmu_pwm.dfll_bypass_dev = rdev;
2546 static int dt_parse_pwm_pmic_params(struct platform_device *pdev,
2547 struct device_node *pmic_dn, struct tegra_cl_dvfs_platform_data *p_data)
2550 enum of_gpio_flags f;
2551 bool pwm_1wire_buffer, pwm_1wire_direct, pwm_2wire;
2552 struct device_node *r_dn =
2553 of_parse_phandle(pmic_dn, "pwm-regulator", 0);
2555 /* pwm regulator device */
2557 dev_err(&pdev->dev, "missing DT pwm regulator data\n");
2561 if (dt_parse_pwm_regulator(pdev, r_dn, p_data)) {
2562 dev_err(&pdev->dev, "failed to parse DT pwm regulator\n");
2566 /* pwm config data */
2567 OF_READ_BOOL(pmic_dn, pwm-1wire-buffer, pwm_1wire_buffer);
2568 OF_READ_BOOL(pmic_dn, pwm-1wire-direct, pwm_1wire_direct);
2569 OF_READ_BOOL(pmic_dn, pwm-2wire, pwm_2wire);
2570 if (pwm_1wire_buffer) {
2572 p_data->u.pmu_pwm.pwm_bus = TEGRA_CL_DVFS_PWM_1WIRE_BUFFER;
2574 if (pwm_1wire_direct) {
2576 p_data->u.pmu_pwm.pwm_bus = TEGRA_CL_DVFS_PWM_1WIRE_DIRECT;
2580 p_data->u.pmu_pwm.pwm_bus = TEGRA_CL_DVFS_PWM_2WIRE;
2583 dev_err(&pdev->dev, "%s pwm_bus in DT board data\n",
2584 i ? "inconsistent" : "missing");
2589 OF_GET_GPIO(pmic_dn, pwm-data-gpio, pin, f);
2590 p_data->u.pmu_pwm.pinctrl_dev = pinctrl_get_dev_from_gpio(pin);
2591 if (!p_data->u.pmu_pwm.pinctrl_dev) {
2592 dev_err(&pdev->dev, "No tegra pincontrol driver\n");
2595 p_data->u.pmu_pwm.pwm_pingroup = pinctrl_get_selector_from_gpio(
2596 p_data->u.pmu_pwm.pinctrl_dev, pin);
2597 if (p_data->u.pmu_pwm.pwm_pingroup < 0) {
2598 dev_err(&pdev->dev, "invalid gpio %d\n", pin);
2602 if (pwm_1wire_buffer) {
2603 OF_GET_GPIO(pmic_dn, pwm-buffer-ctrl-gpio, pin, f);
2604 p_data->u.pmu_pwm.out_enable_high = !(f & OF_GPIO_ACTIVE_LOW);
2605 p_data->u.pmu_pwm.out_gpio = pin;
2606 } else if (pwm_2wire) {
2607 OF_GET_GPIO(pmic_dn, pwm-clk-gpio, pin, f);
2608 p_data->u.pmu_pwm.pwm_clk_pingroup =
2609 pinctrl_get_selector_from_gpio(
2610 p_data->u.pmu_pwm.pinctrl_dev, pin);
2611 if (p_data->u.pmu_pwm.pwm_pingroup < 0) {
2612 dev_err(&pdev->dev, "invalid gpio %d\n", pin);
2615 OF_READ_BOOL(pmic_dn, pwm-delta-mode,
2616 p_data->u.pmu_pwm.delta_mode);
2619 of_node_put(pmic_dn);
2623 of_node_put(pmic_dn);
2627 static int dt_parse_i2c_pmic_params(struct platform_device *pdev,
2628 struct device_node *pmic_dn, struct tegra_cl_dvfs_platform_data *p_data)
2630 OF_READ_U32(pmic_dn, pmic-i2c-address, p_data->u.pmu_i2c.slave_addr);
2631 OF_READ_U32(pmic_dn, pmic-i2c-voltage-register, p_data->u.pmu_i2c.reg);
2633 OF_READ_BOOL(pmic_dn, i2c-10-bit-addresses, p_data->u.pmu_i2c.addr_10);
2635 OF_READ_U32(pmic_dn, sel-conversion-slope, p_data->u.pmu_i2c.sel_mul);
2636 OF_READ_U32_OPT(pmic_dn, sel-conversion-offset,
2637 p_data->u.pmu_i2c.sel_offs);
2638 OF_READ_U32_OPT(pmic_dn, pmic-undershoot-gb, p_data->pmu_undershoot_gb);
2640 OF_READ_U32(pmic_dn, i2c-fs-rate, p_data->u.pmu_i2c.fs_rate);
2641 OF_READ_U32_OPT(pmic_dn, i2c-hs-rate, p_data->u.pmu_i2c.hs_rate);
2642 if (p_data->u.pmu_i2c.hs_rate)
2643 OF_READ_U32(pmic_dn, i2c-hs-master-code,
2644 p_data->u.pmu_i2c.hs_master_code);
2646 of_node_put(pmic_dn);
2650 of_node_put(pmic_dn);
2654 static int dt_parse_board_params(struct platform_device *pdev,
2655 struct device_node *b_dn, struct tegra_cl_dvfs_cfg_param *p_cfg)
2658 bool fixed_forcing, auto_forcing, no_forcing;
2660 OF_READ_U32(b_dn, sample-rate, p_cfg->sample_rate);
2661 OF_READ_U32(b_dn, cf, p_cfg->cf);
2662 OF_READ_U32(b_dn, ci, p_cfg->ci);
2663 OF_READ_U32(b_dn, cg, p_cfg->cg);
2664 OF_READ_U32(b_dn, droop-cut-value, p_cfg->droop_cut_value);
2665 OF_READ_U32(b_dn, droop-restore-ramp, p_cfg->droop_restore_ramp);
2666 OF_READ_U32(b_dn, scale-out-ramp, p_cfg->scale_out_ramp);
2668 OF_READ_BOOL(b_dn, cg-scale, p_cfg->cg_scale);
2670 OF_READ_BOOL(b_dn, fixed-output-forcing, fixed_forcing);
2671 OF_READ_BOOL(b_dn, auto-output-forcing, auto_forcing);
2672 OF_READ_BOOL(b_dn, no-output-forcing, no_forcing);
2673 if (fixed_forcing) {
2675 p_cfg->force_mode = TEGRA_CL_DVFS_FORCE_FIXED;
2679 p_cfg->force_mode = TEGRA_CL_DVFS_FORCE_AUTO;
2683 p_cfg->force_mode = TEGRA_CL_DVFS_FORCE_NONE;
2686 dev_err(&pdev->dev, "%s force_mode in DT board data\n",
2687 i ? "inconsistent" : "missing");
2699 static int cl_dvfs_dt_parse_pdata(struct platform_device *pdev,
2700 struct tegra_cl_dvfs_platform_data *p_data)
2704 struct device_node *dn = pdev->dev.of_node;
2705 struct device_node *i2c_dn, *pwm_dn, *b_dn;
2706 const struct of_device_id *match;
2708 ret = of_property_read_string(dn, "out-clock-name",
2709 &p_data->dfll_clk_name);
2711 dev_err(&pdev->dev, "missing target clock name in DT data\n");
2714 dev_dbg(&pdev->dev, "DT: target clock: %s\n", p_data->dfll_clk_name);
2716 match = of_match_node(tegra_cl_dvfs_of_match, dn);
2717 if (match && match->data) {
2718 const struct tegra_cl_dvfs_soc_match_data *data = match->data;
2719 flags |= data->flags;
2722 if (of_find_property(dn, "i2c-quiet-output-workaround", NULL))
2723 flags |= TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET;
2724 if (of_find_property(dn, "monitor-data-new-workaround", NULL))
2725 flags |= TEGRA_CL_DVFS_DATA_NEW_NO_USE;
2726 if (!of_find_property(dn, "dynamic-output-lut-workaround", NULL))
2727 flags |= TEGRA_CL_DVFS_DYN_OUTPUT_CFG; /* inverse polarity */
2728 if (flags & TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE) {
2729 /* Properties below are accepted only with idle override */
2730 if (of_find_property(dn, "defer-force-calibrate", NULL))
2731 flags |= TEGRA_CL_DVFS_DEFER_FORCE_CALIBRATE;
2732 if (of_find_property(dn, "calibrate-force-vmin", NULL))
2733 flags |= TEGRA_CL_DVFS_CALIBRATE_FORCE_VMIN;
2735 p_data->flags = flags;
2736 dev_dbg(&pdev->dev, "DT: flags: 0x%x\n", p_data->flags);
2738 OF_READ_U32_OPT(dn, tune-ramp-delay, p_data->tune_ramp_delay);
2739 OF_READ_U32_OPT(dn, resume-ramp-delay, p_data->resume_ramp_delay);
2741 /* pmic integration */
2742 i2c_dn = of_parse_phandle(dn, "i2c-pmic-integration", 0);
2743 pwm_dn = of_get_child_by_name(dn, "pwm-pmic-integration");
2744 if (!i2c_dn == !pwm_dn) {
2745 of_node_put(i2c_dn);
2746 of_node_put(pwm_dn);
2747 dev_err(&pdev->dev, "%s DT pmic data\n",
2748 i2c_dn ? "inconsistent" : "missing");
2752 ret = i2c_dn ? dt_parse_i2c_pmic_params(pdev, i2c_dn, p_data) :
2753 dt_parse_pwm_pmic_params(pdev, pwm_dn, p_data);
2755 dev_err(&pdev->dev, "failed to parse DT pmic data\n");
2758 p_data->pmu_if = i2c_dn ? TEGRA_CL_DVFS_PMU_I2C : TEGRA_CL_DVFS_PMU_PWM;
2760 /* board configuration parameters */
2761 b_dn = of_parse_phandle(dn, "board-params", 0);
2763 dev_err(&pdev->dev, "missing DT board data\n");
2767 ret = dt_parse_board_params(pdev, b_dn, p_data->cfg_param);
2769 dev_err(&pdev->dev, "failed to parse DT board data\n");
2773 dev_info(&pdev->dev, "DT data retrieved successfully\n");
2777 static void *tegra_cl_dvfs_dt_parse_pdata(struct platform_device *pdev)
2783 static int __init tegra_cl_dvfs_probe(struct platform_device *pdev)
2786 struct tegra_cl_dvfs_platform_data *p_data;
2787 struct resource *res, *res_i2c = NULL;
2788 struct tegra_cl_dvfs_cfg_param *p_cfg = NULL;
2789 struct voltage_reg_map *p_vdd_map = NULL;
2790 struct tegra_cl_dvfs *cld = NULL;
2791 struct clk *ref_clk, *soc_clk, *i2c_clk, *safe_dvfs_clk, *dfll_clk;
2794 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2796 dev_err(&pdev->dev, "missing register base\n");
2799 dev_dbg(&pdev->dev, "DFLL MMIO [0x%lx ... 0x%lx]\n",
2800 (unsigned long)res->start, (unsigned long)res->end);
2802 if (pdev->num_resources > 1) {
2803 res_i2c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2805 dev_err(&pdev->dev, "missing i2c register base\n");
2808 dev_dbg(&pdev->dev, "DFLL I2C MMIO [0x%lx ... 0x%lx]\n",
2809 (unsigned long)res_i2c->start,
2810 (unsigned long)res_i2c->end);
2813 p_data = pdev->dev.platform_data;
2815 p_data = kzalloc(sizeof(*p_data), GFP_KERNEL);
2817 dev_err(&pdev->dev, "failed to allocate p_data\n");
2821 p_cfg = kzalloc(sizeof(*p_cfg), GFP_KERNEL);
2823 dev_err(&pdev->dev, "failed to allocate p_cfg\n");
2828 p_data->cfg_param = p_cfg;
2829 ret = cl_dvfs_dt_parse_pdata(pdev, p_data);
2831 dev_err(&pdev->dev, "failed to parse DT p_data\n");
2834 } else if (!p_data->cfg_param) {
2835 dev_err(&pdev->dev, "missing platform data\n");
2840 ref_clk = clk_get(&pdev->dev, "ref");
2841 soc_clk = clk_get(&pdev->dev, "soc");
2842 i2c_clk = clk_get(&pdev->dev, "i2c");
2843 safe_dvfs_clk = clk_get(&pdev->dev, "safe_dvfs");
2844 dfll_clk = clk_get(&pdev->dev, p_data->dfll_clk_name);
2845 if (IS_ERR(ref_clk) || IS_ERR(soc_clk) || IS_ERR(i2c_clk)) {
2846 dev_err(&pdev->dev, "missing control clock\n");
2850 if (IS_ERR(safe_dvfs_clk)) {
2851 dev_err(&pdev->dev, "missing safe dvfs source clock\n");
2852 ret = PTR_ERR(safe_dvfs_clk);
2855 if (IS_ERR(dfll_clk)) {
2856 dev_err(&pdev->dev, "missing target dfll clock\n");
2857 ret = PTR_ERR(dfll_clk);
2860 if (!safe_dvfs_clk->dvfs || !safe_dvfs_clk->dvfs->dvfs_rail) {
2861 dev_err(&pdev->dev, "invalid safe dvfs source\n");
2866 /* Build vdd_map if not specified by platform data */
2867 if (!p_data->vdd_map || !p_data->vdd_map_size) {
2868 struct regulator *reg = safe_dvfs_clk->dvfs->dvfs_rail->reg;
2869 if (p_data->pmu_if == TEGRA_CL_DVFS_PMU_PWM)
2870 ret = build_direct_vdd_map(p_data, &p_vdd_map);
2872 ret = build_regulator_vdd_map(p_data, reg, &p_vdd_map);
2875 dev_err(&pdev->dev, "missing vdd_map (%d)\n", ret);
2880 /* Allocate cl_dvfs object and populate resource accessors */
2881 cld = kzalloc(sizeof(*cld), GFP_KERNEL);
2883 dev_err(&pdev->dev, "failed to allocate cl_dvfs object\n");
2888 cld->cl_base = IO_ADDRESS(res->start);
2889 cld->cl_i2c_base = res_i2c ? IO_ADDRESS(res_i2c->start) : cld->cl_base;
2890 cld->p_data = p_data;
2891 cld->ref_clk = ref_clk;
2892 cld->soc_clk = soc_clk;
2893 cld->i2c_clk = i2c_clk;
2894 cld->dfll_clk = dfll_clk;
2895 cld->safe_dvfs = safe_dvfs_clk->dvfs;
2896 #ifdef CONFIG_THERMAL
2897 INIT_WORK(&cld->init_cdev_work, tegra_cl_dvfs_init_cdev);
2899 /* Initialize cl_dvfs */
2900 ret = cl_dvfs_init(cld);
2904 /* From now on probe would not fail */
2905 platform_set_drvdata(pdev, cld);
2908 * I2C interface mux is embedded into cl_dvfs h/w, so the attached
2909 * regulator can be accessed by s/w independently. PWM interface,
2910 * on the other hand, is accessible solely through cl_dvfs registers.
2911 * Hence, bypass device is supported in PWM mode only.
2913 if ((p_data->pmu_if == TEGRA_CL_DVFS_PMU_PWM) &&
2914 p_data->u.pmu_pwm.dfll_bypass_dev) {
2915 clk_enable(cld->soc_clk);
2916 cl_dvfs_init_pwm_bypass(cld, p_data->u.pmu_pwm.dfll_bypass_dev);
2919 /* Register SiMon notifier */
2920 tegra_cl_dvfs_register_simon_notifier(cld);
2923 * Schedule cooling device registration as a separate work to address
2924 * the following race: when cl_dvfs is probed the DFLL child clock
2925 * (e.g., CPU) cannot be changed; on the other hand cooling device
2926 * registration will update the entire thermal zone, and may trigger
2927 * rate change of the target clock
2929 if (cld->safe_dvfs->dvfs_rail->vmin_cdev ||
2930 cld->safe_dvfs->dvfs_rail->vmax_cdev)
2931 schedule_work(&cld->init_cdev_work);
2935 if (p_data && p_vdd_map)
2936 p_data->vdd_map = NULL;
2939 if (!pdev->dev.platform_data) {
2946 static struct platform_driver tegra_cl_dvfs_driver = {
2948 .name = "tegra_cl_dvfs",
2949 .owner = THIS_MODULE,
2950 .of_match_table = tegra_cl_dvfs_of_match,
2951 #ifdef CONFIG_PM_SLEEP
2952 .pm = &tegra_cl_dvfs_pm_ops,
2957 int __init tegra_init_cl_dvfs(void)
2959 return platform_driver_probe(&tegra_cl_dvfs_driver,
2960 tegra_cl_dvfs_probe);
2966 * - DISABLED: control logic mode - DISABLED, output interface disabled,
2968 * - OPEN_LOOP: control logic mode - OPEN_LOOP, output interface disabled,
2969 * dfll is running "unlocked"
2970 * - CLOSED_LOOP: control logic mode - CLOSED_LOOP, output interface enabled,
2971 * dfll is running "locked"
2974 /* Switch from any other state to DISABLED state */
2975 void tegra_cl_dvfs_disable(struct tegra_cl_dvfs *cld)
2977 switch (cld->mode) {
2978 case TEGRA_CL_DVFS_CLOSED_LOOP:
2979 WARN(1, "DFLL is disabled directly from closed loop mode\n");
2981 output_disable_ol_prepare(cld);
2982 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
2983 output_disable_post_ol(cld);
2984 invalidate_request(cld);
2985 cl_dvfs_disable_clocks(cld);
2988 case TEGRA_CL_DVFS_OPEN_LOOP:
2989 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
2990 invalidate_request(cld);
2991 cl_dvfs_disable_clocks(cld);
2995 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
3000 /* Switch from DISABLE state to OPEN_LOOP state */
3001 int tegra_cl_dvfs_enable(struct tegra_cl_dvfs *cld)
3003 if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
3004 pr_err("%s: Cannot enable DFLL in %s mode\n",
3005 __func__, mode_name[cld->mode]);
3009 if (cld->mode != TEGRA_CL_DVFS_DISABLED)
3012 cl_dvfs_enable_clocks(cld);
3013 if (cld->p_data->flags & TEGRA_CL_DVFS_SCALE_IN_OPEN_LOOP)
3014 set_request_scale(cld, cld->last_req.scale);
3015 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
3020 /* Switch from OPEN_LOOP state to CLOSED_LOOP state */
3021 int tegra_cl_dvfs_lock(struct tegra_cl_dvfs *cld)
3023 struct dfll_rate_req *req = &cld->last_req;
3025 switch (cld->mode) {
3026 case TEGRA_CL_DVFS_CLOSED_LOOP:
3029 case TEGRA_CL_DVFS_OPEN_LOOP:
3030 if (req->freq == 0) {
3031 pr_err("%s: Cannot lock DFLL at rate 0\n", __func__);
3036 * Update control logic setting with last rate request;
3037 * sync output limits with current tuning and thermal state,
3038 * enable output and switch to closed loop mode. Make sure
3039 * forced output does not interfere with closed loop.
3041 set_cl_config(cld, req);
3043 set_mode(cld, TEGRA_CL_DVFS_CLOSED_LOOP);
3044 set_request(cld, req);
3045 calibration_timer_update(cld);
3049 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
3050 pr_err("%s: Cannot lock DFLL in %s mode\n",
3051 __func__, mode_name[cld->mode]);
3056 /* Switch from CLOSED_LOOP state to OPEN_LOOP state */
3057 int tegra_cl_dvfs_unlock(struct tegra_cl_dvfs *cld)
3062 switch (cld->mode) {
3063 case TEGRA_CL_DVFS_CLOSED_LOOP:
3065 in_range = is_vmin_delivered(cld);
3067 /* allow grace 2 sample periods to get in range */
3069 udelay(2 * GET_SAMPLE_PERIOD(cld));
3071 ret = output_disable_ol_prepare(cld);
3072 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
3074 ret = output_disable_post_ol(cld);
3076 if (!ret && !in_range && !is_vmin_delivered(cld)) {
3077 pr_err("cl_dvfs: exiting closed loop out of range\n");
3082 case TEGRA_CL_DVFS_OPEN_LOOP:
3086 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
3087 pr_err("%s: Cannot unlock DFLL in %s mode\n",
3088 __func__, mode_name[cld->mode]);
3094 * Convert requested rate into the control logic settings. In CLOSED_LOOP mode,
3095 * update new settings immediately to adjust DFLL output rate accordingly.
3096 * Otherwise, just save them until next switch to closed loop.
3098 int tegra_cl_dvfs_request_rate(struct tegra_cl_dvfs *cld, unsigned long rate)
3101 bool dvco_min_crossed, dvco_min_updated;
3102 struct dfll_rate_req req;
3105 if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
3106 pr_err("%s: Cannot set DFLL rate in %s mode\n",
3107 __func__, mode_name[cld->mode]);
3111 /* Calibrate dfll minimum rate */
3112 cl_dvfs_calibrate(cld);
3114 /* Update minimum dvco rate if we are crossing tuning threshold */
3115 dvco_min_updated = cl_tune_target(cld, rate) !=
3116 cl_tune_target(cld, cld->last_req.rate);
3117 if (dvco_min_updated)
3118 cl_dvfs_set_dvco_rate_min(cld, &req);
3120 /* Determine DFLL output scale */
3121 req.scale = SCALE_MAX - 1;
3122 if (rate < cld->dvco_rate_min) {
3123 int scale = DIV_ROUND_CLOSEST((rate / 1000 * SCALE_MAX),
3124 (cld->dvco_rate_min / 1000));
3126 pr_err("%s: Rate %lu is below scalable range\n",
3130 req.scale = scale - 1;
3131 rate = cld->dvco_rate_min;
3133 dvco_min_crossed = (rate == cld->dvco_rate_min) &&
3134 (cld->last_req.rate > cld->dvco_rate_min);
3136 /* Convert requested rate into frequency request and scale settings */
3137 val = GET_REQUEST_FREQ(rate, cld->ref_rate);
3138 if (val > FREQ_MAX) {
3139 pr_err("%s: Rate %lu is above dfll range\n", __func__, rate);
3143 rate = GET_REQUEST_RATE(val, cld->ref_rate);
3145 /* Find safe voltage for requested rate */
3146 if (find_safe_output(cld, rate, &req.output)) {
3147 pr_err("%s: Failed to find safe output for rate %lu\n",
3151 req.cap = req.output;
3154 * Save validated request, and in CLOSED_LOOP mode actually update
3155 * control logic settings; use request output to set maximum voltage
3156 * limit, but keep one LUT step room above safe voltage
3158 cld->last_req = req;
3160 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
3161 set_cl_config(cld, &cld->last_req);
3162 set_request(cld, &cld->last_req);
3163 if (dvco_min_crossed || dvco_min_updated)
3164 calibration_timer_update(cld);
3165 } else if ((cld->mode == TEGRA_CL_DVFS_OPEN_LOOP) &&
3166 (cld->p_data->flags & TEGRA_CL_DVFS_SCALE_IN_OPEN_LOOP)) {
3167 set_request_scale(cld, req.scale);
3172 /* Restore dvco rate minimum */
3173 if (dvco_min_updated)
3174 cl_dvfs_set_dvco_rate_min(cld, &cld->last_req);
3179 unsigned long tegra_cl_dvfs_request_get(struct tegra_cl_dvfs *cld)
3181 struct dfll_rate_req *req = &cld->last_req;
3184 * If running below dvco minimum rate with skipper resolution:
3185 * dvco min rate / 256 - return last requested rate rounded to 1kHz.
3186 * If running above dvco minimum, with closed loop resolution:
3187 * ref rate / 2 - return cl_dvfs target rate.
3189 if ((req->scale + 1) < SCALE_MAX)
3190 return req->rate / 1000 * 1000;
3192 return GET_REQUEST_RATE(req->freq, cld->ref_rate);
3196 * Compare actually set (last delivered) and required Vmin. These levels may
3197 * be different if temperature or SiMon grade changes while cl-dvfs output
3198 * interface is disabled, and new required setting is not delivered to PMIC.
3199 * It actually may happen while cl_dvfs is disabled, or during transition
3200 * to/from disabled state.
3203 * 0 if levels are equal,
3204 * +1 if last Vmin is above required,
3205 * -1 if last Vmin is below required.
3207 int tegra_dvfs_cmp_dfll_vmin_tfloor(struct clk *dfll_clk, int *tfloor)
3210 unsigned long flags;
3211 u8 needed_out_min, last_out_min;
3212 struct tegra_cl_dvfs *cld;
3217 cld = tegra_dfll_get_cl_dvfs_data(dfll_clk);
3219 return PTR_ERR(cld);
3221 clk_lock_save(dfll_clk, &flags);
3222 needed_out_min = get_output_min(cld);
3223 last_out_min = cld->lut_min;
3225 if (last_out_min > needed_out_min)
3227 else if (last_out_min < needed_out_min)
3231 *tfloor = get_mv(cld, needed_out_min);
3233 clk_unlock_restore(dfll_clk, &flags);
3238 * Voltage clamping interface: set maximum and minimum voltage limits at the
3239 * same lowest safe (for current temperature and tuning range) level. Allows
3240 * temporary fix output voltage in closed loop mode. Clock rate target in this
3241 * state is ignored, DFLL rate is just determined by the fixed limits. Clamping
3242 * request is rejected if limits are already clamped, or DFLL is not in closed
3245 * This interface is tailored for fixing voltage during SiMon grading; no other
3246 * s/w should use it.
3248 * Return: fixed positive voltage if clamping request was successful, or
3249 * 0 if un-clamping request was successful, or -EPERM if request is rejected.
3252 int tegra_dvfs_clamp_dfll_at_vmin(struct clk *dfll_clk, bool clamp)
3254 struct tegra_cl_dvfs *cld;
3255 unsigned long flags;
3261 cld = tegra_dfll_get_cl_dvfs_data(dfll_clk);
3263 return PTR_ERR(cld);
3265 clk_lock_save(dfll_clk, &flags);
3266 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
3267 if (clamp && !cld->v_limits.clamped) {
3268 u8 out_min = max(cld->lut_min, cld->force_out_min);
3269 set_output_limits(cld, out_min, out_min);
3270 cld->v_limits.clamped = true;
3271 ret = cld->v_limits.vmin;
3272 } else if (!clamp) {
3273 if (cld->v_limits.clamped) {
3274 cld->v_limits.clamped = false;
3275 set_cl_config(cld, &cld->last_req);
3276 set_request(cld, &cld->last_req);
3281 clk_unlock_restore(dfll_clk, &flags);
3284 EXPORT_SYMBOL(tegra_dvfs_clamp_dfll_at_vmin);
3287 * Get the new Vmin setting from external rail that is connected to same CPU
3290 int tegra_dvfs_set_rail_relations_dfll_vmin(struct clk *dfll_clk,
3291 int rail_relations_vmin)
3293 struct tegra_cl_dvfs *cld;
3294 unsigned long flags;
3295 u8 rail_relations_out_min;
3300 /* get handle to cl_dvfs from dfll_clk */
3301 cld = tegra_dfll_get_cl_dvfs_data(dfll_clk);
3303 return PTR_ERR(cld);
3305 clk_lock_save(cld->dfll_clk, &flags);
3307 /* convert mv to output value of cl_dvfs */
3308 rail_relations_out_min = find_mv_out_cap(cld, rail_relations_vmin);
3310 if (cld->rail_relations_out_min != rail_relations_out_min) {
3311 cld->rail_relations_out_min = rail_relations_out_min;
3312 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
3313 tegra_cl_dvfs_request_rate(cld,
3314 tegra_cl_dvfs_request_get(cld));
3315 /* Delay to make sure new Vmin delivery started */
3316 udelay(2 * GET_SAMPLE_PERIOD(cld));
3319 clk_unlock_restore(cld->dfll_clk, &flags);
3323 #ifdef CONFIG_DEBUG_FS
3325 static int lock_get(void *data, u64 *val)
3327 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3328 *val = cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP;
3331 static int lock_set(void *data, u64 val)
3333 struct clk *c = (struct clk *)data;
3334 return tegra_clk_cfg_ex(c, TEGRA_CLK_DFLL_LOCK, val);
3336 DEFINE_SIMPLE_ATTRIBUTE(lock_fops, lock_get, lock_set, "%llu\n");
3338 static int flags_get(void *data, u64 *val)
3340 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3341 *val = cld->p_data->flags;
3344 DEFINE_SIMPLE_ATTRIBUTE(flags_fops, flags_get, NULL, "0x%llx\n");
3346 static int monitor_get(void *data, u64 *val)
3349 unsigned long flags;
3350 struct clk *c = (struct clk *)data;
3351 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3353 clk_enable(cld->soc_clk);
3354 clk_lock_save(c, &flags);
3356 switch_monitor(cld, CL_DVFS_MONITOR_CTRL_FREQ);
3357 wait_data_new(cld, &v);
3358 filter_monitor_data(cld, &v); /* ignore error, use "some value" */
3360 v = GET_MONITORED_RATE(v, cld->ref_rate);
3361 s = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
3362 s = (s & CL_DVFS_FREQ_REQ_SCALE_MASK) >> CL_DVFS_FREQ_REQ_SCALE_SHIFT;
3363 *val = (u64)v * (s + 1) / 256;
3365 clk_unlock_restore(c, &flags);
3366 clk_disable(cld->soc_clk);
3369 DEFINE_SIMPLE_ATTRIBUTE(monitor_fops, monitor_get, NULL, "%llu\n");
3371 static int output_get(void *data, u64 *val)
3374 unsigned long flags;
3375 struct clk *c = (struct clk *)data;
3376 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3378 clk_enable(cld->soc_clk);
3379 clk_lock_save(c, &flags);
3381 v = cl_dvfs_get_output(cld);
3382 if (IS_ERR_VALUE(v))
3383 v = get_last_output(cld); /* ignore error, use "some value" */
3384 *val = get_mv(cld, v);
3386 clk_unlock_restore(c, &flags);
3387 clk_disable(cld->soc_clk);
3390 DEFINE_SIMPLE_ATTRIBUTE(output_fops, output_get, NULL, "%llu\n");
3392 static int vmax_get(void *data, u64 *val)
3394 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3395 *val = cld->v_limits.vmax;
3398 DEFINE_SIMPLE_ATTRIBUTE(vmax_fops, vmax_get, NULL, "%llu\n");
3400 static int vmin_get(void *data, u64 *val)
3402 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3403 *val = cld->v_limits.vmin;
3406 DEFINE_SIMPLE_ATTRIBUTE(vmin_fops, vmin_get, NULL, "%llu\n");
3408 static int tune_high_mv_get(void *data, u64 *val)
3410 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3411 *val = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
3414 static int tune_high_mv_set(void *data, u64 val)
3416 unsigned long flags;
3417 struct clk *c = (struct clk *)data;
3418 struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
3420 clk_lock_save(c, &flags);
3422 cld->safe_dvfs->dfll_data.tune_high_min_millivolts = val;
3423 cl_dvfs_init_output_thresholds(cld);
3424 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
3425 tegra_cl_dvfs_request_rate(cld,
3426 tegra_cl_dvfs_request_get(cld));
3429 clk_unlock_restore(c, &flags);
3432 DEFINE_SIMPLE_ATTRIBUTE(tune_high_mv_fops, tune_high_mv_get, tune_high_mv_set,
3435 static int fout_mv_get(void *data, u64 *val)
3438 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3439 v = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE) & OUT_MASK;
3440 *val = cld->p_data->vdd_map[v].reg_uV / 1000;
3443 static int fout_mv_set(void *data, u64 val)
3446 unsigned long flags;
3447 struct clk *c = (struct clk *)data;
3448 struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
3450 clk_enable(cld->soc_clk);
3451 clk_lock_save(c, &flags);
3454 u8 out_v = is_i2c(cld) ? find_mv_out_cap(cld, (int)val) :
3455 find_vdd_map_entry(cld, (int)val, false)->reg_value;
3456 v = output_force_set_val(cld, out_v);
3457 if (!(v & CL_DVFS_OUTPUT_FORCE_ENABLE))
3458 output_force_enable(cld, v);
3460 output_force_disable(cld);
3463 clk_unlock_restore(c, &flags);
3464 clk_disable(cld->soc_clk);
3467 DEFINE_SIMPLE_ATTRIBUTE(fout_mv_fops, fout_mv_get, fout_mv_set, "%llu\n");
3469 static int fmin_get(void *data, u64 *val)
3471 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3472 *val = cld->dvco_rate_min;
3475 DEFINE_SIMPLE_ATTRIBUTE(dvco_rate_min_fops, fmin_get, NULL, "%llu\n");
3477 static int calibr_delay_get(void *data, u64 *val)
3479 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3480 *val = jiffies_to_msecs(cld->calibration_delay);
3483 static int calibr_delay_set(void *data, u64 val)
3485 unsigned long flags;
3486 struct clk *c = (struct clk *)data;
3487 struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
3489 clk_lock_save(c, &flags);
3490 cld->calibration_delay = msecs_to_jiffies(val);
3491 clk_unlock_restore(c, &flags);
3494 DEFINE_SIMPLE_ATTRIBUTE(calibr_delay_fops, calibr_delay_get, calibr_delay_set,
3497 static int undershoot_get(void *data, u64 *val)
3499 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3500 *val = cld->p_data->pmu_undershoot_gb;
3503 static int undershoot_set(void *data, u64 val)
3505 unsigned long flags;
3506 struct clk *c = (struct clk *)data;
3507 struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
3509 clk_lock_save(c, &flags);
3510 cld->p_data->pmu_undershoot_gb = val;
3511 cl_dvfs_set_force_out_min(cld);
3512 clk_unlock_restore(c, &flags);
3515 DEFINE_SIMPLE_ATTRIBUTE(undershoot_fops, undershoot_get, undershoot_set,
3518 static int clamp_get(void *data, u64 *val)
3520 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3521 *val = cld->v_limits.clamped ? cld->v_limits.vmin : 0;
3524 static int clamp_set(void *data, u64 val)
3526 struct clk *dfll_clk = data;
3527 int ret = tegra_dvfs_clamp_dfll_at_vmin(dfll_clk, val);
3528 return ret < 0 ? ret : 0;
3530 DEFINE_SIMPLE_ATTRIBUTE(clamp_fops, clamp_get, clamp_set, "%llu\n");
3532 static int cl_profiles_show(struct seq_file *s, void *data)
3537 struct clk *c = s->private;
3538 struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
3540 seq_printf(s, "THERM CAPS:%s\n", cld->therm_caps_num ? "" : " NONE");
3541 for (i = 0; i < cld->therm_caps_num; i++) {
3542 v = cld->thermal_out_caps[i];
3543 trips = cld->safe_dvfs->dvfs_rail->vmax_cdev->trip_temperatures;
3544 seq_printf(s, "%3dC.. %5dmV\n", trips[i], get_mv(cld, v));
3547 if (cld->tune_high_target_rate_min == ULONG_MAX) {
3548 seq_puts(s, "TUNE HIGH: NONE\n");
3550 seq_puts(s, "TUNE HIGH:\n");
3551 seq_printf(s, "min %5dmV%9lukHz\n",
3552 get_mv(cld, cld->tune_high_out_min),
3553 cld->tune_high_dvco_rate_min / 1000);
3554 seq_printf(s, "%-14s%9lukHz\n", "rate threshold",
3555 cld->tune_high_target_rate_min / 1000);
3558 seq_printf(s, "THERM FLOORS:%s\n", cld->therm_floors_num ? "" : " NONE");
3559 for (i = 0; i < cld->therm_floors_num; i++) {
3560 v = cld->thermal_out_floors[i];
3561 r = cld->dvco_rate_floors[i];
3562 trips = cld->safe_dvfs->dvfs_rail->vmin_cdev->trip_temperatures;
3563 seq_printf(s, " ..%3dC%5dmV%9lukHz%s\n",
3564 trips[i], get_mv(cld, v),
3565 (r ? : get_dvco_rate_below(cld, v)) / 1000,
3566 r ? " (calibrated)" : "");
3568 r = cld->dvco_rate_floors[i];
3569 seq_printf(s, " vmin:%5dmV%9lukHz%s\n", cld->out_map[0]->reg_uV / 1000,
3570 (r ? : cld->safe_dvfs->dfll_data.out_rate_min) / 1000,
3571 r ? " (calibrated)" : "");
3576 static int cl_profiles_open(struct inode *inode, struct file *file)
3578 return single_open(file, cl_profiles_show, inode->i_private);
3581 static const struct file_operations cl_profiles_fops = {
3582 .open = cl_profiles_open,
3584 .llseek = seq_lseek,
3585 .release = single_release,
3588 static int cl_register_show(struct seq_file *s, void *data)
3591 struct clk *c = s->private;
3592 struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
3594 clk_enable(cld->soc_clk);
3596 seq_printf(s, "CONTROL REGISTERS:\n");
3597 for (offs = 0; offs <= CL_DVFS_MONITOR_DATA; offs += 4)
3598 seq_printf(s, "[0x%02x] = 0x%08x\n",
3599 offs, cl_dvfs_readl(cld, offs));
3601 seq_printf(s, "\nI2C and INTR REGISTERS:\n");
3602 for (offs = CL_DVFS_I2C_CFG; offs <= CL_DVFS_I2C_STS; offs += 4)
3603 seq_printf(s, "[0x%02x] = 0x%08x\n",
3604 offs, cl_dvfs_readl(cld, offs));
3606 offs = CL_DVFS_INTR_STS;
3607 seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
3608 offs = CL_DVFS_INTR_EN;
3609 seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
3611 if (cld->p_data->flags & TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE) {
3612 seq_printf(s, "\nOVERRIDE REGISTERS:\n");
3613 offs = CL_DVFS_CC4_HVC;
3614 seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
3615 cl_dvfs_readl(cld, offs));
3618 seq_printf(s, "\nLUT:\n");
3619 for (offs = CL_DVFS_OUTPUT_LUT;
3620 offs < CL_DVFS_OUTPUT_LUT + 4 * MAX_CL_DVFS_VOLTAGES;
3622 seq_printf(s, "[0x%02x] = 0x%08x\n",
3623 offs, cl_dvfs_readl(cld, offs));
3625 clk_disable(cld->soc_clk);
3629 static int cl_register_open(struct inode *inode, struct file *file)
3631 return single_open(file, cl_register_show, inode->i_private);
3634 static ssize_t cl_register_write(struct file *file,
3635 const char __user *userbuf, size_t count, loff_t *ppos)
3640 struct clk *c = file->f_path.dentry->d_inode->i_private;
3641 struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
3643 if (sizeof(buf) <= count)
3646 if (copy_from_user(buf, userbuf, count))
3649 /* terminate buffer and trim - white spaces may be appended
3650 * at the end when invoked from shell command line */
3654 if (sscanf(buf, "[0x%x] = 0x%x", &offs, &val) != 2)
3657 if (offs >= CL_DVFS_APERTURE)
3660 clk_enable(cld->soc_clk);
3661 cl_dvfs_writel(cld, val, offs & (~0x3));
3662 clk_disable(cld->soc_clk);
3666 static const struct file_operations cl_register_fops = {
3667 .open = cl_register_open,
3669 .write = cl_register_write,
3670 .llseek = seq_lseek,
3671 .release = single_release,
3674 int __init tegra_cl_dvfs_debug_init(struct clk *dfll_clk)
3676 struct dentry *cl_dvfs_dentry;
3678 if (!dfll_clk || !dfll_clk->dent || (dfll_clk->state == UNINITIALIZED))
3681 if (!debugfs_create_file("lock", S_IRUGO | S_IWUSR,
3682 dfll_clk->dent, dfll_clk, &lock_fops))
3685 cl_dvfs_dentry = debugfs_create_dir("cl_dvfs", dfll_clk->dent);
3686 if (!cl_dvfs_dentry)
3689 if (!debugfs_create_file("flags", S_IRUGO,
3690 cl_dvfs_dentry, dfll_clk, &flags_fops))
3693 if (!debugfs_create_file("monitor", S_IRUGO,
3694 cl_dvfs_dentry, dfll_clk, &monitor_fops))
3697 if (!debugfs_create_file("output_mv", S_IRUGO,
3698 cl_dvfs_dentry, dfll_clk, &output_fops))
3701 if (!debugfs_create_file("vmax_mv", S_IRUGO,
3702 cl_dvfs_dentry, dfll_clk, &vmax_fops))
3705 if (!debugfs_create_file("vmin_mv", S_IRUGO,
3706 cl_dvfs_dentry, dfll_clk, &vmin_fops))
3709 if (!debugfs_create_file("tune_high_mv", S_IRUGO | S_IWUSR,
3710 cl_dvfs_dentry, dfll_clk, &tune_high_mv_fops))
3713 if (!debugfs_create_file("force_out_mv", S_IRUGO,
3714 cl_dvfs_dentry, dfll_clk, &fout_mv_fops))
3717 if (!debugfs_create_file("dvco_min", S_IRUGO,
3718 cl_dvfs_dentry, dfll_clk, &dvco_rate_min_fops))
3721 if (!debugfs_create_file("calibr_delay", S_IRUGO,
3722 cl_dvfs_dentry, dfll_clk, &calibr_delay_fops))
3725 if (!debugfs_create_file("pmu_undershoot_gb", S_IRUGO,
3726 cl_dvfs_dentry, dfll_clk, &undershoot_fops))
3729 if (!debugfs_create_file("clamp_at_min", S_IRUGO | S_IWUSR,
3730 cl_dvfs_dentry, dfll_clk, &clamp_fops))
3733 if (!debugfs_create_file("profiles", S_IRUGO,
3734 cl_dvfs_dentry, dfll_clk, &cl_profiles_fops))
3737 if (!debugfs_create_file("registers", S_IRUGO | S_IWUSR,
3738 cl_dvfs_dentry, dfll_clk, &cl_register_fops))
3744 debugfs_remove_recursive(dfll_clk->dent);