]> rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/blob - drivers/platform/tegra/tegra_cl_dvfs.c
dvfs: tegra: Validate CLDVFS register address
[sojka/nv-tegra/linux-3.10.git] / drivers / platform / tegra / tegra_cl_dvfs.c
1 /*
2  * drivers/platform/tegra/tegra_cl_dvfs.c
3  *
4  * Copyright (c) 2012-2015 NVIDIA Corporation. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/spinlock.h>
21 #include <linux/delay.h>
22 #include <linux/err.h>
23 #include <linux/io.h>
24 #include <linux/clk.h>
25 #include <linux/interrupt.h>
26 #include <linux/suspend.h>
27 #include <linux/debugfs.h>
28 #include <linux/seq_file.h>
29 #include <linux/uaccess.h>
30 #include <linux/module.h>
31 #include <linux/platform_device.h>
32 #include <linux/of_platform.h>
33 #include <linux/gpio.h>
34 #include <linux/of_gpio.h>
35 #include <linux/regulator/consumer.h>
36 #include <linux/regulator/tegra-dfll-bypass-regulator.h>
37 #include <linux/tegra-soc.h>
38 #include <linux/pinctrl/pinconf-tegra.h>
39
40 #include <mach/irqs.h>
41
42 #include <linux/platform/tegra/tegra_cl_dvfs.h>
43 #include <linux/platform/tegra/clock.h>
44 #include <linux/platform/tegra/dvfs.h>
45 #include "iomap.h"
46 #include "tegra_simon.h"
47
48 #define OUT_MASK                        0x3f
49
50 #define CL_DVFS_CTRL                    0x00
51 #define CL_DVFS_CONFIG                  0x04
52 #define CL_DVFS_CONFIG_DIV_MASK         0xff
53
54 #define CL_DVFS_PARAMS                  0x08
55 #define CL_DVFS_PARAMS_CG_SCALE         (0x1 << 24)
56 #define CL_DVFS_PARAMS_FORCE_MODE_SHIFT 22
57 #define CL_DVFS_PARAMS_FORCE_MODE_MASK  (0x3 << CL_DVFS_PARAMS_FORCE_MODE_SHIFT)
58 #define CL_DVFS_PARAMS_CF_PARAM_SHIFT   16
59 #define CL_DVFS_PARAMS_CF_PARAM_MASK    (0x3f << CL_DVFS_PARAMS_CF_PARAM_SHIFT)
60 #define CL_DVFS_PARAMS_CI_PARAM_SHIFT   8
61 #define CL_DVFS_PARAMS_CI_PARAM_MASK    (0x7 << CL_DVFS_PARAMS_CI_PARAM_SHIFT)
62 #define CL_DVFS_PARAMS_CG_PARAM_SHIFT   0
63 #define CL_DVFS_PARAMS_CG_PARAM_MASK    (0xff << CL_DVFS_PARAMS_CG_PARAM_SHIFT)
64
65 #define CL_DVFS_TUNE0                   0x0c
66 #define CL_DVFS_TUNE1                   0x10
67
68 #define CL_DVFS_FREQ_REQ                0x14
69 #define CL_DVFS_FREQ_REQ_FORCE_ENABLE   (0x1 << 28)
70 #define CL_DVFS_FREQ_REQ_FORCE_SHIFT    16
71 #define CL_DVFS_FREQ_REQ_FORCE_MASK     (0xfff << CL_DVFS_FREQ_REQ_FORCE_SHIFT)
72 #define FORCE_MAX                       2047
73 #define FORCE_MIN                       -2048
74 #define CL_DVFS_FREQ_REQ_SCALE_SHIFT    8
75 #define CL_DVFS_FREQ_REQ_SCALE_MASK     (0xff << CL_DVFS_FREQ_REQ_SCALE_SHIFT)
76 #define SCALE_MAX                       256
77 #define CL_DVFS_FREQ_REQ_FREQ_VALID     (0x1 << 7)
78 #define CL_DVFS_FREQ_REQ_FREQ_SHIFT     0
79 #define CL_DVFS_FREQ_REQ_FREQ_MASK      (0x7f << CL_DVFS_FREQ_REQ_FREQ_SHIFT)
80 #define FREQ_MAX                        127
81
82 #define CL_DVFS_SCALE_RAMP              0x18
83
84 #define CL_DVFS_DROOP_CTRL              0x1c
85 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT 16
86 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK  \
87                 (0xff << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT)
88 #define CL_DVFS_DROOP_CTRL_CUT_SHIFT    8
89 #define CL_DVFS_DROOP_CTRL_CUT_MASK     (0xf << CL_DVFS_DROOP_CTRL_CUT_SHIFT)
90 #define CL_DVFS_DROOP_CTRL_RAMP_SHIFT   0
91 #define CL_DVFS_DROOP_CTRL_RAMP_MASK    (0xff << CL_DVFS_DROOP_CTRL_RAMP_SHIFT)
92
93 #define CL_DVFS_OUTPUT_CFG              0x20
94 #define CL_DVFS_OUTPUT_CFG_I2C_ENABLE   (0x1 << 30)
95 #define CL_DVFS_OUTPUT_CFG_SAFE_SHIFT   24
96 #define CL_DVFS_OUTPUT_CFG_SAFE_MASK    \
97                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT)
98 #define CL_DVFS_OUTPUT_CFG_MAX_SHIFT    16
99 #define CL_DVFS_OUTPUT_CFG_MAX_MASK     \
100                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MAX_SHIFT)
101 #define CL_DVFS_OUTPUT_CFG_MIN_SHIFT    8
102 #define CL_DVFS_OUTPUT_CFG_MIN_MASK     \
103                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MIN_SHIFT)
104 #define CL_DVFS_OUTPUT_CFG_PWM_DELTA    (0x1 << 7)
105 #define CL_DVFS_OUTPUT_CFG_PWM_ENABLE   (0x1 << 6)
106 #define CL_DVFS_OUTPUT_CFG_PWM_DIV_SHIFT 0
107 #define CL_DVFS_OUTPUT_CFG_PWM_DIV_MASK  \
108                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_PWM_DIV_SHIFT)
109
110 #define CL_DVFS_OUTPUT_FORCE            0x24
111 #define CL_DVFS_OUTPUT_FORCE_ENABLE     (0x1 << 6)
112 #define CL_DVFS_OUTPUT_FORCE_VALUE_SHIFT 0
113 #define CL_DVFS_OUTPUT_FORCE_VALUE_MASK  \
114                 (OUT_MASK << CL_DVFS_OUTPUT_FORCE_VALUE_SHIFT)
115
116 #define CL_DVFS_MONITOR_CTRL            0x28
117 #define CL_DVFS_MONITOR_CTRL_DISABLE    0
118 #define CL_DVFS_MONITOR_CTRL_OUT        5
119 #define CL_DVFS_MONITOR_CTRL_FREQ       6
120 #define CL_DVFS_MONITOR_DATA            0x2c
121 #define CL_DVFS_MONITOR_DATA_NEW        (0x1 << 16)
122 #define CL_DVFS_MONITOR_DATA_MASK       0xFFFF
123
124 #define CL_DVFS_I2C_CFG                 0x40
125 #define CL_DVFS_I2C_CFG_ARB_ENABLE      (0x1 << 20)
126 #define CL_DVFS_I2C_CFG_HS_CODE_SHIFT   16
127 #define CL_DVFS_I2C_CFG_HS_CODE_MASK    (0x7 << CL_DVFS_I2C_CFG_HS_CODE_SHIFT)
128 #define CL_DVFS_I2C_CFG_PACKET_ENABLE   (0x1 << 15)
129 #define CL_DVFS_I2C_CFG_SIZE_SHIFT      12
130 #define CL_DVFS_I2C_CFG_SIZE_MASK       (0x7 << CL_DVFS_I2C_CFG_SIZE_SHIFT)
131 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_10   (0x1 << 10)
132 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT 0
133 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_MASK \
134                 (0x3ff << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT)
135
136 #define CL_DVFS_I2C_VDD_REG_ADDR        0x44
137 #define CL_DVFS_I2C_STS                 0x48
138 #define CL_DVFS_I2C_STS_I2C_LAST_SHIFT  1
139 #define CL_DVFS_I2C_STS_I2C_REQ_PENDING 0x1
140
141 #define CL_DVFS_INTR_STS                0x5c
142 #define CL_DVFS_INTR_EN                 0x60
143 #define CL_DVFS_INTR_MIN_MASK           0x1
144 #define CL_DVFS_INTR_MAX_MASK           0x2
145
146 #define CL_DVFS_CC4_HVC                 0x74
147 #define CL_DVFS_CC4_HVC_CTRL_SHIFT      0
148 #define CL_DVFS_CC4_HVC_CTRL_MASK       (0x3 << CL_DVFS_CC4_HVC_CTRL_SHIFT)
149 #define CL_DVFS_CC4_HVC_FORCE_VAL_SHIFT 2
150 #define CL_DVFS_CC4_HVC_FORCE_VAL_MASK \
151         (OUT_MASK << CL_DVFS_CC4_HVC_FORCE_VAL_SHIFT)
152 #define CL_DVFS_CC4_HVC_FORCE_EN        (0x1 << 8)
153
154 #define CL_DVFS_I2C_CNTRL               0x100
155 #define CL_DVFS_I2C_CLK_DIVISOR         0x16c
156 #define CL_DVFS_I2C_CLK_DIVISOR_MASK    0xffff
157 #define CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT 16
158 #define CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT 0
159
160 #define CL_DVFS_OUTPUT_LUT              0x200
161
162 #define CL_DVFS_APERTURE                0x400
163
/*
 * True when @offs belongs to the I2C register section: either the
 * CL_DVFS_I2C_CFG..CL_DVFS_INTR_EN window, or everything from
 * CL_DVFS_I2C_CNTRL up (I2C controller registers and the output LUT).
 * Used by cl_dvfs_readl()/cl_dvfs_writel() to pick the right MMIO base.
 */
#define IS_I2C_OFFS(offs)               \
        ((((offs) >= CL_DVFS_I2C_CFG) && ((offs) <= CL_DVFS_INTR_EN)) || \
        ((offs) >= CL_DVFS_I2C_CNTRL))
167
168 #define CL_DVFS_CALIBR_TIME             40000
169 #define CL_DVFS_OUTPUT_PENDING_TIMEOUT  1000
170 #define CL_DVFS_OUTPUT_RAMP_DELAY       100
171 #define CL_DVFS_TUNE_HIGH_DELAY         2000
172
173 #define CL_DVFS_TUNE_HIGH_MARGIN_MV     20
174 #define CL_DVFS_CAP_GUARD_BAND_STEPS    2
175
/*
 * Driver-visible controller modes. Note that the hardware CL_DVFS_CTRL
 * register is written as (mode - 1) - see set_mode(); value 0 is reserved
 * by software for "not yet initialized".
 */
enum tegra_cl_dvfs_ctrl_mode {
        TEGRA_CL_DVFS_UNINITIALIZED = 0,
        TEGRA_CL_DVFS_DISABLED = 1,
        TEGRA_CL_DVFS_OPEN_LOOP = 2,
        TEGRA_CL_DVFS_CLOSED_LOOP = 3,
};
182
/**
 * enum tegra_cl_dvfs_tune_state - state of the voltage-regime switching code
 * @TEGRA_CL_DVFS_TUNE_LOW: DFLL is in the low-voltage range (or open-loop mode)
 * @TEGRA_CL_DVFS_TUNE_HIGH_REQUEST: waiting for DFLL I2C output to reach high
 * @TEGRA_CL_DVFS_TUNE_HIGH_REQUEST_2: waiting for PMIC to react to DFLL output
 * @TEGRA_CL_DVFS_TUNE_HIGH: DFLL in the high-voltage range
 *
 * These are software states, not hardware states.  The low/high ranges
 * correspond to the tune0_low/tune0_high values written to CL_DVFS_TUNE0
 * by tune_low()/tune_high().
 */
enum tegra_cl_dvfs_tune_state {
        TEGRA_CL_DVFS_TUNE_LOW = 0,
        TEGRA_CL_DVFS_TUNE_HIGH_REQUEST,
        TEGRA_CL_DVFS_TUNE_HIGH_REQUEST_2,
        TEGRA_CL_DVFS_TUNE_HIGH,
};
198
/*
 * Last frequency request programmed into the DFLL.
 * @freq:   request frequency field (CL_DVFS_FREQ_REQ_FREQ)
 * @scale:  output scale field (CL_DVFS_FREQ_REQ_SCALE)
 * @output: requested output (LUT/PMU) value
 * @cap:    output cap for this request (compared in get_output_cap())
 * @rate:   requested rate in Hz
 */
struct dfll_rate_req {
        u8      freq;
        u8      scale;
        u8      output;
        u8      cap;
        unsigned long rate;
};
206
/*
 * Current voltage limits in mV, with seqcounts so readers can detect
 * concurrent updates (written in set_output_limits()).  When @clamped is
 * set, limit updates are suppressed.
 */
struct voltage_limits {
        int             vmin;
        int             vmax;
        seqcount_t      vmin_seqcnt;
        seqcount_t      vmax_seqcnt;
        bool            clamped;
};
214
/*
 * Per-instance CL-DVFS (DFLL) driver state.  Output voltages are handled as
 * 6-bit (OUT_MASK) "out" values; get_mv() converts them to mV.
 */
struct tegra_cl_dvfs {
        void                                    *cl_base;       /* main register base */
        void                                    *cl_i2c_base;   /* I2C section base (may differ) */
        struct tegra_cl_dvfs_platform_data      *p_data;

        struct dvfs                     *safe_dvfs;
        struct thermal_cooling_device   *vmax_cdev;
        struct thermal_cooling_device   *vmin_cdev;
        struct work_struct              init_cdev_work;

        struct clk                      *soc_clk;
        struct clk                      *ref_clk;
        struct clk                      *i2c_clk;
        struct clk                      *dfll_clk;
        unsigned long                   ref_rate;       /* reference clock rate, Hz */
        unsigned long                   i2c_rate;

        /* output voltage mapping:
         * legacy dvfs table index -to- cl_dvfs output LUT index
         * cl_dvfs output LUT index -to- PMU value/voltage pair ptr
         */
        u8                              clk_dvfs_map[MAX_DVFS_FREQS];
        struct voltage_reg_map          *out_map[MAX_CL_DVFS_VOLTAGES];
        u8                              num_voltages;
        u8                              safe_output;
        u8                              rail_relations_out_min;

        /* CL_DVFS_TUNE0 values for the low/high voltage regimes */
        u32                             tune0_low;
        u32                             tune0_high;

        u8                              tune_high_out_start;
        u8                              tune_high_out_min;
        unsigned long                   tune_high_dvco_rate_min;
        unsigned long                   tune_high_target_rate_min;

        /* thermal caps/floors, as out values and mV */
        u8                              minimax_output;
        u8                              thermal_out_caps[MAX_THERMAL_LIMITS];
        u8                              thermal_out_floors[MAX_THERMAL_LIMITS+1];
        int                             thermal_mv_floors[MAX_THERMAL_LIMITS];
        int                             therm_caps_num;
        int                             therm_floors_num;
        unsigned long                   dvco_rate_floors[MAX_THERMAL_LIMITS+1];
        unsigned long                   dvco_rate_min;

        struct voltage_limits           v_limits;
        u8                              lut_min;        /* current LUT clamp, low end */
        u8                              lut_max;        /* current LUT clamp, high end */
        u8                              force_out_min;
        u32                             suspended_force_out;
        int                             therm_cap_idx;
        int                             therm_floor_idx;
        struct dfll_rate_req            last_req;
        enum tegra_cl_dvfs_tune_state   tune_state;
        enum tegra_cl_dvfs_ctrl_mode    mode;

        /* high-voltage-regime switch timing */
        struct hrtimer                  tune_timer;
        ktime_t                         tune_delay;
        ktime_t                         tune_ramp;
        u8                              tune_out_last;

        /* DVCO minimum-rate calibration */
        struct timer_list               calibration_timer;
        unsigned long                   calibration_delay;
        ktime_t                         last_calibration;
        unsigned long                   calibration_range_min;
        unsigned long                   calibration_range_max;

        struct notifier_block           simon_grade_nb;
};
283
/* Per-SoC match data: TEGRA_CL_DVFS_* flag bits for this chip. */
struct tegra_cl_dvfs_soc_match_data {
        u32 flags;
};
287
288 /* Conversion macros (different scales for frequency request, and monitored
289    rate is not a typo) */
290 #define RATE_STEP(cld)                          ((cld)->ref_rate / 2)
291 #define GET_REQUEST_FREQ(rate, ref_rate)        ((rate) / ((ref_rate) / 2))
292 #define GET_REQUEST_RATE(freq, ref_rate)        ((freq) * ((ref_rate) / 2))
293 #define GET_MONITORED_RATE(freq, ref_rate)      ((freq) * ((ref_rate) / 4))
294 #define GET_DROOP_FREQ(rate, ref_rate)          ((rate) / ((ref_rate) / 4))
295 #define ROUND_MIN_RATE(rate, ref_rate)          \
296                 (DIV_ROUND_UP(rate, (ref_rate) / 2) * ((ref_rate) / 2))
297 #define GET_DIV(ref_rate, out_rate, scale)      \
298                 DIV_ROUND_UP((ref_rate), (out_rate) * (scale))
299 #define GET_SAMPLE_PERIOD(cld)  \
300                 DIV_ROUND_UP(1000000, (cld)->p_data->cfg_param->sample_rate)
301
/* Human-readable names for enum tegra_cl_dvfs_ctrl_mode (debugfs/logging). */
static const char *mode_name[] = {
        [TEGRA_CL_DVFS_UNINITIALIZED] = "uninitialized",
        [TEGRA_CL_DVFS_DISABLED] = "disabled",
        [TEGRA_CL_DVFS_OPEN_LOOP] = "open_loop",
        [TEGRA_CL_DVFS_CLOSED_LOOP] = "closed_loop",
};
308
309 /*
310  * In some h/w configurations CL-DVFS module registers have two different
311  * address bases: one for I2C control/status registers, and one for all other
312  * registers. Registers accessors are separated below accordingly just by
313  * comparing register offset with start of I2C section - CL_DVFS_I2C_CFG. One
314  * special case is CL_DVFS_OUTPUT_CFG register: when I2C controls are separated
315  * I2C_ENABLE bit of this register is accessed from I2C base, and all other bits
316  * are accessed from the main base.
317  */
/* Read a register from the I2C section base (no implicit barrier). */
static inline u32 cl_dvfs_i2c_readl(struct tegra_cl_dvfs *cld, u32 offs)
{
        return __raw_readl(cld->cl_i2c_base + offs);
}
/* Write a register in the I2C section base (no implicit barrier). */
static inline void cl_dvfs_i2c_writel(struct tegra_cl_dvfs *cld,
                                      u32 val, u32 offs)
{
        __raw_writel(val, cld->cl_i2c_base + offs);
}
/*
 * Flush posted writes to the I2C section: read back a register from the
 * same base to force completion, then dsb() to order subsequent accesses.
 */
static inline void cl_dvfs_i2c_wmb(struct tegra_cl_dvfs *cld)
{
        cl_dvfs_i2c_readl(cld, CL_DVFS_I2C_CFG);
        dsb();
}
332
/* Read a CL-DVFS register, routing I2C-section offsets to the I2C base. */
static inline u32 cl_dvfs_readl(struct tegra_cl_dvfs *cld, u32 offs)
{
        if (IS_I2C_OFFS(offs))
                return cl_dvfs_i2c_readl(cld, offs);
        return __raw_readl((void *)cld->cl_base + offs);
}
/* Write a CL-DVFS register, routing I2C-section offsets to the I2C base. */
static inline void cl_dvfs_writel(struct tegra_cl_dvfs *cld, u32 val, u32 offs)
{
        if (IS_I2C_OFFS(offs)) {
                cl_dvfs_i2c_writel(cld, val, offs);
                return;
        }
        __raw_writel(val, (void *)cld->cl_base + offs);
}
/*
 * Flush posted writes to the main section: read back CL_DVFS_CTRL to force
 * completion, then dsb() to order subsequent accesses.
 */
static inline void cl_dvfs_wmb(struct tegra_cl_dvfs *cld)
{
        cl_dvfs_readl(cld, CL_DVFS_CTRL);
        dsb();
}
352
/*
 * Select which quantity CL_DVFS_MONITOR_DATA reports (DISABLE/OUT/FREQ).
 * The write is flushed and followed by a 1us delay to guarantee the mux
 * has actually switched before the caller samples the data register.
 */
static inline void switch_monitor(struct tegra_cl_dvfs *cld, u32 selector)
{
        /* delay to make sure selector has switched */
        cl_dvfs_writel(cld, selector, CL_DVFS_MONITOR_CTRL);
        cl_dvfs_wmb(cld);
        udelay(1);
}
360
/* Clear the FREQ_VALID bit so the current frequency request is ignored. */
static inline void invalidate_request(struct tegra_cl_dvfs *cld)
{
        u32 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
        val &= ~CL_DVFS_FREQ_REQ_FREQ_VALID;
        cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
        cl_dvfs_wmb(cld);
}
368
/* Update only the SCALE field of the frequency request register. */
static inline void set_request_scale(struct tegra_cl_dvfs *cld, u8 scale)
{
        u32 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
        val &= ~CL_DVFS_FREQ_REQ_SCALE_MASK;
        val |= scale << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
        cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
        cl_dvfs_wmb(cld);
}
377
/*
 * Set the forced output value, preserving the current ENABLE bit.
 * Returns the register read back after the write (serves as a flush and
 * gives the caller the value actually latched).
 */
static inline u32 output_force_set_val(struct tegra_cl_dvfs *cld, u8 out_val)
{
        u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
        val = (val & CL_DVFS_OUTPUT_FORCE_ENABLE) | (out_val & OUT_MASK);
        cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_FORCE);
        return cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
}
385
/* Enable forced output; @val is the register value to set ENABLE in. */
static inline void output_force_enable(struct tegra_cl_dvfs *cld, u32 val)
{
        val |= CL_DVFS_OUTPUT_FORCE_ENABLE;
        cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_FORCE);
        cl_dvfs_wmb(cld);
}
392
/* Disable forced output, leaving the forced value field untouched. */
static inline void output_force_disable(struct tegra_cl_dvfs *cld)
{
        u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
        val &= ~CL_DVFS_OUTPUT_FORCE_ENABLE;
        cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_FORCE);
        cl_dvfs_wmb(cld);
}
400
401 /*
402  * Reading monitor data concurrently with the update may render intermediate
403  * (neither "old" nor "new") values. Synchronization with the "rising edge"
404  * of DATA_NEW makes it very unlikely, but still possible. Use simple filter:
405  * compare 2 consecutive readings for data consistency within 2 LSb range.
406  * Return error otherwise. On the platform that does not allow to use DATA_NEW
407  * at all check for consistency of consecutive reads is the only protection.
408  */
409 static int filter_monitor_data(struct tegra_cl_dvfs *cld, u32 *data)
410 {
411         u32 val = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
412                 CL_DVFS_MONITOR_DATA_MASK;
413         *data &= CL_DVFS_MONITOR_DATA_MASK;
414         if (abs(*data - val) <= 2)
415                 return 0;
416
417         *data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
418                 CL_DVFS_MONITOR_DATA_MASK;
419         if (abs(*data - val) <= 2)
420                 return 0;
421
422         return -EINVAL;
423 }
424
/*
 * Busy-wait for the DATA_NEW "rising edge" in CL_DVFS_MONITOR_DATA.
 * The first read clears a stale DATA_NEW flag.  On platforms flagged
 * TEGRA_CL_DVFS_DATA_NEW_NO_USE only that single read is done.  The loop
 * also bails out if the controller drops to DISABLED/UNINITIALIZED mode,
 * so it cannot spin forever with the DFLL off.
 */
static inline void wait_data_new(struct tegra_cl_dvfs *cld, u32 *data)
{
        cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA); /* clear data new */
        if (!(cld->p_data->flags & TEGRA_CL_DVFS_DATA_NEW_NO_USE)) {
                do {
                        *data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
                } while (!(*data & CL_DVFS_MONITOR_DATA_NEW) &&
                         (cld->mode > TEGRA_CL_DVFS_DISABLED));
        }
}
435
/* Switch the monitor to OUT and return the last monitored output value. */
static inline u32 get_last_output(struct tegra_cl_dvfs *cld)
{
        switch_monitor(cld, CL_DVFS_MONITOR_CTRL_OUT);
        return cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
                CL_DVFS_MONITOR_DATA_MASK;
}
442
443 /* out monitored before forced value applied - return the latter if enabled */
/*
 * Return the current output value: the forced value if forcing is enabled,
 * otherwise a filtered sample from the OUT monitor.  On filter failure the
 * (negative) error code is returned via the GNU "?:" shorthand - callers
 * must treat values with high bits set as errors.
 */
static inline u32 cl_dvfs_get_output(struct tegra_cl_dvfs *cld)
{
        u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
        if (val & CL_DVFS_OUTPUT_FORCE_ENABLE)
                return val & OUT_MASK;

        switch_monitor(cld, CL_DVFS_MONITOR_CTRL_OUT);
        wait_data_new(cld, &val);
        return filter_monitor_data(cld, &val) ? : val;
}
454
455 static inline bool is_i2c(struct tegra_cl_dvfs *cld)
456 {
457         return cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C;
458 }
459
460 static inline u8 get_output_bottom(struct tegra_cl_dvfs *cld)
461 {
462         return is_i2c(cld) ? 0 : cld->out_map[0]->reg_value;
463 }
464
465 static inline u8 get_output_top(struct tegra_cl_dvfs *cld)
466 {
467         return is_i2c(cld) ?  cld->num_voltages - 1 :
468                 cld->out_map[cld->num_voltages - 1]->reg_value;
469 }
470
471 static inline int get_mv(struct tegra_cl_dvfs *cld, u32 out_val)
472 {
473         return is_i2c(cld) ? cld->out_map[out_val]->reg_uV / 1000 :
474                 cld->p_data->vdd_map[out_val].reg_uV / 1000;
475 }
476
477 static inline bool is_vmin_delivered(struct tegra_cl_dvfs *cld)
478 {
479         if (is_i2c(cld)) {
480                 u32 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
481                 val = (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK;
482                 return val >= cld->lut_min;
483         }
484         /* PWM cannot be stalled */
485         return true;
486 }
487
488 static int tegra_pinctrl_set_tristate(struct tegra_cl_dvfs_platform_data *d,
489                 int group_sel, int tristate)
490 {
491         int ret;
492         unsigned long config = TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_TRISTATE,
493                                         tristate);
494         if (!d->u.pmu_pwm.pinctrl_dev) {
495                 pr_err("%s(): ERROR: No Tegra pincontrol driver\n", __func__);
496                 return -EINVAL;
497         }
498
499         ret = pinctrl_set_config_for_group_sel_any_context(
500                 d->u.pmu_pwm.pinctrl_dev, group_sel, config);
501         if (ret < 0)
502                 pr_err("%s(): ERROR: pinconfig for pin group %d failed: %d\n",
503                         __func__, group_sel, ret);
504         return ret;
505 }
506
/*
 * Enable the voltage output interface:
 *  - I2C:  set I2C_ENABLE in OUTPUT_CFG (I2C base) and flush;
 *  - PWM 1-wire buffer:  drive the external buffer enable GPIO;
 *  - PWM 1-wire direct:  un-tristate the PWM pin group;
 *  - other PWM buses:    set PWM_ENABLE in OUTPUT_CFG and flush.
 * Always returns 0.
 */
static int output_enable(struct tegra_cl_dvfs *cld)
{
        if (is_i2c(cld)) {
                u32 val = cl_dvfs_i2c_readl(cld, CL_DVFS_OUTPUT_CFG);
                val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
                cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
                cl_dvfs_i2c_wmb(cld);
        } else {
                u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
                struct tegra_cl_dvfs_platform_data *d = cld->p_data;
                if (d->u.pmu_pwm.pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_BUFFER) {
                        int gpio = d->u.pmu_pwm.out_gpio;
                        int v = d->u.pmu_pwm.out_enable_high ? 1 : 0;
                        __gpio_set_value(gpio, v);
                        return 0;
                }

                if (d->u.pmu_pwm.pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_DIRECT) {
                        int pg = d->u.pmu_pwm.pwm_pingroup;
                        tegra_pinctrl_set_tristate(d, pg, TEGRA_PIN_DISABLE);
                        return 0;
                }

                val |= CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
                cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
                cl_dvfs_wmb(cld);
        }

        return  0;
}
537
/*
 * Disable the PWM voltage output - mirror image of the PWM paths in
 * output_enable(): de-assert the buffer GPIO, tristate the direct pin,
 * or clear PWM_ENABLE in OUTPUT_CFG.  Always returns 0.
 */
static int output_disable_pwm(struct tegra_cl_dvfs *cld)
{
        u32 val;
        struct tegra_cl_dvfs_platform_data *d = cld->p_data;

        if (d->u.pmu_pwm.pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_BUFFER) {
                int gpio = d->u.pmu_pwm.out_gpio;
                int v = d->u.pmu_pwm.out_enable_high ? 0 : 1;
                __gpio_set_value(gpio, v);
                return 0;
        }

        if (d->u.pmu_pwm.pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_DIRECT) {
                int pg = d->u.pmu_pwm.pwm_pingroup;
                tegra_pinctrl_set_tristate(d, pg, TEGRA_PIN_ENABLE);
                return 0;
        }

        val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
        val &= ~CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
        cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
        cl_dvfs_wmb(cld);
        return  0;
}
562
/*
 * "Quiet before disable" variant: wait for any in-flight I2C transaction
 * to finish, then disable the I2C output.  A pending request is required
 * to be absent across two consecutive status reads before disabling; if a
 * request sneaks in right after the disable, output is re-enabled and the
 * wait continues.  If the bus never goes quiet within
 * CL_DVFS_OUTPUT_PENDING_TIMEOUT us, the output is disabled anyway and
 * -ETIMEDOUT is returned.
 */
static noinline int output_flush_disable(struct tegra_cl_dvfs *cld)
{
        int i;
        u32 sts;
        u32 val = cl_dvfs_i2c_readl(cld, CL_DVFS_OUTPUT_CFG);

        /* Flush transactions in flight, and then disable */
        for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
                sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
                udelay(2);
                if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
                        sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
                        if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
                                val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
                                cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
                                wmb();
                                sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
                                if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
                                        return 0; /* no pending rqst */

                                /* Re-enable, continue wait */
                                val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
                                cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
                                wmb();
                        }
                }
        }

        /* I2C request is still pending - disable, anyway, but report error */
        val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
        cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
        cl_dvfs_i2c_wmb(cld);
        return -ETIMEDOUT;
}
597
/*
 * "Disable then flush" variant: turn the I2C output off immediately, then
 * wait (up to CL_DVFS_OUTPUT_PENDING_TIMEOUT us) for a transaction that
 * may still be in flight to complete - confirmed by two consecutive
 * pending-free status reads.  Returns -ETIMEDOUT if a request is still
 * pending when the wait expires.
 */
static noinline int output_disable_flush(struct tegra_cl_dvfs *cld)
{
        int i;
        u32 sts;
        u32 val = cl_dvfs_i2c_readl(cld, CL_DVFS_OUTPUT_CFG);

        /* Disable output interface right away */
        val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
        cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
        cl_dvfs_i2c_wmb(cld);

        /* Flush possible transaction in flight */
        for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
                sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
                udelay(2);
                if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
                        sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
                        if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
                                return 0;
                }
        }

        /* I2C request is still pending - report error */
        return -ETIMEDOUT;
}
623
/*
 * Disable output before switching to open-loop mode, when allowed.
 * PWM output stays on if an external idle controller owns the override;
 * I2C output is disabled up front only on hardware that tolerates a
 * disable while traffic may be in flight (no I2C_WAIT_QUIET flag) -
 * otherwise the disable is deferred to output_disable_post_ol().
 */
static inline int output_disable_ol_prepare(struct tegra_cl_dvfs *cld)
{
        /* PWM output control */
        if (!is_i2c(cld)) {
                /*
                 * Keep PWM running in open loop mode. External idle controller
                 * would take care of switching PWM output off/on if override
                 * is supported.
                 */
                if (cld->p_data->flags & TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE)
                        return 0;
                return output_disable_pwm(cld);
        }

        /*
         * If cl-dvfs h/w does not require output to be quiet before disable,
         * s/w can stop I2C communications at any time (including operations
         * in closed loop mode), and I2C bus integrity is guaranteed even in
         * case of flush timeout.
         */
        if (!(cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET)) {
                int ret = output_disable_flush(cld);
                if (ret)
                        pr_debug("cl_dvfs: I2C pending timeout ol_prepare\n");
                return ret;
        }
        return 0;
}
652
/*
 * Complete output disable after the switch to open-loop mode.  Only acts
 * on I2C hardware flagged I2C_WAIT_QUIET, where the output must be quiet
 * before it can be disabled - the counterpart of the early disable in
 * output_disable_ol_prepare().
 */
static inline int output_disable_post_ol(struct tegra_cl_dvfs *cld)
{
        /* PWM output control */
        if (!is_i2c(cld))
                return 0;

        /*
         * If cl-dvfs h/w requires output to be quiet before disable, s/w
         * should stop I2C communications only after the switch to open loop
         * mode, and I2C bus integrity is not guaranteed in case of flush
         * timeout
        */
        if (cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET) {
                int ret = output_flush_disable(cld);
                if (ret)
                        pr_err("cl_dvfs: I2C pending timeout post_ol\n");
                return ret;
        }
        return 0;
}
673
/*
 * Program a new controller mode.  The hardware CL_DVFS_CTRL encoding is
 * the enum value minus one (UNINITIALIZED is software-only).  On hardware
 * with the CC4 idle override, the override control tracks the active mode
 * but is capped at open loop, with forcing enabled whenever the DFLL is
 * running.
 */
static inline void set_mode(struct tegra_cl_dvfs *cld,
                            enum tegra_cl_dvfs_ctrl_mode mode)
{
        cld->mode = mode;
        cl_dvfs_writel(cld, mode - 1, CL_DVFS_CTRL);

        if (cld->p_data->flags & TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE) {
                /* Override mode follows active mode up to open loop */
                u32 val = cl_dvfs_readl(cld, CL_DVFS_CC4_HVC);
                val &= ~(CL_DVFS_CC4_HVC_CTRL_MASK | CL_DVFS_CC4_HVC_FORCE_EN);
                if (mode >= TEGRA_CL_DVFS_OPEN_LOOP) {
                        val |= (TEGRA_CL_DVFS_OPEN_LOOP - 1);
                        val |= CL_DVFS_CC4_HVC_FORCE_EN;
                }
                cl_dvfs_writel(cld, val, CL_DVFS_CC4_HVC);
        }
        cl_dvfs_wmb(cld);
}
692
693 static inline u8 get_output_cap(struct tegra_cl_dvfs *cld,
694                                 struct dfll_rate_req *req)
695 {
696         u32 thermal_cap = get_output_top(cld);
697
698         if (cld->therm_cap_idx && (cld->therm_cap_idx <= cld->therm_caps_num))
699                 thermal_cap = cld->thermal_out_caps[cld->therm_cap_idx - 1];
700         if (req && (req->cap < thermal_cap))
701                 return req->cap;
702         return thermal_cap;
703 }
704
705 static inline u8 get_output_min(struct tegra_cl_dvfs *cld)
706 {
707         u32 tune_min = get_output_bottom(cld);
708         u32 thermal_min = tune_min;
709
710         tune_min = cld->tune_state == TEGRA_CL_DVFS_TUNE_LOW ?
711                 tune_min : cld->tune_high_out_min;
712
713         if (cld->therm_floor_idx < cld->therm_floors_num)
714                 thermal_min = cld->thermal_out_floors[cld->therm_floor_idx];
715
716         /* return max of all the possible output min settings */
717         return max_t(u8, max(tune_min, thermal_min),
718                                         cld->rail_relations_out_min);
719 }
720
/*
 * Write the output LUT, clamped to [lut_min, lut_max]: entries at or
 * below lut_min all get the lut_min voltage, entries at or above lut_max
 * all get the lut_max voltage, and entries in between get their own
 * mapped value.  Caller must ensure the I2C output is quiet or disabled
 * (see cl_dvfs_load_lut()).
 */
static inline void _load_lut(struct tegra_cl_dvfs *cld)
{
        int i;
        u32 val;

        val = cld->out_map[cld->lut_min]->reg_value;
        for (i = 0; i <= cld->lut_min; i++)
                cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);

        for (; i < cld->lut_max; i++) {
                val = cld->out_map[i]->reg_value;
                cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
        }

        val = cld->out_map[cld->lut_max]->reg_value;
        for (; i < cld->num_voltages; i++)
                cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);

        cl_dvfs_i2c_wmb(cld);
}
741
/*
 * Reload the output LUT.  On hardware that does not require quiet output
 * (no I2C_WAIT_QUIET flag) the I2C output is briefly disabled around the
 * LUT update to avoid transmitting half-updated entries, then re-enabled.
 */
static void cl_dvfs_load_lut(struct tegra_cl_dvfs *cld)
{
        u32 val = cl_dvfs_i2c_readl(cld, CL_DVFS_OUTPUT_CFG);
        bool disable_out_for_load =
                !(cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET) &&
                (val & CL_DVFS_OUTPUT_CFG_I2C_ENABLE);

        if (disable_out_for_load) {
                val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
                cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
                cl_dvfs_i2c_wmb(cld);
                udelay(2); /* 2us (big margin) window for disable propagation */
        }

        _load_lut(cld);

        if (disable_out_for_load) {
                val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
                cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
                cl_dvfs_i2c_wmb(cld);
        }
}
764
/* Update the software tune state and trace the transition. */
#define set_tune_state(cld, state) \
        do {                                                            \
                cld->tune_state = state;                                \
                pr_debug("%s: set tune state %d\n", __func__, state);   \
        } while (0)
770
/* Switch DFLL tuning to the low (safe) voltage range */
static inline void tune_low(struct tegra_cl_dvfs *cld)
{
	/* a must order: 1st tune dfll low, then tune trimmers low */
	cl_dvfs_writel(cld, cld->tune0_low, CL_DVFS_TUNE0);
	cl_dvfs_wmb(cld);	/* fence TUNE0 write before trimmers change */
	if (cld->safe_dvfs->dfll_data.tune_trimmers)
		cld->safe_dvfs->dfll_data.tune_trimmers(false);
}
779
/* Switch DFLL tuning to the high voltage range (reverse order of tune_low) */
static inline void tune_high(struct tegra_cl_dvfs *cld)
{
	/* a must order: 1st tune trimmers high, then tune dfll high */
	if (cld->safe_dvfs->dfll_data.tune_trimmers)
		cld->safe_dvfs->dfll_data.tune_trimmers(true);
	cl_dvfs_writel(cld, cld->tune0_high, CL_DVFS_TUNE0);
	cl_dvfs_wmb(cld);	/* fence TUNE0 write before returning */
}
788
789 static inline int cl_tune_target(struct tegra_cl_dvfs *cld, unsigned long rate)
790 {
791         bool tune_low_at_cold = cld->safe_dvfs->dfll_data.tune0_low_at_cold;
792
793         if ((rate >= cld->tune_high_target_rate_min) &&
794             (!tune_low_at_cold || cld->therm_floor_idx))
795                 return TEGRA_CL_DVFS_TUNE_HIGH;
796         return TEGRA_CL_DVFS_TUNE_LOW;
797 }
798
/*
 * Apply new output (voltage) limits [out_min, out_max] to h/w, and publish
 * the corresponding mV limits to readers via seqcounts. No-op while limits
 * are clamped, or when neither boundary changed. The seqcount write section
 * spans the h/w update so a reader never observes mV values that are ahead
 * of, or behind, the programmed limits.
 */
static void set_output_limits(struct tegra_cl_dvfs *cld, u8 out_min, u8 out_max)
{
	seqcount_t *vmin_seqcnt = NULL;
	seqcount_t *vmax_seqcnt = NULL;

	if (cld->v_limits.clamped)
		return;

	if ((cld->lut_min != out_min) || (cld->lut_max != out_max)) {
		/* limits update tracking start */
		if (cld->lut_min != out_min) {
			vmin_seqcnt = &cld->v_limits.vmin_seqcnt;
			write_seqcount_begin(vmin_seqcnt);
			cld->v_limits.vmin = get_mv(cld, out_min);
		}
		if (cld->lut_max != out_max) {
			vmax_seqcnt = &cld->v_limits.vmax_seqcnt;
			write_seqcount_begin(vmax_seqcnt);
			cld->v_limits.vmax = get_mv(cld, out_max);
		}

		cld->lut_min = out_min;
		cld->lut_max = out_max;
		if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
			/* h/w supports dynamic min/max fields - no LUT reload */
			u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
			val &= ~(CL_DVFS_OUTPUT_CFG_MAX_MASK |
				 CL_DVFS_OUTPUT_CFG_MIN_MASK);
			val |= out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT;
			val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
			cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
		} else {
			cl_dvfs_load_lut(cld);
		}

		if (vmin_seqcnt &&
		    (cld->p_data->flags & TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE)) {
			/* Override mode force value follows active mode Vmin */
			u32 val = cl_dvfs_readl(cld, CL_DVFS_CC4_HVC);
			val &= ~CL_DVFS_CC4_HVC_FORCE_VAL_MASK;
			val |= out_min << CL_DVFS_CC4_HVC_FORCE_VAL_SHIFT;
			cl_dvfs_writel(cld, val, CL_DVFS_CC4_HVC);
		}
		cl_dvfs_wmb(cld);

		/* limits update tracking end */
		if (vmin_seqcnt)
			write_seqcount_end(vmin_seqcnt);
		if (vmax_seqcnt)
			write_seqcount_end(vmax_seqcnt);

		pr_debug("cl_dvfs limits_mV [%d : %d]\n",
			 cld->v_limits.vmin, cld->v_limits.vmax);
	}
}
853
854 static void cl_dvfs_set_force_out_min(struct tegra_cl_dvfs *cld);
/*
 * Configure closed-loop operation for the given rate request: advance the
 * tuning state machine (low <-> high via timed request states), then derive
 * and apply output voltage limits and the adjusted request output level.
 */
static void set_cl_config(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
{
	bool sample_tune_out_last = false;
	u8 cap_gb = CL_DVFS_CAP_GUARD_BAND_STEPS;
	u8 out_max, out_min;
	u8 out_cap = get_output_cap(cld, req);
	struct dvfs_rail *rail = cld->safe_dvfs->dvfs_rail;

	switch (cld->tune_state) {
	case TEGRA_CL_DVFS_TUNE_LOW:
		if (cl_tune_target(cld, req->rate) > TEGRA_CL_DVFS_TUNE_LOW) {
			/* start transition to high range; tune_timer_cb
			   completes it once output conditions are met */
			set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH_REQUEST);
			hrtimer_start(&cld->tune_timer, cld->tune_delay,
				      HRTIMER_MODE_REL);
			cl_dvfs_set_force_out_min(cld);
			sample_tune_out_last = true;
		}
		break;

	case TEGRA_CL_DVFS_TUNE_HIGH:
	case TEGRA_CL_DVFS_TUNE_HIGH_REQUEST:
	case TEGRA_CL_DVFS_TUNE_HIGH_REQUEST_2:
		if (cl_tune_target(cld, req->rate) == TEGRA_CL_DVFS_TUNE_LOW) {
			/* dropping to low range is safe to do immediately */
			set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
			tune_low(cld);
			cl_dvfs_set_force_out_min(cld);
		}
		break;
	default:
		BUG();
	}

	/*
	 * Criteria to select new request and output boundaries. Listed in
	 * the order of priorities to resolve conflicts (if any).
	 *
	 * 1) out_min is at/above minimum voltage level for current temperature
	 *    and tuning ranges
	 * 2) out_max is at/above PMIC guard-band forced minimum
	 * 3) new request has at least on step room for regulation: request +/-1
	 *    within [out_min, out_max] interval
	 * 4) new request is at least CL_DVFS_CAP_GUARD_BAND_STEPS below out_max
	 * 5) - if no other rail depends on DFLL rail, out_max is at/above
	 *    minimax level to provide better convergence accuracy for rates
	 *    close to tuning range boundaries
	 *    - if some other rail depends on DFLL rail, out_max should match
	 *    voltage from safe dvfs table used by s/w DVFS on other rails to
	 *    resolve dependencies
	 */
	out_min = get_output_min(cld);
	if (out_cap > (out_min + cap_gb)) {
		req->output = out_cap - cap_gb;
		out_max = out_cap;
	} else {
		req->output = out_min + 1;
		out_max = req->output + 1;
	}

	if (req->output == cld->safe_output) {
		req->output++;
		out_max = max(out_max, (u8)(req->output + 1));
	}

	if (list_empty(&rail->relationships_to))
		out_max = max(out_max, cld->minimax_output);

	out_max = max(out_max, cld->force_out_min);

	set_output_limits(cld, out_min, out_max);

	/* Must be sampled after new out_min is set */
	if (sample_tune_out_last && is_i2c(cld)) {
		u32 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
		cld->tune_out_last =
			(val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK;
	}
}
932
/*
 * Configure open-loop operation: unclamp voltage limits, force low (safe)
 * tuning range, and set 1:1 output scaling with forced output disabled.
 */
static void set_ol_config(struct tegra_cl_dvfs *cld)
{
	u32 val, out_min, out_max;

	/* always unclamp and restore limits before open loop */
	if (cld->v_limits.clamped) {
		cld->v_limits.clamped = false;
		set_cl_config(cld, &cld->last_req);
	}
	out_min = cld->lut_min;
	out_max = cld->lut_max;

	/* always tune low (safe) in open loop */
	if (cld->tune_state != TEGRA_CL_DVFS_TUNE_LOW) {
		set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
		tune_low(cld);

		/* low tuning may change the minimum output floor */
		out_min = get_output_min(cld);
	}
	set_output_limits(cld, out_min, out_max);

	/* 1:1 scaling in open loop */
	val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
	if (!(cld->p_data->flags & TEGRA_CL_DVFS_SCALE_IN_OPEN_LOOP))
		val |= (SCALE_MAX - 1) << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
	val &= ~CL_DVFS_FREQ_REQ_FORCE_ENABLE;
	cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
}
961
/*
 * Tune timer callback: completes the low-to-high tuning transition in two
 * timed stages. Stage 1 (HIGH_REQUEST) polls until both the last output
 * value and the output floor are in the high range with no stale I2C
 * transaction in flight; stage 2 (HIGH_REQUEST_2) applies high tuning
 * after an additional ramp delay. Runs under the dfll clock lock.
 */
static enum hrtimer_restart tune_timer_cb(struct hrtimer *timer)
{
	unsigned long flags;
	u32 val, out_min, out_last;
	struct tegra_cl_dvfs *cld =
		container_of(timer, struct tegra_cl_dvfs, tune_timer);

	clk_lock_save(cld->dfll_clk, &flags);

	if (cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH_REQUEST) {
		out_min = cld->lut_min;
		val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
		out_last = is_i2c(cld) ?
			(val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK :
			out_min; /* no way to stall PWM: out_last >= out_min */

		/*
		 * Update high tune settings if both last I2C value and minimum
		 * output are above high range output threshold, provided I2C
		 * transaction that might be in flight when minimum output was
		 * set has been completed. The latter condition is true if no
		 * transaction is pending or I2C last value has changed since
		 * minimum limit was set.
		 *
		 * Since PWM mode never has pending indicator set, high tune
		 * settings are updated always.
		 */
		if (!(val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) ||
		    (cld->tune_out_last != out_last)) {
			/* num_voltages is an out-of-range marker: "settled" */
			cld->tune_out_last = cld->num_voltages;
		}

		if ((cld->tune_out_last == cld->num_voltages) &&
		    (out_last >= cld->tune_high_out_min)  &&
		    (out_min >= cld->tune_high_out_min)) {
			set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH_REQUEST_2);
			hrtimer_start(&cld->tune_timer, cld->tune_ramp,
				      HRTIMER_MODE_REL);
		} else {
			/* conditions not met yet - re-poll after tune_delay */
			hrtimer_start(&cld->tune_timer, cld->tune_delay,
				      HRTIMER_MODE_REL);
		}
	} else if (cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH_REQUEST_2) {
		set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH);
		tune_high(cld);
	}
	clk_unlock_restore(cld->dfll_clk, &flags);

	return HRTIMER_NORESTART;
}
1012
1013 static inline void calibration_timer_update(struct tegra_cl_dvfs *cld)
1014 {
1015         /*
1016          * Forced output must be disabled in closed loop mode outside of
1017          * calibration. It may be temporarily enabled during calibration;
1018          * use timer update to clean up.
1019          */
1020         output_force_disable(cld);
1021
1022         if (cld->calibration_delay)
1023                 mod_timer(&cld->calibration_timer,
1024                           jiffies + cld->calibration_delay + 1);
1025 }
1026
/*
 * Calibrate the DVCO minimum rate: measure the actual DVCO output rate and
 * voltage at (or near) the minimum output level, and nudge dvco_rate_min
 * one rate step up or down accordingly, clamped to the calibration range.
 * Any condition that would make the measurement unreliable defers the
 * attempt by re-arming the calibration timer. Caller holds the dfll clock
 * lock (called from closed-loop paths and calibration_timer_cb).
 */
static void cl_dvfs_calibrate(struct tegra_cl_dvfs *cld)
{
	u32 val, data;
	ktime_t now;
	unsigned long rate;
	unsigned long step = RATE_STEP(cld);
	unsigned long rate_min = cld->dvco_rate_min;
	u8 out_min = get_output_min(cld);

	if (!cld->calibration_delay)
		return;
	/*
	 *  Enter calibration procedure only if
	 *  - closed loop operations
	 *  - last request engaged clock skipper
	 *  - at least specified time after the last calibration attempt
	 */
	if ((cld->mode != TEGRA_CL_DVFS_CLOSED_LOOP) ||
	    (cld->last_req.rate > rate_min))
		return;

	now = ktime_get();
	if (ktime_us_delta(now, cld->last_calibration) <
	    jiffies_to_usecs(cld->calibration_delay))
		return;
	cld->last_calibration = now;

	/* Defer calibration if in the middle of tuning transition */
	if ((cld->tune_state > TEGRA_CL_DVFS_TUNE_LOW) &&
	    (cld->tune_state < TEGRA_CL_DVFS_TUNE_HIGH)) {
		calibration_timer_update(cld);
		return;
	}

	/* Defer calibration if forced output was left enabled */
	val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
	if (val & CL_DVFS_OUTPUT_FORCE_ENABLE) {
		calibration_timer_update(cld);
		return;
	}

	/*
	 * Check if we need to force minimum output during calibration.
	 *
	 * Considerations for selecting TEGRA_CL_DVFS_CALIBRATE_FORCE_VMIN.
	 * - if there is no voltage enforcement underneath this driver, no need
	 * to select defer option.
	 *
	 *  - if SoC has internal pm controller that controls voltage while CPU
	 * cluster is idle, and restores force_val on idle exit, the following
	 * trade-offs applied:
	 *
	 * a) force: DVCO calibration is accurate, but calibration time is
	 * increased by 2 sample periods and target module maybe under-clocked
	 * during that time,
	 * b) don't force: calibration results depend on whether flag
	 * TEGRA_CL_DVFS_DEFER_FORCE_CALIBRATE is set -- see description below.
	 */
	if (cld->p_data->flags & TEGRA_CL_DVFS_CALIBRATE_FORCE_VMIN) {
		int delay = 2 * GET_SAMPLE_PERIOD(cld);
		val = output_force_set_val(cld, out_min);
		output_force_enable(cld, val);
		udelay(delay);
	}

	/* Synchronize with sample period, and get rate measurements */
	switch_monitor(cld, CL_DVFS_MONITOR_CTRL_FREQ);

	if (cld->p_data->flags & TEGRA_CL_DVFS_DATA_NEW_NO_USE) {
		/* Cannot use DATA_NEW synch - get data after one full sample
		   period (with 10us margin) */
		int delay = GET_SAMPLE_PERIOD(cld) + 10;
		udelay(delay);
	}
	/* two reads: first synchronizes, second returns a fresh sample */
	wait_data_new(cld, &data);
	wait_data_new(cld, &data);

	/* Defer calibration if data reading is not consistent */
	if (filter_monitor_data(cld, &data)) {
		calibration_timer_update(cld);
		return;
	}

	/* Get output (voltage) measurements */
	if (is_i2c(cld)) {
		/* Defer calibration if I2C transaction is pending */
		val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
		if (val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) {
			calibration_timer_update(cld);
			return;
		}
		val = (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK;
	} else if (cld->p_data->flags & TEGRA_CL_DVFS_CALIBRATE_FORCE_VMIN) {
		/* Use forced value (cannot read it back from PWM interface) */
		val = out_min;
	} else {
		/* Get last output (there is no such thing as pending PWM) */
		val = get_last_output(cld);

		/* Defer calibration if data reading is not consistent */
		if (filter_monitor_data(cld, &val)) {
			calibration_timer_update(cld);
			return;
		}
	}

	if (cld->p_data->flags & TEGRA_CL_DVFS_CALIBRATE_FORCE_VMIN) {
		/* Defer calibration if forced and read outputs do not match */
		if (val != out_min) {
			calibration_timer_update(cld);
			return;
		}
		output_force_disable(cld);
	}

	/*
	 * Check if we need to defer calibration when voltage is matching
	 * request force_val.
	 *
	 * Considerations for selecting TEGRA_CL_DVFS_DEFER_FORCE_CALIBRATE.
	 * - if there is no voltage enforcement underneath this driver, no need
	 * to select defer option.
	 *
	 *  - if SoC has internal pm controller that controls voltage while CPU
	 * cluster is idle, and restores force_val on idle exit, the following
	 * trade-offs applied:
	 *
	 * a) defer: DVCO minimum maybe slightly over-estimated, all frequencies
	 * below DVCO minimum are skipped-to accurately, but voltage at low
	 * frequencies would fluctuate between Vmin and Vmin + 1 LUT/PWM step.
	 * b) don't defer: DVCO minimum rate is underestimated, maybe down to
	 * calibration_range_min, respectively actual frequencies below DVCO
	 * minimum are configured higher than requested, but voltage at low
	 * frequencies is saturated at Vmin.
	 */
	if ((val == cld->last_req.output) &&
	    (cld->p_data->flags & TEGRA_CL_DVFS_DEFER_FORCE_CALIBRATE)) {
		calibration_timer_update(cld);
		return;
	}

	/* Adjust minimum rate */
	rate = GET_MONITORED_RATE(data, cld->ref_rate);
	if ((val > out_min) || (rate < (rate_min - step)))
		rate_min -= step;
	else if (rate > (cld->dvco_rate_min + step))
		rate_min += step;
	else {
		/* converged: record the floor for the active tuning/thermal
		   state instead of moving dvco_rate_min further */
		if ((cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH) &&
		    (cld->tune_high_out_min == out_min)) {
			cld->tune_high_dvco_rate_min = rate_min;
			return;
		}
		if (cld->thermal_out_floors[cld->therm_floor_idx] == out_min)
			cld->dvco_rate_floors[cld->therm_floor_idx] = rate_min;
		return;
	}

	cld->dvco_rate_min = clamp(rate_min,
			cld->calibration_range_min, cld->calibration_range_max);
	calibration_timer_update(cld);
	pr_debug("%s: calibrated dvco_rate_min %lu (%lu)\n",
		 __func__, cld->dvco_rate_min, rate_min);
}
1191
1192 static void calibration_timer_cb(unsigned long data)
1193 {
1194         unsigned long flags, rate_min;
1195         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
1196
1197         pr_debug("%s\n", __func__);
1198
1199         clk_lock_save(cld->dfll_clk, &flags);
1200         rate_min = cld->dvco_rate_min;
1201         cl_dvfs_calibrate(cld);
1202         if (rate_min != cld->dvco_rate_min) {
1203                 tegra_cl_dvfs_request_rate(cld,
1204                         tegra_cl_dvfs_request_get(cld));
1205         }
1206         clk_unlock_restore(cld->dfll_clk, &flags);
1207 }
1208
/*
 * Program a new frequency request into h/w, with a force value that biases
 * the output toward the requested level (floored at force_out_min when the
 * frequency is going down). Force enable is set only after the new request
 * and force values have settled, using a register read-back plus delay as
 * the ordering fence.
 */
static void set_request(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
{
	u32 val, f;
	int force_val = req->output - cld->safe_output;
	int coef = 128; /* FIXME: cld->p_data->cfg_param->cg_scale? */;

	/* If going down apply force output floor */
	val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
	f = (val & CL_DVFS_FREQ_REQ_FREQ_MASK) >> CL_DVFS_FREQ_REQ_FREQ_SHIFT;
	if ((!(val & CL_DVFS_FREQ_REQ_FREQ_VALID) || (f > req->freq)) &&
	    (cld->force_out_min > req->output))
		force_val = cld->force_out_min - cld->safe_output;

	/* scale force value by closed-loop gain ratio and clamp to h/w range */
	force_val = force_val * coef / cld->p_data->cfg_param->cg;
	force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);

	/*
	 * 1st set new frequency request and force values, then set force enable
	 * bit (if not set already). Use same CL_DVFS_FREQ_REQ register read
	 * (not other cl_dvfs register) plus explicit delay as a fence.
	 */
	val &= CL_DVFS_FREQ_REQ_FORCE_ENABLE;
	val |= req->freq << CL_DVFS_FREQ_REQ_FREQ_SHIFT;
	val |= req->scale << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
	val |= ((u32)force_val << CL_DVFS_FREQ_REQ_FORCE_SHIFT) &
		CL_DVFS_FREQ_REQ_FORCE_MASK;
	val |= CL_DVFS_FREQ_REQ_FREQ_VALID;
	cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
	wmb();
	val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);

	if (!(val & CL_DVFS_FREQ_REQ_FORCE_ENABLE)) {
		udelay(1);  /* 1us (big margin) window for force value settle */
		val |= CL_DVFS_FREQ_REQ_FORCE_ENABLE;
		cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
		cl_dvfs_wmb(cld);
	}
}
1247
1248 static u8 find_mv_out_cap(struct tegra_cl_dvfs *cld, int mv)
1249 {
1250         u8 cap;
1251         int uv;
1252
1253         for (cap = 0; cap < cld->num_voltages; cap++) {
1254                 uv = cld->out_map[cap]->reg_uV;
1255                 if (uv / 1000 >= mv)
1256                         return is_i2c(cld) ? cap : cld->out_map[cap]->reg_value;
1257         }
1258         return get_output_top(cld);     /* maximum possible output */
1259 }
1260
1261 static u8 find_mv_out_floor(struct tegra_cl_dvfs *cld, int mv)
1262 {
1263         u8 floor;
1264         int uv;
1265
1266         for (floor = 0; floor < cld->num_voltages; floor++) {
1267                 uv = cld->out_map[floor]->reg_uV;
1268                 if (uv / 1000 > mv) {
1269                         if (!floor)     /* minimum possible output */
1270                                 return get_output_bottom(cld);
1271                         break;
1272                 }
1273         }
1274         return is_i2c(cld) ? floor - 1 : cld->out_map[floor - 1]->reg_value;
1275 }
1276
1277 static int find_safe_output(
1278         struct tegra_cl_dvfs *cld, unsigned long rate, u8 *safe_output)
1279 {
1280         int i;
1281         int n = cld->safe_dvfs->num_freqs;
1282         unsigned long *freqs = cld->safe_dvfs->freqs;
1283
1284         for (i = 0; i < n; i++) {
1285                 if (freqs[i] >= rate) {
1286                         *safe_output = cld->clk_dvfs_map[i];
1287                         return 0;
1288                 }
1289         }
1290         return -EINVAL;
1291 }
1292
1293 /* Return rate with predicted voltage closest/below or equal out_min */
1294 static unsigned long get_dvco_rate_below(struct tegra_cl_dvfs *cld, u8 out_min)
1295 {
1296         int i;
1297
1298         for (i = 0; i < cld->safe_dvfs->num_freqs; i++) {
1299                 if (cld->clk_dvfs_map[i] > out_min)
1300                         break;
1301         }
1302         i = i ? i-1 : 0;
1303         return cld->safe_dvfs->freqs[i];
1304 }
1305
1306 /* Return rate with predicted voltage closest/above out_min */
1307 static unsigned long get_dvco_rate_above(struct tegra_cl_dvfs *cld, u8 out_min)
1308 {
1309         int i;
1310
1311         for (i = 0; i < cld->safe_dvfs->num_freqs; i++) {
1312                 if (cld->clk_dvfs_map[i] > out_min)
1313                         return cld->safe_dvfs->freqs[i];
1314         }
1315         return cld->safe_dvfs->freqs[i-1];
1316 }
1317
1318 static void cl_dvfs_set_dvco_rate_min(struct tegra_cl_dvfs *cld,
1319                                       struct dfll_rate_req *req)
1320 {
1321         unsigned long tune_high_range_min = 0;
1322         unsigned long rate = cld->dvco_rate_floors[cld->therm_floor_idx];
1323         if (!rate) {
1324                 rate = cld->safe_dvfs->dfll_data.out_rate_min;
1325                 if (cld->therm_floor_idx < cld->therm_floors_num)
1326                         rate = get_dvco_rate_below(cld,
1327                                 cld->thermal_out_floors[cld->therm_floor_idx]);
1328         }
1329
1330         if (cl_tune_target(cld, req->rate) > TEGRA_CL_DVFS_TUNE_LOW) {
1331                 rate = max(rate, cld->tune_high_dvco_rate_min);
1332                 tune_high_range_min = cld->tune_high_target_rate_min;
1333         }
1334
1335         /* round minimum rate to request unit (ref_rate/2) boundary */
1336         cld->dvco_rate_min = ROUND_MIN_RATE(rate, cld->ref_rate);
1337         pr_debug("%s: calibrated dvco_rate_min %lu\n",
1338                  __func__, cld->dvco_rate_min);
1339
1340         /* dvco min rate is under-estimated - skewed range up */
1341         cld->calibration_range_min = cld->dvco_rate_min - 8 * RATE_STEP(cld);
1342         if (cld->calibration_range_min < tune_high_range_min)
1343                 cld->calibration_range_min = tune_high_range_min;
1344         if (cld->calibration_range_min < cld->safe_dvfs->freqs[0])
1345                 cld->calibration_range_min = cld->safe_dvfs->freqs[0];
1346         cld->calibration_range_max = cld->dvco_rate_min + 24 * RATE_STEP(cld);
1347         rate = cld->safe_dvfs->freqs[cld->safe_dvfs->num_freqs - 1];
1348         if (cld->calibration_range_max > rate)
1349                 cld->calibration_range_max = rate;
1350 }
1351
1352 static void cl_dvfs_set_force_out_min(struct tegra_cl_dvfs *cld)
1353 {
1354         u8 force_out_min;
1355         int force_mv_min = cld->p_data->pmu_undershoot_gb;
1356
1357         if (!force_mv_min) {
1358                 cld->force_out_min = get_output_bottom(cld);
1359                 return;
1360         }
1361
1362         WARN_ONCE(!list_empty(&cld->safe_dvfs->dvfs_rail->relationships_to),
1363                   "%s: PMIC undershoot must fit DFLL rail dependency-to slack",
1364                   __func__);
1365
1366         force_out_min = get_output_min(cld);
1367         force_mv_min += get_mv(cld, force_out_min);
1368         force_out_min = find_mv_out_cap(cld, force_mv_min);
1369         if (force_out_min == cld->safe_output)
1370                 force_out_min++;
1371         cld->force_out_min = force_out_min;
1372 }
1373
1374 static struct voltage_reg_map *find_vdd_map_entry(
1375         struct tegra_cl_dvfs *cld, int mV, bool exact)
1376 {
1377         int i, uninitialized_var(reg_mV);
1378
1379         for (i = 0; i < cld->p_data->vdd_map_size; i++) {
1380                 /* round down to 1mV */
1381                 reg_mV = cld->p_data->vdd_map[i].reg_uV / 1000;
1382                 if (mV <= reg_mV)
1383                         break;
1384         }
1385
1386         if (i < cld->p_data->vdd_map_size) {
1387                 if (!exact || (mV == reg_mV))
1388                         return &cld->p_data->vdd_map[i];
1389         }
1390         return NULL;
1391 }
1392
/*
 * Build the output voltage map (out_map: LUT index -> vdd map entry) and
 * the frequency-to-output map (clk_dvfs_map: dvfs freq index -> LUT index
 * in I2C mode, or PWM register value otherwise). The LUT is filled from
 * min_millivolts up to the top dfll voltage, inserting intermediate steps
 * so the full range fits within MAX_CL_DVFS_VOLTAGES entries.
 */
static void cl_dvfs_init_maps(struct tegra_cl_dvfs *cld)
{
	int i, j, v, v_max, n;
	const int *millivolts;
	struct voltage_reg_map *m;

	BUILD_BUG_ON(MAX_CL_DVFS_VOLTAGES > OUT_MASK + 1);

	n = cld->safe_dvfs->num_freqs;
	BUG_ON(n >= MAX_CL_DVFS_VOLTAGES);

	millivolts = cld->safe_dvfs->dfll_millivolts;
	v_max = millivolts[n - 1];

	v = cld->safe_dvfs->dfll_data.min_millivolts;
	BUG_ON(v > millivolts[0]);

	/* LUT entry 0 must match the minimum voltage exactly */
	cld->out_map[0] = find_vdd_map_entry(cld, v, true);
	BUG_ON(!cld->out_map[0]);

	for (i = 0, j = 1; i < n; i++) {
		for (;;) {
			/* spread remaining LUT slots evenly up to v_max */
			v += max(1, (v_max - v) / (MAX_CL_DVFS_VOLTAGES - j));
			if (v >= millivolts[i])
				break;

			m = find_vdd_map_entry(cld, v, false);
			BUG_ON(!m);
			/* skip duplicates: adjacent entries must differ */
			if (m != cld->out_map[j - 1])
				cld->out_map[j++] = m;
		}

		/* each dvfs table voltage gets an exact LUT entry */
		v = (j == MAX_CL_DVFS_VOLTAGES - 1) ? v_max : millivolts[i];
		m = find_vdd_map_entry(cld, v, true);
		BUG_ON(!m);
		if (m != cld->out_map[j - 1])
			cld->out_map[j++] = m;
		if (is_i2c(cld)) {
			cld->clk_dvfs_map[i] = j - 1;
		} else {
			cld->clk_dvfs_map[i] = cld->out_map[j - 1]->reg_value;
			BUG_ON(cld->clk_dvfs_map[i] > OUT_MASK + 1);
		}

		if (v >= v_max)
			break;
	}
	cld->num_voltages = j;

	/* hit Vmax before last freq was mapped: map the rest to max output */
	for (j = i++; i < n; i++)
		cld->clk_dvfs_map[i] = cld->clk_dvfs_map[j];
}
1446
/*
 * Derive the output thresholds and rate limits that govern low/high tuning
 * range transitions from the dvfs table's high-tune voltage threshold.
 */
static void cl_dvfs_init_tuning_thresholds(struct tegra_cl_dvfs *cld)
{
	int mv;

	/*
	 * Convert high tuning voltage threshold into output LUT index, and
	 * add necessary margin.  If voltage threshold is outside operating
	 * range set it at maximum output level to effectively disable tuning
	 * parameters adjustment.
	 */
	cld->tune_high_out_min = get_output_top(cld);
	cld->tune_high_out_start = cld->tune_high_out_min;
	cld->tune_high_dvco_rate_min = ULONG_MAX;
	cld->tune_high_target_rate_min = ULONG_MAX;

	mv = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
	if (mv >= cld->safe_dvfs->dfll_data.min_millivolts) {
		/* platform margin, or driver default when not specified */
		int margin = cld->safe_dvfs->dfll_data.tune_high_margin_mv ? :
				CL_DVFS_TUNE_HIGH_MARGIN_MV;
		u8 out_min = find_mv_out_cap(cld, mv);
		u8 out_start = find_mv_out_cap(cld, mv + margin);
		out_start = max(out_start, (u8)(out_min + 1));
		if (out_start < get_output_top(cld)) {
			cld->tune_high_out_min = out_min;
			cld->tune_high_out_start = out_start;
			if (cld->minimax_output <= out_start)
				cld->minimax_output = out_start + 1;
			cld->tune_high_dvco_rate_min =
				get_dvco_rate_above(cld, out_start + 1);
			cld->tune_high_target_rate_min =
				get_dvco_rate_above(cld, out_min);
		}
	}
}
1481
/*
 * Initialize output voltage caps applied at high temperature: convert the
 * rail's thermal voltage caps into output LUT indexes. No-op if the rail
 * defines no thermal caps. Must run after minimax_output is finalized (see
 * cl_dvfs_init_output_thresholds), since the lowest cap must stay above it.
 */
static void cl_dvfs_init_hot_output_cap(struct tegra_cl_dvfs *cld)
{
	int i;
	/* Rail has no thermal caps - nothing to convert */
	if (!cld->safe_dvfs->dvfs_rail->therm_mv_caps ||
	    !cld->safe_dvfs->dvfs_rail->therm_mv_caps_num)
		return;

	/* Caps exist but no cooling device to drive them - warn, keep going */
	if (!cld->safe_dvfs->dvfs_rail->vmax_cdev)
		WARN(1, "%s: missing dfll cap cooling device\n",
		     cld->safe_dvfs->dvfs_rail->reg_id);
	/*
	 * Convert monotonically decreasing thermal caps at high temperature
	 * into output LUT indexes; make sure there is a room for regulation
	 * below minimum thermal cap.
	 */
	cld->therm_caps_num = cld->safe_dvfs->dvfs_rail->therm_mv_caps_num;
	for (i = 0; i < cld->therm_caps_num; i++) {
		cld->thermal_out_caps[i] = find_mv_out_floor(
			cld, cld->safe_dvfs->dvfs_rail->therm_mv_caps[i]);
	}
	/* Lowest (hottest) cap must not dip below minimax output */
	BUG_ON(cld->thermal_out_caps[cld->therm_caps_num - 1] <
	       cld->minimax_output);
}
1505
/*
 * Convert the rail's low-temperature thermal voltage floors into output LUT
 * indexes and cache the respective regulated voltages. @offset (mV) is added
 * to every floor except the maximum (coldest) one. If the rail has no DFLL
 * floor table yet, publish the converted table to it.
 */
static void cl_dvfs_convert_cold_output_floor(struct tegra_cl_dvfs *cld,
					      int offset)
{
	int i;
	struct dvfs_rail *rail = cld->safe_dvfs->dvfs_rail;

	/*
	 * Convert monotonically decreasing thermal floors at low temperature
	 * into output LUT indexes; make sure there is a room for regulation
	 * above maximum thermal floor. The latter is also exempt from offset
	 * application.
	 */
	cld->therm_floors_num = rail->therm_mv_floors_num;
	for (i = 0; i < cld->therm_floors_num; i++) {
		/* i == 0 is the coldest/maximum floor: no offset applied */
		int mv = rail->therm_mv_floors[i] + (i ? offset : 0);
		u8 out = cld->thermal_out_floors[i] = find_mv_out_cap(cld, mv);
		cld->thermal_mv_floors[i] = get_mv(cld, out);
	}
	BUG_ON(cld->thermal_out_floors[0] + 1 >= get_output_top(cld));
	if (!rail->therm_mv_dfll_floors) {
		/* Make table contents visible before publishing the pointer */
		wmb();
		rail->therm_mv_dfll_floors = cld->thermal_mv_floors;
	}
}
1530
1531 static void cl_dvfs_init_cold_output_floor(struct tegra_cl_dvfs *cld)
1532 {
1533         if (!cld->safe_dvfs->dvfs_rail->therm_mv_floors ||
1534             !cld->safe_dvfs->dvfs_rail->therm_mv_floors_num)
1535                 return;
1536
1537         if (!cld->safe_dvfs->dvfs_rail->vmin_cdev)
1538                 WARN(1, "%s: missing dfll floor cooling device\n",
1539                      cld->safe_dvfs->dvfs_rail->reg_id);
1540
1541         /* Most conservative offset 0 always safe */
1542         cl_dvfs_convert_cold_output_floor(cld, 0);
1543
1544         if (cld->minimax_output <= cld->thermal_out_floors[0])
1545                 cld->minimax_output = cld->thermal_out_floors[0] + 1;
1546 }
1547
/*
 * Establish output range thresholds. Order matters: tuning thresholds and
 * cold floors both raise minimax_output, safe output is derived from the
 * coldest floor, and hot caps are validated against the final minimax value.
 */
static void cl_dvfs_init_output_thresholds(struct tegra_cl_dvfs *cld)
{
	cld->minimax_output = 0;
	cl_dvfs_init_tuning_thresholds(cld);
	cl_dvfs_init_cold_output_floor(cld);

	/* Append minimum output to thermal floors */
	cld->thermal_out_floors[cld->therm_floors_num] = get_output_bottom(cld);

	/* make sure safe output is safe at any temperature */
	cld->safe_output = max(cld->thermal_out_floors[0],
		(u8)(get_output_bottom(cld) + 1));
	if (cld->minimax_output <= cld->safe_output)
		cld->minimax_output = cld->safe_output + 1;

	/* init caps after minimax output is determined */
	cl_dvfs_init_hot_output_cap(cld);
}
1566
/*
 * Configure the PWM output interface: program delta mode and the PWM clock
 * divider in the output configuration register, and route/enable PWM
 * according to the board-specific bus topology (see comment below). Does
 * not by itself enable voltage output regulation.
 */
static void cl_dvfs_init_pwm_if(struct tegra_cl_dvfs *cld)
{
	u32 val, div;
	struct tegra_cl_dvfs_platform_data *p_data = cld->p_data;
	bool delta_mode = p_data->u.pmu_pwm.delta_mode;
	int pg = p_data->u.pmu_pwm.pwm_pingroup;
	int pcg = p_data->u.pmu_pwm.pwm_clk_pingroup;

	/* PWM divider derived from reference clock and requested PWM rate */
	div = GET_DIV(cld->ref_rate, p_data->u.pmu_pwm.pwm_rate, 1);

	val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
	val |= delta_mode ? CL_DVFS_OUTPUT_CFG_PWM_DELTA : 0;
	val |= (div << CL_DVFS_OUTPUT_CFG_PWM_DIV_SHIFT) &
		CL_DVFS_OUTPUT_CFG_PWM_DIV_MASK;

	/*
	 * Different ways to enable/disable PWM depending on board design:
	 * a) Use native CL-DVFS output PWM_ENABLE control (2WIRE bus)
	 * b) Use gpio control of external buffer (1WIRE bus with buffer)
	 * c) Use tristate PWM pingroup control (1WIRE bus with direct connect)
	 * in cases (b) and (c) keep CL-DVFS native control always enabled
	 */

	switch (p_data->u.pmu_pwm.pwm_bus) {
	case TEGRA_CL_DVFS_PWM_1WIRE_BUFFER:
		tegra_pinctrl_set_tristate(p_data, pg, TEGRA_PIN_DISABLE);
		val |= CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
		break;

	case TEGRA_CL_DVFS_PWM_1WIRE_DIRECT:
		tegra_pinctrl_set_tristate(p_data, pg, TEGRA_PIN_ENABLE);
		val |= CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
		break;

	case TEGRA_CL_DVFS_PWM_2WIRE:
		tegra_pinctrl_set_tristate(p_data, pg, TEGRA_PIN_DISABLE);
		tegra_pinctrl_set_tristate(p_data, pcg, TEGRA_PIN_DISABLE);
		break;

	default:
		/* Unknown bus type is a platform data bug */
		BUG();
	}

	cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
	cl_dvfs_wmb(cld);
}
1613
/*
 * Configure the I2C output interface: PMU slave address, voltage register
 * offset, transfer mode (fast/high-speed), and the FS/HS clock divisors
 * derived from the I2C controller clock rate.
 */
static void cl_dvfs_init_i2c_if(struct tegra_cl_dvfs *cld)
{
	u32 val, div;
	struct tegra_cl_dvfs_platform_data *p_data = cld->p_data;
	bool hs_mode = p_data->u.pmu_i2c.hs_rate;

	/* PMU slave address, vdd register offset, and transfer mode */
	val = p_data->u.pmu_i2c.slave_addr << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT;
	if (p_data->u.pmu_i2c.addr_10)
		val |= CL_DVFS_I2C_CFG_SLAVE_ADDR_10;
	if (hs_mode) {
		val |= p_data->u.pmu_i2c.hs_master_code <<
			CL_DVFS_I2C_CFG_HS_CODE_SHIFT;
		val |= CL_DVFS_I2C_CFG_PACKET_ENABLE;
	}
	val |= CL_DVFS_I2C_CFG_SIZE_MASK;
	val |= CL_DVFS_I2C_CFG_ARB_ENABLE;
	cl_dvfs_writel(cld, val, CL_DVFS_I2C_CFG);
	cl_dvfs_writel(cld, p_data->u.pmu_i2c.reg, CL_DVFS_I2C_VDD_REG_ADDR);


	/* FS divisor: register holds (divisor - 1); must fit divisor mask */
	val = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.fs_rate, 8);
	BUG_ON(!val || (val > CL_DVFS_I2C_CLK_DIVISOR_MASK));
	val = (val - 1) << CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT;
	if (hs_mode) {
		div = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.hs_rate, 12);
		BUG_ON(!div || (div > CL_DVFS_I2C_CLK_DIVISOR_MASK));
	} else {
		div = 2;	/* default hs divisor just in case */
	}
	val |= (div - 1) << CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT;
	cl_dvfs_writel(cld, val, CL_DVFS_I2C_CLK_DIVISOR);
	cl_dvfs_i2c_wmb(cld);
}
1648
/*
 * Initialize the output (PMU) interface: disable output, program safe value
 * and min/max output limits, clear/disable limit interrupts, load the LUT
 * (I2C only), and finally configure the I2C or PWM transport. Output limits
 * are applied either via h/w min/max index controls (DYN_OUTPUT_CFG) or
 * indirectly via LUT contents.
 */
static void cl_dvfs_init_out_if(struct tegra_cl_dvfs *cld)
{
	u32 val, out_min, out_max;

	/*
	 * Disable output, and set safe voltage and output limits;
	 * disable and clear limit interrupts.
	 */
	cld->tune_state = TEGRA_CL_DVFS_TUNE_LOW;
	cld->therm_cap_idx = cld->therm_caps_num;
	cld->therm_floor_idx = 0;
	cl_dvfs_set_dvco_rate_min(cld, &cld->last_req);
	cl_dvfs_set_force_out_min(cld);

	if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
		/*
		 * If h/w supports dynamic chanage of output register, limit
		 * LUT * index range using cl_dvfs h/w controls, and load full
		 * range LUT table once.
		 */
		out_min = get_output_min(cld);
		out_max = get_output_cap(cld, NULL);
		cld->lut_min = get_output_bottom(cld);
		cld->lut_max = get_output_top(cld);
	} else {
		/* LUT available only for I2C, no dynamic config WAR for PWM */
		BUG_ON(!is_i2c(cld));

		/*
		 * Allow the entire range of LUT indexes, but limit output
		 * voltage in LUT mapping (this "indirect" application of limits
		 * is used, because h/w does not support dynamic change of index
		 * limits, but dynamic reload of LUT is fine).
		 */
		out_min = get_output_bottom(cld);
		out_max = get_output_top(cld);
		cld->lut_min = get_output_min(cld);
		cld->lut_max = get_output_cap(cld, NULL);
	}

	/*
	 * Disable output interface. If configuration and I2C address spaces
	 * are separated, output enable/disable control and output limits are
	 * in different apertures and output must be disabled 1st to avoid
	 * spurious I2C transaction. If configuration and I2C address spaces
	 * are combined output enable/disable control and output limits are
	 * in the same register, and it is safe to just clear it.
	 */
	cl_dvfs_i2c_writel(cld, 0, CL_DVFS_OUTPUT_CFG);
	cl_dvfs_i2c_wmb(cld);

	val = (cld->safe_output << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT) |
		(out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT) |
		(out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT);
	cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
	if (cld->p_data->flags & TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE) {
		/* Idle override forces minimum output value */
		val = out_min << CL_DVFS_CC4_HVC_FORCE_VAL_SHIFT;
		cl_dvfs_writel(cld, val, CL_DVFS_CC4_HVC);
	}
	cl_dvfs_wmb(cld);

	/* Clear forced output; disable and acknowledge limit interrupts */
	cl_dvfs_writel(cld, 0, CL_DVFS_OUTPUT_FORCE);
	cl_dvfs_writel(cld, 0, CL_DVFS_INTR_EN);
	cl_dvfs_writel(cld, CL_DVFS_INTR_MAX_MASK | CL_DVFS_INTR_MIN_MASK,
		       CL_DVFS_INTR_STS);

	/* fill in LUT table */
	if (is_i2c(cld))
		cl_dvfs_load_lut(cld);

	if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
		/* dynamic update of output register allowed - no need to reload
		   lut - use lut limits as output register setting shadow */
		cld->lut_min = out_min;
		cld->lut_max = out_max;
	}
	cld->v_limits.vmin = get_mv(cld, cld->lut_min);
	cld->v_limits.vmax = get_mv(cld, cld->lut_max);

	/* configure transport */
	if (is_i2c(cld))
		cl_dvfs_init_i2c_if(cld);
	else
		cl_dvfs_init_pwm_if(cld);
}
1734
/*
 * Program the closed-loop control logic while the module is disabled:
 * sample rate divider, loop gain parameters, DFLL tuning registers, droop
 * and scale skippers, and the frequency monitor. Leaves the module in
 * TEGRA_CL_DVFS_DISABLED mode with a cleared frequency request.
 */
static void cl_dvfs_init_cntrl_logic(struct tegra_cl_dvfs *cld)
{
	u32 val;
	struct tegra_cl_dvfs_cfg_param *param = cld->p_data->cfg_param;

	/* configure mode, control loop parameters, DFLL tuning */
	set_mode(cld, TEGRA_CL_DVFS_DISABLED);

	val = GET_DIV(cld->ref_rate, param->sample_rate, 32);
	BUG_ON(val > CL_DVFS_CONFIG_DIV_MASK);
	cl_dvfs_writel(cld, val, CL_DVFS_CONFIG);

	val = (param->force_mode << CL_DVFS_PARAMS_FORCE_MODE_SHIFT) |
		(param->cf << CL_DVFS_PARAMS_CF_PARAM_SHIFT) |
		(param->ci << CL_DVFS_PARAMS_CI_PARAM_SHIFT) |
		((u8)param->cg << CL_DVFS_PARAMS_CG_PARAM_SHIFT) |
		(param->cg_scale ? CL_DVFS_PARAMS_CG_SCALE : 0);
	cl_dvfs_writel(cld, val, CL_DVFS_PARAMS);

	/* Start in the low-voltage tuning range */
	cl_dvfs_writel(cld, cld->tune0_low, CL_DVFS_TUNE0);
	cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune1, CL_DVFS_TUNE1);
	cl_dvfs_wmb(cld);
	if (cld->safe_dvfs->dfll_data.tune_trimmers)
		cld->safe_dvfs->dfll_data.tune_trimmers(false);

	/* configure droop (skipper 1) and scale (skipper 2) */
	val = GET_DROOP_FREQ(cld->safe_dvfs->dfll_data.droop_rate_min,
			cld->ref_rate) << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT;
	BUG_ON(val > CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK);
	val |= (param->droop_cut_value << CL_DVFS_DROOP_CTRL_CUT_SHIFT);
	val |= (param->droop_restore_ramp << CL_DVFS_DROOP_CTRL_RAMP_SHIFT);
	cl_dvfs_writel(cld, val, CL_DVFS_DROOP_CTRL);

	/* Preserve scale bits, clear the rest of the frequency request */
	val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ) &
		CL_DVFS_FREQ_REQ_SCALE_MASK;
	cld->last_req.scale = val >> CL_DVFS_FREQ_REQ_SCALE_SHIFT;
	cld->last_req.cap = 0;
	cld->last_req.freq = 0;
	cld->last_req.output = 0;
	cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
	cl_dvfs_writel(cld, param->scale_out_ramp, CL_DVFS_SCALE_RAMP);

	/* select frequency for monitoring */
	cl_dvfs_writel(cld, CL_DVFS_MONITOR_CTRL_FREQ, CL_DVFS_MONITOR_CTRL);
	cl_dvfs_wmb(cld);
}
1781
1782 static int cl_dvfs_enable_clocks(struct tegra_cl_dvfs *cld)
1783 {
1784         if (is_i2c(cld))
1785                 clk_enable(cld->i2c_clk);
1786
1787         clk_enable(cld->ref_clk);
1788         clk_enable(cld->soc_clk);
1789         return 0;
1790 }
1791
1792 static void cl_dvfs_disable_clocks(struct tegra_cl_dvfs *cld)
1793 {
1794         if (is_i2c(cld))
1795                 clk_disable(cld->i2c_clk);
1796
1797         clk_disable(cld->ref_clk);
1798         clk_disable(cld->soc_clk);
1799 }
1800
1801 static int sync_tune_state(struct tegra_cl_dvfs *cld)
1802 {
1803         u32 val = cl_dvfs_readl(cld, CL_DVFS_TUNE0);
1804         if (cld->tune0_low == val)
1805                 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
1806         else if (cld->tune0_high == val)
1807                 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH);
1808         else {
1809                 pr_err("\n %s: Failed to sync cl_dvfs tune state\n", __func__);
1810                 return -EINVAL;
1811         }
1812         return 0;
1813 }
1814
1815 /*
1816  * When bootloader enables cl_dvfs, then this function
1817  * can be used to set cl_dvfs sw sate to be in sync with
1818  * cl_dvfs HW sate.
1819  */
1820 static int cl_dvfs_sync(struct tegra_cl_dvfs *cld)
1821 {
1822         u32 val;
1823         int status;
1824         unsigned long int rate;
1825         unsigned long int dfll_boot_req_khz =
1826                 cld->safe_dvfs->dfll_data.dfll_boot_khz;
1827
1828         if (!dfll_boot_req_khz) {
1829                 pr_err("%s: Failed to sync DFLL boot rate\n", __func__);
1830                 return -EINVAL;
1831         }
1832
1833         output_enable(cld);
1834
1835         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ) &
1836                 CL_DVFS_FREQ_REQ_SCALE_MASK;
1837         cld->last_req.scale = val >> CL_DVFS_FREQ_REQ_SCALE_SHIFT;
1838         cld->last_req.rate = dfll_boot_req_khz * 1000;
1839         cld->last_req.freq = GET_REQUEST_FREQ(cld->last_req.rate,
1840                                                 cld->ref_rate);
1841         val = cld->last_req.freq;
1842         rate = GET_REQUEST_RATE(val, cld->ref_rate);
1843         if (find_safe_output(cld, rate, &(cld->last_req.output))) {
1844                 pr_err("%s: Failed to find safe output for rate %lu\n",
1845                         __func__, rate);
1846                 return -EINVAL;
1847         }
1848         cld->last_req.cap = cld->last_req.output;
1849         cld->mode = TEGRA_CL_DVFS_CLOSED_LOOP;
1850         status = sync_tune_state(cld);
1851         if (status)
1852                 return status;
1853         return 0;
1854 }
1855
1856 static bool is_cl_dvfs_closed_loop(struct tegra_cl_dvfs *cld)
1857 {
1858         u32 mode;
1859         mode = cl_dvfs_readl(cld, CL_DVFS_CTRL) + 1;
1860         if (mode == TEGRA_CL_DVFS_CLOSED_LOOP)
1861                 return true;
1862         return false;
1863 }
1864
1865 static int cl_dvfs_init(struct tegra_cl_dvfs *cld)
1866 {
1867         int ret, gpio, flags;
1868
1869         /* Enable output inerface clock */
1870         if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C) {
1871                 ret = clk_enable(cld->i2c_clk);
1872                 if (ret) {
1873                         pr_err("%s: Failed to enable %s\n",
1874                                __func__, cld->i2c_clk->name);
1875                         return ret;
1876                 }
1877                 cld->i2c_rate = clk_get_rate(cld->i2c_clk);
1878         } else if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_PWM) {
1879                 int pwm_bus = cld->p_data->u.pmu_pwm.pwm_bus;
1880                 if (pwm_bus > TEGRA_CL_DVFS_PWM_1WIRE_DIRECT) {
1881                         /* FIXME: PWM 2-wire support */
1882                         pr_err("%s: not supported PWM 2-wire bus\n", __func__);
1883                         return -ENOSYS;
1884                 } else if (pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_BUFFER) {
1885                         gpio = cld->p_data->u.pmu_pwm.out_gpio;
1886                         flags = cld->p_data->u.pmu_pwm.out_enable_high ?
1887                                 GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH;
1888                         if (gpio_request_one(gpio, flags, "cl_dvfs_pwm")) {
1889                                 pr_err("%s: Failed to request pwm gpio %d\n",
1890                                        __func__, gpio);
1891                                 return -EPERM;
1892                         }
1893                 }
1894         } else {
1895                 pr_err("%s: unknown PMU interface\n", __func__);
1896                 return -EINVAL;
1897         }
1898
1899         /* Enable module clocks, release control logic reset */
1900         ret = clk_enable(cld->ref_clk);
1901         if (ret) {
1902                 pr_err("%s: Failed to enable %s\n",
1903                        __func__, cld->ref_clk->name);
1904                 return ret;
1905         }
1906         ret = clk_enable(cld->soc_clk);
1907         if (ret) {
1908                 pr_err("%s: Failed to enable %s\n",
1909                        __func__, cld->ref_clk->name);
1910                 return ret;
1911         }
1912         cld->ref_rate = clk_get_rate(cld->ref_clk);
1913         BUG_ON(!cld->ref_rate);
1914
1915         /* init tuning timer */
1916         hrtimer_init(&cld->tune_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1917         cld->tune_timer.function = tune_timer_cb;
1918         cld->tune_delay = ktime_set(0, CL_DVFS_TUNE_HIGH_DELAY * 1000);
1919         if (!cld->p_data->tune_ramp_delay)
1920                 cld->p_data->tune_ramp_delay = CL_DVFS_OUTPUT_RAMP_DELAY;
1921         cld->tune_ramp = ktime_set(0, cld->p_data->tune_ramp_delay * 1000);
1922
1923         /* init forced output resume delay */
1924         if (!cld->p_data->resume_ramp_delay)
1925                 cld->p_data->resume_ramp_delay = CL_DVFS_OUTPUT_RAMP_DELAY;
1926
1927         /* init calibration timer */
1928         init_timer_deferrable(&cld->calibration_timer);
1929         cld->calibration_timer.function = calibration_timer_cb;
1930         cld->calibration_timer.data = (unsigned long)cld;
1931         cld->calibration_delay = usecs_to_jiffies(CL_DVFS_CALIBR_TIME);
1932
1933         /* Init tune0 settings */
1934         cld->tune0_low = cld->safe_dvfs->dfll_data.tune0;
1935         cld->tune0_high = cld->safe_dvfs->dfll_data.tune0_high_mv;
1936
1937         /* Get ready ouput voltage mapping*/
1938         cl_dvfs_init_maps(cld);
1939
1940         /* Setup output range thresholds */
1941         cl_dvfs_init_output_thresholds(cld);
1942
1943         /* Setup PMU interface */
1944         cl_dvfs_init_out_if(cld);
1945
1946         if (is_cl_dvfs_closed_loop(cld)) {
1947                 ret = cl_dvfs_sync(cld);
1948                 if (ret)
1949                         return ret;
1950         } else {
1951                 /*
1952                  * Configure control registers in disabled mode
1953                  * and disable clocks
1954                  */
1955                 cl_dvfs_init_cntrl_logic(cld);
1956                 cl_dvfs_disable_clocks(cld);
1957         }
1958
1959         /* Set target clock cl_dvfs data */
1960         tegra_dfll_set_cl_dvfs_data(cld->dfll_clk, cld);
1961         return 0;
1962 }
1963
1964 /*
1965  * Re-initialize and enable target device clock in open loop mode. Called
1966  * directly from SoC clock resume syscore operation. Closed loop will be
1967  * re-entered in platform syscore ops as well.
1968  */
1969 void tegra_cl_dvfs_resume(struct tegra_cl_dvfs *cld)
1970 {
1971         enum tegra_cl_dvfs_ctrl_mode mode = cld->mode;
1972         struct dfll_rate_req req = cld->last_req;
1973
1974         cl_dvfs_enable_clocks(cld);
1975
1976         /* Setup PMU interface, and configure controls in disabled mode */
1977         cl_dvfs_init_out_if(cld);
1978         cl_dvfs_init_cntrl_logic(cld);
1979
1980         /* Restore force output */
1981         cl_dvfs_writel(cld, cld->suspended_force_out, CL_DVFS_OUTPUT_FORCE);
1982
1983         cl_dvfs_disable_clocks(cld);
1984
1985         /* Restore last request and mode */
1986         cld->last_req = req;
1987         if (mode != TEGRA_CL_DVFS_DISABLED) {
1988                 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1989                 if (WARN(mode > TEGRA_CL_DVFS_OPEN_LOOP,
1990                          "DFLL was left locked in suspend\n"))
1991                         return;
1992         }
1993
1994         /* Re-enable bypass output if it was forced before suspend */
1995         if ((cld->p_data->u.pmu_pwm.dfll_bypass_dev) &&
1996             (cld->suspended_force_out & CL_DVFS_OUTPUT_FORCE_ENABLE)) {
1997                 if (!cld->safe_dvfs->dfll_data.is_bypass_down ||
1998                     !cld->safe_dvfs->dfll_data.is_bypass_down()) {
1999                         cl_dvfs_wmb(cld);
2000                         output_enable(cld);
2001                         udelay(cld->p_data->resume_ramp_delay);
2002                 }
2003         }
2004 }
2005
2006 #ifdef CONFIG_THERMAL
2007 /* cl_dvfs cap cooling device */
2008 static int tegra_cl_dvfs_get_vmax_cdev_max_state(
2009         struct thermal_cooling_device *cdev, unsigned long *max_state)
2010 {
2011         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
2012         *max_state = cld->therm_caps_num;
2013         return 0;
2014 }
2015
2016 static int tegra_cl_dvfs_get_vmax_cdev_cur_state(
2017         struct thermal_cooling_device *cdev, unsigned long *cur_state)
2018 {
2019         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
2020         *cur_state = cld->therm_cap_idx;
2021         return 0;
2022 }
2023
2024 static int tegra_cl_dvfs_set_vmax_cdev_state(
2025         struct thermal_cooling_device *cdev, unsigned long cur_state)
2026 {
2027         unsigned long flags;
2028         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
2029
2030         clk_lock_save(cld->dfll_clk, &flags);
2031
2032         if (cld->therm_cap_idx != cur_state) {
2033                 cld->therm_cap_idx = cur_state;
2034                 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
2035                         tegra_cl_dvfs_request_rate(cld,
2036                                 tegra_cl_dvfs_request_get(cld));
2037                 }
2038         }
2039         clk_unlock_restore(cld->dfll_clk, &flags);
2040         return 0;
2041 }
2042
/* Thermal cooling device operations for the DFLL voltage cap (vmax) */
static struct thermal_cooling_device_ops tegra_cl_dvfs_vmax_cool_ops = {
	.get_max_state = tegra_cl_dvfs_get_vmax_cdev_max_state,
	.get_cur_state = tegra_cl_dvfs_get_vmax_cdev_cur_state,
	.set_cur_state = tegra_cl_dvfs_set_vmax_cdev_state,
};
2048
2049 /* cl_dvfs vmin cooling device */
2050 static int tegra_cl_dvfs_get_vmin_cdev_max_state(
2051         struct thermal_cooling_device *cdev, unsigned long *max_state)
2052 {
2053         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
2054         *max_state = cld->therm_floors_num;
2055         return 0;
2056 }
2057
2058 static int tegra_cl_dvfs_get_vmin_cdev_cur_state(
2059         struct thermal_cooling_device *cdev, unsigned long *cur_state)
2060 {
2061         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
2062         *cur_state = cld->therm_floor_idx;
2063         return 0;
2064 }
2065
/*
 * Vmin cooling device: apply a new thermal floor index under the dfll clock
 * lock, update DVCO minimum rate and forced output accordingly; in closed
 * loop re-issue the current rate request and wait two sample periods so the
 * new Vmin delivery actually starts before returning.
 */
static int tegra_cl_dvfs_set_vmin_cdev_state(
	struct thermal_cooling_device *cdev, unsigned long cur_state)
{
	unsigned long flags;
	struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;

	clk_lock_save(cld->dfll_clk, &flags);

	if (cld->therm_floor_idx != cur_state) {
		cld->therm_floor_idx = cur_state;
		cl_dvfs_set_dvco_rate_min(cld, &cld->last_req);
		cl_dvfs_set_force_out_min(cld);
		if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
			tegra_cl_dvfs_request_rate(cld,
				tegra_cl_dvfs_request_get(cld));
			/* Delay to make sure new Vmin delivery started */
			udelay(2 * GET_SAMPLE_PERIOD(cld));
		}
	}
	clk_unlock_restore(cld->dfll_clk, &flags);
	return 0;
}
2088
/* Thermal cooling device operations for the DFLL voltage floor (vmin) */
static struct thermal_cooling_device_ops tegra_cl_dvfs_vmin_cool_ops = {
	.get_max_state = tegra_cl_dvfs_get_vmin_cdev_max_state,
	.get_cur_state = tegra_cl_dvfs_get_vmin_cdev_cur_state,
	.set_cur_state = tegra_cl_dvfs_set_vmin_cdev_state,
};
2094
/*
 * Deferred-work registration of the vmin/vmax cooling devices. Uses the
 * DT-based registration when the rail's cdev has a device node (with a
 * "_dfll" suffixed type), plain registration otherwise. Registration
 * failure is only reported: thresholds were initialized at worst-case
 * temperature anyway.
 */
static void tegra_cl_dvfs_init_cdev(struct work_struct *work)
{
	char *type;
	char dt_type[THERMAL_NAME_LENGTH];
	struct device_node *dn;
	struct tegra_cl_dvfs *cld = container_of(
		work, struct tegra_cl_dvfs, init_cdev_work);

	/* just report error - initialized at WC temperature, anyway */
	if (cld->safe_dvfs->dvfs_rail->vmin_cdev) {
		type = cld->safe_dvfs->dvfs_rail->vmin_cdev->cdev_type;
		snprintf(dt_type, sizeof(dt_type), "%s_dfll", type);
		dn = cld->safe_dvfs->dvfs_rail->vmin_cdev->cdev_dn;
		cld->vmin_cdev = dn ?
			thermal_of_cooling_device_register(dn, dt_type,
				(void *)cld, &tegra_cl_dvfs_vmin_cool_ops) :
			thermal_cooling_device_register(type,
				(void *)cld, &tegra_cl_dvfs_vmin_cool_ops);

		/* A cdev with no thermal instances is unusable - drop it */
		if (IS_ERR_OR_NULL(cld->vmin_cdev)  ||
		    list_empty(&cld->vmin_cdev->thermal_instances)) {
			cld->vmin_cdev = NULL;
			pr_err("%s: tegra cooling device %s failed to register\n",
			       __func__, type);
			return;
		}
		pr_info("%s: %s cooling device registered\n", __func__, type);
	}

	if (cld->safe_dvfs->dvfs_rail->vmax_cdev) {
		type = cld->safe_dvfs->dvfs_rail->vmax_cdev->cdev_type;
		snprintf(dt_type, sizeof(dt_type), "%s_dfll", type);
		dn = cld->safe_dvfs->dvfs_rail->vmax_cdev->cdev_dn;
		cld->vmax_cdev = dn ?
			thermal_of_cooling_device_register(dn, dt_type,
				(void *)cld, &tegra_cl_dvfs_vmax_cool_ops) :
			thermal_cooling_device_register(type,
				(void *)cld, &tegra_cl_dvfs_vmax_cool_ops);

		if (IS_ERR_OR_NULL(cld->vmax_cdev) ||
		    list_empty(&cld->vmax_cdev->thermal_instances)) {
			cld->vmax_cdev = NULL;
			pr_err("%s: tegra cooling device %s failed to register\n",
			       __func__, type);
			return;
		}
		pr_info("%s: %s cooling device registered\n", __func__, type);
	}
}
2144 #endif
2145
2146 #ifdef CONFIG_PM_SLEEP
2147 /*
2148  * cl_dvfs controls clock/voltage to other devices, including CPU. Therefore,
2149  * cl_dvfs driver pm suspend callback does not stop cl-dvfs operations. It is
2150  * only used to enforce cold/hot volatge limit, since temperature may change in
2151  * suspend without waking up. The correct temperature zone after supend will
2152  * be updated via cl_dvfs cooling device interface during resume of temperature
2153  * sensor.
2154  */
/*
 * PM suspend callback: force worst-case thermal settings (maximum cap index,
 * coldest floor) while the loop keeps running, and save the forced output
 * register so tegra_cl_dvfs_resume() can restore it.
 */
static int tegra_cl_dvfs_suspend_cl(struct device *dev)
{
	unsigned long flags;
	struct tegra_cl_dvfs *cld = dev_get_drvdata(dev);

	clk_lock_save(cld->dfll_clk, &flags);
	/* Invalidate cached cdev state so thermal core re-applies on resume */
	if (cld->vmax_cdev)
		cld->vmax_cdev->updated = false;
	cld->therm_cap_idx = cld->therm_caps_num;
	if (cld->vmin_cdev)
		cld->vmin_cdev->updated = false;
	cld->therm_floor_idx = 0;
	cl_dvfs_set_dvco_rate_min(cld, &cld->last_req);
	cl_dvfs_set_force_out_min(cld);
	if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
		set_cl_config(cld, &cld->last_req);
		set_request(cld, &cld->last_req);
		/* Delay to make sure new Vmin delivery started */
		udelay(2 * GET_SAMPLE_PERIOD(cld));
	}
	/* Saved force value is restored by tegra_cl_dvfs_resume() */
	cld->suspended_force_out = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
	clk_unlock_restore(cld->dfll_clk, &flags);

	pr_debug("%s: closed loop thermal control suspended\n", __func__);

	return 0;
}
2182
/* Only suspend_noirq is needed: cl_dvfs keeps running across suspend */
static const struct dev_pm_ops tegra_cl_dvfs_pm_ops = {
	.suspend_noirq = tegra_cl_dvfs_suspend_cl,
};
2186 #endif
2187
2188 /*
2189  * These dfll bypass APIs provide direct access to force output register.
2190  * Set operation always updates force value, but applies it only in open loop,
2191  * or disabled mode. Get operation returns force value back if it is applied,
2192  * and return monitored output, otherwise. Hence, get value matches real output
2193  * in any mode.
2194  */
/*
 * DFLL bypass callback: update the forced output value, and apply/enable it
 * only when the loop is not closed (forced output would fight closed-loop
 * regulation). Returns 0, or -EINVAL for an out-of-range selector.
 */
static int tegra_cl_dvfs_force_output(void *data, unsigned int out_sel)
{
	u32 val;
	unsigned long flags;
	struct tegra_cl_dvfs *cld = data;

	if (out_sel > OUT_MASK)
		return -EINVAL;

	clk_lock_save(cld->dfll_clk, &flags);

	val = output_force_set_val(cld, out_sel);
	if ((cld->mode < TEGRA_CL_DVFS_CLOSED_LOOP) &&
	    !(val & CL_DVFS_OUTPUT_FORCE_ENABLE)) {
		output_force_enable(cld, val);
		/* enable output only if bypass h/w is alive */
		if (!cld->safe_dvfs->dfll_data.is_bypass_down ||
		    !cld->safe_dvfs->dfll_data.is_bypass_down())
			output_enable(cld);
	}

	clk_unlock_restore(cld->dfll_clk, &flags);
	return 0;
}
2219
2220 static int tegra_cl_dvfs_get_output(void *data)
2221 {
2222         u32 val;
2223         unsigned long flags;
2224         struct tegra_cl_dvfs *cld = data;
2225
2226         clk_lock_save(cld->dfll_clk, &flags);
2227         val = cl_dvfs_get_output(cld);
2228         clk_unlock_restore(cld->dfll_clk, &flags);
2229         return val;
2230 }
2231
2232 static void cl_dvfs_init_pwm_bypass(struct tegra_cl_dvfs *cld,
2233                                            struct platform_device *byp_dev)
2234 {
2235         struct tegra_dfll_bypass_platform_data *p_data =
2236                 byp_dev->dev.platform_data;
2237
2238         int vinit = cld->p_data->u.pmu_pwm.init_uV;
2239         int vmin = cld->p_data->u.pmu_pwm.min_uV;
2240         int vstep = cld->p_data->u.pmu_pwm.step_uV;
2241
2242         /* Sync initial voltage and setup bypass callbacks */
2243         if ((vinit >= vmin) && vstep) {
2244                 unsigned int vsel = DIV_ROUND_UP((vinit - vmin), vstep);
2245                 tegra_cl_dvfs_force_output(cld, vsel);
2246         }
2247
2248         p_data->set_bypass_sel = tegra_cl_dvfs_force_output;
2249         p_data->get_bypass_sel = tegra_cl_dvfs_get_output;
2250         p_data->dfll_data = cld;
2251         wmb();
2252 }
2253
2254 /*
2255  * The Silicon Monitor (SiMon) notification provides grade information on
 * the DFLL controlled rail. The respective minimum voltage offset is applied
2257  * to thermal floors profile. SiMon offsets are negative, the higher the grade
2258  * the lower the floor. In addition SiMon grade may affect tuning settings: more
2259  * aggressive settings may be used at grades above zero.
2260  */
/*
 * Re-apply tune0 settings for a new SiMon grade: any grade above zero
 * toggles the SoC-provided tune0_simon_mask into both low and high
 * voltage range settings. No-op when the SoC defines no mask.
 * Caller must hold the DFLL clock lock (see the grade notifier).
 */
static void update_simon_tuning(struct tegra_cl_dvfs *cld, unsigned long grade)
{

        struct dvfs_dfll_data *dfll_data = &cld->safe_dvfs->dfll_data;
        u32 mask = dfll_data->tune0_simon_mask;

        if (!mask)
                return;

        /*
         * Safe order:
         * - switch to settings for low voltage tuning range at current grade
         * - update both low/high voltage range settings to match new grade
         *   notification (note that same toggle mask is applied to settings
         *   in both low and high voltage ranges).
         * - switch to settings for low voltage tuning range at new grade
         * - switch to settings for high voltage range at new grade if tuning
         *   state was high
         */
        tune_low(cld);
        udelay(1);
        pr_debug("tune0: 0x%x\n", cl_dvfs_readl(cld, CL_DVFS_TUNE0));

        cld->tune0_low = dfll_data->tune0 ^ (grade ? mask : 0);
        cld->tune0_high = dfll_data->tune0_high_mv ^ (grade ? mask : 0);

        tune_low(cld);
        udelay(1);
        pr_debug("tune0: 0x%x\n", cl_dvfs_readl(cld, CL_DVFS_TUNE0));

        if (cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH) {
                tune_high(cld);
                pr_debug("tune0: 0x%x\n", cl_dvfs_readl(cld, CL_DVFS_TUNE0));
        }
}
2296
2297 static int cl_dvfs_simon_grade_notify_cb(struct notifier_block *nb,
2298                                          unsigned long grade, void *v)
2299 {
2300         unsigned long flags;
2301         int i, simon_offset;
2302         int curr_domain = (int)((long)v);
2303         struct tegra_cl_dvfs *cld = container_of(
2304                 nb, struct tegra_cl_dvfs, simon_grade_nb);
2305         struct dvfs_rail *rail = cld->safe_dvfs->dvfs_rail;
2306
2307         if (!cld->therm_floors_num || (curr_domain != rail->simon_domain))
2308                 return NOTIFY_DONE;
2309
2310         if (grade >= rail->simon_vmin_offs_num)
2311                 grade = rail->simon_vmin_offs_num - 1;
2312         simon_offset = rail->simon_vmin_offsets[grade];
2313         BUG_ON(simon_offset > 0);
2314
2315         clk_lock_save(cld->dfll_clk, &flags);
2316
2317         /* Update tuning based on SiMon grade */
2318         update_simon_tuning(cld, grade);
2319
2320         /* Convert new floors and invalidate minimum rates */
2321         cl_dvfs_convert_cold_output_floor(cld, simon_offset);
2322         for (i = 0; i < cld->therm_floors_num; i++)
2323                 cld->dvco_rate_floors[i] = 0;
2324
2325         cl_dvfs_set_dvco_rate_min(cld, &cld->last_req);
2326         cl_dvfs_set_force_out_min(cld);
2327         if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
2328                 tegra_cl_dvfs_request_rate(cld,
2329                         tegra_cl_dvfs_request_get(cld));
2330         }
2331
2332         clk_unlock_restore(cld->dfll_clk, &flags);
2333
2334         pr_info("tegra_dvfs: set %s simon grade %lu\n", rail->reg_id, grade);
2335
2336         return NOTIFY_OK;
2337 };
2338
2339 static void tegra_cl_dvfs_register_simon_notifier(struct tegra_cl_dvfs *cld)
2340 {
2341         struct dvfs_rail *rail = cld->safe_dvfs->dvfs_rail;
2342
2343         /* Stay at default if no simon offsets */
2344         if (!rail->simon_vmin_offsets)
2345                 return;
2346
2347         cld->simon_grade_nb.notifier_call = cl_dvfs_simon_grade_notify_cb;
2348
2349         if (tegra_register_simon_notifier(&cld->simon_grade_nb)) {
2350                 pr_err("tegra_dvfs: failed to register %s simon notifier\n",
2351                        rail->reg_id);
2352                 return;
2353         }
2354
2355         pr_info("tegra_dvfs: registered %s simon notifier\n", rail->reg_id);
2356         return;
2357 }
2358
2359 /*
2360  * Two mechanisms to build vdd_map dynamically:
2361  *
2362  * 1. Use regulator interface to match voltage selector to voltage level,
2363  * and platform data coefficients to convert selector to register values.
 * Applied when vdd supply with I2C interface and internal voltage selection
2365  * register is connected.
2366  *
2367  * 2. Directly map PWM duty cycle selector to voltage level using platform data
2368  * coefficients. Applied when vdd supply driven by PWM data output is connected.
2369  */
2370 static int build_regulator_vdd_map(struct tegra_cl_dvfs_platform_data *p_data,
2371         struct regulator *reg, struct voltage_reg_map **p_vdd_map)
2372 {
2373         int n;
2374         u32 sel, i;
2375         struct voltage_reg_map *vdd_map;
2376
2377         if (!reg)
2378                 return -ENOSYS;
2379
2380         n = regulator_count_voltages(reg);
2381         if (n <= 0)
2382                 return -ENODATA;
2383
2384         vdd_map = kzalloc(sizeof(*vdd_map) * n, GFP_KERNEL);
2385         if (!vdd_map)
2386                 return -ENOMEM;
2387
2388         for (i = 0, sel = 0; sel < n; sel++) {
2389                 int v = regulator_list_voltage(reg, sel);
2390                 if (v > 0) {
2391                         vdd_map[i].reg_uV = v;
2392                         vdd_map[i].reg_value = sel * p_data->u.pmu_i2c.sel_mul +
2393                                 p_data->u.pmu_i2c.sel_offs;
2394                         i++;
2395                 }
2396         }
2397
2398         p_data->vdd_map_size = i;
2399         p_data->vdd_map = vdd_map;
2400         *p_vdd_map = vdd_map;
2401         return i ? 0 : -EINVAL;
2402 }
2403
2404 static int build_direct_vdd_map(struct tegra_cl_dvfs_platform_data *p_data,
2405                                 struct voltage_reg_map **p_vdd_map)
2406 {
2407         int i;
2408         struct voltage_reg_map *vdd_map =
2409                 kzalloc(sizeof(*vdd_map) * MAX_CL_DVFS_VOLTAGES, GFP_KERNEL);
2410
2411         if (!vdd_map)
2412                 return -ENOMEM;
2413
2414         for (i = 0; i < MAX_CL_DVFS_VOLTAGES; i++) {
2415                 vdd_map[i].reg_uV = i * p_data->u.pmu_pwm.step_uV +
2416                         p_data->u.pmu_pwm.min_uV;
2417                 vdd_map[i].reg_value = i;
2418         }
2419
2420         p_data->vdd_map_size = i;
2421         p_data->vdd_map = vdd_map;
2422         *p_vdd_map = vdd_map;
2423         return 0;
2424 }
2425
/* cl_dvfs compatibility tables */
2427 static struct tegra_cl_dvfs_soc_match_data t132_data = {
2428         .flags = TEGRA_CL_DVFS_DEFER_FORCE_CALIBRATE,
2429 };
2430
2431 static struct tegra_cl_dvfs_soc_match_data t210_data = {
2432         .flags = TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE |
2433                         TEGRA_CL_DVFS_SCALE_IN_OPEN_LOOP,
2434 };
2435
2436 static struct of_device_id tegra_cl_dvfs_of_match[] = {
2437         { .compatible = "nvidia,tegra114-dfll", },
2438         { .compatible = "nvidia,tegra124-dfll", },
2439         { .compatible = "nvidia,tegra132-dfll", .data = &t132_data, },
2440         { .compatible = "nvidia,tegra148-dfll", },
2441         { .compatible = "nvidia,tegra210-dfll", .data = &t210_data, },
2442         { },
2443 };
2444
2445 /* cl_dvfs dt parsing */
2446 #ifdef CONFIG_OF
2447
/*
 * DT parsing helpers. All of these expect a 'pdev' platform device in
 * the enclosing scope for logging; the mandatory variants (OF_READ_U32,
 * OF_GET_GPIO) additionally require an 'err_out' label to jump to when
 * the property is missing. 'name' is pasted as the property string.
 */

/* Read an optional u32 property; 'var' is left untouched if absent */
#define OF_READ_U32_OPT(node, name, var)                                       \
do {                                                                           \
        u32 val;                                                               \
        if (!of_property_read_u32((node), #name, &val)) {                      \
                (var) = val;                                                   \
                dev_dbg(&pdev->dev, "DT: " #name " = %u\n", val);              \
        }                                                                      \
} while (0)

/* Read a mandatory u32 property; jumps to err_out if missing */
#define OF_READ_U32(node, name, var)                                           \
do {                                                                           \
        u32 val;                                                               \
        if (of_property_read_u32((node), #name, &val)) {                       \
                dev_err(&pdev->dev, "missing " #name " in DT data\n");         \
                goto err_out;                                                  \
        }                                                                      \
        (var) = val;                                                           \
        dev_dbg(&pdev->dev, "DT: " #name " = %u\n", val);                      \
} while (0)

/* Resolve a named gpio with flags; jumps to err_out on failure */
#define OF_GET_GPIO(node, name, pin, flags)                                    \
do {                                                                           \
        (pin) = of_get_named_gpio_flags((node), #name, 0, &(flags));           \
        if ((pin) < 0) {                                                       \
                dev_err(&pdev->dev, "missing " #name " in DT data\n");         \
                goto err_out;                                                  \
        }                                                                      \
        dev_dbg(&pdev->dev, "DT: " #name " = %u\n", (pin));                    \
} while (0)

/* Read a boolean property (presence means true) */
#define OF_READ_BOOL(node, name, var)                                          \
do {                                                                           \
        (var) = of_property_read_bool((node), #name);                          \
        dev_dbg(&pdev->dev, "DT: " #name " = %s\n", (var) ? "true" : "false"); \
} while (0)

/* Index of the period argument within the "pwms" phandle specifier */
#define TEGRA_DFLL_OF_PWM_PERIOD_CELL 1
2485
2486 static int dt_parse_pwm_regulator(struct platform_device *pdev,
2487         struct device_node *r_dn, struct tegra_cl_dvfs_platform_data *p_data)
2488 {
2489         unsigned long val;
2490         int min_uV, max_uV, step_uV, init_uV;
2491         struct of_phandle_args args;
2492         struct platform_device *rdev = of_find_device_by_node(r_dn);
2493
2494         if (of_parse_phandle_with_args(r_dn, "pwms", "#pwm-cells", 0, &args)) {
2495                 dev_err(&pdev->dev, "DT: failed to parse pwms property\n");
2496                 goto err_out;
2497         }
2498         of_node_put(args.np);
2499
2500         if (args.args_count <= TEGRA_DFLL_OF_PWM_PERIOD_CELL) {
2501                 dev_err(&pdev->dev, "DT: low #pwm-cells %d\n", args.args_count);
2502                 goto err_out;
2503         }
2504
2505         /* convert pwm period in ns to cl_dvfs pwm clock rate in Hz */
2506         val = args.args[TEGRA_DFLL_OF_PWM_PERIOD_CELL];
2507         val = (NSEC_PER_SEC / val) * (MAX_CL_DVFS_VOLTAGES - 1);
2508         p_data->u.pmu_pwm.pwm_rate = val;
2509         dev_dbg(&pdev->dev, "DT: pwm-rate: %lu\n", val);
2510
2511         /* voltage boundaries and step */
2512         OF_READ_U32(r_dn, regulator-min-microvolt, min_uV);
2513         OF_READ_U32(r_dn, regulator-max-microvolt, max_uV);
2514         OF_READ_U32(r_dn, regulator-init-microvolt, init_uV);
2515
2516         step_uV = (max_uV - min_uV) / (MAX_CL_DVFS_VOLTAGES - 1);
2517         if (step_uV <= 0) {
2518                 dev_err(&pdev->dev, "DT: invalid pwm step %d\n", step_uV);
2519                 goto err_out;
2520         }
2521         if ((max_uV - min_uV) % (MAX_CL_DVFS_VOLTAGES - 1))
2522                 dev_warn(&pdev->dev,
2523                          "DT: pwm range [%d...%d] is not aligned on %d steps\n",
2524                          min_uV, max_uV, MAX_CL_DVFS_VOLTAGES - 1);
2525
2526         p_data->u.pmu_pwm.min_uV = min_uV;
2527         p_data->u.pmu_pwm.step_uV = step_uV;
2528         p_data->u.pmu_pwm.init_uV = init_uV;
2529
2530         /*
2531          * For pwm regulator access from the regulator driver, without
2532          * interference with closed loop operations, cl_dvfs provides
2533          * dfll bypass callbacks in device platform data
2534          */
2535         if (rdev && rdev->dev.platform_data)
2536                 p_data->u.pmu_pwm.dfll_bypass_dev = rdev;
2537
2538         of_node_put(r_dn);
2539         return 0;
2540
2541 err_out:
2542         of_node_put(r_dn);
2543         return -EINVAL;
2544 }
2545
2546 static int dt_parse_pwm_pmic_params(struct platform_device *pdev,
2547         struct device_node *pmic_dn, struct tegra_cl_dvfs_platform_data *p_data)
2548 {
2549         int pin, i = 0;
2550         enum of_gpio_flags f;
2551         bool pwm_1wire_buffer, pwm_1wire_direct, pwm_2wire;
2552         struct device_node *r_dn =
2553                  of_parse_phandle(pmic_dn, "pwm-regulator", 0);
2554
2555         /* pwm regulator device */
2556         if (!r_dn) {
2557                 dev_err(&pdev->dev, "missing DT pwm regulator data\n");
2558                 goto err_out;
2559         }
2560
2561         if (dt_parse_pwm_regulator(pdev, r_dn, p_data)) {
2562                 dev_err(&pdev->dev, "failed to parse DT pwm regulator\n");
2563                 goto err_out;
2564         }
2565
2566         /* pwm config data */
2567         OF_READ_BOOL(pmic_dn, pwm-1wire-buffer, pwm_1wire_buffer);
2568         OF_READ_BOOL(pmic_dn, pwm-1wire-direct, pwm_1wire_direct);
2569         OF_READ_BOOL(pmic_dn, pwm-2wire, pwm_2wire);
2570         if (pwm_1wire_buffer) {
2571                 i++;
2572                 p_data->u.pmu_pwm.pwm_bus = TEGRA_CL_DVFS_PWM_1WIRE_BUFFER;
2573         }
2574         if (pwm_1wire_direct) {
2575                 i++;
2576                 p_data->u.pmu_pwm.pwm_bus = TEGRA_CL_DVFS_PWM_1WIRE_DIRECT;
2577         }
2578         if (pwm_2wire) {
2579                 i++;
2580                 p_data->u.pmu_pwm.pwm_bus = TEGRA_CL_DVFS_PWM_2WIRE;
2581         }
2582         if (i != 1) {
2583                 dev_err(&pdev->dev, "%s pwm_bus in DT board data\n",
2584                         i ? "inconsistent" : "missing");
2585                 goto err_out;
2586         }
2587
2588         /* pwm pins data */
2589         OF_GET_GPIO(pmic_dn, pwm-data-gpio, pin, f);
2590         p_data->u.pmu_pwm.pinctrl_dev = pinctrl_get_dev_from_gpio(pin);
2591         if (!p_data->u.pmu_pwm.pinctrl_dev) {
2592                 dev_err(&pdev->dev, "No tegra pincontrol driver\n");
2593                 goto err_out;
2594         }
2595         p_data->u.pmu_pwm.pwm_pingroup = pinctrl_get_selector_from_gpio(
2596                                         p_data->u.pmu_pwm.pinctrl_dev, pin);
2597         if (p_data->u.pmu_pwm.pwm_pingroup < 0) {
2598                 dev_err(&pdev->dev, "invalid gpio %d\n", pin);
2599                 goto err_out;
2600         }
2601
2602         if (pwm_1wire_buffer) {
2603                 OF_GET_GPIO(pmic_dn, pwm-buffer-ctrl-gpio, pin, f);
2604                 p_data->u.pmu_pwm.out_enable_high = !(f & OF_GPIO_ACTIVE_LOW);
2605                 p_data->u.pmu_pwm.out_gpio = pin;
2606         } else if (pwm_2wire) {
2607                 OF_GET_GPIO(pmic_dn, pwm-clk-gpio, pin, f);
2608                 p_data->u.pmu_pwm.pwm_clk_pingroup =
2609                         pinctrl_get_selector_from_gpio(
2610                                 p_data->u.pmu_pwm.pinctrl_dev, pin);
2611                 if (p_data->u.pmu_pwm.pwm_pingroup < 0) {
2612                         dev_err(&pdev->dev, "invalid gpio %d\n", pin);
2613                         goto err_out;
2614                 }
2615                 OF_READ_BOOL(pmic_dn, pwm-delta-mode,
2616                              p_data->u.pmu_pwm.delta_mode);
2617         }
2618
2619         of_node_put(pmic_dn);
2620         return 0;
2621
2622 err_out:
2623         of_node_put(pmic_dn);
2624         return -EINVAL;
2625 }
2626
/*
 * Parse I2C PMIC integration node: slave address, voltage register,
 * selector conversion coefficients, undershoot guardband, and I2C bus
 * rates (hs-master-code is required only when a hs rate is given).
 * Consumes (puts) the pmic_dn reference on both success and error paths.
 */
static int dt_parse_i2c_pmic_params(struct platform_device *pdev,
        struct device_node *pmic_dn, struct tegra_cl_dvfs_platform_data *p_data)
{
        OF_READ_U32(pmic_dn, pmic-i2c-address, p_data->u.pmu_i2c.slave_addr);
        OF_READ_U32(pmic_dn, pmic-i2c-voltage-register, p_data->u.pmu_i2c.reg);

        OF_READ_BOOL(pmic_dn, i2c-10-bit-addresses, p_data->u.pmu_i2c.addr_10);

        OF_READ_U32(pmic_dn, sel-conversion-slope, p_data->u.pmu_i2c.sel_mul);
        OF_READ_U32_OPT(pmic_dn, sel-conversion-offset,
                        p_data->u.pmu_i2c.sel_offs);
        OF_READ_U32_OPT(pmic_dn, pmic-undershoot-gb, p_data->pmu_undershoot_gb);

        OF_READ_U32(pmic_dn, i2c-fs-rate, p_data->u.pmu_i2c.fs_rate);
        OF_READ_U32_OPT(pmic_dn, i2c-hs-rate, p_data->u.pmu_i2c.hs_rate);
        if (p_data->u.pmu_i2c.hs_rate)
                OF_READ_U32(pmic_dn, i2c-hs-master-code,
                            p_data->u.pmu_i2c.hs_master_code);

        of_node_put(pmic_dn);
        return 0;

err_out:
        of_node_put(pmic_dn);
        return -EINVAL;
}
2653
2654 static int dt_parse_board_params(struct platform_device *pdev,
2655         struct device_node *b_dn, struct tegra_cl_dvfs_cfg_param *p_cfg)
2656 {
2657         int i = 0;
2658         bool fixed_forcing, auto_forcing, no_forcing;
2659
2660         OF_READ_U32(b_dn, sample-rate, p_cfg->sample_rate);
2661         OF_READ_U32(b_dn, cf, p_cfg->cf);
2662         OF_READ_U32(b_dn, ci, p_cfg->ci);
2663         OF_READ_U32(b_dn, cg, p_cfg->cg);
2664         OF_READ_U32(b_dn, droop-cut-value, p_cfg->droop_cut_value);
2665         OF_READ_U32(b_dn, droop-restore-ramp, p_cfg->droop_restore_ramp);
2666         OF_READ_U32(b_dn, scale-out-ramp, p_cfg->scale_out_ramp);
2667
2668         OF_READ_BOOL(b_dn, cg-scale, p_cfg->cg_scale);
2669
2670         OF_READ_BOOL(b_dn, fixed-output-forcing, fixed_forcing);
2671         OF_READ_BOOL(b_dn, auto-output-forcing, auto_forcing);
2672         OF_READ_BOOL(b_dn, no-output-forcing, no_forcing);
2673         if (fixed_forcing) {
2674                 i++;
2675                 p_cfg->force_mode = TEGRA_CL_DVFS_FORCE_FIXED;
2676         }
2677         if (auto_forcing) {
2678                 i++;
2679                 p_cfg->force_mode = TEGRA_CL_DVFS_FORCE_AUTO;
2680         }
2681         if (no_forcing) {
2682                 i++;
2683                 p_cfg->force_mode = TEGRA_CL_DVFS_FORCE_NONE;
2684         }
2685         if (i != 1) {
2686                 dev_err(&pdev->dev, "%s force_mode in DT board data\n",
2687                         i ? "inconsistent" : "missing");
2688                 goto err_out;
2689         }
2690
2691         of_node_put(b_dn);
2692         return 0;
2693
2694 err_out:
2695         of_node_put(b_dn);
2696         return -EINVAL;
2697 }
2698
/*
 * Top-level DT parser: fill platform data from the cl_dvfs node.
 * Reads target clock name, per-SoC and workaround flags, optional ramp
 * delays, exactly one pmic integration (i2c phandle XOR pwm child node)
 * and the board-params phandle. Returns 0 or a negative errno.
 */
static int cl_dvfs_dt_parse_pdata(struct platform_device *pdev,
                                  struct tegra_cl_dvfs_platform_data *p_data)
{
        int ret;
        u32 flags = 0;
        struct device_node *dn = pdev->dev.of_node;
        struct device_node *i2c_dn, *pwm_dn, *b_dn;
        const struct of_device_id *match;

        ret = of_property_read_string(dn, "out-clock-name",
                                      &p_data->dfll_clk_name);
        if (ret) {
                dev_err(&pdev->dev, "missing target clock name in DT data\n");
                return ret;
        }
        dev_dbg(&pdev->dev, "DT: target clock: %s\n", p_data->dfll_clk_name);

        /* merge per-SoC flags from the compatibility table, if any */
        match = of_match_node(tegra_cl_dvfs_of_match, dn);
        if (match && match->data) {
                const struct tegra_cl_dvfs_soc_match_data *data = match->data;
                flags |= data->flags;
        }

        /* workaround flags driven by DT property presence */
        if (of_find_property(dn, "i2c-quiet-output-workaround", NULL))
                flags |= TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET;
        if (of_find_property(dn, "monitor-data-new-workaround", NULL))
                flags |= TEGRA_CL_DVFS_DATA_NEW_NO_USE;
        if (!of_find_property(dn, "dynamic-output-lut-workaround", NULL))
                flags |= TEGRA_CL_DVFS_DYN_OUTPUT_CFG;  /* inverse polarity */
        if (flags & TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE) {
                /* Properties below are accepted only with idle override */
                if (of_find_property(dn, "defer-force-calibrate", NULL))
                        flags |= TEGRA_CL_DVFS_DEFER_FORCE_CALIBRATE;
                if (of_find_property(dn, "calibrate-force-vmin", NULL))
                        flags |= TEGRA_CL_DVFS_CALIBRATE_FORCE_VMIN;
        }
        p_data->flags = flags;
        dev_dbg(&pdev->dev, "DT: flags: 0x%x\n", p_data->flags);

        OF_READ_U32_OPT(dn, tune-ramp-delay, p_data->tune_ramp_delay);
        OF_READ_U32_OPT(dn, resume-ramp-delay, p_data->resume_ramp_delay);

        /* pmic integration: exactly one of i2c/pwm must be present */
        i2c_dn = of_parse_phandle(dn, "i2c-pmic-integration", 0);
        pwm_dn = of_get_child_by_name(dn, "pwm-pmic-integration");
        if (!i2c_dn == !pwm_dn) {
                of_node_put(i2c_dn);
                of_node_put(pwm_dn);
                dev_err(&pdev->dev, "%s DT pmic data\n",
                        i2c_dn ? "inconsistent" : "missing");
                return -ENODATA;
        }

        /* the dt_parse_* helpers put the node reference themselves */
        ret = i2c_dn ? dt_parse_i2c_pmic_params(pdev, i2c_dn, p_data) :
                        dt_parse_pwm_pmic_params(pdev, pwm_dn, p_data);
        if (ret) {
                dev_err(&pdev->dev, "failed to parse DT pmic data\n");
                return ret;
        }
        p_data->pmu_if = i2c_dn ? TEGRA_CL_DVFS_PMU_I2C : TEGRA_CL_DVFS_PMU_PWM;

        /* board configuration parameters */
        b_dn = of_parse_phandle(dn, "board-params", 0);
        if (!b_dn) {
                dev_err(&pdev->dev, "missing DT board data\n");
                return -ENODATA;
        }

        /* dt_parse_board_params() puts the b_dn reference itself */
        ret = dt_parse_board_params(pdev, b_dn, p_data->cfg_param);
        if (ret) {
                dev_err(&pdev->dev, "failed to parse DT board data\n");
                return ret;
        }

        dev_info(&pdev->dev, "DT data retrieved successfully\n");
        return 0;
}
2776 #else
2777 static void *tegra_cl_dvfs_dt_parse_pdata(struct platform_device *pdev)
2778 {
2779         return NULL;
2780 }
2781 #endif
2782
/*
 * Probe: map MMIO resources, obtain platform data (board files or DT),
 * acquire control/target clocks, build the voltage map if needed, and
 * initialize the cl_dvfs instance. Registered via platform_driver_probe()
 * so the __init marking is safe. On failure all locally allocated data
 * (p_data/p_cfg/vdd_map/cld) is released; after cl_dvfs_init() succeeds
 * the probe cannot fail.
 */
static int __init tegra_cl_dvfs_probe(struct platform_device *pdev)
{
        int ret;
        struct tegra_cl_dvfs_platform_data *p_data;
        struct resource *res, *res_i2c = NULL;
        struct tegra_cl_dvfs_cfg_param *p_cfg = NULL;
        struct voltage_reg_map *p_vdd_map = NULL;
        struct tegra_cl_dvfs *cld = NULL;
        struct clk *ref_clk, *soc_clk, *i2c_clk, *safe_dvfs_clk, *dfll_clk;

        /* Get resources */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "missing register base\n");
                return -ENOMEM;
        }
        dev_dbg(&pdev->dev, "DFLL MMIO [0x%lx ... 0x%lx]\n",
                (unsigned long)res->start, (unsigned long)res->end);

        /* Optional second MMIO region: separate i2c output interface */
        if (pdev->num_resources > 1) {
                res_i2c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                if (!res_i2c) {
                        dev_err(&pdev->dev, "missing i2c register base\n");
                        return -ENOMEM;
                }
                dev_dbg(&pdev->dev, "DFLL I2C MMIO [0x%lx ... 0x%lx]\n",
                        (unsigned long)res_i2c->start,
                        (unsigned long)res_i2c->end);
        }

        /* Board platform data takes precedence; otherwise parse DT */
        p_data = pdev->dev.platform_data;
        if (!p_data) {
                p_data = kzalloc(sizeof(*p_data), GFP_KERNEL);
                if (!p_data) {
                        dev_err(&pdev->dev, "failed to allocate p_data\n");
                        ret = -ENOMEM;
                        goto err_out;
                }
                p_cfg = kzalloc(sizeof(*p_cfg), GFP_KERNEL);
                if (!p_cfg) {
                        dev_err(&pdev->dev, "failed to allocate p_cfg\n");
                        ret = -ENOMEM;
                        goto err_out;
                }

                p_data->cfg_param = p_cfg;
                ret = cl_dvfs_dt_parse_pdata(pdev, p_data);
                if (ret) {
                        dev_err(&pdev->dev, "failed to parse DT p_data\n");
                        goto err_out;
                }
        } else if (!p_data->cfg_param) {
                dev_err(&pdev->dev, "missing platform data\n");
                ret = -ENODATA;
                goto err_out;
        }

        /* Legacy Tegra clock framework handles: no clk_put on error paths */
        ref_clk = clk_get(&pdev->dev, "ref");
        soc_clk = clk_get(&pdev->dev, "soc");
        i2c_clk = clk_get(&pdev->dev, "i2c");
        safe_dvfs_clk = clk_get(&pdev->dev, "safe_dvfs");
        dfll_clk = clk_get(&pdev->dev, p_data->dfll_clk_name);
        if (IS_ERR(ref_clk) || IS_ERR(soc_clk) || IS_ERR(i2c_clk)) {
                dev_err(&pdev->dev, "missing control clock\n");
                ret = -ENOENT;
                goto err_out;
        }
        if (IS_ERR(safe_dvfs_clk)) {
                dev_err(&pdev->dev, "missing safe dvfs source clock\n");
                ret = PTR_ERR(safe_dvfs_clk);
                goto err_out;
        }
        if (IS_ERR(dfll_clk)) {
                dev_err(&pdev->dev, "missing target dfll clock\n");
                ret = PTR_ERR(dfll_clk);
                goto err_out;
        }
        if (!safe_dvfs_clk->dvfs || !safe_dvfs_clk->dvfs->dvfs_rail) {
                dev_err(&pdev->dev, "invalid safe dvfs source\n");
                ret = -EINVAL;
                goto err_out;
        }

        /* Build vdd_map if not specified by platform data */
        if (!p_data->vdd_map || !p_data->vdd_map_size) {
                struct regulator *reg = safe_dvfs_clk->dvfs->dvfs_rail->reg;
                if (p_data->pmu_if == TEGRA_CL_DVFS_PMU_PWM)
                        ret = build_direct_vdd_map(p_data, &p_vdd_map);
                else
                        ret = build_regulator_vdd_map(p_data, reg, &p_vdd_map);

                if (ret) {
                        dev_err(&pdev->dev, "missing vdd_map (%d)\n", ret);
                        goto err_out;
                }
        }

        /* Allocate cl_dvfs object and populate resource accessors */
        cld = kzalloc(sizeof(*cld), GFP_KERNEL);
        if (!cld) {
                dev_err(&pdev->dev, "failed to allocate cl_dvfs object\n");
                ret = -ENOMEM;
                goto err_out;
        }

        /* fall back to the main base when no dedicated i2c region exists */
        cld->cl_base = IO_ADDRESS(res->start);
        cld->cl_i2c_base = res_i2c ? IO_ADDRESS(res_i2c->start) : cld->cl_base;
        cld->p_data = p_data;
        cld->ref_clk = ref_clk;
        cld->soc_clk = soc_clk;
        cld->i2c_clk = i2c_clk;
        cld->dfll_clk = dfll_clk;
        cld->safe_dvfs = safe_dvfs_clk->dvfs;
#ifdef CONFIG_THERMAL
        INIT_WORK(&cld->init_cdev_work, tegra_cl_dvfs_init_cdev);
#endif
        /* Initialize cl_dvfs */
        ret = cl_dvfs_init(cld);
        if (ret)
                goto err_out;

        /* From now on probe would not fail */
        platform_set_drvdata(pdev, cld);

        /*
         *  I2C interface mux is embedded into cl_dvfs h/w, so the attached
         *  regulator can be accessed by s/w independently. PWM interface,
         *  on the other hand, is accessible solely through cl_dvfs registers.
         *  Hence, bypass device is supported in PWM mode only.
         */
        if ((p_data->pmu_if == TEGRA_CL_DVFS_PMU_PWM) &&
            p_data->u.pmu_pwm.dfll_bypass_dev) {
                clk_enable(cld->soc_clk);
                cl_dvfs_init_pwm_bypass(cld, p_data->u.pmu_pwm.dfll_bypass_dev);
        }

        /* Register SiMon notifier */
        tegra_cl_dvfs_register_simon_notifier(cld);

        /*
         * Schedule cooling device registration as a separate work to address
         * the following race: when cl_dvfs is probed the DFLL child clock
         * (e.g., CPU) cannot be changed; on the other hand cooling device
         * registration will update the entire thermal zone, and may trigger
         * rate change of the target clock
         */
        if (cld->safe_dvfs->dvfs_rail->vmin_cdev ||
            cld->safe_dvfs->dvfs_rail->vmax_cdev)
                schedule_work(&cld->init_cdev_work);
        return 0;

err_out:
        /* drop any vdd_map this probe built before freeing it */
        if (p_data && p_vdd_map)
                p_data->vdd_map = NULL;
        kfree(p_vdd_map);
        kfree(cld);
        if (!pdev->dev.platform_data) {
                kfree(p_cfg);
                kfree(p_data);
        }
        return ret;
}
2945
/* Driver object; bound via platform_driver_probe() in tegra_init_cl_dvfs() */
static struct platform_driver tegra_cl_dvfs_driver = {
        .driver         = {
                .name   = "tegra_cl_dvfs",
                .owner  = THIS_MODULE,
                .of_match_table = tegra_cl_dvfs_of_match,
#ifdef CONFIG_PM_SLEEP
                .pm = &tegra_cl_dvfs_pm_ops,
#endif
        },
};
2956
/*
 * Register the driver and probe already-registered devices immediately.
 * platform_driver_probe() lets the __init probe code be discarded after
 * boot (no hotplug of this device is expected).
 */
int __init tegra_init_cl_dvfs(void)
{
        return platform_driver_probe(&tegra_cl_dvfs_driver,
                                     tegra_cl_dvfs_probe);
}
2962
2963 /*
2964  * CL_DVFS states:
2965  *
2966  * - DISABLED: control logic mode - DISABLED, output interface disabled,
2967  *   dfll in reset
2968  * - OPEN_LOOP: control logic mode - OPEN_LOOP, output interface disabled,
2969  *   dfll is running "unlocked"
2970  * - CLOSED_LOOP: control logic mode - CLOSED_LOOP, output interface enabled,
2971  *   dfll is running "locked"
2972  */
2973
2974 /* Switch from any other state to DISABLED state */
void tegra_cl_dvfs_disable(struct tegra_cl_dvfs *cld)
{
	switch (cld->mode) {
	case TEGRA_CL_DVFS_CLOSED_LOOP:
		/*
		 * Callers are expected to unlock (closed -> open loop)
		 * before disabling; warn, then run the full teardown here
		 * anyway. The order matters: load open-loop control
		 * settings, prepare output interface for disabling, enter
		 * DISABLED mode, and only then complete output disable.
		 */
		WARN(1, "DFLL is disabled directly from closed loop mode\n");
		set_ol_config(cld);
		output_disable_ol_prepare(cld);
		set_mode(cld, TEGRA_CL_DVFS_DISABLED);
		output_disable_post_ol(cld);
		invalidate_request(cld);
		cl_dvfs_disable_clocks(cld);
		return;

	case TEGRA_CL_DVFS_OPEN_LOOP:
		set_mode(cld, TEGRA_CL_DVFS_DISABLED);
		invalidate_request(cld);
		cl_dvfs_disable_clocks(cld);
		return;

	default:
		/* Already DISABLED (or UNINITIALIZED): nothing to tear down */
		BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
		return;
	}
}
2999
/* Switch from DISABLED state to OPEN_LOOP state */
int tegra_cl_dvfs_enable(struct tegra_cl_dvfs *cld)
{
	/* Enabling is legal only after h/w and s/w state are initialized */
	if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
		pr_err("%s: Cannot enable DFLL in %s mode\n",
		       __func__, mode_name[cld->mode]);
		return -EPERM;
	}

	/* Already in OPEN_LOOP or CLOSED_LOOP mode: nothing to do */
	if (cld->mode != TEGRA_CL_DVFS_DISABLED)
		return 0;

	cl_dvfs_enable_clocks(cld);
	if (cld->p_data->flags & TEGRA_CL_DVFS_SCALE_IN_OPEN_LOOP)
		set_request_scale(cld, cld->last_req.scale);
	set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
	/* brief delay after mode switch - presumably h/w settling time;
	 * TODO confirm against TRM */
	udelay(1);
	return 0;
}
3019
3020 /* Switch from OPEN_LOOP state to CLOSED_LOOP state */
int tegra_cl_dvfs_lock(struct tegra_cl_dvfs *cld)
{
	struct dfll_rate_req *req = &cld->last_req;

	switch (cld->mode) {
	case TEGRA_CL_DVFS_CLOSED_LOOP:
		/* Already locked */
		return 0;

	case TEGRA_CL_DVFS_OPEN_LOOP:
		/* A valid rate request must be set before locking */
		if (req->freq == 0) {
			pr_err("%s: Cannot lock DFLL at rate 0\n", __func__);
			return -EINVAL;
		}

		/*
		 * Update control logic setting with last rate request;
		 * sync output limits with current tuning and thermal state,
		 * enable output and switch to closed loop mode. Make sure
		 * forced output does not interfere with closed loop.
		 */
		set_cl_config(cld, req);
		output_enable(cld);
		set_mode(cld, TEGRA_CL_DVFS_CLOSED_LOOP);
		set_request(cld, req);
		calibration_timer_update(cld);
		return 0;

	default:
		/* DISABLED or UNINITIALIZED: locking is not allowed */
		BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
		pr_err("%s: Cannot lock DFLL in %s mode\n",
		       __func__, mode_name[cld->mode]);
		return -EPERM;
	}
}
3055
3056 /* Switch from CLOSED_LOOP state to OPEN_LOOP state */
int tegra_cl_dvfs_unlock(struct tegra_cl_dvfs *cld)
{
	int ret;
	bool in_range;

	switch (cld->mode) {
	case TEGRA_CL_DVFS_CLOSED_LOOP:
		/* Load open loop settings while still in closed loop */
		set_ol_config(cld);
		in_range = is_vmin_delivered(cld);

		/* allow grace 2 sample periods to get in range */
		if (!in_range)
			udelay(2 * GET_SAMPLE_PERIOD(cld));

		ret = output_disable_ol_prepare(cld);
		set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
		if (!ret)
			ret = output_disable_post_ol(cld);

		/* re-check delivery only if Vmin was out of range above */
		if (!ret && !in_range && !is_vmin_delivered(cld)) {
			pr_err("cl_dvfs: exiting closed loop out of range\n");
			return -EINVAL;
		}
		return ret;

	case TEGRA_CL_DVFS_OPEN_LOOP:
		/* Already unlocked */
		return 0;

	default:
		/* DISABLED or UNINITIALIZED: unlocking is not allowed */
		BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
		pr_err("%s: Cannot unlock DFLL in %s mode\n",
		       __func__, mode_name[cld->mode]);
		return -EPERM;
	}
}
3092
3093 /*
3094  * Convert requested rate into the control logic settings. In CLOSED_LOOP mode,
3095  * update new settings immediately to adjust DFLL output rate accordingly.
3096  * Otherwise, just save them until next switch to closed loop.
3097  */
int tegra_cl_dvfs_request_rate(struct tegra_cl_dvfs *cld, unsigned long rate)
{
	u32 val;
	bool dvco_min_crossed, dvco_min_updated;
	struct dfll_rate_req req;
	req.rate = rate;

	if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
		pr_err("%s: Cannot set DFLL rate in %s mode\n",
		       __func__, mode_name[cld->mode]);
		return -EPERM;
	}

	/* Calibrate dfll minimum rate */
	cl_dvfs_calibrate(cld);

	/* Update minimum dvco rate if we are crossing tuning threshold */
	dvco_min_updated = cl_tune_target(cld, rate) !=
		cl_tune_target(cld, cld->last_req.rate);
	if (dvco_min_updated)
		cl_dvfs_set_dvco_rate_min(cld, &req);

	/*
	 * Determine DFLL output scale. Rates below dvco minimum are
	 * produced by scaling the dvco minimum output (skipper setting
	 * scale/SCALE_MAX); a zero scale means the rate is unreachable.
	 */
	req.scale = SCALE_MAX - 1;
	if (rate < cld->dvco_rate_min) {
		/* divide both terms by 1000 to avoid rate * SCALE_MAX
		 * overflow */
		int scale = DIV_ROUND_CLOSEST((rate / 1000 * SCALE_MAX),
			(cld->dvco_rate_min / 1000));
		if (!scale) {
			pr_err("%s: Rate %lu is below scalable range\n",
			       __func__, rate);
			goto req_err;
		}
		req.scale = scale - 1;
		rate = cld->dvco_rate_min;
	}
	dvco_min_crossed = (rate == cld->dvco_rate_min) &&
		(cld->last_req.rate > cld->dvco_rate_min);

	/* Convert requested rate into frequency request and scale settings */
	val = GET_REQUEST_FREQ(rate, cld->ref_rate);
	if (val > FREQ_MAX) {
		pr_err("%s: Rate %lu is above dfll range\n", __func__, rate);
		goto req_err;
	}
	req.freq = val;
	rate = GET_REQUEST_RATE(val, cld->ref_rate);

	/* Find safe voltage for requested rate */
	if (find_safe_output(cld, rate, &req.output)) {
		pr_err("%s: Failed to find safe output for rate %lu\n",
		       __func__, rate);
		goto req_err;
	}
	req.cap = req.output;

	/*
	 * Save validated request, and in CLOSED_LOOP mode actually update
	 * control logic settings; use request output to set maximum voltage
	 * limit, but keep one LUT step room above safe voltage
	 */
	cld->last_req = req;

	if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
		set_cl_config(cld, &cld->last_req);
		set_request(cld, &cld->last_req);
		if (dvco_min_crossed || dvco_min_updated)
			calibration_timer_update(cld);
	} else if ((cld->mode == TEGRA_CL_DVFS_OPEN_LOOP) &&
		   (cld->p_data->flags & TEGRA_CL_DVFS_SCALE_IN_OPEN_LOOP)) {
		set_request_scale(cld, req.scale);
	}
	return 0;

req_err:
	/* Restore dvco rate minimum changed for the rejected request */
	if (dvco_min_updated)
		cl_dvfs_set_dvco_rate_min(cld, &cld->last_req);
	return -EINVAL;

}
3178
3179 unsigned long tegra_cl_dvfs_request_get(struct tegra_cl_dvfs *cld)
3180 {
3181         struct dfll_rate_req *req = &cld->last_req;
3182
3183         /*
3184          * If running below dvco minimum rate with skipper resolution:
3185          * dvco min rate / 256 - return last requested rate rounded to 1kHz.
3186          * If running above dvco minimum, with closed loop resolution:
3187          * ref rate / 2 - return cl_dvfs target rate.
3188          */
3189         if ((req->scale + 1) < SCALE_MAX)
3190                 return req->rate / 1000 * 1000;
3191
3192         return GET_REQUEST_RATE(req->freq, cld->ref_rate);
3193 }
3194
3195 /*
3196  * Compare actually set (last delivered) and required Vmin. These levels may
3197  * be different if temperature or SiMon grade changes while cl-dvfs output
3198  * interface is disabled, and new required setting is not delivered to PMIC.
3199  * It actually may happen while cl_dvfs is disabled, or during transition
3200  * to/from disabled state.
3201  *
3202  * Return:
3203  * 0 if levels are equal,
3204  * +1 if last Vmin is above required,
3205  * -1 if last Vmin is below required.
3206  */
3207 int tegra_dvfs_cmp_dfll_vmin_tfloor(struct clk *dfll_clk, int *tfloor)
3208 {
3209         int ret = 0;
3210         unsigned long flags;
3211         u8 needed_out_min, last_out_min;
3212         struct tegra_cl_dvfs *cld;
3213
3214         if (!dfll_clk)
3215                 return -EINVAL;
3216
3217         cld = tegra_dfll_get_cl_dvfs_data(dfll_clk);
3218         if (IS_ERR(cld))
3219                 return PTR_ERR(cld);
3220
3221         clk_lock_save(dfll_clk, &flags);
3222         needed_out_min = get_output_min(cld);
3223         last_out_min = cld->lut_min;
3224
3225         if (last_out_min > needed_out_min)
3226                 ret = 1;
3227         else if (last_out_min < needed_out_min)
3228                 ret = -1;
3229
3230         if (tfloor)
3231                 *tfloor = get_mv(cld, needed_out_min);
3232
3233         clk_unlock_restore(dfll_clk, &flags);
3234         return ret;
3235 }
3236
3237 /*
3238  * Voltage clamping interface: set maximum and minimum voltage limits at the
3239  * same lowest safe (for current temperature and tuning range) level. Allows
3240  * temporary fix output voltage in closed loop mode. Clock rate target in this
3241  * state is ignored, DFLL rate is just determined by the fixed limits. Clamping
3242  * request is rejected if limits are already clamped, or DFLL is not in closed
3243  * loop mode.
3244  *
3245  * This interface is tailored for fixing voltage during SiMon grading; no other
3246  * s/w should use it.
3247  *
3248  * Return: fixed positive voltage if clamping request was successful, or
3249  * 0 if un-clamping request was successful, or -EPERM if request is rejected.
3250  *
3251  */
int tegra_dvfs_clamp_dfll_at_vmin(struct clk *dfll_clk, bool clamp)
{
	struct tegra_cl_dvfs *cld;
	unsigned long flags;
	int ret = -EPERM;	/* rejected unless handled under the lock */

	if (!dfll_clk)
		return -EINVAL;

	cld = tegra_dfll_get_cl_dvfs_data(dfll_clk);
	if (IS_ERR(cld))
		return PTR_ERR(cld);

	clk_lock_save(dfll_clk, &flags);
	if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
		if (clamp && !cld->v_limits.clamped) {
			/* Pin both limits at the lowest safe output level;
			 * clamping while already clamped keeps ret = -EPERM */
			u8 out_min = max(cld->lut_min, cld->force_out_min);
			set_output_limits(cld, out_min, out_min);
			cld->v_limits.clamped = true;
			ret = cld->v_limits.vmin;
		} else if (!clamp) {
			if (cld->v_limits.clamped) {
				/* Restore limits from the last rate request */
				cld->v_limits.clamped = false;
				set_cl_config(cld, &cld->last_req);
				set_request(cld, &cld->last_req);
			}
			/* un-clamping when not clamped is a successful no-op */
			ret = 0;
		}
	}
	clk_unlock_restore(dfll_clk, &flags);
	return ret;
}
3284 EXPORT_SYMBOL(tegra_dvfs_clamp_dfll_at_vmin);
3285
3286 /*
3287  * Get the new Vmin setting from external rail that is connected to same CPU
3288  * regulator.
3289  */
int tegra_dvfs_set_rail_relations_dfll_vmin(struct clk *dfll_clk,
						int rail_relations_vmin)
{
	struct tegra_cl_dvfs *cld;
	unsigned long flags;
	u8 rail_relations_out_min;

	if (!dfll_clk)
		return -EINVAL;

	/* get handle to cl_dvfs from dfll_clk */
	cld = tegra_dfll_get_cl_dvfs_data(dfll_clk);
	if (IS_ERR(cld))
		return PTR_ERR(cld);

	clk_lock_save(cld->dfll_clk, &flags);

	/* convert mv to output value of cl_dvfs */
	rail_relations_out_min = find_mv_out_cap(cld, rail_relations_vmin);

	if (cld->rail_relations_out_min != rail_relations_out_min) {
		cld->rail_relations_out_min = rail_relations_out_min;
		if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
			/* Re-apply last rate request to pick up new Vmin */
			tegra_cl_dvfs_request_rate(cld,
				tegra_cl_dvfs_request_get(cld));
			/* Delay to make sure new Vmin delivery started */
			udelay(2 * GET_SAMPLE_PERIOD(cld));
		}
	}
	clk_unlock_restore(cld->dfll_clk, &flags);
	return 0;
}
3322
3323 #ifdef CONFIG_DEBUG_FS
3324
/* "lock" node: reads 1 in closed loop mode; writing locks/unlocks DFLL */
static int lock_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP;
	return 0;
}
static int lock_set(void *data, u64 val)
{
	/* route through clock framework so dfll clock state stays in sync */
	struct clk *c = (struct clk *)data;
	return tegra_clk_cfg_ex(c, TEGRA_CLK_DFLL_LOCK, val);
}
DEFINE_SIMPLE_ATTRIBUTE(lock_fops, lock_get, lock_set, "%llu\n");
3337
/* "flags" node: read-only dump of platform data flags */
static int flags_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->p_data->flags;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(flags_fops, flags_get, NULL, "0x%llx\n");
3345
/*
 * "monitor" node: reads DFLL output rate as measured by the h/w frequency
 * monitor, scaled down by the current output skipper setting.
 */
static int monitor_get(void *data, u64 *val)
{
	u32 v, s;
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;

	clk_enable(cld->soc_clk);
	clk_lock_save(c, &flags);

	switch_monitor(cld, CL_DVFS_MONITOR_CTRL_FREQ);
	wait_data_new(cld, &v);
	filter_monitor_data(cld, &v); /* ignore error, use "some value" */

	v = GET_MONITORED_RATE(v, cld->ref_rate);
	s = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
	s = (s & CL_DVFS_FREQ_REQ_SCALE_MASK) >> CL_DVFS_FREQ_REQ_SCALE_SHIFT;
	/* apply skipper scale factor (s + 1)/256 to the monitored rate */
	*val = (u64)v * (s + 1) / 256;

	clk_unlock_restore(c, &flags);
	clk_disable(cld->soc_clk);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(monitor_fops, monitor_get, NULL, "%llu\n");
3370
/* "output_mv" node: reads current output voltage (mV) delivered to PMIC */
static int output_get(void *data, u64 *val)
{
	u32 v;
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;

	clk_enable(cld->soc_clk);
	clk_lock_save(c, &flags);

	v = cl_dvfs_get_output(cld);
	if (IS_ERR_VALUE(v))
		v = get_last_output(cld); /* ignore error, use "some value" */
	*val = get_mv(cld, v);

	clk_unlock_restore(c, &flags);
	clk_disable(cld->soc_clk);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(output_fops, output_get, NULL, "%llu\n");
3391
/* "vmax_mv" node: read-only current maximum voltage limit (mV) */
static int vmax_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->v_limits.vmax;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vmax_fops, vmax_get, NULL, "%llu\n");
3399
/* "vmin_mv" node: read-only current minimum voltage limit (mV) */
static int vmin_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->v_limits.vmin;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vmin_fops, vmin_get, NULL, "%llu\n");
3407
/* "tune_high_mv" node: read/update high tuning range voltage threshold */
static int tune_high_mv_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
	return 0;
}
static int tune_high_mv_set(void *data, u64 val)
{
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	clk_lock_save(c, &flags);

	/* Recompute output thresholds for the new voltage, then re-apply
	 * the last rate request so closed loop picks up the change */
	cld->safe_dvfs->dfll_data.tune_high_min_millivolts = val;
	cl_dvfs_init_output_thresholds(cld);
	if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
		tegra_cl_dvfs_request_rate(cld,
			tegra_cl_dvfs_request_get(cld));
	}

	clk_unlock_restore(c, &flags);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(tune_high_mv_fops, tune_high_mv_get, tune_high_mv_set,
			"%llu\n");
3434
/* "force_out_mv" node: read/force output voltage (mV); writing 0 releases */
static int fout_mv_get(void *data, u64 *val)
{
	u32 v;
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	v = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE) & OUT_MASK;
	*val = cld->p_data->vdd_map[v].reg_uV / 1000;
	return 0;
}
static int fout_mv_set(void *data, u64 val)
{
	u32 v;
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	clk_enable(cld->soc_clk);
	clk_lock_save(c, &flags);

	if (val) {
		/*
		 * NOTE(review): assumes find_vdd_map_entry() cannot return
		 * NULL on the non-i2c path - confirm before relying on it
		 */
		u8 out_v = is_i2c(cld) ? find_mv_out_cap(cld, (int)val) :
			find_vdd_map_entry(cld, (int)val, false)->reg_value;
		v = output_force_set_val(cld, out_v);
		if (!(v & CL_DVFS_OUTPUT_FORCE_ENABLE))
			output_force_enable(cld, v);
	} else {
		/* writing 0 releases the forced output */
		output_force_disable(cld);
	}

	clk_unlock_restore(c, &flags);
	clk_disable(cld->soc_clk);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fout_mv_fops, fout_mv_get, fout_mv_set, "%llu\n");
3468
/* "dvco_min" node: read-only minimum DVCO rate (Hz) */
static int fmin_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->dvco_rate_min;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(dvco_rate_min_fops, fmin_get, NULL, "%llu\n");
3476
/* "calibr_delay" node: read/update calibration timer delay (milliseconds;
 * stored internally in jiffies) */
static int calibr_delay_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = jiffies_to_msecs(cld->calibration_delay);
	return 0;
}
static int calibr_delay_set(void *data, u64 val)
{
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	clk_lock_save(c, &flags);
	cld->calibration_delay = msecs_to_jiffies(val);
	clk_unlock_restore(c, &flags);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(calibr_delay_fops, calibr_delay_get, calibr_delay_set,
			"%llu\n");
3496
/* "pmu_undershoot_gb" node: read/update PMU undershoot guard-band; setter
 * recalculates the forced output minimum under the clock lock */
static int undershoot_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->p_data->pmu_undershoot_gb;
	return 0;
}
static int undershoot_set(void *data, u64 val)
{
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	clk_lock_save(c, &flags);
	cld->p_data->pmu_undershoot_gb = val;
	cl_dvfs_set_force_out_min(cld);
	clk_unlock_restore(c, &flags);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(undershoot_fops, undershoot_get, undershoot_set,
			"%llu\n");
3517
3518 static int clamp_get(void *data, u64 *val)
3519 {
3520         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
3521         *val = cld->v_limits.clamped ? cld->v_limits.vmin : 0;
3522         return 0;
3523 }
3524 static int clamp_set(void *data, u64 val)
3525 {
3526         struct clk *dfll_clk = data;
3527         int ret = tegra_dvfs_clamp_dfll_at_vmin(dfll_clk, val);
3528         return ret < 0 ? ret : 0;
3529 }
3530 DEFINE_SIMPLE_ATTRIBUTE(clamp_fops, clamp_get, clamp_set, "%llu\n");
3531
/*
 * "profiles" node: dumps thermal caps, tuning-high profile, and thermal
 * floors (with calibrated dvco rates where available) for this instance.
 */
static int cl_profiles_show(struct seq_file *s, void *data)
{
	u8 v;
	int i, *trips;
	unsigned long r;
	struct clk *c = s->private;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	/* NOTE(review): assumes vmax_cdev/vmin_cdev are registered whenever
	 * the respective therm_*_num count is non-zero - confirm */
	seq_printf(s, "THERM CAPS:%s\n", cld->therm_caps_num ? "" : " NONE");
	for (i = 0; i < cld->therm_caps_num; i++) {
		v = cld->thermal_out_caps[i];
		trips = cld->safe_dvfs->dvfs_rail->vmax_cdev->trip_temperatures;
		seq_printf(s, "%3dC.. %5dmV\n", trips[i], get_mv(cld, v));
	}

	if (cld->tune_high_target_rate_min == ULONG_MAX) {
		seq_puts(s, "TUNE HIGH: NONE\n");
	} else {
		seq_puts(s, "TUNE HIGH:\n");
		seq_printf(s, "min    %5dmV%9lukHz\n",
			   get_mv(cld, cld->tune_high_out_min),
			   cld->tune_high_dvco_rate_min / 1000);
		seq_printf(s, "%-14s%9lukHz\n", "rate threshold",
			   cld->tune_high_target_rate_min / 1000);
	}

	seq_printf(s, "THERM FLOORS:%s\n", cld->therm_floors_num ? "" : " NONE");
	for (i = 0; i < cld->therm_floors_num; i++) {
		v = cld->thermal_out_floors[i];
		r = cld->dvco_rate_floors[i];
		trips = cld->safe_dvfs->dvfs_rail->vmin_cdev->trip_temperatures;
		seq_printf(s, " ..%3dC%5dmV%9lukHz%s\n",
			   trips[i], get_mv(cld, v),
			   (r ? : get_dvco_rate_below(cld, v)) / 1000,
			   r ? " (calibrated)"  : "");
	}
	/* entry past the last trip point: absolute vmin floor */
	r = cld->dvco_rate_floors[i];
	seq_printf(s, "  vmin:%5dmV%9lukHz%s\n", cld->out_map[0]->reg_uV / 1000,
		   (r ? : cld->safe_dvfs->dfll_data.out_rate_min) / 1000,
		   r ? " (calibrated)"  : "");

	return 0;
}
3575
/* seq_file boilerplate for the read-only "profiles" node */
static int cl_profiles_open(struct inode *inode, struct file *file)
{
	return single_open(file, cl_profiles_show, inode->i_private);
}

static const struct file_operations cl_profiles_fops = {
	.open		= cl_profiles_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3587
/*
 * "registers" node (read side): dumps the CL_DVFS register file grouped as
 * control, I2C/interrupt, optional idle-override, and LUT sections.
 */
static int cl_register_show(struct seq_file *s, void *data)
{
	u32 offs;
	struct clk *c = s->private;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	/* keep interface clock on for the duration of the register dump */
	clk_enable(cld->soc_clk);

	seq_printf(s, "CONTROL REGISTERS:\n");
	for (offs = 0; offs <= CL_DVFS_MONITOR_DATA; offs += 4)
		seq_printf(s, "[0x%02x] = 0x%08x\n",
			   offs, cl_dvfs_readl(cld, offs));

	seq_printf(s, "\nI2C and INTR REGISTERS:\n");
	for (offs = CL_DVFS_I2C_CFG; offs <= CL_DVFS_I2C_STS; offs += 4)
		seq_printf(s, "[0x%02x] = 0x%08x\n",
			   offs, cl_dvfs_readl(cld, offs));

	offs = CL_DVFS_INTR_STS;
	seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
	offs = CL_DVFS_INTR_EN;
	seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));

	if (cld->p_data->flags & TEGRA_CL_DVFS_HAS_IDLE_OVERRIDE) {
		seq_printf(s, "\nOVERRIDE REGISTERS:\n");
		offs = CL_DVFS_CC4_HVC;
		seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
			   cl_dvfs_readl(cld, offs));
	}

	seq_printf(s, "\nLUT:\n");
	for (offs = CL_DVFS_OUTPUT_LUT;
	     offs < CL_DVFS_OUTPUT_LUT + 4 * MAX_CL_DVFS_VOLTAGES;
	     offs += 4)
		seq_printf(s, "[0x%02x] = 0x%08x\n",
			   offs, cl_dvfs_readl(cld, offs));

	clk_disable(cld->soc_clk);
	return 0;
}
3628
/* seq_file open hook for the "registers" node */
static int cl_register_open(struct inode *inode, struct file *file)
{
	return single_open(file, cl_register_show, inode->i_private);
}
3633
3634 static ssize_t cl_register_write(struct file *file,
3635         const char __user *userbuf, size_t count, loff_t *ppos)
3636 {
3637         char buf[80];
3638         u32 offs;
3639         u32 val;
3640         struct clk *c = file->f_path.dentry->d_inode->i_private;
3641         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
3642
3643         if (sizeof(buf) <= count)
3644                 return -EINVAL;
3645
3646         if (copy_from_user(buf, userbuf, count))
3647                 return -EFAULT;
3648
3649         /* terminate buffer and trim - white spaces may be appended
3650          *  at the end when invoked from shell command line */
3651         buf[count] = '\0';
3652         strim(buf);
3653
3654         if (sscanf(buf, "[0x%x] = 0x%x", &offs, &val) != 2)
3655                 return -1;
3656
3657         if (offs >= CL_DVFS_APERTURE)
3658                 return -1;
3659
3660         clk_enable(cld->soc_clk);
3661         cl_dvfs_writel(cld, val, offs & (~0x3));
3662         clk_disable(cld->soc_clk);
3663         return count;
3664 }
3665
/* file_operations for the read/write "registers" node */
static const struct file_operations cl_register_fops = {
	.open		= cl_register_open,
	.read		= seq_read,
	.write		= cl_register_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3673
3674 int __init tegra_cl_dvfs_debug_init(struct clk *dfll_clk)
3675 {
3676         struct dentry *cl_dvfs_dentry;
3677
3678         if (!dfll_clk || !dfll_clk->dent || (dfll_clk->state == UNINITIALIZED))
3679                 return 0;
3680
3681         if (!debugfs_create_file("lock", S_IRUGO | S_IWUSR,
3682                 dfll_clk->dent, dfll_clk, &lock_fops))
3683                 goto err_out;
3684
3685         cl_dvfs_dentry = debugfs_create_dir("cl_dvfs", dfll_clk->dent);
3686         if (!cl_dvfs_dentry)
3687                 goto err_out;
3688
3689         if (!debugfs_create_file("flags", S_IRUGO,
3690                 cl_dvfs_dentry, dfll_clk, &flags_fops))
3691                 goto err_out;
3692
3693         if (!debugfs_create_file("monitor", S_IRUGO,
3694                 cl_dvfs_dentry, dfll_clk, &monitor_fops))
3695                 goto err_out;
3696
3697         if (!debugfs_create_file("output_mv", S_IRUGO,
3698                 cl_dvfs_dentry, dfll_clk, &output_fops))
3699                 goto err_out;
3700
3701         if (!debugfs_create_file("vmax_mv", S_IRUGO,
3702                 cl_dvfs_dentry, dfll_clk, &vmax_fops))
3703                 goto err_out;
3704
3705         if (!debugfs_create_file("vmin_mv", S_IRUGO,
3706                 cl_dvfs_dentry, dfll_clk, &vmin_fops))
3707                 goto err_out;
3708
3709         if (!debugfs_create_file("tune_high_mv", S_IRUGO | S_IWUSR,
3710                 cl_dvfs_dentry, dfll_clk, &tune_high_mv_fops))
3711                 goto err_out;
3712
3713         if (!debugfs_create_file("force_out_mv", S_IRUGO,
3714                 cl_dvfs_dentry, dfll_clk, &fout_mv_fops))
3715                 goto err_out;
3716
3717         if (!debugfs_create_file("dvco_min", S_IRUGO,
3718                 cl_dvfs_dentry, dfll_clk, &dvco_rate_min_fops))
3719                 goto err_out;
3720
3721         if (!debugfs_create_file("calibr_delay", S_IRUGO,
3722                 cl_dvfs_dentry, dfll_clk, &calibr_delay_fops))
3723                 goto err_out;
3724
3725         if (!debugfs_create_file("pmu_undershoot_gb", S_IRUGO,
3726                 cl_dvfs_dentry, dfll_clk, &undershoot_fops))
3727                 goto err_out;
3728
3729         if (!debugfs_create_file("clamp_at_min", S_IRUGO | S_IWUSR,
3730                 cl_dvfs_dentry, dfll_clk, &clamp_fops))
3731                 goto err_out;
3732
3733         if (!debugfs_create_file("profiles", S_IRUGO,
3734                 cl_dvfs_dentry, dfll_clk, &cl_profiles_fops))
3735                 goto err_out;
3736
3737         if (!debugfs_create_file("registers", S_IRUGO | S_IWUSR,
3738                 cl_dvfs_dentry, dfll_clk, &cl_register_fops))
3739                 goto err_out;
3740
3741         return 0;
3742
3743 err_out:
3744         debugfs_remove_recursive(dfll_clk->dent);
3745         return -ENOMEM;
3746 }
3747 #endif