3 * Copyright (C) 2010 Google, Inc.
6 * Colin Cross <ccross@google.com>
8 * Copyright (C) 2010-2014 NVIDIA CORPORATION. All rights reserved.
10 * This software is licensed under the terms of the GNU General Public
11 * License version 2, as published by the Free Software Foundation, and
12 * may be copied, distributed, and modified under those terms.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
21 #include <linux/kernel.h>
22 #include <linux/clk.h>
23 #include <linux/clkdev.h>
24 #include <linux/debugfs.h>
25 #include <linux/init.h>
26 #include <linux/list.h>
27 #include <linux/list_sort.h>
28 #include <linux/module.h>
29 #include <linux/regulator/consumer.h>
30 #include <linux/seq_file.h>
31 #include <linux/slab.h>
32 #include <linux/suspend.h>
33 #include <linux/delay.h>
34 #include <linux/clk/tegra.h>
35 #include <linux/reboot.h>
36 #include <linux/clk/tegra.h>
37 #include <linux/tegra-soc.h>
42 #include "tegra_cl_dvfs.h"
/* NOTE(review): this listing is garbled -- original-file line numbers are
 * baked into each line and blank lines were dropped. Code bytes untouched. */
/* Bin granularity (uV) for rail time-at-voltage statistics. */
44 #define DVFS_RAIL_STATS_BIN 12500
/* Well-known rails, resolved by reg_id in tegra_dvfs_init_rails(). */
46 struct dvfs_rail *tegra_cpu_rail;
47 struct dvfs_rail *tegra_core_rail;
48 struct dvfs_rail *tegra_gpu_rail;
/* All registered rails; traversal/update guarded by dvfs_lock. */
50 static LIST_HEAD(dvfs_rail_list);
51 static DEFINE_MUTEX(dvfs_lock);
/* Serializes rail enable/disable against each other. */
52 static DEFINE_MUTEX(rail_disable_lock);
/* Forward declaration: rail update recurses through rail relationships. */
54 static int dvfs_rail_update(struct dvfs_rail *rail);
56 static inline int tegra_dvfs_rail_get_disable_level(struct dvfs_rail *rail)
58 return rail->disable_millivolts ? : rail->nominal_millivolts;
61 static inline int tegra_dvfs_rail_get_suspend_level(struct dvfs_rail *rail)
63 return rail->suspend_millivolts ? : rail->nominal_millivolts;
66 void tegra_dvfs_add_relationships(struct dvfs_relationship *rels, int n)
69 struct dvfs_relationship *rel;
71 mutex_lock(&dvfs_lock);
73 for (i = 0; i < n; i++) {
75 list_add_tail(&rel->from_node, &rel->to->relationships_from);
76 list_add_tail(&rel->to_node, &rel->from->relationships_to);
78 /* Overriding dependent rail below nominal may not be safe */
79 rel->to->min_override_millivolts = rel->to->nominal_millivolts;
82 mutex_unlock(&dvfs_lock);
85 /* Make sure there is a matching cooling device for thermal limit profile. */
86 static void dvfs_validate_cdevs(struct dvfs_rail *rail)
88 if (!rail->therm_mv_caps != !rail->therm_mv_caps_num) {
89 rail->therm_mv_caps_num = 0;
90 rail->therm_mv_caps = NULL;
91 WARN(1, "%s: not matching thermal caps/num\n", rail->reg_id);
94 if (rail->therm_mv_caps && !rail->vmax_cdev)
95 WARN(1, "%s: missing vmax cooling device\n", rail->reg_id);
97 if (!rail->therm_mv_floors != !rail->therm_mv_floors_num) {
98 rail->therm_mv_floors_num = 0;
99 rail->therm_mv_floors = NULL;
100 WARN(1, "%s: not matching thermal floors/num\n", rail->reg_id);
103 if (rail->therm_mv_floors && !rail->vmin_cdev)
104 WARN(1, "%s: missing vmin cooling device\n", rail->reg_id);
106 /* Limit override range to maximum floor */
107 if (rail->therm_mv_floors)
108 rail->min_override_millivolts = rail->therm_mv_floors[0];
110 /* Only GPU thermal dvfs is supported */
111 if (rail->vts_cdev && (rail != tegra_gpu_rail)) {
112 rail->vts_cdev = NULL;
113 WARN(1, "%s: thermal dvfs is not supported\n", rail->reg_id);
116 if (!rail->simon_vmin_offsets != !rail->simon_vmin_offs_num) {
117 rail->simon_vmin_offs_num = 0;
118 rail->simon_vmin_offsets = NULL;
119 WARN(1, "%s: not matching simon offsets/num\n", rail->reg_id);
123 int tegra_dvfs_init_rails(struct dvfs_rail *rails[], int n)
127 mutex_lock(&dvfs_lock);
129 for (i = 0; i < n; i++) {
130 INIT_LIST_HEAD(&rails[i]->dvfs);
131 INIT_LIST_HEAD(&rails[i]->relationships_from);
132 INIT_LIST_HEAD(&rails[i]->relationships_to);
134 mv = rails[i]->nominal_millivolts;
135 if (rails[i]->boot_millivolts > mv)
136 WARN(1, "%s: boot voltage %d above nominal %d\n",
137 rails[i]->reg_id, rails[i]->boot_millivolts, mv);
138 if (rails[i]->disable_millivolts > mv)
139 rails[i]->disable_millivolts = mv;
140 if (rails[i]->suspend_millivolts > mv)
141 rails[i]->suspend_millivolts = mv;
143 mv = tegra_dvfs_rail_get_boot_level(rails[i]);
144 rails[i]->millivolts = mv;
145 rails[i]->new_millivolts = mv;
147 rails[i]->step = rails[i]->max_millivolts;
148 if (!rails[i]->step_up)
149 rails[i]->step_up = rails[i]->step;
151 list_add_tail(&rails[i]->node, &dvfs_rail_list);
153 if (!strcmp("vdd_cpu", rails[i]->reg_id))
154 tegra_cpu_rail = rails[i];
155 else if (!strcmp("vdd_gpu", rails[i]->reg_id))
156 tegra_gpu_rail = rails[i];
157 else if (!strcmp("vdd_core", rails[i]->reg_id))
158 tegra_core_rail = rails[i];
160 dvfs_validate_cdevs(rails[i]);
163 mutex_unlock(&dvfs_lock);
168 static int dvfs_solve_relationship(struct dvfs_relationship *rel)
170 return rel->solve(rel->from, rel->to);
173 /* rail statistic - called during rail init, or under dfs_lock, or with
174 CPU0 only on-line, and interrupts disabled */
175 static void dvfs_rail_stats_init(struct dvfs_rail *rail, int millivolts)
177 int dvfs_rail_stats_range;
179 if (!rail->stats.bin_uV)
180 rail->stats.bin_uV = DVFS_RAIL_STATS_BIN;
182 dvfs_rail_stats_range =
183 (DVFS_RAIL_STATS_TOP_BIN - 1) * rail->stats.bin_uV / 1000;
185 rail->stats.last_update = ktime_get();
186 if (millivolts >= rail->min_millivolts) {
187 int i = 1 + (2 * (millivolts - rail->min_millivolts) * 1000 +
188 rail->stats.bin_uV) / (2 * rail->stats.bin_uV);
189 rail->stats.last_index = min(i, DVFS_RAIL_STATS_TOP_BIN);
192 if (rail->max_millivolts >
193 rail->min_millivolts + dvfs_rail_stats_range)
194 pr_warn("tegra_dvfs: %s: stats above %d mV will be squashed\n",
196 rail->min_millivolts + dvfs_rail_stats_range);
199 static void dvfs_rail_stats_update(
200 struct dvfs_rail *rail, int millivolts, ktime_t now)
202 rail->stats.time_at_mv[rail->stats.last_index] = ktime_add(
203 rail->stats.time_at_mv[rail->stats.last_index], ktime_sub(
204 now, rail->stats.last_update));
205 rail->stats.last_update = now;
210 if (millivolts >= rail->min_millivolts) {
211 int i = 1 + (2 * (millivolts - rail->min_millivolts) * 1000 +
212 rail->stats.bin_uV) / (2 * rail->stats.bin_uV);
213 rail->stats.last_index = min(i, DVFS_RAIL_STATS_TOP_BIN);
214 } else if (millivolts == 0)
215 rail->stats.last_index = 0;
218 static void dvfs_rail_stats_pause(struct dvfs_rail *rail,
219 ktime_t delta, bool on)
221 int i = on ? rail->stats.last_index : 0;
222 rail->stats.time_at_mv[i] = ktime_add(rail->stats.time_at_mv[i], delta);
225 void tegra_dvfs_rail_off(struct dvfs_rail *rail, ktime_t now)
228 dvfs_rail_stats_update(rail, 0, now);
229 rail->stats.off = true;
233 void tegra_dvfs_rail_on(struct dvfs_rail *rail, ktime_t now)
236 rail->stats.off = false;
237 dvfs_rail_stats_update(rail, rail->millivolts, now);
241 void tegra_dvfs_rail_pause(struct dvfs_rail *rail, ktime_t delta, bool on)
244 dvfs_rail_stats_pause(rail, delta, on);
247 static int dvfs_rail_set_voltage_reg(struct dvfs_rail *rail, int millivolts)
252 * safely return success for low voltage requests on fixed regulator
253 * (higher requests will go through and fail, as they should)
255 if (rail->fixed_millivolts && (millivolts <= rail->fixed_millivolts))
258 rail->updating = true;
259 rail->reg_max_millivolts = rail->reg_max_millivolts ==
260 rail->max_millivolts ?
261 rail->max_millivolts + 1 : rail->max_millivolts;
262 ret = regulator_set_voltage(rail->reg,
264 rail->reg_max_millivolts * 1000);
265 rail->updating = false;
267 pr_debug("%s: request_mV [%d, %d] from %s regulator (%d)\n", __func__,
268 millivolts, rail->reg_max_millivolts, rail->reg_id, ret);
273 /* Sets the voltage on a dvfs rail to a specific value, and updates any
274 * rails that depend on this rail. */
/* NOTE(review): garbled listing -- local declarations (ret, i, steps, step,
 * offset, jmp_to_zero), the early returns after lines 285/300, the step/offset
 * selection else-branch, and error-path closers are missing here. */
275 static int dvfs_rail_set_voltage(struct dvfs_rail *rail, int millivolts)
278 struct dvfs_relationship *rel;
/* no-op when already at the requested level */
285 if (millivolts == rail->millivolts)
/* ramp-rate limit: step_up when raising voltage (step_down branch dropped) */
291 if (millivolts > rail->millivolts) {
292 step = rail->step_up;
299 /* Voltage change is always happening in DFLL mode */
300 if (rail->disabled && !rail->dfll_mode)
303 rail->resolving_to = true;
304 jmp_to_zero = rail->jmp_to_zero &&
305 ((millivolts == 0) || (rail->millivolts == 0));
/* single-step transitions: to/from zero, or h/w-managed DFLL mode */
307 if (jmp_to_zero || rail->dfll_mode)
310 steps = DIV_ROUND_UP(abs(millivolts - rail->millivolts), step);
312 for (i = 0; i < steps; i++) {
314 rail->new_millivolts = rail->millivolts + offset;
316 rail->new_millivolts = millivolts;
318 /* Before changing the voltage, tell each rail that depends
319 * on this rail that the voltage will change.
320 * This rail will be the "from" rail in the relationship,
321 * the rail that depends on this rail will be the "to" rail.
322 * from->millivolts will be the old voltage
323 * from->new_millivolts will be the new voltage */
324 list_for_each_entry(rel, &rail->relationships_to, to_node) {
325 ret = dvfs_rail_update(rel->to);
331 * DFLL adjusts voltage automatically - don't touch regulator,
332 * but update stats, anyway.
334 if (!rail->dfll_mode) {
335 ret = dvfs_rail_set_voltage_reg(rail,
336 rail->new_millivolts);
338 pr_err("Failed to set dvfs regulator %s\n",
344 rail->millivolts = rail->new_millivolts;
345 dvfs_rail_stats_update(rail, rail->millivolts, ktime_get());
347 /* After changing the voltage, tell each rail that depends
348 * on this rail that the voltage has changed.
349 * from->millivolts and from->new_millivolts will be the
351 list_for_each_entry(rel, &rail->relationships_to, to_node) {
352 ret = dvfs_rail_update(rel->to);
/* sanity check: loop must land exactly on the target */
358 if (unlikely(rail->millivolts != millivolts)) {
359 pr_err("%s: rail didn't reach target %d in %d steps (%d)\n",
360 __func__, millivolts, steps, rail->millivolts);
/* clear re-entrancy guard on all exits (out label presumed dropped) */
365 rail->resolving_to = false;
369 /* Determine the minimum valid voltage for a rail, taking into account
370 * the dvfs clocks and any rails that this rail depends on. Calls
371 * dvfs_rail_set_voltage with the new voltage, which will call
372 * dvfs_rail_update on any rails that depend on this rail. */
373 static inline int dvfs_rail_apply_limits(struct dvfs_rail *rail, int millivolts)
375 int min_mv = rail->min_millivolts;
376 min_mv = max(min_mv, tegra_dvfs_rail_get_thermal_floor(rail));
378 if (rail->override_millivolts) {
379 millivolts = rail->override_millivolts;
381 /* apply offset and clip up to pll mode fixed mv */
382 millivolts += rail->dbg_mv_offs;
383 if (!rail->dfll_mode && rail->fixed_millivolts &&
384 (millivolts < rail->fixed_millivolts))
385 millivolts = rail->fixed_millivolts;
388 if (millivolts < min_mv)
394 static int dvfs_rail_update(struct dvfs_rail *rail)
/* NOTE(review): garbled listing -- local declarations (ret, d, millivolts,
 * steps), the early-return bodies under the three comments below, and the
 * loop-exit/return tail are missing from this chunk. */
398 struct dvfs_relationship *rel;
402 /* if dvfs is suspended, return and handle it during resume */
406 /* if regulators are not connected yet, return and handle it later */
410 /* if no clock has requested voltage since boot, defer update */
414 /* if rail update is entered while resolving circular dependencies,
416 if (rail->resolving_to)
419 /* Find the maximum voltage requested by any clock */
420 list_for_each_entry(d, &rail->dvfs, reg_node)
421 millivolts = max(d->cur_millivolts, millivolts);
423 /* Apply offset and min/max limits if any clock is requesting voltage */
425 millivolts = dvfs_rail_apply_limits(rail, millivolts);
426 /* Keep current voltage if regulator is to be disabled via explicitly */
427 else if (rail->in_band_pm)
429 /* Keep current voltage if regulator must not be disabled at run time */
430 else if (!rail->jmp_to_zero) {
431 WARN(1, "%s cannot be turned off by dvfs\n", rail->reg_id);
434 /* else: fall thru if regulator is turned off by side band signaling */
436 /* retry update if limited by from-relationship to account for
437 circular dependencies */
438 steps = DIV_ROUND_UP(abs(millivolts - rail->millivolts), rail->step);
439 for (; steps >= 0; steps--) {
440 rail->new_millivolts = millivolts;
442 /* Check any rails that this rail depends on */
443 list_for_each_entry(rel, &rail->relationships_from, from_node)
444 rail->new_millivolts = dvfs_solve_relationship(rel);
/* converged: no further voltage movement needed (break presumed dropped) */
446 if (rail->new_millivolts == rail->millivolts)
449 ret = dvfs_rail_set_voltage(rail, rail->new_millivolts);
/* NOTE(review): garbled listing -- locals (d, v, i, reg_id buffer), the
 * IS_ERR(reg) check after regulator_get, several pr_err argument lines and
 * closing braces, and the final "return reg;" are missing from this chunk.
 * Also note the "ivalid" typo in the 472 format string (runtime string --
 * left untouched here). */
455 static struct regulator *get_fixed_regulator(struct dvfs_rail *rail)
457 struct regulator *reg;
461 unsigned long dfll_boost;
/* look up "<reg_id>_fixed" supply */
463 strcpy(reg_id, rail->reg_id);
464 strcat(reg_id, "_fixed");
465 reg = regulator_get(NULL, reg_id);
469 v = regulator_get_voltage(reg) / 1000;
470 if ((v < rail->min_millivolts) || (v > rail->nominal_millivolts) ||
471 (rail->therm_mv_floors && v < rail->therm_mv_floors[0])) {
472 pr_err("tegra_dvfs: ivalid fixed %s voltage %d\n",
474 return ERR_PTR(-EINVAL);
478 * Only fixed at nominal voltage vdd_core regulator is allowed, same
479 * is true for cpu rail if dfll mode is not supported at all. No thermal
480 * capping can be implemented in this case.
482 if (!IS_ENABLED(CONFIG_ARCH_TEGRA_HAS_CL_DVFS) ||
483 (rail != tegra_cpu_rail)) {
484 if (v != rail->nominal_millivolts) {
485 pr_err("tegra_dvfs: %s fixed below nominal at %d\n",
487 return ERR_PTR(-EINVAL);
489 if (rail->therm_mv_caps) {
490 pr_err("tegra_dvfs: cannot fix %s with thermal caps\n",
492 return ERR_PTR(-ENOSYS);
498 * If dfll mode is supported, fixed vdd_cpu regulator may be below
499 * nominal in pll mode - maximum cpu rate in pll mode is limited
500 * respectively. Regulator is required to allow automatic scaling
503 * FIXME: platform data to explicitly identify such "hybrid" regulator?
/* find first dvfs level above the fixed voltage */
505 d = list_first_entry(&rail->dvfs, struct dvfs, reg_node);
506 for (i = 0; i < d->num_freqs; i++) {
507 if (d->millivolts[i] > v)
512 pr_err("tegra_dvfs: %s fixed at %d: too low for min rate\n",
514 return ERR_PTR(-EINVAL);
/* extra boost DFLL must cover beyond the pll-mode rate cap */
517 dfll_boost = (d->freqs[d->num_freqs - 1] - d->freqs[i - 1]);
518 if (d->dfll_data.max_rate_boost < dfll_boost)
519 d->dfll_data.max_rate_boost = dfll_boost;
521 rail->fixed_millivolts = v;
/* NOTE(review): garbled listing -- locals (v), IS_ERR checks after each
 * regulator call, rail->reg assignment, error returns and the final
 * "return 0;" are missing from this chunk. Also note the malformed format
 * string at 547: "\n" is placed before ", err %d" (runtime string -- left
 * untouched here; should read "...regulator %s, err %d\n"). */
525 static int dvfs_rail_connect_to_regulator(struct dvfs_rail *rail)
527 struct regulator *reg;
/* try the real supply first, then fall back to "<reg_id>_fixed" */
531 reg = regulator_get(NULL, rail->reg_id);
533 reg = get_fixed_regulator(rail);
535 pr_err("tegra_dvfs: failed to connect %s rail\n",
539 pr_info("tegra_dvfs: %s rail is fixed in pll mode\n",
545 v = regulator_enable(rail->reg);
547 pr_err("tegra_dvfs: failed on enabling regulator %s\n, err %d",
552 v = regulator_get_voltage(rail->reg);
554 pr_err("tegra_dvfs: failed initial get %s voltage\n",
/* seed DVFS state and statistics from the measured boot voltage */
558 rail->millivolts = v / 1000;
559 rail->new_millivolts = rail->millivolts;
560 dvfs_rail_stats_init(rail, rail->millivolts);
562 if (rail->boot_millivolts &&
563 (rail->boot_millivolts != rail->millivolts)) {
564 WARN(1, "%s boot voltage %d does not match expected %d\n",
565 rail->reg_id, rail->millivolts, rail->boot_millivolts);
566 rail->boot_millivolts = rail->millivolts;
569 pr_info("tegra_dvfs: %s connected to regulator\n", rail->reg_id);
573 static inline unsigned long *dvfs_get_freqs(struct dvfs *d)
575 return d->alt_freqs && d->use_alt_freqs ? d->alt_freqs : &d->freqs[0];
578 static inline const int *dvfs_get_millivolts(struct dvfs *d, unsigned long rate)
580 if (tegra_dvfs_is_dfll_scale(d, rate))
581 return d->dfll_millivolts;
583 return tegra_dvfs_get_millivolts_pll(d);
/* NOTE(review): garbled listing -- the "static int" return-type line, loop
 * index declaration/init, rate==0 branch header, error returns (-EINVAL)
 * after each detach check, cur_rate assignment and final "return ret;" are
 * missing from this chunk. Also note the 648 format string says "disable
 * limit" in the *suspend* branch -- likely copy/paste; runtime string left
 * untouched here. */
587 __tegra_dvfs_set_rate(struct dvfs *d, unsigned long rate)
590 int ret, mv, detach_mv;
591 unsigned long *freqs = dvfs_get_freqs(d);
592 const int *millivolts = dvfs_get_millivolts(d, rate);
594 if (freqs == NULL || millivolts == NULL)
597 /* On entry to dfll range limit 1st step to range bottom (full ramp of
598 voltage/rate is completed automatically in dfll mode) */
599 if (tegra_dvfs_is_dfll_range_entry(d, rate))
600 rate = d->dfll_data.use_dfll_rate_min;
602 if (rate > freqs[d->num_freqs - 1]) {
603 pr_warn("tegra_dvfs: rate %lu too high for dvfs on %s\n", rate,
/* rate == 0: clock disabled, release the voltage request */
609 d->cur_millivolts = 0;
611 * For single clock GPU rail keep DVFS rate unchanged when clock
612 * is disabled. Rail is turned off explicitly, in any case, but
613 * with non-zero rate voltage level at regulator is updated when
614 * temperature is changes while rail is off.
616 if (d->dvfs_rail == tegra_gpu_rail)
/* find lowest ladder entry whose frequency covers the request */
621 while (i < d->num_freqs && rate > freqs[i])
626 if ((d->max_millivolts) && (mv > d->max_millivolts)) {
627 pr_warn("tegra_dvfs: voltage %d too high for dvfs on %s\n",
/* refuse voltages above the detach level for unconnected/disabled/
 * suspended/overridden rails */
632 detach_mv = tegra_dvfs_rail_get_boot_level(d->dvfs_rail);
633 if (!d->dvfs_rail->reg && (mv > detach_mv)) {
634 pr_warn("%s: %s: voltage %d above boot limit %d\n",
635 __func__, d->clk_name, mv, detach_mv);
639 detach_mv = tegra_dvfs_rail_get_disable_level(d->dvfs_rail);
640 if (d->dvfs_rail->disabled && (mv > detach_mv)) {
641 pr_warn("%s: %s: voltage %d above disable limit %d\n",
642 __func__, d->clk_name, mv, detach_mv);
646 detach_mv = tegra_dvfs_rail_get_suspend_level(d->dvfs_rail);
647 if (d->dvfs_rail->suspended && (mv > detach_mv)) {
648 pr_warn("%s: %s: voltage %d above disable limit %d\n",
649 __func__, d->clk_name, mv, detach_mv);
653 detach_mv = d->dvfs_rail->override_millivolts;
654 if (detach_mv && (mv > detach_mv)) {
655 pr_warn("%s: %s: voltage %d above override level %d\n",
656 __func__, d->clk_name, mv, detach_mv);
659 d->cur_millivolts = mv;
664 d->dvfs_rail->rate_set = true;
665 ret = dvfs_rail_update(d->dvfs_rail);
667 pr_err("Failed to set regulator %s for clock %s to %d mV\n",
668 d->dvfs_rail->reg_id, d->clk_name, d->cur_millivolts);
674 * Some clocks may have alternative frequency ladder that provides lower minimum
675 * voltage at the same rate (or complimentary: higher maximum rate at the same
676 * voltage). Interfaces below allows dvfs clients to install such ladder, and
677 * switch between primary and alternative frequencies in flight.
679 static int alt_freqs_validate(struct dvfs *d, unsigned long *alt_freqs)
684 for (i = 0; i < d->num_freqs; i++) {
685 if (d->freqs[i] > alt_freqs[i]) {
686 pr_err("%s: Invalid alt freqs for %s\n",
687 __func__, d->clk_name);
695 int tegra_dvfs_alt_freqs_install(struct dvfs *d, unsigned long *alt_freqs)
699 mutex_lock(&dvfs_lock);
701 ret = alt_freqs_validate(d, alt_freqs);
703 d->alt_freqs = alt_freqs;
705 mutex_unlock(&dvfs_lock);
709 int tegra_dvfs_use_alt_freqs_on_clk(struct clk *c, bool use_alt_freq)
712 struct dvfs *d = c->dvfs;
714 mutex_lock(&dvfs_lock);
716 if (d && d->alt_freqs) {
718 if (d->use_alt_freqs != use_alt_freq) {
719 d->use_alt_freqs = use_alt_freq;
720 ret = __tegra_dvfs_set_rate(d, d->cur_rate);
724 mutex_unlock(&dvfs_lock);
728 int tegra_dvfs_alt_freqs_set(struct dvfs *d, unsigned long *alt_freqs)
732 mutex_lock(&dvfs_lock);
734 if (d->alt_freqs != alt_freqs) {
735 ret = alt_freqs_validate(d, alt_freqs);
737 d->use_alt_freqs = !!alt_freqs;
738 d->alt_freqs = alt_freqs;
739 ret = __tegra_dvfs_set_rate(d, d->cur_rate);
743 mutex_unlock(&dvfs_lock);
748 * Some clocks may need run-time voltage ladder replacement. Allow it only if
749 * peak voltages across all possible ladders are specified, and new voltages
750 * do not violate peaks.
752 static int new_voltages_validate(struct dvfs *d, const int *new_millivolts,
753 int freqs_num, int ranges_num)
755 const int *millivolts;
756 int freq_idx, therm_idx;
758 for (therm_idx = 0; therm_idx < ranges_num; therm_idx++) {
759 millivolts = new_millivolts + therm_idx * MAX_DVFS_FREQS;
760 for (freq_idx = 0; freq_idx < freqs_num; freq_idx++) {
761 if (millivolts[freq_idx] >
762 d->peak_millivolts[freq_idx]) {
763 pr_err("%s: Invalid new voltages for %s\n",
764 __func__, d->clk_name);
/* NOTE(review): garbled listing -- locals (ret, ranges_num), the error exits
 * after the peak_millivolts check and validation failure, the thermal-range
 * count initialization, and the unlock/return tail are missing here. */
772 int tegra_dvfs_replace_voltage_table(struct dvfs *d, const int *new_millivolts)
777 mutex_lock(&dvfs_lock);
/* replacement is only allowed when peak voltages were provided */
779 if (!d->peak_millivolts) {
/* one range per thermal trip when thermal DVFS is active */
784 if (d->therm_dvfs && d->dvfs_rail->vts_cdev)
785 ranges_num += d->dvfs_rail->vts_cdev->trip_temperatures_num;
787 if (new_voltages_validate(d, new_millivolts,
788 d->num_freqs, ranges_num)) {
/* commit the new ladder and re-apply DVFS at the current rate */
793 d->millivolts = new_millivolts;
794 if (__tegra_dvfs_set_rate(d, d->cur_rate))
797 mutex_unlock(&dvfs_lock);
802 * Using non alt frequencies always results in peak voltage
803 * (enforced by alt_freqs_validate())
805 static int predict_non_alt_millivolts(struct clk *c, const int *millivolts,
813 for (i = 0; i < c->dvfs->num_freqs; i++) {
814 if (rate <= c->dvfs->freqs[i])
818 if (i == c->dvfs->num_freqs)
821 return millivolts[i];
824 static int predict_millivolts(struct clk *c, const int *millivolts,
828 * Predicted voltage can not be used across the switch to alternative
829 * frequency limits. For now, just fail the call for clock that has
830 * alternative limits initialized.
832 if (c->dvfs->alt_freqs)
835 return predict_non_alt_millivolts(c, millivolts, rate);
838 int tegra_dvfs_predict_millivolts(struct clk *c, unsigned long rate)
840 const int *millivolts;
842 if (!rate || !c->dvfs)
845 millivolts = tegra_dvfs_is_dfll_range(c->dvfs, rate) ?
846 c->dvfs->dfll_millivolts :
847 tegra_dvfs_get_millivolts_pll(c->dvfs);
848 return predict_millivolts(c, millivolts, rate);
850 EXPORT_SYMBOL(tegra_dvfs_predict_millivolts);
852 int tegra_dvfs_predict_peak_millivolts(struct clk *c, unsigned long rate)
855 const int *millivolts;
857 if (!rate || !c->dvfs)
860 millivolts = tegra_dvfs_is_dfll_range(c->dvfs, rate) ?
861 c->dvfs->dfll_millivolts : c->dvfs->peak_millivolts ? :
862 tegra_dvfs_get_millivolts_pll(c->dvfs);
864 mv = predict_non_alt_millivolts(c, millivolts, rate);
868 if (c->dvfs->dvfs_rail->therm_mv_floors)
869 mv = max(mv, c->dvfs->dvfs_rail->therm_mv_floors[0]);
870 if (c->dvfs->dvfs_rail->therm_mv_dfll_floors)
871 mv = max(mv, c->dvfs->dvfs_rail->therm_mv_dfll_floors[0]);
875 const int *tegra_dvfs_get_millivolts_pll(struct dvfs *d)
878 int therm_idx = d->dvfs_rail->therm_scale_idx;
879 return d->millivolts + therm_idx * MAX_DVFS_FREQS;
881 return d->millivolts;
884 int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
892 suspended = timekeeping_suspended && c->dvfs->dvfs_rail->suspended;
894 if (mutex_is_locked(&dvfs_lock))
895 WARN(1, "%s: Entered suspend with DVFS mutex locked\n",
898 mutex_lock(&dvfs_lock);
901 ret = __tegra_dvfs_set_rate(c->dvfs, rate);
904 mutex_unlock(&dvfs_lock);
908 EXPORT_SYMBOL(tegra_dvfs_set_rate);
910 int tegra_dvfs_get_freqs(struct clk *c, unsigned long **freqs, int *num_freqs)
915 if (c->dvfs->alt_freqs)
918 *num_freqs = c->dvfs->num_freqs;
919 *freqs = c->dvfs->freqs;
923 EXPORT_SYMBOL(tegra_dvfs_get_freqs);
925 static inline int dvfs_rail_get_override_floor(struct dvfs_rail *rail)
927 return rail->override_unresolved ? rail->nominal_millivolts :
928 rail->min_override_millivolts;
/* Core-rail override support; rail_override_lock serializes override level
 * changes against override-floor resolution. */
931 #ifdef CONFIG_TEGRA_VDD_CORE_OVERRIDE
932 static DEFINE_MUTEX(rail_override_lock);
/* NOTE(review): garbled listing -- NULL-rail guard, error returns
 * (-EINVAL/-EPERM), goto labels and the final "return ret;" are missing
 * from this chunk. */
934 static int dvfs_override_core_voltage(int override_mv)
936 int ret, floor, ceiling;
937 struct dvfs_rail *rail = tegra_core_rail;
/* fixed regulator cannot be overridden */
942 if (rail->fixed_millivolts)
945 mutex_lock(&rail_override_lock);
947 floor = dvfs_rail_get_override_floor(rail);
948 ceiling = rail->nominal_millivolts;
949 if (override_mv && ((override_mv < floor) || (override_mv > ceiling))) {
950 pr_err("%s: override level %d outside the range [%d...%d]\n",
951 __func__, override_mv, floor, ceiling);
952 mutex_unlock(&rail_override_lock);
/* no change requested */
956 if (override_mv == rail->override_millivolts) {
/* cap shared-bus rates before forcing the rail level */
962 ret = tegra_dvfs_override_core_cap_apply(override_mv);
964 pr_err("%s: failed to set cap for override level %d\n",
965 __func__, override_mv);
970 mutex_lock(&dvfs_lock);
971 if (rail->disabled || rail->suspended) {
972 pr_err("%s: cannot scale %s rail\n", __func__,
973 rail->disabled ? "disabled" : "suspended");
976 mutex_unlock(&dvfs_lock);
/* apply, and roll back the override on failure */
980 rail->override_millivolts = override_mv;
981 ret = dvfs_rail_update(rail);
983 pr_err("%s: failed to set override level %d\n",
984 __func__, override_mv);
985 rail->override_millivolts = 0;
986 dvfs_rail_update(rail);
989 mutex_unlock(&dvfs_lock);
/* drop the cap again when clearing the override or after failure */
991 if (!override_mv || ret)
992 tegra_dvfs_override_core_cap_apply(0);
994 mutex_unlock(&rail_override_lock);
998 int tegra_dvfs_resolve_override(struct clk *c, unsigned long max_rate)
1001 struct dvfs *d = c->dvfs;
1002 struct dvfs_rail *rail;
1006 rail = d->dvfs_rail;
1008 mutex_lock(&rail_override_lock);
1009 mutex_lock(&dvfs_lock);
1011 if (d->defer_override && rail->override_unresolved) {
1012 d->defer_override = false;
1014 mv = tegra_dvfs_predict_peak_millivolts(c, max_rate);
1015 if (rail->min_override_millivolts < mv)
1016 rail->min_override_millivolts = mv;
1018 rail->override_unresolved--;
1019 if (!rail->override_unresolved && rail->resolve_override)
1020 rail->resolve_override(rail->min_override_millivolts);
1022 mutex_unlock(&dvfs_lock);
1023 mutex_unlock(&rail_override_lock);
1027 int tegra_dvfs_rail_get_override_floor(struct dvfs_rail *rail)
1031 mutex_lock(&rail_override_lock);
1032 mv = dvfs_rail_get_override_floor(rail);
1033 mutex_unlock(&rail_override_lock);
/* NOTE(review): garbled listing -- locals (i, ret), the error exits after
 * the two range checks, the "mv < v_min"/"mv == v_min" branch headers inside
 * the table-update loop, loop closers and the "return ret;" tail are missing
 * from this chunk. */
1039 static int dvfs_set_fmax_at_vmin(struct clk *c, unsigned long f_max, int v_min)
1042 struct dvfs *d = c->dvfs;
1043 unsigned long f_min = 1000; /* 1kHz min rate in DVFS tables */
1045 mutex_lock(&rail_override_lock);
1046 mutex_lock(&dvfs_lock);
/* the requested vmin must stay under the active override and max levels */
1048 if (v_min > d->dvfs_rail->override_millivolts) {
1049 pr_err("%s: new %s vmin %dmV is above override voltage %dmV\n",
1050 __func__, c->name, v_min,
1051 d->dvfs_rail->override_millivolts);
1056 if (v_min >= d->max_millivolts) {
1057 pr_err("%s: new %s vmin %dmV is at/above max voltage %dmV\n",
1058 __func__, c->name, v_min, d->max_millivolts);
1064 * dvfs table update:
1065 * - for voltages below new v_min the respective frequencies are shifted
1066 * below new f_max to the levels already present in the table; if the
1067 * 1st table entry has frequency above new fmax, all entries below v_min
1068 * are filled in with 1kHz (min rate used in DVFS tables).
1069 * - for voltages above new v_min, the respective frequencies are
1070 * increased to at least new f_max
1071 * - if new v_min is already in the table set the respective frequency
1074 for (i = 0; i < d->num_freqs; i++) {
1075 int mv = d->millivolts[i];
1076 unsigned long f = d->freqs[i];
1079 if (d->freqs[i] >= f_max)
1080 d->freqs[i] = i ? d->freqs[i-1] : f_min;
1081 } else if (mv > v_min) {
1082 d->freqs[i] = max(f, f_max);
1084 d->freqs[i] = f_max;
/* re-apply DVFS against the rewritten table */
1086 ret = __tegra_dvfs_set_rate(d, d->cur_rate);
1089 mutex_unlock(&dvfs_lock);
1090 mutex_unlock(&rail_override_lock);
1095 static int dvfs_override_core_voltage(int override_mv)
1097 pr_err("%s: vdd core override is not supported\n", __func__);
1101 static int dvfs_set_fmax_at_vmin(struct clk *c, unsigned long f_max, int v_min)
1103 pr_err("%s: vdd core override is not supported\n", __func__);
1108 int tegra_dvfs_override_core_voltage(struct clk *c, int override_mv)
1110 if (!c->dvfs || !c->dvfs->can_override) {
1111 pr_err("%s: %s cannot override vdd core\n", __func__, c->name);
1114 return dvfs_override_core_voltage(override_mv);
1116 EXPORT_SYMBOL(tegra_dvfs_override_core_voltage);
1118 int tegra_dvfs_set_fmax_at_vmin(struct clk *c, unsigned long f_max, int v_min)
1120 if (!c->dvfs || !c->dvfs->can_override) {
1121 pr_err("%s: %s cannot set fmax_at_vmin)\n", __func__, c->name);
1124 return dvfs_set_fmax_at_vmin(c, f_max, v_min);
1126 EXPORT_SYMBOL(tegra_dvfs_set_fmax_at_vmin);
1128 /* May only be called during clock init, does not take any locks on clock c. */
/* NOTE(review): garbled listing -- loop index declaration, the
 * "if (c->dvfs)" already-enabled guard with its error return, loop
 * break/closers, the num_freqs/dvfs assignment block, and the final
 * "return 0;" are missing from this chunk. */
1129 int __init tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
1134 pr_err("Error when enabling dvfs on %s for clock %s:\n",
1135 d->dvfs_rail->reg_id, c->name);
1136 pr_err("DVFS already enabled for %s\n",
1137 c->dvfs->dvfs_rail->reg_id);
/* scale table to Hz; voltage 0 terminates the ladder */
1141 for (i = 0; i < MAX_DVFS_FREQS; i++) {
1142 if (d->millivolts[i] == 0)
1145 d->freqs[i] *= d->freqs_mult;
1147 /* If final frequencies are 0, pad with previous frequency */
1148 if (d->freqs[i] == 0 && i > 1)
1149 d->freqs[i] = d->freqs[i - 1];
1154 c->auto_dvfs = true;
1155 clk_set_cansleep(c);
1161 * Minimum core override level is determined as maximum voltage required
1162 * for clocks outside shared buses (shared bus rates can be capped to
1163 * safe levels when override limit is set)
1165 if (i && c->ops && !c->ops->shared_bus_update &&
1166 !(c->flags & PERIPH_ON_CBUS) && !d->can_override) {
1167 int mv = tegra_dvfs_predict_peak_millivolts(c, d->freqs[i-1]);
1168 struct dvfs_rail *rail = d->dvfs_rail;
1169 if (d->defer_override)
1170 rail->override_unresolved++;
1171 else if (rail->min_override_millivolts < mv)
1172 rail->min_override_millivolts =
1173 min(mv, rail->nominal_millivolts);
1176 mutex_lock(&dvfs_lock);
1177 list_add_tail(&d->reg_node, &d->dvfs_rail->dvfs);
1178 mutex_unlock(&dvfs_lock);
1183 static bool tegra_dvfs_all_rails_suspended(void)
1185 struct dvfs_rail *rail;
1186 bool all_suspended = true;
1188 list_for_each_entry(rail, &dvfs_rail_list, node)
1189 if (!rail->suspended && !rail->disabled)
1190 all_suspended = false;
1192 return all_suspended;
1195 static bool is_solved_at_suspend(struct dvfs_rail *to,
1196 struct dvfs_relationship *rel)
1198 if (rel->solved_at_suspend)
1201 if (rel->solved_at_nominal) {
1202 int mv = tegra_dvfs_rail_get_suspend_level(to);
1203 if (mv == to->nominal_millivolts)
1209 static bool tegra_dvfs_from_rails_suspended_or_solved(struct dvfs_rail *to)
1211 struct dvfs_relationship *rel;
1212 bool all_suspended = true;
1214 list_for_each_entry(rel, &to->relationships_from, from_node)
1215 if (!rel->from->suspended && !rel->from->disabled &&
1216 !is_solved_at_suspend(to, rel))
1217 all_suspended = false;
1219 return all_suspended;
/* NOTE(review): garbled listing -- locals (ret, mv), the else-branch header
 * before 1237, the error-check/break after dvfs_rail_set_voltage, loop
 * closers and the function's return tail are missing from this chunk. */
1222 static int tegra_dvfs_suspend_one(void)
1224 struct dvfs_rail *rail;
/* suspend one rail whose dependencies are already settled */
1227 list_for_each_entry(rail, &dvfs_rail_list, node) {
1228 if (!rail->suspended && !rail->disabled &&
1229 tegra_dvfs_from_rails_suspended_or_solved(rail)) {
1230 if (rail->dfll_mode) {
1231 /* s/w doesn't change voltage in dfll mode */
1232 mv = rail->millivolts;
1233 } else if (rail->fixed_millivolts) {
1234 /* Safe: pll mode rate capped to fixed level */
1235 mv = rail->fixed_millivolts;
1237 mv = tegra_dvfs_rail_get_suspend_level(rail);
1238 mv = dvfs_rail_apply_limits(rail, mv);
1241 /* apply suspend limit only if it is above current mv */
1243 if (mv >= rail->millivolts)
1244 ret = dvfs_rail_set_voltage(rail, mv);
1246 pr_err("tegra_dvfs: failed %s suspend at %d\n",
1247 rail->reg_id, rail->millivolts);
1251 rail->suspended = true;
1259 static void tegra_dvfs_resume(void)
1261 struct dvfs_rail *rail;
1263 mutex_lock(&dvfs_lock);
1265 list_for_each_entry(rail, &dvfs_rail_list, node)
1266 rail->suspended = false;
1268 list_for_each_entry(rail, &dvfs_rail_list, node)
1269 dvfs_rail_update(rail);
1271 mutex_unlock(&dvfs_lock);
1274 static int tegra_dvfs_suspend(void)
1278 mutex_lock(&dvfs_lock);
1280 while (!tegra_dvfs_all_rails_suspended()) {
1281 ret = tegra_dvfs_suspend_one();
1286 mutex_unlock(&dvfs_lock);
1289 tegra_dvfs_resume();
1294 static int tegra_dvfs_pm_suspend(struct notifier_block *nb,
1295 unsigned long event, void *data)
1297 if (event == PM_SUSPEND_PREPARE) {
1298 if (tegra_dvfs_suspend())
1300 pr_info("tegra_dvfs: suspended\n");
1305 static int tegra_dvfs_pm_resume(struct notifier_block *nb,
1306 unsigned long event, void *data)
1308 if (event == PM_POST_SUSPEND) {
1309 tegra_dvfs_resume();
1310 pr_info("tegra_dvfs: resumed\n");
/* PM notifier registrations (initializer closers dropped by the garbled
 * listing; any .priority fields are not visible in this chunk). */
1315 static struct notifier_block tegra_dvfs_suspend_nb = {
1316 .notifier_call = tegra_dvfs_pm_suspend,
1320 static struct notifier_block tegra_dvfs_resume_nb = {
1321 .notifier_call = tegra_dvfs_pm_resume,
1325 static int tegra_dvfs_reboot_notify(struct notifier_block *nb,
1326 unsigned long event, void *data)
1332 tegra_dvfs_suspend();
/* Reboot notifier registration (initializer closer dropped by the garbled
 * listing). */
1338 static struct notifier_block tegra_dvfs_reboot_nb = {
1339 .notifier_call = tegra_dvfs_reboot_notify,
1342 /* must be called with dvfs lock held */
/* NOTE(review): garbled listing -- locals (ret, mv, and ret's initial
 * value), the return after the DFLL branch, the else before 1358, and the
 * error-check around the pr_err are missing from this chunk. Note "volatge"
 * typo in the 1362 comment text. */
1343 static void __tegra_dvfs_rail_disable(struct dvfs_rail *rail)
1348 /* don't set voltage in DFLL mode - won't work, but break stats */
1349 if (rail->dfll_mode) {
1350 rail->disabled = true;
1354 /* Safe, as pll mode rate is capped to fixed level */
1355 if (!rail->dfll_mode && rail->fixed_millivolts) {
1356 mv = rail->fixed_millivolts;
1358 mv = tegra_dvfs_rail_get_disable_level(rail);
1359 mv = dvfs_rail_apply_limits(rail, mv);
1362 /* apply detach mode limit provided it is above current volatge */
1363 if (mv >= rail->millivolts)
1364 ret = dvfs_rail_set_voltage(rail, mv);
1366 pr_err("tegra_dvfs: failed to disable %s at %d\n",
1367 rail->reg_id, rail->millivolts);
1370 rail->disabled = true;
1373 /* must be called with dvfs lock held */
1374 static void __tegra_dvfs_rail_enable(struct dvfs_rail *rail)
1376 rail->disabled = false;
1377 dvfs_rail_update(rail);
1380 void tegra_dvfs_rail_enable(struct dvfs_rail *rail)
1385 mutex_lock(&rail_disable_lock);
1387 if (rail->disabled) {
1388 mutex_lock(&dvfs_lock);
1389 __tegra_dvfs_rail_enable(rail);
1390 mutex_unlock(&dvfs_lock);
1392 tegra_dvfs_rail_post_enable(rail);
1394 mutex_unlock(&rail_disable_lock);
1397 void tegra_dvfs_rail_disable(struct dvfs_rail *rail)
1402 mutex_lock(&rail_disable_lock);
1406 /* rail disable will set it to nominal voltage underneath clock
1407 framework - need to re-configure clock rates that are not safe
1408 at nominal (yes, unsafe at nominal is ugly, but possible). Rate
1409 change must be done outside of dvfs lock. */
1410 if (tegra_dvfs_rail_disable_prepare(rail)) {
1411 pr_info("dvfs: failed to prepare regulator %s to disable\n",
1416 mutex_lock(&dvfs_lock);
1417 __tegra_dvfs_rail_disable(rail);
1418 mutex_unlock(&dvfs_lock);
1420 mutex_unlock(&rail_disable_lock);
1423 int tegra_dvfs_rail_disable_by_name(const char *reg_id)
1425 struct dvfs_rail *rail = tegra_dvfs_get_rail_by_name(reg_id);
1429 tegra_dvfs_rail_disable(rail);
1433 struct dvfs_rail *tegra_dvfs_get_rail_by_name(const char *reg_id)
1435 struct dvfs_rail *rail;
1437 mutex_lock(&dvfs_lock);
1438 list_for_each_entry(rail, &dvfs_rail_list, node) {
1439 if (!strcmp(reg_id, rail->reg_id)) {
1440 mutex_unlock(&dvfs_lock);
1444 mutex_unlock(&dvfs_lock);
1448 int tegra_dvfs_rail_power_up(struct dvfs_rail *rail)
1452 if (!rail || !rail->in_band_pm)
1455 mutex_lock(&dvfs_lock);
1457 ret = regulator_enable(rail->reg);
1458 if (!ret && !timekeeping_suspended)
1459 tegra_dvfs_rail_on(rail, ktime_get());
1461 mutex_unlock(&dvfs_lock);
1465 int tegra_dvfs_rail_power_down(struct dvfs_rail *rail)
1469 if (!rail || !rail->in_band_pm)
1472 mutex_lock(&dvfs_lock);
1474 ret = regulator_disable(rail->reg);
1475 if (!ret && !timekeeping_suspended)
1476 tegra_dvfs_rail_off(rail, ktime_get());
1478 mutex_unlock(&dvfs_lock);
1482 bool tegra_dvfs_is_rail_up(struct dvfs_rail *rail)
1489 if (!rail->in_band_pm)
1492 mutex_lock(&dvfs_lock);
1494 ret = regulator_is_enabled(rail->reg) > 0;
1495 mutex_unlock(&dvfs_lock);
1499 int tegra_dvfs_rail_set_mode(struct dvfs_rail *rail, unsigned int mode)
1502 unsigned int cur_mode;
1504 if (!rail || !rail->reg)
1507 if (regulator_can_set_mode(rail->reg)) {
1508 pr_debug("%s: updating %s mode to %u\n", __func__,
1509 rail->reg_id, mode);
1510 ret = regulator_set_mode(rail->reg, mode);
1512 pr_err("%s: failed to set dvfs regulator %s mode %u\n",
1513 __func__, rail->reg_id, mode);
1518 * Set mode is not supported - check request against current mode
1519 * (if the latter is unknown, assume NORMAL).
1521 cur_mode = regulator_get_mode(rail->reg);
1522 if (IS_ERR_VALUE(cur_mode))
1523 cur_mode = REGULATOR_MODE_NORMAL;
1525 if (WARN_ONCE(cur_mode != mode,
1526 "%s: dvfs regulator %s cannot change mode from %u\n",
1527 __func__, rail->reg_id, cur_mode))
1533 int tegra_dvfs_rail_register_notifier(struct dvfs_rail *rail,
1534 struct notifier_block *nb)
1536 if (!rail || !rail->reg)
1539 return regulator_register_notifier(rail->reg, nb);
1542 int tegra_dvfs_rail_unregister_notifier(struct dvfs_rail *rail,
1543 struct notifier_block *nb)
1545 if (!rail || !rail->reg)
1548 return regulator_unregister_notifier(rail->reg, nb);
1551 bool tegra_dvfs_rail_updating(struct clk *clk)
1553 return (!clk ? false :
1554 (!clk->dvfs ? false :
1555 (!clk->dvfs->dvfs_rail ? false :
1556 (clk->dvfs->dvfs_rail->updating ||
1557 clk->dvfs->dvfs_rail->dfll_mode_updating))));
1561 int __init of_tegra_dvfs_init(const struct of_device_id *matches)
1564 struct device_node *np;
1566 for_each_matching_node(np, matches) {
1567 const struct of_device_id *match = of_match_node(matches, np);
1568 of_tegra_dvfs_init_cb_t dvfs_init_cb = match->data;
1569 ret = dvfs_init_cb(np);
1571 pr_err("dt: Failed to read %s tables from DT\n",
1579 int tegra_dvfs_dfll_mode_set(struct dvfs *d, unsigned long rate)
1581 mutex_lock(&dvfs_lock);
1582 if (!d->dvfs_rail->dfll_mode) {
1583 d->dvfs_rail->dfll_mode = true;
1584 __tegra_dvfs_set_rate(d, rate);
1587 * Report error, but continue: DFLL is functional, anyway, and
1588 * no error with proper regulator driver update
1590 if (regulator_set_vsel_volatile(d->dvfs_rail->reg, true))
1591 WARN_ONCE(1, "%s: failed to set vsel volatile\n",
1594 mutex_unlock(&dvfs_lock);
1598 int tegra_dvfs_dfll_mode_clear(struct dvfs *d, unsigned long rate)
1602 mutex_lock(&dvfs_lock);
1603 if (d->dvfs_rail->dfll_mode) {
1604 d->dvfs_rail->dfll_mode = false;
1605 regulator_set_vsel_volatile(d->dvfs_rail->reg, false);
1607 /* avoid false detection of matching target (voltage in dfll
1608 mode is fluctuating, and recorded level is just estimate) */
1609 d->dvfs_rail->millivolts--;
1610 if (d->dvfs_rail->disabled) {
1611 d->dvfs_rail->disabled = false;
1612 __tegra_dvfs_rail_disable(d->dvfs_rail);
1614 ret = __tegra_dvfs_set_rate(d, rate);
1616 mutex_unlock(&dvfs_lock);
1620 struct tegra_cooling_device *tegra_dvfs_get_cpu_vmax_cdev(void)
1623 return tegra_cpu_rail->vmax_cdev;
1627 struct tegra_cooling_device *tegra_dvfs_get_cpu_vmin_cdev(void)
1630 return tegra_cpu_rail->vmin_cdev;
1634 struct tegra_cooling_device *tegra_dvfs_get_core_vmax_cdev(void)
1636 if (tegra_core_rail)
1637 return tegra_core_rail->vmax_cdev;
1641 struct tegra_cooling_device *tegra_dvfs_get_core_vmin_cdev(void)
1643 if (tegra_core_rail)
1644 return tegra_core_rail->vmin_cdev;
1648 struct tegra_cooling_device *tegra_dvfs_get_gpu_vmin_cdev(void)
1651 return tegra_gpu_rail->vmin_cdev;
1655 struct tegra_cooling_device *tegra_dvfs_get_gpu_vts_cdev(void)
1658 return tegra_gpu_rail->vts_cdev;
1662 static void make_safe_thermal_dvfs(struct dvfs_rail *rail)
1666 mutex_lock(&dvfs_lock);
1667 list_for_each_entry(d, &rail->dvfs, reg_node) {
1668 if (d->therm_dvfs) {
1669 BUG_ON(!d->peak_millivolts);
1670 d->millivolts = d->peak_millivolts;
1671 d->therm_dvfs = false;
1674 mutex_unlock(&dvfs_lock);
1677 #ifdef CONFIG_THERMAL
1678 /* Cooling device limits minimum rail voltage at cold temperature in pll mode */
1679 static int tegra_dvfs_rail_get_vmin_cdev_max_state(
1680 struct thermal_cooling_device *cdev, unsigned long *max_state)
1682 struct dvfs_rail *rail = (struct dvfs_rail *)cdev->devdata;
1683 *max_state = rail->vmin_cdev->trip_temperatures_num;
1687 static int tegra_dvfs_rail_get_vmin_cdev_cur_state(
1688 struct thermal_cooling_device *cdev, unsigned long *cur_state)
1690 struct dvfs_rail *rail = (struct dvfs_rail *)cdev->devdata;
1691 *cur_state = rail->therm_floor_idx;
1695 static int tegra_dvfs_rail_set_vmin_cdev_state(
1696 struct thermal_cooling_device *cdev, unsigned long cur_state)
1698 struct dvfs_rail *rail = (struct dvfs_rail *)cdev->devdata;
1700 mutex_lock(&dvfs_lock);
1701 if (rail->therm_floor_idx != cur_state) {
1702 rail->therm_floor_idx = cur_state;
1703 dvfs_rail_update(rail);
1705 mutex_unlock(&dvfs_lock);
1709 static struct thermal_cooling_device_ops tegra_dvfs_vmin_cooling_ops = {
1710 .get_max_state = tegra_dvfs_rail_get_vmin_cdev_max_state,
1711 .get_cur_state = tegra_dvfs_rail_get_vmin_cdev_cur_state,
1712 .set_cur_state = tegra_dvfs_rail_set_vmin_cdev_state,
1715 static void tegra_dvfs_rail_register_vmin_cdev(struct dvfs_rail *rail)
1717 if (!rail->vmin_cdev)
1720 /* just report error - initialized for cold temperature, anyway */
1721 if (IS_ERR_OR_NULL(thermal_cooling_device_register(
1722 rail->vmin_cdev->cdev_type, (void *)rail,
1723 &tegra_dvfs_vmin_cooling_ops)))
1724 pr_err("tegra cooling device %s failed to register\n",
1725 rail->vmin_cdev->cdev_type);
1729 * Cooling device limits frequencies of the clocks in pll mode based on rail
1730 * vmax thermal profile. Supported for core rail only, and applied only to
1731 * shared buses selected by platform specific code.
1733 static int tegra_dvfs_rail_get_vmax_cdev_max_state(
1734 struct thermal_cooling_device *cdev, unsigned long *max_state)
1736 struct dvfs_rail *rail = (struct dvfs_rail *)cdev->devdata;
1737 *max_state = rail->vmax_cdev->trip_temperatures_num;
1741 static int tegra_dvfs_rail_get_vmax_cdev_cur_state(
1742 struct thermal_cooling_device *cdev, unsigned long *cur_state)
1744 struct dvfs_rail *rail = (struct dvfs_rail *)cdev->devdata;
1745 *cur_state = rail->therm_cap_idx;
1749 static int tegra_dvfs_rail_set_vmax_cdev_state(
1750 struct thermal_cooling_device *cdev, unsigned long cur_state)
1752 struct dvfs_rail *rail = (struct dvfs_rail *)cdev->devdata;
1753 int cur_cap = cur_state ? rail->therm_mv_caps[cur_state - 1] : 0;
1755 return tegra_dvfs_therm_vmax_core_cap_apply(&rail->therm_cap_idx,
1756 cur_state, cur_cap);
1759 static struct thermal_cooling_device_ops tegra_dvfs_vmax_cooling_ops = {
1760 .get_max_state = tegra_dvfs_rail_get_vmax_cdev_max_state,
1761 .get_cur_state = tegra_dvfs_rail_get_vmax_cdev_cur_state,
1762 .set_cur_state = tegra_dvfs_rail_set_vmax_cdev_state,
1765 void tegra_dvfs_rail_register_vmax_cdev(struct dvfs_rail *rail)
1767 struct thermal_cooling_device *dev;
1769 if (!rail || !rail->vmax_cdev || (rail != tegra_core_rail))
1772 dev = thermal_cooling_device_register(rail->vmax_cdev->cdev_type,
1773 (void *)rail, &tegra_dvfs_vmax_cooling_ops);
1775 if (IS_ERR_OR_NULL(dev) || list_empty(&dev->thermal_instances)) {
1776 /* report error & set the most agressive caps */
1777 int cur_state = rail->vmax_cdev->trip_temperatures_num;
1778 int cur_cap = rail->therm_mv_caps[cur_state - 1];
1779 tegra_dvfs_therm_vmax_core_cap_apply(&rail->therm_cap_idx,
1780 cur_state, cur_cap);
1781 pr_err("tegra cooling device %s failed to register\n",
1782 rail->vmax_cdev->cdev_type);
1786 /* Cooling device to scale voltage with temperature in pll mode */
1787 static int tegra_dvfs_rail_get_vts_cdev_max_state(
1788 struct thermal_cooling_device *cdev, unsigned long *max_state)
1790 struct dvfs_rail *rail = (struct dvfs_rail *)cdev->devdata;
1791 *max_state = rail->vts_cdev->trip_temperatures_num;
1795 static int tegra_dvfs_rail_get_vts_cdev_cur_state(
1796 struct thermal_cooling_device *cdev, unsigned long *cur_state)
1798 struct dvfs_rail *rail = (struct dvfs_rail *)cdev->devdata;
1799 *cur_state = rail->therm_scale_idx;
1803 static int tegra_dvfs_rail_set_vts_cdev_state(
1804 struct thermal_cooling_device *cdev, unsigned long cur_state)
1806 struct dvfs_rail *rail = (struct dvfs_rail *)cdev->devdata;
1809 mutex_lock(&dvfs_lock);
1810 if (rail->therm_scale_idx != cur_state) {
1811 rail->therm_scale_idx = cur_state;
1812 list_for_each_entry(d, &rail->dvfs, reg_node) {
1814 __tegra_dvfs_set_rate(d, d->cur_rate);
1817 mutex_unlock(&dvfs_lock);
1821 static struct thermal_cooling_device_ops tegra_dvfs_vts_cooling_ops = {
1822 .get_max_state = tegra_dvfs_rail_get_vts_cdev_max_state,
1823 .get_cur_state = tegra_dvfs_rail_get_vts_cdev_cur_state,
1824 .set_cur_state = tegra_dvfs_rail_set_vts_cdev_state,
1827 static void tegra_dvfs_rail_register_vts_cdev(struct dvfs_rail *rail)
1829 struct thermal_cooling_device *dev;
1831 if (!rail->vts_cdev)
1834 dev = thermal_cooling_device_register(rail->vts_cdev->cdev_type,
1835 (void *)rail, &tegra_dvfs_vts_cooling_ops);
1836 /* report error & set max limits across thermal ranges as safe dvfs */
1837 if (IS_ERR_OR_NULL(dev) || list_empty(&dev->thermal_instances)) {
1838 pr_err("tegra cooling device %s failed to register\n",
1839 rail->vts_cdev->cdev_type);
1840 make_safe_thermal_dvfs(rail);
/*
 * !CONFIG_THERMAL stubs: no cooling devices exist, so vmin registration
 * is a no-op macro, vmax registration is an empty function, and vts
 * registration falls straight back to the safe peak-voltage tables.
 * NOTE(review): paste is truncated here — the empty body "{ }" of the
 * vmax stub and the braces of the vts stub are missing; restore from
 * upstream.
 */
1845 #define tegra_dvfs_rail_register_vmin_cdev(rail)
1846 void tegra_dvfs_rail_register_vmax_cdev(struct dvfs_rail *rail)
1848 static inline void tegra_dvfs_rail_register_vts_cdev(struct dvfs_rail *rail)
1850 	make_safe_thermal_dvfs(rail);
1854 #ifdef CONFIG_TEGRA_USE_SIMON
1856 * Validate rail SiMon Vmin offsets. Valid offsets should be negative,
1857 * descending, starting from zero.
1859 void __init tegra_dvfs_rail_init_simon_vmin_offsets(
1860 int *offsets, int offs_num, struct dvfs_rail *rail)
1864 if (!offsets || !offs_num || offsets[0]) {
1865 WARN(1, "%s: invalid initial SiMon offset\n", rail->reg_id);
1869 for (i = 0; i < offs_num - 1; i++) {
1870 if (offsets[i] < offsets[i+1]) {
1871 WARN(1, "%s: SiMon offsets are not ordered\n",
1876 rail->simon_vmin_offsets = offsets;
1877 rail->simon_vmin_offs_num = offs_num;
1882 * Validate rail thermal profile, and get its size. Valid profile:
1883 * - voltage limits are descending with temperature increasing
1884 * - the lowest limit is above rail minimum voltage in pll and
1885 * in dfll mode (if applicable)
1886 * - the highest limit is below rail nominal voltage (required only
1889 static int __init get_thermal_profile_size(
1890 int *trips_table, int *limits_table,
1891 struct dvfs_rail *rail, struct dvfs_dfll_data *d)
1895 for (i = 0; i < MAX_THERMAL_LIMITS - 1; i++) {
1896 if (!limits_table[i+1])
1899 if ((trips_table[i] >= trips_table[i+1]) ||
1900 (limits_table[i] < limits_table[i+1])) {
1901 pr_warn("%s: not ordered profile\n", rail->reg_id);
1906 min_mv = max(rail->min_millivolts, d ? d->min_millivolts : 0);
1907 if (limits_table[i] < min_mv) {
1908 pr_warn("%s: thermal profile below Vmin\n", rail->reg_id);
1915 void __init tegra_dvfs_rail_init_vmax_thermal_profile(
1916 int *therm_trips_table, int *therm_caps_table,
1917 struct dvfs_rail *rail, struct dvfs_dfll_data *d)
1919 int i = get_thermal_profile_size(therm_trips_table,
1920 therm_caps_table, rail, d);
1922 rail->vmax_cdev = NULL;
1923 WARN(1, "%s: invalid Vmax thermal profile\n", rail->reg_id);
1927 /* Install validated thermal caps */
1928 rail->therm_mv_caps = therm_caps_table;
1929 rail->therm_mv_caps_num = i;
1931 /* Setup trip-points if applicable */
1932 if (rail->vmax_cdev) {
1933 rail->vmax_cdev->trip_temperatures_num = i;
1934 rail->vmax_cdev->trip_temperatures = therm_trips_table;
1938 void __init tegra_dvfs_rail_init_vmin_thermal_profile(
1939 int *therm_trips_table, int *therm_floors_table,
1940 struct dvfs_rail *rail, struct dvfs_dfll_data *d)
1942 int i = get_thermal_profile_size(therm_trips_table,
1943 therm_floors_table, rail, d);
1945 if (i <= 0 || therm_floors_table[0] > rail->nominal_millivolts) {
1946 rail->vmin_cdev = NULL;
1947 WARN(1, "%s: invalid Vmin thermal profile\n", rail->reg_id);
1951 /* Install validated thermal floors */
1952 rail->therm_mv_floors = therm_floors_table;
1953 rail->therm_mv_floors_num = i;
1955 /* Setup trip-points if applicable */
1956 if (rail->vmin_cdev) {
1957 rail->vmin_cdev->trip_temperatures_num = i;
1958 rail->vmin_cdev->trip_temperatures = therm_trips_table;
1963 * Validate thermal dvfs settings:
1964 * - trip-points are montonically increasing
1965 * - voltages in any temperature range are montonically increasing with
1966 * frequency (can go up/down across ranges at iso frequency)
1967 * - voltage for any frequency/thermal range combination must be within
1968 * rail minimum/maximum limits
1970 int __init tegra_dvfs_rail_init_thermal_dvfs_trips(
1971 int *therm_trips_table, struct dvfs_rail *rail)
1975 if (!rail->vts_cdev) {
1976 WARN(1, "%s: missing thermal dvfs cooling device\n",
1981 for (i = 0; i < MAX_THERMAL_LIMITS - 1; i++) {
1982 if (therm_trips_table[i] >= therm_trips_table[i+1])
1986 rail->vts_cdev->trip_temperatures_num = i + 1;
1987 rail->vts_cdev->trip_temperatures = therm_trips_table;
1991 int __init tegra_dvfs_init_thermal_dvfs_voltages(int *therm_voltages,
1992 int *peak_voltages, int freqs_num, int ranges_num, struct dvfs *d)
1995 int freq_idx, therm_idx;
1997 for (therm_idx = 0; therm_idx < ranges_num; therm_idx++) {
1998 millivolts = therm_voltages + therm_idx * MAX_DVFS_FREQS;
1999 for (freq_idx = 0; freq_idx < freqs_num; freq_idx++) {
2000 int mv = millivolts[freq_idx];
2001 if ((mv > d->dvfs_rail->max_millivolts) ||
2002 (mv < d->dvfs_rail->min_millivolts) ||
2003 (freq_idx && (mv < millivolts[freq_idx - 1]))) {
2004 WARN(1, "%s: invalid thermal dvfs entry %d(%d, %d)\n",
2005 d->clk_name, mv, freq_idx, therm_idx);
2008 if (mv > peak_voltages[freq_idx])
2009 peak_voltages[freq_idx] = mv;
2013 d->millivolts = therm_voltages;
2014 d->peak_millivolts = peak_voltages;
2015 d->therm_dvfs = true;
2019 /* Directly set cold temperature limit in dfll mode */
2020 int tegra_dvfs_rail_dfll_mode_set_cold(struct dvfs_rail *rail,
2021 struct clk *dfll_clk)
2025 /* No thermal floors - nothing to do */
2026 if (!rail || !rail->therm_mv_floors)
2030 * Compare last set Vmin with requirement based on current temperature,
2031 * and set cold limit at regulator only Vmin is below requirement.
2033 mutex_lock(&dvfs_lock);
2034 if (rail->dfll_mode) {
2036 cmp = tegra_cl_dvfs_vmin_cmp_needed(
2037 tegra_dfll_get_cl_dvfs_data(dfll_clk), &mv);
2039 ret = dvfs_rail_set_voltage_reg(rail, mv);
2041 mutex_unlock(&dvfs_lock);
2046 /* Get current thermal floor */
2047 int tegra_dvfs_rail_get_thermal_floor(struct dvfs_rail *rail)
2049 if (rail && rail->therm_mv_floors &&
2050 (rail->therm_floor_idx < rail->therm_mv_floors_num)) {
2051 int i = rail->therm_floor_idx;
2052 if (rail->dfll_mode) {
2053 BUG_ON(!rail->therm_mv_dfll_floors);
2054 return rail->therm_mv_dfll_floors[i];
2056 return rail->therm_mv_floors[i];
2062 * Iterate through all the dvfs regulators, finding the regulator exported
2063 * by the regulator api for each one. Must be called in late init, after
2064 * all the regulator api's regulators are initialized.
2067 #ifdef CONFIG_TEGRA_DVFS_RAIL_CONNECT_ALL
2069 * Enable voltage scaling only if all the rails connect successfully
/*
 * CONNECT_ALL variant: voltage scaling is enabled only if *every* rail
 * connects to its regulator; otherwise scaling is globally disabled.
 * NOTE(review): this paste dropped several lines — at least the body of
 * the first loop (setting connected = false on failure), the "continue"
 * after the !rail->disabled check, the closing braces, and the return
 * statements after the pr_warn.  Restore from upstream before building.
 */
2071 int __init tegra_dvfs_rail_connect_regulators(void)
2073 	bool connected = true;
2074 	struct dvfs_rail *rail;
2076 	mutex_lock(&dvfs_lock);
2078 	list_for_each_entry(rail, &dvfs_rail_list, node)
2079 		if (dvfs_rail_connect_to_regulator(rail))
/* NOTE(review): missing line here presumably sets connected = false */
2082 	list_for_each_entry(rail, &dvfs_rail_list, node) {
2084 			dvfs_rail_update(rail)
2085 		if (!rail->disabled)
2087 		/* Don't rely on boot level - force disabled voltage */
2088 		rail->disabled = false;
2090 		__tegra_dvfs_rail_disable(rail);
2092 	mutex_unlock(&dvfs_lock);
2094 	if (!connected && tegra_platform_is_silicon()) {
2095 		pr_warn("tegra_dvfs: DVFS regulators connection failed\n"
2096 			" !!!! voltage scaling is disabled !!!!\n");
2103 int __init tegra_dvfs_rail_connect_regulators(void)
2105 struct dvfs_rail *rail;
2107 mutex_lock(&dvfs_lock);
2109 list_for_each_entry(rail, &dvfs_rail_list, node) {
2110 if (!dvfs_rail_connect_to_regulator(rail)) {
2111 dvfs_rail_update(rail);
2112 if (!rail->disabled)
2114 /* Don't rely on boot level - force disabled voltage */
2115 rail->disabled = false;
2117 __tegra_dvfs_rail_disable(rail);
2120 mutex_unlock(&dvfs_lock);
2126 int __init tegra_dvfs_rail_register_notifiers(void)
2128 struct dvfs_rail *rail;
2130 register_pm_notifier(&tegra_dvfs_suspend_nb);
2131 register_pm_notifier(&tegra_dvfs_resume_nb);
2132 register_reboot_notifier(&tegra_dvfs_reboot_nb);
2134 list_for_each_entry(rail, &dvfs_rail_list, node) {
2135 tegra_dvfs_rail_register_vmin_cdev(rail);
2136 tegra_dvfs_rail_register_vts_cdev(rail);
/*
 * rail_stats_save_to_buf - format per-rail time-at-voltage histograms
 * into @buf (at most @len bytes) and return the number of bytes written.
 * Used by both the debugfs "rails" file and the sysfs tegra_rail_stats
 * attribute.
 * NOTE(review): paste is truncated — missing at least the declarations
 * of "int i;" and "char *str = buf;", the off/bin guards inside the
 * loop, the closing braces, and the final "return str - buf;".  Restore
 * from upstream.
 */
2142 static int rail_stats_save_to_buf(char *buf, int len)
2145 	struct dvfs_rail *rail;
2147 	char *end = buf + len;
2149 	str += scnprintf(str, end - str, "%-12s %-10s\n", "millivolts", "time");
2151 	mutex_lock(&dvfs_lock);
2153 	list_for_each_entry(rail, &dvfs_rail_list, node) {
2154 		str += scnprintf(str, end - str, "%s (bin: %d.%dmV)\n",
2156 			rail->stats.bin_uV / 1000,
2157 			(rail->stats.bin_uV / 10) % 100);
/* flush the currently accumulating bin before reporting */
2159 		dvfs_rail_stats_update(rail, -1, ktime_get());
2161 		str += scnprintf(str, end - str, "%-12d %-10llu\n", 0,
2162 			cputime64_to_clock_t(msecs_to_jiffies(
2163 				ktime_to_ms(rail->stats.time_at_mv[0]))));
2165 		for (i = 1; i <= DVFS_RAIL_STATS_TOP_BIN; i++) {
2166 			ktime_t ktime_zero = ktime_set(0, 0);
/* NOTE(review): a "continue;" for zero-time bins appears to be missing here */
2167 			if (ktime_equal(rail->stats.time_at_mv[i], ktime_zero))
2169 			str += scnprintf(str, end - str, "%-12d %-10llu\n",
2170 				rail->min_millivolts +
2171 				(i - 1) * rail->stats.bin_uV / 1000,
2172 				cputime64_to_clock_t(msecs_to_jiffies(
2173 					ktime_to_ms(rail->stats.time_at_mv[i])))
2177 	mutex_unlock(&dvfs_lock);
2181 #ifdef CONFIG_DEBUG_FS
2182 static int dvfs_tree_sort_cmp(void *p, struct list_head *a, struct list_head *b)
2184 struct dvfs *da = list_entry(a, struct dvfs, reg_node);
2185 struct dvfs *db = list_entry(b, struct dvfs, reg_node);
2188 ret = strcmp(da->dvfs_rail->reg_id, db->dvfs_rail->reg_id);
2192 if (da->cur_millivolts < db->cur_millivolts)
2194 if (da->cur_millivolts > db->cur_millivolts)
2197 return strcmp(da->clk_name, db->clk_name);
2200 /* To emulate and show rail relations with 0 mV on dependent rail-to */
2201 static struct dvfs_rail show_to;
2202 static struct dvfs_relationship show_rel;
/*
 * dvfs_tree_show - debugfs "dvfs" seq_file printer: per-rail voltage,
 * relationships, nominal/offset/thermal/override levels, then the
 * sorted list of clocks with their current rate and voltage.
 * NOTE(review): paste is truncated — missing at least "struct dvfs *d;",
 * the "show_rel = *rel;" copy before overriding show_rel.to, the
 * seq_printf("\n") separators, closing braces, and the final
 * "return 0;".  Restore from upstream.
 */
2204 static int dvfs_tree_show(struct seq_file *s, void *data)
2207 	struct dvfs_rail *rail;
2208 	struct dvfs_relationship *rel;
2210 	seq_printf(s, "   clock      rate       mV\n");
2211 	seq_printf(s, "--------------------------------\n");
2213 	mutex_lock(&dvfs_lock);
2215 	list_for_each_entry(rail, &dvfs_rail_list, node) {
2216 		int thermal_mv_floor = 0;
2218 		seq_printf(s, "%s %d mV%s:\n", rail->reg_id,
2219 			rail->stats.off ? 0 : rail->millivolts,
2220 			rail->dfll_mode ? " dfll mode" :
2221 				rail->disabled ? " disabled" : "");
/* show each dependency solved both normally and with a zeroed rail-to */
2222 		list_for_each_entry(rel, &rail->relationships_from, from_node) {
2224 			show_rel.to = &show_to;
2226 			show_to.millivolts = show_to.new_millivolts = 0;
2227 			seq_printf(s, "   %-10s %-7d mV %-4d mV .. %-4d mV\n",
2228 				rel->from->reg_id, rel->from->millivolts,
2229 				dvfs_solve_relationship(&show_rel),
2230 				dvfs_solve_relationship(rel));
2232 		seq_printf(s, "   nominal    %-7d mV\n",
2233 			rail->nominal_millivolts);
2234 		seq_printf(s, "   offset     %-7d mV\n", rail->dbg_mv_offs);
2236 		thermal_mv_floor = tegra_dvfs_rail_get_thermal_floor(rail);
2237 		seq_printf(s, "   thermal    %-7d mV\n", thermal_mv_floor);
2239 		if (rail == tegra_core_rail) {
2240 			seq_printf(s, "   override   %-7d mV [%-4d...%-4d]",
2241 				rail->override_millivolts,
2242 				dvfs_rail_get_override_floor(rail),
2243 				rail->nominal_millivolts);
2244 			if (rail->override_unresolved)
2245 				seq_printf(s, " unresolved %d",
2246 					rail->override_unresolved);
2250 		list_sort(NULL, &rail->dvfs, dvfs_tree_sort_cmp);
2252 		list_for_each_entry(d, &rail->dvfs, reg_node) {
2253 			seq_printf(s, "   %-10s %-10lu %-4d mV\n", d->clk_name,
2254 				d->cur_rate, d->cur_millivolts);
2258 	mutex_unlock(&dvfs_lock);
2263 static int dvfs_tree_open(struct inode *inode, struct file *file)
2265 return single_open(file, dvfs_tree_show, inode->i_private);
2268 static const struct file_operations dvfs_tree_fops = {
2269 .open = dvfs_tree_open,
2271 .llseek = seq_lseek,
2272 .release = single_release,
2275 static int rail_stats_show(struct seq_file *s, void *data)
2277 char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
2283 size = rail_stats_save_to_buf(buf, PAGE_SIZE);
2284 seq_write(s, buf, size);
2289 static int rail_stats_open(struct inode *inode, struct file *file)
2291 return single_open(file, rail_stats_show, inode->i_private);
2294 static const struct file_operations rail_stats_fops = {
2295 .open = rail_stats_open,
2297 .llseek = seq_lseek,
2298 .release = single_release,
2301 static int rail_offs_set(struct dvfs_rail *rail, int offs)
2304 mutex_lock(&dvfs_lock);
2305 rail->dbg_mv_offs = offs;
2306 dvfs_rail_update(rail);
2307 mutex_unlock(&dvfs_lock);
2313 static int cpu_offs_get(void *data, u64 *val)
2315 if (tegra_cpu_rail) {
2316 *val = (u64)tegra_cpu_rail->dbg_mv_offs;
2322 static int cpu_offs_set(void *data, u64 val)
2324 return rail_offs_set(tegra_cpu_rail, (int)val);
2326 DEFINE_SIMPLE_ATTRIBUTE(cpu_offs_fops, cpu_offs_get, cpu_offs_set, "%lld\n");
2328 static int gpu_offs_get(void *data, u64 *val)
2330 if (tegra_gpu_rail) {
2331 *val = (u64)tegra_gpu_rail->dbg_mv_offs;
2337 static int gpu_offs_set(void *data, u64 val)
2339 return rail_offs_set(tegra_gpu_rail, (int)val);
2341 DEFINE_SIMPLE_ATTRIBUTE(gpu_offs_fops, gpu_offs_get, gpu_offs_set, "%lld\n");
2343 static int core_offs_get(void *data, u64 *val)
2345 if (tegra_core_rail) {
2346 *val = (u64)tegra_core_rail->dbg_mv_offs;
2352 static int core_offs_set(void *data, u64 val)
2354 return rail_offs_set(tegra_core_rail, (int)val);
2356 DEFINE_SIMPLE_ATTRIBUTE(core_offs_fops, core_offs_get, core_offs_set, "%lld\n");
2358 static int core_override_get(void *data, u64 *val)
2360 if (tegra_core_rail) {
2361 *val = (u64)tegra_core_rail->override_millivolts;
2367 static int core_override_set(void *data, u64 val)
2369 return dvfs_override_core_voltage((int)val);
2371 DEFINE_SIMPLE_ATTRIBUTE(core_override_fops,
2372 core_override_get, core_override_set, "%llu\n");
2374 static int rail_mv_get(void *data, u64 *val)
2376 struct dvfs_rail *rail = data;
2378 *val = rail->stats.off ? 0 : rail->millivolts;
2384 DEFINE_SIMPLE_ATTRIBUTE(rail_mv_fops, rail_mv_get, NULL, "%llu\n");
/*
 * gpu_dvfs_t_show - debugfs printer for the GPU voltage/frequency/
 * temperature matrix: one row per thermal range (current range marked
 * with ">"), one column per frequency, plus a per-frequency max(T) row.
 * NOTE(review): paste is truncated — missing at least the declarations
 * of i, j, num_ranges (default 1) and "int *trips = NULL;", the
 * "if (j == 0)" branch selecting the first range label, closing braces,
 * and "return 0;".  Restore from upstream.
 */
2386 static int gpu_dvfs_t_show(struct seq_file *s, void *data)
2392 	struct dvfs_rail *rail = tegra_gpu_rail;
2393 	int max_mv[MAX_DVFS_FREQS] = {};
2395 	if (!tegra_gpu_rail) {
2396 		seq_printf(s, "Only supported for T124 or higher\n");
2400 	mutex_lock(&dvfs_lock);
/* thermal ranges only apply when the rail does thermal dvfs */
2402 	d = list_first_entry(&rail->dvfs, struct dvfs, reg_node);
2403 	if (rail->vts_cdev && d->therm_dvfs) {
2404 		num_ranges = rail->vts_cdev->trip_temperatures_num + 1;
2405 		trips = rail->vts_cdev->trip_temperatures;
2408 	seq_printf(s, "%-11s", "T(C)\\F(kHz)");
2409 	for (i = 0; i < d->num_freqs; i++) {
2410 		unsigned int f = d->freqs[i]/1000;
2411 		seq_printf(s, " %7u", f);
2413 	seq_printf(s, "\n");
2415 	for (j = 0; j < num_ranges; j++) {
2416 		seq_printf(s, "%s", j == rail->therm_scale_idx ? ">" : " ");
2418 		if (!trips || (num_ranges == 1))
2419 			seq_printf(s, "%4s..%-4s", "", "");
2421 			seq_printf(s, "%4s..%-4d", "", trips[j]);
2422 		else if (j == num_ranges - 1)
2423 			seq_printf(s, "%4d..%-4s", trips[j], "");
2425 			seq_printf(s, "%4d..%-4d", trips[j-1], trips[j]);
/* row entries come from the j'th per-range voltage table */
2427 		for (i = 0; i < d->num_freqs; i++) {
2428 			int mv = *(d->millivolts + j * MAX_DVFS_FREQS + i);
2429 			seq_printf(s, " %7d", mv);
2430 			max_mv[i] = max(max_mv[i], mv);
2432 		seq_printf(s, " mV\n");
2435 	seq_printf(s, "%3s%-8s\n", "", "------");
2436 	seq_printf(s, "%3s%-8s", "", "max(T)");
2437 	for (i = 0; i < d->num_freqs; i++)
2438 		seq_printf(s, " %7d", max_mv[i]);
2439 	seq_printf(s, " mV\n");
2441 	mutex_unlock(&dvfs_lock);
2446 static int gpu_dvfs_t_open(struct inode *inode, struct file *file)
2448 return single_open(file, gpu_dvfs_t_show, NULL);
2451 static const struct file_operations gpu_dvfs_t_fops = {
2452 .open = gpu_dvfs_t_open,
2454 .llseek = seq_lseek,
2455 .release = single_release,
/*
 * dvfs_table_show - debugfs printer for the raw DVFS tables: rail table
 * versions, then for each clock its pll-mode (and dfll-mode, if any)
 * voltage row followed by its frequency row.  Voltage rows shared by
 * consecutive clocks are printed once (last_v_* de-duplication).
 * NOTE(review): paste is truncated — missing at least "int i;",
 * "struct dvfs *d;", the "if (last_v_pll)" / "if (last_v_dfll)" guards
 * before the blank-line separators, "last_v_pll = v_pll;", closing
 * braces, and "return 0;".  Restore from upstream.
 */
2458 static int dvfs_table_show(struct seq_file *s, void *data)
2462 	struct dvfs_rail *rail;
2463 	const int *v_pll, *last_v_pll = NULL;
2464 	const int *v_dfll, *last_v_dfll = NULL;
2466 	seq_printf(s, "DVFS tables: units mV/MHz\n");
2468 	mutex_lock(&dvfs_lock);
2470 	list_for_each_entry(rail, &dvfs_rail_list, node) {
2471 		if (rail->version) {
2472 			seq_printf(s, "%-9s table version: ", rail->reg_id);
2473 			seq_printf(s, "%-16s\n", rail->version);
2477 	list_for_each_entry(rail, &dvfs_rail_list, node) {
2478 		list_for_each_entry(d, &rail->dvfs, reg_node) {
2479 			bool mv_done = false;
2480 			v_pll = tegra_dvfs_get_millivolts_pll(d);
2481 			v_dfll = d->dfll_millivolts;
/* print the pll-mode voltage row once per distinct table */
2483 			if (v_pll && (last_v_pll != v_pll)) {
2485 					seq_printf(s, "\n");
2489 				seq_printf(s, "%-16s", rail->reg_id);
2490 				for (i = 0; i < d->num_freqs; i++)
2491 					seq_printf(s, "%7d", v_pll[i]);
2492 				seq_printf(s, "\n");
/* print the dfll-mode voltage row once per distinct table */
2495 			if (v_dfll && (last_v_dfll != v_dfll)) {
2497 					seq_printf(s, "\n");
2500 				last_v_dfll = v_dfll;
2501 				seq_printf(s, "%-8s (dfll) ", rail->reg_id);
2502 				for (i = 0; i < d->num_freqs; i++)
2503 					seq_printf(s, "%7d", v_dfll[i]);
2504 				seq_printf(s, "\n");
2507 			seq_printf(s, "%-16s", d->clk_name);
2508 			for (i = 0; i < d->num_freqs; i++) {
2509 				unsigned long *freqs = dvfs_get_freqs(d);
2510 				unsigned int f = freqs[i]/100000;
2511 				seq_printf(s, " %4u.%u", f/10, f%10);
2513 			seq_printf(s, "\n");
2517 	mutex_unlock(&dvfs_lock);
2522 static int dvfs_table_open(struct inode *inode, struct file *file)
2524 return single_open(file, dvfs_table_show, inode->i_private);
2527 static const struct file_operations dvfs_table_fops = {
2528 .open = dvfs_table_open,
2530 .llseek = seq_lseek,
2531 .release = single_release,
2534 int __init dvfs_debugfs_init(struct dentry *clk_debugfs_root)
2538 d = debugfs_create_file("dvfs", S_IRUGO, clk_debugfs_root, NULL,
2543 d = debugfs_create_file("rails", S_IRUGO, clk_debugfs_root, NULL,
2548 d = debugfs_create_file("vdd_cpu_offs", S_IRUGO | S_IWUSR,
2549 clk_debugfs_root, NULL, &cpu_offs_fops);
2553 d = debugfs_create_file("vdd_gpu_offs", S_IRUGO | S_IWUSR,
2554 clk_debugfs_root, NULL, &gpu_offs_fops);
2558 d = debugfs_create_file("vdd_core_offs", S_IRUGO | S_IWUSR,
2559 clk_debugfs_root, NULL, &core_offs_fops);
2563 d = debugfs_create_file("vdd_core_override", S_IRUGO | S_IWUSR,
2564 clk_debugfs_root, NULL, &core_override_fops);
2568 d = debugfs_create_file("vdd_cpu_mv", S_IRUGO, clk_debugfs_root,
2569 tegra_cpu_rail, &rail_mv_fops);
2573 d = debugfs_create_file("vdd_gpu_mv", S_IRUGO, clk_debugfs_root,
2574 tegra_gpu_rail, &rail_mv_fops);
2578 d = debugfs_create_file("vdd_core_mv", S_IRUGO, clk_debugfs_root,
2579 tegra_core_rail, &rail_mv_fops);
2583 d = debugfs_create_file("gpu_dvfs_t", S_IRUGO | S_IWUSR,
2584 clk_debugfs_root, NULL, &gpu_dvfs_t_fops);
2588 d = debugfs_create_file("dvfs_table", S_IRUGO, clk_debugfs_root, NULL,
2599 static ssize_t tegra_rail_stats_show(struct kobject *kobj,
2600 struct kobj_attribute *attr,
2603 return rail_stats_save_to_buf(buf, PAGE_SIZE);
2606 static struct kobj_attribute rail_stats_attr =
2607 __ATTR_RO(tegra_rail_stats);
2609 static int __init tegra_dvfs_sysfs_stats_init(void)
2612 error = sysfs_create_file(power_kobj, &rail_stats_attr.attr);
2615 late_initcall(tegra_dvfs_sysfs_stats_init);