2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8 * Added handling for CPU hotplug
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/notifier.h>
24 #include <linux/cpufreq.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/device.h>
29 #include <linux/slab.h>
30 #include <linux/cpu.h>
31 #include <linux/completion.h>
32 #include <linux/mutex.h>
33 #include <linux/syscore_ops.h>
35 #include <trace/events/power.h>
38 * The "cpufreq driver" - the arch- or hardware-dependent low
39 * level driver of CPUFreq support, and its read-write lock. This lock
40 * also protects the cpufreq_cpu_data array.
42 static struct cpufreq_driver *cpufreq_driver;
43 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
44 #ifdef CONFIG_HOTPLUG_CPU
45 /* This one keeps track of the previously set governor of a removed CPU */
46 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
48 static DEFINE_RWLOCK(cpufreq_driver_lock);
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
62 * - Governor routines that can be called in the cpufreq hotplug path should not
63 * take this sem, as the top-level hotplug notifier handler already takes it.
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
67 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
68 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
70 #define lock_policy_rwsem(mode, cpu) \
71 static int lock_policy_rwsem_##mode(int cpu) \
73 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
74 BUG_ON(policy_cpu == -1); \
75 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
80 lock_policy_rwsem(read, cpu);
81 lock_policy_rwsem(write, cpu);
83 #define unlock_policy_rwsem(mode, cpu) \
84 static void unlock_policy_rwsem_##mode(int cpu) \
86 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
87 BUG_ON(policy_cpu == -1); \
88 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
91 unlock_policy_rwsem(read, cpu);
92 unlock_policy_rwsem(write, cpu);
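/*
 * Illustrative sketch (not part of the original file): the two macro pairs
 * above expand into helpers such as lock_policy_rwsem_read(cpu) and
 * unlock_policy_rwsem_read(cpu).  A reader of a policy typically brackets
 * its access like this (mirroring how show() below uses them):
 */
#if 0
        if (lock_policy_rwsem_read(cpu) < 0)
                return -EINVAL;
        /* ... safely read the policy that covers 'cpu' ... */
        unlock_policy_rwsem_read(cpu);
#endif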
94 /* internal prototypes */
95 static int __cpufreq_governor(struct cpufreq_policy *policy,
97 static unsigned int __cpufreq_get(unsigned int cpu);
98 static void handle_update(struct work_struct *work);
101 * Two notifier lists: the "policy" list is involved in the
102 * validation process for a new CPU frequency policy; the
103 * "transition" list for kernel code that needs to handle
104 * changes to devices when the CPU clock speed changes.
105 * The mutex locks both lists.
107 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
108 static struct srcu_notifier_head cpufreq_transition_notifier_list;
110 static bool init_cpufreq_transition_notifier_list_called;
111 static int __init init_cpufreq_transition_notifier_list(void)
113 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
114 init_cpufreq_transition_notifier_list_called = true;
117 pure_initcall(init_cpufreq_transition_notifier_list);
119 static int off __read_mostly;
120 static int cpufreq_disabled(void)
124 void disable_cpufreq(void)
128 static LIST_HEAD(cpufreq_governor_list);
129 static DEFINE_MUTEX(cpufreq_governor_mutex);
131 bool have_governor_per_policy(void)
133 return cpufreq_driver->have_governor_per_policy;
136 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
138 struct cpufreq_policy *data;
141 if (cpu >= nr_cpu_ids)
144 /* get the cpufreq driver */
145 read_lock_irqsave(&cpufreq_driver_lock, flags);
150 if (!try_module_get(cpufreq_driver->owner))
155 data = per_cpu(cpufreq_cpu_data, cpu);
158 goto err_out_put_module;
160 if (!sysfs && !kobject_get(&data->kobj))
161 goto err_out_put_module;
163 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
167 module_put(cpufreq_driver->owner);
169 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
174 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
176 if (cpufreq_disabled())
179 return __cpufreq_cpu_get(cpu, false);
181 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
183 static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
185 return __cpufreq_cpu_get(cpu, true);
188 static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
191 kobject_put(&data->kobj);
192 module_put(cpufreq_driver->owner);
195 void cpufreq_cpu_put(struct cpufreq_policy *data)
197 if (cpufreq_disabled())
200 __cpufreq_cpu_put(data, false);
202 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
204 static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
206 __cpufreq_cpu_put(data, true);
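/*
 * Illustrative sketch (not part of the original file): code outside this file
 * pairs cpufreq_cpu_get() with cpufreq_cpu_put(); while the reference is held
 * the policy and the driver module cannot go away.  The helper name below is
 * made up for illustration.
 */
#if 0
static void example_show_cur_freq(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

        if (!policy)
                return;
        pr_info("cpu%u: current frequency %u kHz\n", cpu, policy->cur);
        cpufreq_cpu_put(policy);
}
#endif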
209 /*********************************************************************
210 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
211 *********************************************************************/
214 * adjust_jiffies - adjust the system "loops_per_jiffy"
216 * This function alters the system "loops_per_jiffy" for the clock
217 * speed change. Note that loops_per_jiffy cannot be updated on SMP
218 * systems as each CPU might be scaled differently. So, use the arch
219 * per-CPU loops_per_jiffy value wherever possible.
222 static unsigned long l_p_j_ref;
223 static unsigned int l_p_j_ref_freq;
225 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
227 if (ci->flags & CPUFREQ_CONST_LOOPS)
230 if (!l_p_j_ref_freq) {
231 l_p_j_ref = loops_per_jiffy;
232 l_p_j_ref_freq = ci->old;
233 pr_debug("saving %lu as reference value for loops_per_jiffy; "
234 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
236 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
237 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
238 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
240 pr_debug("scaling loops_per_jiffy to %lu "
241 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
245 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
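/*
 * Illustrative note (not part of the original file): cpufreq_scale(old, div,
 * mult) returns old * mult / div, so a loops_per_jiffy reference of 4,000,000
 * taken at 1,000,000 kHz becomes 2,000,000 when the CPU drops to 500,000 kHz:
 */
#if 0
        loops_per_jiffy = cpufreq_scale(4000000UL, 1000000, 500000); /* 2000000 */
#endif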
253 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
254 * on frequency transition.
256 * This function calls the transition notifiers and the "adjust_jiffies"
257 * function. It is called twice on all CPU frequency changes that have external effects.
260 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
262 struct cpufreq_policy *policy;
265 BUG_ON(irqs_disabled());
267 if (cpufreq_disabled())
270 freqs->flags = cpufreq_driver->flags;
271 pr_debug("notification %u of frequency transition to %u kHz\n",
274 read_lock_irqsave(&cpufreq_driver_lock, flags);
275 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
276 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
280 case CPUFREQ_PRECHANGE:
281 /* detect if the driver reported a value as "old frequency"
282 * which is not equal to what the cpufreq core thinks is "old frequency".
285 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
286 if ((policy) && (policy->cpu == freqs->cpu) &&
287 (policy->cur) && (policy->cur != freqs->old)) {
288 pr_debug("Warning: CPU frequency is"
289 " %u, cpufreq assumed %u kHz.\n",
290 freqs->old, policy->cur);
291 freqs->old = policy->cur;
294 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
295 CPUFREQ_PRECHANGE, freqs);
296 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
299 case CPUFREQ_POSTCHANGE:
300 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
301 pr_debug("FREQ: %lu - CPU: %lu\n", (unsigned long)freqs->new,
302 (unsigned long)freqs->cpu);
303 trace_cpu_frequency(freqs->new, freqs->cpu);
304 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
305 CPUFREQ_POSTCHANGE, freqs);
306 if (likely(policy) && likely(policy->cpu == freqs->cpu))
307 policy->cur = freqs->new;
311 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
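/*
 * Illustrative sketch (not part of the original file): a scaling driver's
 * ->target() callback usually wraps the hardware reprogramming in a
 * PRECHANGE/POSTCHANGE pair so that notifiers, loops_per_jiffy and
 * policy->cur stay consistent ('policy' and 'target_freq' are the callback's
 * arguments):
 */
#if 0
        struct cpufreq_freqs freqs = {
                .cpu = policy->cpu,
                .old = policy->cur,
                .new = target_freq,
        };

        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        /* ... program the new frequency into the hardware here ... */
        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
#endif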
315 /*********************************************************************
317 *********************************************************************/
319 static struct cpufreq_governor *__find_governor(const char *str_governor)
321 struct cpufreq_governor *t;
323 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
324 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
331 * cpufreq_parse_governor - parse a governor string
333 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
334 struct cpufreq_governor **governor)
341 if (cpufreq_driver->setpolicy) {
342 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
343 *policy = CPUFREQ_POLICY_PERFORMANCE;
345 } else if (!strnicmp(str_governor, "powersave",
347 *policy = CPUFREQ_POLICY_POWERSAVE;
350 } else if (cpufreq_driver->target) {
351 struct cpufreq_governor *t;
353 mutex_lock(&cpufreq_governor_mutex);
355 t = __find_governor(str_governor);
360 mutex_unlock(&cpufreq_governor_mutex);
361 ret = request_module("cpufreq_%s", str_governor);
362 mutex_lock(&cpufreq_governor_mutex);
365 t = __find_governor(str_governor);
373 mutex_unlock(&cpufreq_governor_mutex);
381 * cpufreq_per_cpu_attr_read() / show_##file_name() -
382 * print out cpufreq information
384 * Write out information from cpufreq_driver->policy[cpu]; object must be passed in via the macro.
388 #define show_one(file_name, object) \
389 static ssize_t show_##file_name \
390 (struct cpufreq_policy *policy, char *buf) \
392 return sprintf(buf, "%u\n", policy->object); \
395 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
396 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
397 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
398 show_one(scaling_min_freq, min);
399 show_one(scaling_max_freq, max);
400 show_one(scaling_cur_freq, cur);
402 static int __cpufreq_set_policy(struct cpufreq_policy *data,
403 struct cpufreq_policy *policy);
406 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
408 #define store_one(file_name, object) \
409 static ssize_t store_##file_name \
410 (struct cpufreq_policy *policy, const char *buf, size_t count) \
413 struct cpufreq_policy new_policy; \
415 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
419 ret = sscanf(buf, "%u", &new_policy.object); \
423 ret = __cpufreq_set_policy(policy, &new_policy); \
424 policy->user_policy.object = policy->object; \
426 return ret ? ret : count; \
429 store_one(scaling_min_freq, min);
430 store_one(scaling_max_freq, max);
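/*
 * Illustrative expansion (not part of the original file): show_one(
 * scaling_max_freq, max) above generates, roughly, the following sysfs
 * read handler:
 */
#if 0
static ssize_t show_scaling_max_freq(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", policy->max);
}
#endif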
433 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
435 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
438 unsigned int cur_freq = __cpufreq_get(policy->cpu);
440 return sprintf(buf, "<unknown>");
441 return sprintf(buf, "%u\n", cur_freq);
446 * show_scaling_governor - show the current policy for the specified CPU
448 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
450 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
451 return sprintf(buf, "powersave\n");
452 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
453 return sprintf(buf, "performance\n");
454 else if (policy->governor)
455 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
456 policy->governor->name);
462 * store_scaling_governor - store policy for the specified CPU
464 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
465 const char *buf, size_t count)
468 char str_governor[16];
469 struct cpufreq_policy new_policy;
471 ret = cpufreq_get_policy(&new_policy, policy->cpu);
475 ret = sscanf(buf, "%15s", str_governor);
479 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
480 &new_policy.governor))
483 /* Do not use cpufreq_set_policy here or the user_policy.max
484 will be wrongly overridden */
485 ret = __cpufreq_set_policy(policy, &new_policy);
487 policy->user_policy.policy = policy->policy;
488 policy->user_policy.governor = policy->governor;
497 * show_scaling_driver - show the cpufreq driver currently loaded
499 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
501 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
505 * show_scaling_available_governors - show the available CPUfreq governors
507 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
511 struct cpufreq_governor *t;
513 if (!cpufreq_driver->target) {
514 i += sprintf(buf, "performance powersave");
518 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
519 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
520 - (CPUFREQ_NAME_LEN + 2)))
522 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
525 i += sprintf(&buf[i], "\n");
529 static ssize_t show_cpus(const struct cpumask *mask, char *buf)
534 for_each_cpu(cpu, mask) {
536 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
537 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
538 if (i >= (PAGE_SIZE - 5))
541 i += sprintf(&buf[i], "\n");
546 * show_related_cpus - show the CPUs affected by each transition even if
547 * hw coordination is in use
549 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
551 return show_cpus(policy->related_cpus, buf);
555 * show_affected_cpus - show the CPUs affected by each transition
557 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
559 return show_cpus(policy->cpus, buf);
562 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
563 const char *buf, size_t count)
565 unsigned int freq = 0;
568 if (!policy->governor || !policy->governor->store_setspeed)
571 ret = sscanf(buf, "%u", &freq);
575 policy->governor->store_setspeed(policy, freq);
580 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
582 if (!policy->governor || !policy->governor->show_setspeed)
583 return sprintf(buf, "<unsupported>\n");
585 return policy->governor->show_setspeed(policy, buf);
589 * show_bios_limit - show the current cpufreq HW/BIOS limitation
591 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
595 if (cpufreq_driver->bios_limit) {
596 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
598 return sprintf(buf, "%u\n", limit);
600 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
603 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
604 cpufreq_freq_attr_ro(cpuinfo_min_freq);
605 cpufreq_freq_attr_ro(cpuinfo_max_freq);
606 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
607 cpufreq_freq_attr_ro(scaling_available_governors);
608 cpufreq_freq_attr_ro(scaling_driver);
609 cpufreq_freq_attr_ro(scaling_cur_freq);
610 cpufreq_freq_attr_ro(bios_limit);
611 cpufreq_freq_attr_ro(related_cpus);
612 cpufreq_freq_attr_ro(affected_cpus);
613 cpufreq_freq_attr_rw(scaling_min_freq);
614 cpufreq_freq_attr_rw(scaling_max_freq);
615 cpufreq_freq_attr_rw(scaling_governor);
616 cpufreq_freq_attr_rw(scaling_setspeed);
618 static struct attribute *default_attrs[] = {
619 &cpuinfo_min_freq.attr,
620 &cpuinfo_max_freq.attr,
621 &cpuinfo_transition_latency.attr,
622 &scaling_min_freq.attr,
623 &scaling_max_freq.attr,
626 &scaling_governor.attr,
627 &scaling_driver.attr,
628 &scaling_available_governors.attr,
629 &scaling_setspeed.attr,
633 struct kobject *cpufreq_global_kobject;
634 EXPORT_SYMBOL(cpufreq_global_kobject);
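/*
 * Illustrative sketch (not part of the original file): governors and drivers
 * can attach global (not per-policy) attributes to this kobject, which
 * appears as /sys/devices/system/cpu/cpufreq/.  'example_attr_group' is a
 * made-up name:
 */
#if 0
        ret = sysfs_create_group(cpufreq_global_kobject, &example_attr_group);
#endif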
636 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
637 #define to_attr(a) container_of(a, struct freq_attr, attr)
639 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
641 struct cpufreq_policy *policy = to_policy(kobj);
642 struct freq_attr *fattr = to_attr(attr);
643 ssize_t ret = -EINVAL;
644 policy = cpufreq_cpu_get_sysfs(policy->cpu);
648 if (lock_policy_rwsem_read(policy->cpu) < 0)
652 ret = fattr->show(policy, buf);
656 unlock_policy_rwsem_read(policy->cpu);
658 cpufreq_cpu_put_sysfs(policy);
663 static ssize_t store(struct kobject *kobj, struct attribute *attr,
664 const char *buf, size_t count)
666 struct cpufreq_policy *policy = to_policy(kobj);
667 struct freq_attr *fattr = to_attr(attr);
668 ssize_t ret = -EINVAL;
669 policy = cpufreq_cpu_get_sysfs(policy->cpu);
673 if (lock_policy_rwsem_write(policy->cpu) < 0)
677 ret = fattr->store(policy, buf, count);
681 unlock_policy_rwsem_write(policy->cpu);
683 cpufreq_cpu_put_sysfs(policy);
688 static void cpufreq_sysfs_release(struct kobject *kobj)
690 struct cpufreq_policy *policy = to_policy(kobj);
691 pr_debug("last reference is dropped\n");
692 complete(&policy->kobj_unregister);
695 static const struct sysfs_ops sysfs_ops = {
700 static struct kobj_type ktype_cpufreq = {
701 .sysfs_ops = &sysfs_ops,
702 .default_attrs = default_attrs,
703 .release = cpufreq_sysfs_release,
706 /* symlink affected CPUs */
707 static int cpufreq_add_dev_symlink(unsigned int cpu,
708 struct cpufreq_policy *policy)
713 for_each_cpu(j, policy->cpus) {
714 struct cpufreq_policy *managed_policy;
715 struct device *cpu_dev;
720 pr_debug("CPU %u already managed, adding link\n", j);
721 managed_policy = cpufreq_cpu_get(cpu);
722 cpu_dev = get_cpu_device(j);
723 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
726 cpufreq_cpu_put(managed_policy);
733 static int cpufreq_add_dev_interface(unsigned int cpu,
734 struct cpufreq_policy *policy,
737 struct cpufreq_policy new_policy;
738 struct freq_attr **drv_attr;
743 /* prepare interface data */
744 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
745 &dev->kobj, "cpufreq");
749 /* set up files for this cpu device */
750 drv_attr = cpufreq_driver->attr;
751 while ((drv_attr) && (*drv_attr)) {
752 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
754 goto err_out_kobj_put;
757 if (cpufreq_driver->get) {
758 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
760 goto err_out_kobj_put;
762 if (cpufreq_driver->target) {
763 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
765 goto err_out_kobj_put;
767 if (cpufreq_driver->bios_limit) {
768 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
770 goto err_out_kobj_put;
773 write_lock_irqsave(&cpufreq_driver_lock, flags);
774 for_each_cpu(j, policy->cpus) {
775 per_cpu(cpufreq_cpu_data, j) = policy;
776 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
778 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
780 ret = cpufreq_add_dev_symlink(cpu, policy);
782 goto err_out_kobj_put;
784 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
785 /* ensure that the starting sequence is run in __cpufreq_set_policy */
786 policy->governor = NULL;
788 /* set default policy */
789 ret = __cpufreq_set_policy(policy, &new_policy);
790 policy->user_policy.policy = policy->policy;
791 policy->user_policy.governor = policy->governor;
794 pr_debug("setting policy failed\n");
795 if (cpufreq_driver->exit)
796 cpufreq_driver->exit(policy);
801 kobject_put(&policy->kobj);
802 wait_for_completion(&policy->kobj_unregister);
806 #ifdef CONFIG_HOTPLUG_CPU
807 static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
810 struct cpufreq_policy *policy;
814 policy = cpufreq_cpu_get(sibling);
817 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
819 lock_policy_rwsem_write(sibling);
821 write_lock_irqsave(&cpufreq_driver_lock, flags);
823 cpumask_set_cpu(cpu, policy->cpus);
824 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
825 per_cpu(cpufreq_cpu_data, cpu) = policy;
826 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
828 unlock_policy_rwsem_write(sibling);
830 __cpufreq_governor(policy, CPUFREQ_GOV_START);
831 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
833 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
835 cpufreq_cpu_put(policy);
844 * cpufreq_add_dev - add a CPU device
846 * Adds the cpufreq interface for a CPU device.
848 * The Oracle says: try running cpufreq registration/unregistration concurrently
849 * with cpu hotplugging and all hell will break loose. Tried to clean this
850 * mess up, but more thorough testing is needed. - Mathieu
852 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
854 unsigned int j, cpu = dev->id;
856 struct cpufreq_policy *policy;
858 #ifdef CONFIG_HOTPLUG_CPU
859 struct cpufreq_governor *gov;
863 if (cpu_is_offline(cpu))
866 pr_debug("adding CPU %u\n", cpu);
869 /* check whether a different CPU already registered this
870 * CPU because it is in the same boat. */
871 policy = cpufreq_cpu_get(cpu);
872 if (unlikely(policy)) {
873 cpufreq_cpu_put(policy);
877 #ifdef CONFIG_HOTPLUG_CPU
878 /* Check if this cpu was hot-unplugged earlier and has siblings */
879 read_lock_irqsave(&cpufreq_driver_lock, flags);
880 for_each_online_cpu(sibling) {
881 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
882 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
883 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
884 return cpufreq_add_policy_cpu(cpu, sibling, dev);
887 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
891 if (!try_module_get(cpufreq_driver->owner)) {
896 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
900 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
901 goto err_free_policy;
903 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
904 goto err_free_cpumask;
907 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
908 cpumask_copy(policy->cpus, cpumask_of(cpu));
910 /* Initially set CPU itself as the policy_cpu */
911 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
913 init_completion(&policy->kobj_unregister);
914 INIT_WORK(&policy->update, handle_update);
916 /* call driver. From then on the cpufreq must be able
917 * to accept all calls to ->verify and ->setpolicy for this CPU
919 ret = cpufreq_driver->init(policy);
921 pr_debug("initialization failed\n");
922 goto err_set_policy_cpu;
925 /* related cpus should at least have policy->cpus */
926 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
929 * affected cpus must always be the ones that are online. We aren't
930 * managing offline cpus here.
932 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
934 policy->user_policy.min = policy->min;
935 policy->user_policy.max = policy->max;
937 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
938 CPUFREQ_START, policy);
940 #ifdef CONFIG_HOTPLUG_CPU
941 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
943 policy->governor = gov;
944 pr_debug("Restoring governor %s for cpu %d\n",
945 policy->governor->name, cpu);
949 ret = cpufreq_add_dev_interface(cpu, policy, dev);
951 goto err_out_unregister;
953 kobject_uevent(&policy->kobj, KOBJ_ADD);
954 module_put(cpufreq_driver->owner);
955 pr_debug("initialization complete\n");
960 write_lock_irqsave(&cpufreq_driver_lock, flags);
961 for_each_cpu(j, policy->cpus)
962 per_cpu(cpufreq_cpu_data, j) = NULL;
963 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
965 kobject_put(&policy->kobj);
966 wait_for_completion(&policy->kobj_unregister);
969 per_cpu(cpufreq_policy_cpu, cpu) = -1;
970 free_cpumask_var(policy->related_cpus);
972 free_cpumask_var(policy->cpus);
976 module_put(cpufreq_driver->owner);
981 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
985 policy->last_cpu = policy->cpu;
988 for_each_cpu(j, policy->cpus)
989 per_cpu(cpufreq_policy_cpu, j) = cpu;
991 #ifdef CONFIG_CPU_FREQ_TABLE
992 cpufreq_frequency_table_update_policy_cpu(policy);
994 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
995 CPUFREQ_UPDATE_POLICY_CPU, policy);
999 * __cpufreq_remove_dev - remove a CPU device
1001 * Removes the cpufreq interface for a CPU device.
1002 * Caller should already have policy_rwsem in write mode for this CPU.
1003 * This routine frees the rwsem before returning.
1005 static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1007 unsigned int cpu = dev->id, ret, cpus;
1008 unsigned long flags;
1009 struct cpufreq_policy *data;
1010 struct kobject *kobj;
1011 struct completion *cmp;
1012 struct device *cpu_dev;
1014 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1016 write_lock_irqsave(&cpufreq_driver_lock, flags);
1018 data = per_cpu(cpufreq_cpu_data, cpu);
1019 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1021 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1024 pr_debug("%s: No cpu_data found\n", __func__);
1028 if (cpufreq_driver->target)
1029 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1031 #ifdef CONFIG_HOTPLUG_CPU
1032 if (!cpufreq_driver->setpolicy)
1033 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1034 data->governor->name, CPUFREQ_NAME_LEN);
1037 WARN_ON(lock_policy_rwsem_write(cpu));
1038 cpus = cpumask_weight(data->cpus);
1039 cpumask_clear_cpu(cpu, data->cpus);
1040 unlock_policy_rwsem_write(cpu);
1042 if (cpu != data->cpu) {
1043 sysfs_remove_link(&dev->kobj, "cpufreq");
1044 } else if (cpus > 1) {
1045 /* first sibling now owns the new sysfs dir */
1046 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1047 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1048 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1050 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1052 WARN_ON(lock_policy_rwsem_write(cpu));
1053 cpumask_set_cpu(cpu, data->cpus);
1055 write_lock_irqsave(&cpufreq_driver_lock, flags);
1056 per_cpu(cpufreq_cpu_data, cpu) = data;
1057 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1059 unlock_policy_rwsem_write(cpu);
1061 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1066 WARN_ON(lock_policy_rwsem_write(cpu));
1067 update_policy_cpu(data, cpu_dev->id);
1068 unlock_policy_rwsem_write(cpu);
1069 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1070 __func__, cpu_dev->id, cpu);
1073 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1074 cpufreq_cpu_put(data);
1076 /* If cpu is last user of policy, free policy */
1078 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1080 lock_policy_rwsem_read(cpu);
1082 cmp = &data->kobj_unregister;
1083 unlock_policy_rwsem_read(cpu);
1086 /* we need to make sure that the underlying kobj is actually
1087 * not referenced anymore by anybody before we proceed with unloading. */
1090 pr_debug("waiting for dropping of refcount\n");
1091 wait_for_completion(cmp);
1092 pr_debug("wait complete\n");
1094 if (cpufreq_driver->exit)
1095 cpufreq_driver->exit(data);
1097 free_cpumask_var(data->related_cpus);
1098 free_cpumask_var(data->cpus);
1100 } else if (cpufreq_driver->target) {
1101 __cpufreq_governor(data, CPUFREQ_GOV_START);
1102 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1105 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1110 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1112 unsigned int cpu = dev->id;
1115 if (cpu_is_offline(cpu))
1118 retval = __cpufreq_remove_dev(dev, sif);
1123 static void handle_update(struct work_struct *work)
1125 struct cpufreq_policy *policy =
1126 container_of(work, struct cpufreq_policy, update);
1127 unsigned int cpu = policy->cpu;
1128 pr_debug("handle_update for cpu %u called\n", cpu);
1129 cpufreq_update_policy(cpu);
1133 * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're in deep trouble.
1135 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1136 * @new_freq: CPU frequency the CPU actually runs at
1138 * We adjust to the current frequency first, and need to clean up later.
1139 * So either call cpufreq_update_policy() or schedule handle_update().
1141 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1142 unsigned int new_freq)
1144 struct cpufreq_freqs freqs;
1146 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1147 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1150 freqs.old = old_freq;
1151 freqs.new = new_freq;
1152 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1153 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1158 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1161 * This is the last known freq, without actually getting it from the driver.
1162 * The return value will be the same as what is shown in scaling_cur_freq in sysfs.
1164 unsigned int cpufreq_quick_get(unsigned int cpu)
1166 struct cpufreq_policy *policy;
1167 unsigned int ret_freq = 0;
1169 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1170 return cpufreq_driver->get(cpu);
1172 policy = cpufreq_cpu_get(cpu);
1174 ret_freq = policy->cur;
1175 cpufreq_cpu_put(policy);
1180 EXPORT_SYMBOL(cpufreq_quick_get);
1183 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1186 * Just return the max possible frequency for a given CPU.
1188 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1190 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1191 unsigned int ret_freq = 0;
1194 ret_freq = policy->max;
1195 cpufreq_cpu_put(policy);
1200 EXPORT_SYMBOL(cpufreq_quick_get_max);
1203 static unsigned int __cpufreq_get(unsigned int cpu)
1205 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1206 unsigned int ret_freq = 0;
1208 if (!cpufreq_driver->get)
1211 ret_freq = cpufreq_driver->get(cpu);
1213 if (ret_freq && policy->cur &&
1214 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1215 /* verify no discrepancy between actual and
1216 saved value exists */
1217 if (unlikely(ret_freq != policy->cur)) {
1218 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1219 schedule_work(&policy->update);
1227 * cpufreq_get - get the current CPU frequency (in kHz)
1230 * Get the current frequency of the CPU, as reported by the hardware driver.
1232 unsigned int cpufreq_get(unsigned int cpu)
1234 unsigned int ret_freq = 0;
1235 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1240 if (unlikely(lock_policy_rwsem_read(cpu)))
1243 ret_freq = __cpufreq_get(cpu);
1245 unlock_policy_rwsem_read(cpu);
1248 cpufreq_cpu_put(policy);
1252 EXPORT_SYMBOL(cpufreq_get);
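/*
 * Illustrative sketch (not part of the original file): callers can read a
 * CPU's frequency either cheaply from the cached policy (cpufreq_quick_get)
 * or by asking the driver hardware (cpufreq_get); 'cpu' is a local variable
 * of the caller:
 */
#if 0
        unsigned int cached_khz = cpufreq_quick_get(cpu); /* policy->cur */
        unsigned int hw_khz = cpufreq_get(cpu);           /* driver ->get() */
#endif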
1254 static struct subsys_interface cpufreq_interface = {
1256 .subsys = &cpu_subsys,
1257 .add_dev = cpufreq_add_dev,
1258 .remove_dev = cpufreq_remove_dev,
1263 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1265 * This function is only executed for the boot processor. The other CPUs
1266 * have been put offline by means of CPU hotplug.
1268 static int cpufreq_bp_suspend(void)
1272 int cpu = smp_processor_id();
1273 struct cpufreq_policy *cpu_policy;
1275 pr_debug("suspending cpu %u\n", cpu);
1277 /* If there's no policy for the boot CPU, we have nothing to do. */
1278 cpu_policy = cpufreq_cpu_get(cpu);
1282 if (cpufreq_driver->suspend) {
1283 ret = cpufreq_driver->suspend(cpu_policy);
1285 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1286 "step on CPU %u\n", cpu_policy->cpu);
1289 cpufreq_cpu_put(cpu_policy);
1294 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1296 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1297 * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
1298 * restored. It will verify that the current freq is in sync with
1299 * what we believe it to be. This is a bit later than when it
1300 * should be, but nonetheless it's better than calling
1301 * cpufreq_driver->get() here which might re-enable interrupts...
1303 * This function is only executed for the boot CPU. The other CPUs have not
1304 * been turned on yet.
1306 static void cpufreq_bp_resume(void)
1310 int cpu = smp_processor_id();
1311 struct cpufreq_policy *cpu_policy;
1313 pr_debug("resuming cpu %u\n", cpu);
1315 /* If there's no policy for the boot CPU, we have nothing to do. */
1316 cpu_policy = cpufreq_cpu_get(cpu);
1320 if (cpufreq_driver->resume) {
1321 ret = cpufreq_driver->resume(cpu_policy);
1323 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1324 "step on CPU %u\n", cpu_policy->cpu);
1329 schedule_work(&cpu_policy->update);
1332 cpufreq_cpu_put(cpu_policy);
1335 static struct syscore_ops cpufreq_syscore_ops = {
1336 .suspend = cpufreq_bp_suspend,
1337 .resume = cpufreq_bp_resume,
1341 * cpufreq_get_current_driver - return current driver's name
1343 * Return the name string of the currently loaded cpufreq driver
1346 const char *cpufreq_get_current_driver(void)
1349 return cpufreq_driver->name;
1353 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1355 /*********************************************************************
1356 * NOTIFIER LISTS INTERFACE *
1357 *********************************************************************/
1360 * cpufreq_register_notifier - register a driver with cpufreq
1361 * @nb: notifier function to register
1362 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1364 * Add a driver to one of two lists: either a list of drivers that
1365 * are notified about clock rate changes (once before and once after
1366 * the transition), or a list of drivers that are notified about
1367 * changes in cpufreq policy.
1369 * This function may sleep, and has the same return conditions as
1370 * blocking_notifier_chain_register.
1372 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1376 if (cpufreq_disabled())
1379 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1382 case CPUFREQ_TRANSITION_NOTIFIER:
1383 ret = srcu_notifier_chain_register(
1384 &cpufreq_transition_notifier_list, nb);
1386 case CPUFREQ_POLICY_NOTIFIER:
1387 ret = blocking_notifier_chain_register(
1388 &cpufreq_policy_notifier_list, nb);
1396 EXPORT_SYMBOL(cpufreq_register_notifier);
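/*
 * Illustrative sketch (not part of the original file): a transition notifier
 * is called once with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE per
 * frequency change and receives the struct cpufreq_freqs describing it.  The
 * example_* names are made up:
 */
#if 0
static int example_transition_cb(struct notifier_block *nb,
                                 unsigned long event, void *data)
{
        struct cpufreq_freqs *freqs = data;

        if (event == CPUFREQ_POSTCHANGE)
                pr_info("cpu%u: %u -> %u kHz\n",
                        freqs->cpu, freqs->old, freqs->new);
        return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
        .notifier_call = example_transition_cb,
};

/* cpufreq_register_notifier(&example_transition_nb, CPUFREQ_TRANSITION_NOTIFIER); */
#endif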
1400 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1401 * @nb: notifier block to be unregistered
1402 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1404 * Remove a driver from the CPU frequency notifier list.
1406 * This function may sleep, and has the same return conditions as
1407 * blocking_notifier_chain_unregister.
1409 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1413 if (cpufreq_disabled())
1417 case CPUFREQ_TRANSITION_NOTIFIER:
1418 ret = srcu_notifier_chain_unregister(
1419 &cpufreq_transition_notifier_list, nb);
1421 case CPUFREQ_POLICY_NOTIFIER:
1422 ret = blocking_notifier_chain_unregister(
1423 &cpufreq_policy_notifier_list, nb);
1431 EXPORT_SYMBOL(cpufreq_unregister_notifier);
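/*
 * Illustrative sketch (not part of the original file): a policy notifier can
 * clamp every proposed policy during the CPUFREQ_ADJUST phase, e.g. a thermal
 * driver capping the maximum frequency (the 1200000 kHz cap and the example_*
 * names are made up):
 */
#if 0
static int example_policy_cb(struct notifier_block *nb,
                             unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;

        if (event == CPUFREQ_ADJUST)
                cpufreq_verify_within_limits(policy, 0, 1200000);
        return NOTIFY_OK;
}

static struct notifier_block example_policy_nb = {
        .notifier_call = example_policy_cb,
};

/* cpufreq_register_notifier(&example_policy_nb, CPUFREQ_POLICY_NOTIFIER); */
#endif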
1434 /*********************************************************************
1436 *********************************************************************/
1439 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1440 unsigned int target_freq,
1441 unsigned int relation)
1443 int retval = -EINVAL;
1444 unsigned int old_target_freq = target_freq;
1446 if (cpufreq_disabled())
1449 /* Make sure that target_freq is within supported range */
1450 if (target_freq > policy->max)
1451 target_freq = policy->max;
1452 if (target_freq < policy->min)
1453 target_freq = policy->min;
1455 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1456 policy->cpu, target_freq, relation, old_target_freq);
1458 if (target_freq == policy->cur)
1461 if (cpufreq_driver->target)
1462 retval = cpufreq_driver->target(policy, target_freq, relation);
1466 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1468 int cpufreq_driver_target(struct cpufreq_policy *policy,
1469 unsigned int target_freq,
1470 unsigned int relation)
1474 policy = cpufreq_cpu_get(policy->cpu);
1478 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1481 ret = __cpufreq_driver_target(policy, target_freq, relation);
1483 unlock_policy_rwsem_write(policy->cpu);
1486 cpufreq_cpu_put(policy);
1490 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
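/*
 * Illustrative sketch (not part of the original file): a governor requests a
 * frequency change through the locked wrapper; CPUFREQ_RELATION_L selects the
 * lowest driver frequency at or above the target ('desired_khz' is a made-up
 * variable):
 */
#if 0
        ret = cpufreq_driver_target(policy, desired_khz, CPUFREQ_RELATION_L);
#endif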
1492 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1496 if (cpufreq_disabled())
1499 if (!cpufreq_driver->getavg)
1502 policy = cpufreq_cpu_get(policy->cpu);
1506 ret = cpufreq_driver->getavg(policy, cpu);
1508 cpufreq_cpu_put(policy);
1511 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1514 * when "event" is CPUFREQ_GOV_LIMITS
1517 static int __cpufreq_governor(struct cpufreq_policy *policy,
1522 /* This must only be defined when the default governor is known to have latency
1523 restrictions, e.g. conservative or ondemand.
1524 That this is the case is already ensured in Kconfig
1526 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1527 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1529 struct cpufreq_governor *gov = NULL;
1532 if (policy->governor->max_transition_latency &&
1533 policy->cpuinfo.transition_latency >
1534 policy->governor->max_transition_latency) {
1538 printk(KERN_WARNING "%s governor failed, too long"
1539 " transition latency of HW, fallback"
1540 " to %s governor\n",
1541 policy->governor->name,
1543 policy->governor = gov;
1547 if (!try_module_get(policy->governor->owner))
1550 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1551 policy->cpu, event);
1552 ret = policy->governor->governor(policy, event);
1555 if (event == CPUFREQ_GOV_POLICY_INIT)
1556 policy->governor->initialized++;
1557 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1558 policy->governor->initialized--;
1561 /* we keep one module reference alive for
1562 each CPU governed by this governor */
1563 if ((event != CPUFREQ_GOV_START) || ret)
1564 module_put(policy->governor->owner);
1565 if ((event == CPUFREQ_GOV_STOP) && !ret)
1566 module_put(policy->governor->owner);
1572 int cpufreq_register_governor(struct cpufreq_governor *governor)
1579 if (cpufreq_disabled())
1582 mutex_lock(&cpufreq_governor_mutex);
1584 governor->initialized = 0;
1586 if (__find_governor(governor->name) == NULL) {
1588 list_add(&governor->governor_list, &cpufreq_governor_list);
1591 mutex_unlock(&cpufreq_governor_mutex);
1594 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
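/*
 * Illustrative sketch (not part of the original file): a minimal governor,
 * similar in spirit to the built-in "performance" governor, only needs a
 * ->governor() callback and a registration call.  The example_* names are
 * made up:
 */
#if 0
static int example_governor_fn(struct cpufreq_policy *policy,
                               unsigned int event)
{
        switch (event) {
        case CPUFREQ_GOV_START:
        case CPUFREQ_GOV_LIMITS:
                /* always run at the highest frequency the policy allows */
                return __cpufreq_driver_target(policy, policy->max,
                                               CPUFREQ_RELATION_H);
        default:
                return 0;
        }
}

static struct cpufreq_governor example_governor = {
        .name     = "example",
        .governor = example_governor_fn,
        .owner    = THIS_MODULE,
};

/* cpufreq_register_governor(&example_governor); */
#endif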
1597 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1599 #ifdef CONFIG_HOTPLUG_CPU
1606 if (cpufreq_disabled())
1609 #ifdef CONFIG_HOTPLUG_CPU
1610 for_each_present_cpu(cpu) {
1611 if (cpu_online(cpu))
1613 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1614 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1618 mutex_lock(&cpufreq_governor_mutex);
1619 list_del(&governor->governor_list);
1620 mutex_unlock(&cpufreq_governor_mutex);
1623 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1627 /*********************************************************************
1628 * POLICY INTERFACE *
1629 *********************************************************************/
1632 * cpufreq_get_policy - get the current cpufreq_policy
1633 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1636 * Reads the current cpufreq policy.
1638 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1640 struct cpufreq_policy *cpu_policy;
1644 cpu_policy = cpufreq_cpu_get(cpu);
1648 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1650 cpufreq_cpu_put(cpu_policy);
1653 EXPORT_SYMBOL(cpufreq_get_policy);
1657 * data : current policy.
1658 * policy : policy to be set.
1660 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1661 struct cpufreq_policy *policy)
1663 int ret = 0, failed = 1;
1665 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1666 policy->min, policy->max);
1668 memcpy(&policy->cpuinfo, &data->cpuinfo,
1669 sizeof(struct cpufreq_cpuinfo));
1671 if (policy->min > data->max || policy->max < data->min) {
1676 /* verify the cpu speed can be set within this limit */
1677 ret = cpufreq_driver->verify(policy);
1681 /* adjust if necessary - all reasons */
1682 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1683 CPUFREQ_ADJUST, policy);
1685 /* adjust if necessary - hardware incompatibility*/
1686 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1687 CPUFREQ_INCOMPATIBLE, policy);
1689 /* verify the cpu speed can be set within this limit,
1690 which might be different from the first one */
1691 ret = cpufreq_driver->verify(policy);
1695 /* notification of the new policy */
1696 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1697 CPUFREQ_NOTIFY, policy);
1699 data->min = policy->min;
1700 data->max = policy->max;
1702 pr_debug("new min and max freqs are %u - %u kHz\n",
1703 data->min, data->max);
1705 if (cpufreq_driver->setpolicy) {
1706 data->policy = policy->policy;
1707 pr_debug("setting range\n");
1708 ret = cpufreq_driver->setpolicy(policy);
1710 if (policy->governor != data->governor) {
1711 /* save old, working values */
1712 struct cpufreq_governor *old_gov = data->governor;
1714 pr_debug("governor switch\n");
1716 /* end old governor */
1717 if (data->governor) {
1718 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1719 __cpufreq_governor(data,
1720 CPUFREQ_GOV_POLICY_EXIT);
1723 /* start new governor */
1724 data->governor = policy->governor;
1725 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1726 if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
1729 __cpufreq_governor(data,
1730 CPUFREQ_GOV_POLICY_EXIT);
1734 /* new governor failed, so re-start old one */
1735 pr_debug("starting governor %s failed\n",
1736 data->governor->name);
1738 data->governor = old_gov;
1739 __cpufreq_governor(data,
1740 CPUFREQ_GOV_POLICY_INIT);
1741 __cpufreq_governor(data,
1747 /* might be a policy change, too, so fall through */
1749 pr_debug("governor: change or update limits\n");
1750 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1758 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1759 * @cpu: CPU which shall be re-evaluated
1761 * Useful for policy notifiers which have different requirements
1762 * at different times.
1764 int cpufreq_update_policy(unsigned int cpu)
1766 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1767 struct cpufreq_policy policy;
1775 if (unlikely(lock_policy_rwsem_write(cpu))) {
1780 pr_debug("updating policy for CPU %u\n", cpu);
1781 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1782 policy.min = data->user_policy.min;
1783 policy.max = data->user_policy.max;
1784 policy.policy = data->user_policy.policy;
1785 policy.governor = data->user_policy.governor;
1787 /* BIOS might change freq behind our back
1788 -> ask driver for current freq and notify governors about a change */
1789 if (cpufreq_driver->get) {
1790 policy.cur = cpufreq_driver->get(cpu);
1792 pr_debug("Driver did not initialize current freq");
1793 data->cur = policy.cur;
1795 if (data->cur != policy.cur && cpufreq_driver->target)
1796 cpufreq_out_of_sync(cpu, data->cur,
1801 ret = __cpufreq_set_policy(data, &policy);
1803 unlock_policy_rwsem_write(cpu);
1806 cpufreq_cpu_put(data);
1810 EXPORT_SYMBOL(cpufreq_update_policy);
1812 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1813 unsigned long action, void *hcpu)
1815 unsigned int cpu = (unsigned long)hcpu;
1818 dev = get_cpu_device(cpu);
1822 case CPU_ONLINE_FROZEN:
1823 cpufreq_add_dev(dev, NULL);
1825 case CPU_DOWN_PREPARE:
1826 case CPU_DOWN_PREPARE_FROZEN:
1827 __cpufreq_remove_dev(dev, NULL);
1829 case CPU_DOWN_FAILED:
1830 case CPU_DOWN_FAILED_FROZEN:
1831 cpufreq_add_dev(dev, NULL);
1838 static struct notifier_block __refdata cpufreq_cpu_notifier = {
1839 .notifier_call = cpufreq_cpu_callback,
1842 /*********************************************************************
1843 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1844 *********************************************************************/
1847 * cpufreq_register_driver - register a CPU Frequency driver
1848 * @driver_data: A struct cpufreq_driver containing the values
1849 * submitted by the CPU Frequency driver.
1851 * Registers a CPU Frequency driver to this core code. This code
1852 * returns zero on success, -EBUSY when another driver got here first
1853 * (and isn't unregistered in the meantime).
1856 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1858 unsigned long flags;
1861 if (cpufreq_disabled())
1864 if (!driver_data || !driver_data->verify || !driver_data->init ||
1865 ((!driver_data->setpolicy) && (!driver_data->target)))
1868 pr_debug("trying to register driver %s\n", driver_data->name);
1870 if (driver_data->setpolicy)
1871 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1873 write_lock_irqsave(&cpufreq_driver_lock, flags);
1874 if (cpufreq_driver) {
1875 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1878 cpufreq_driver = driver_data;
1879 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1881 ret = subsys_interface_register(&cpufreq_interface);
1883 goto err_null_driver;
1885 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1889 /* check for at least one working CPU */
1890 for (i = 0; i < nr_cpu_ids; i++)
1891 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1896 /* if all ->init() calls failed, unregister */
1898 pr_debug("no CPU initialized for driver %s\n",
1904 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1905 pr_debug("driver %s up and running\n", driver_data->name);
1909 subsys_interface_unregister(&cpufreq_interface);
1911 write_lock_irqsave(&cpufreq_driver_lock, flags);
1912 cpufreq_driver = NULL;
1913 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1916 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
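/*
 * Illustrative sketch (not part of the original file): a scaling driver fills
 * in a struct cpufreq_driver and registers it; ->init/->verify/->target/->get
 * below are hypothetical callbacks the driver author would implement:
 */
#if 0
static struct cpufreq_driver example_cpufreq_driver = {
        .name   = "example",
        .owner  = THIS_MODULE,
        .init   = example_cpu_init,   /* set policy->min/max and cpuinfo */
        .verify = example_verify,     /* clamp a policy to supported range */
        .target = example_target,     /* program the requested frequency */
        .get    = example_get,        /* report the current frequency in kHz */
};

/* cpufreq_register_driver(&example_cpufreq_driver); */
#endif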
1920 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1922 * Unregister the current CPUFreq driver. Only call this if you have
1923 * the right to do so, i.e. if you have succeeded in initialising before!
1924 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1925 * currently not initialised.
1927 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1929 unsigned long flags;
1931 if (!cpufreq_driver || (driver != cpufreq_driver))
1934 pr_debug("unregistering driver %s\n", driver->name);
1936 subsys_interface_unregister(&cpufreq_interface);
1937 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1939 write_lock_irqsave(&cpufreq_driver_lock, flags);
1940 cpufreq_driver = NULL;
1941 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1945 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
1947 static int __init cpufreq_core_init(void)
1951 if (cpufreq_disabled())
1954 for_each_possible_cpu(cpu) {
1955 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1956 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1959 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
1960 BUG_ON(!cpufreq_global_kobject);
1961 register_syscore_ops(&cpufreq_syscore_ops);
1965 core_initcall(cpufreq_core_init);