]> rtime.felk.cvut.cz Git - linux-imx.git/blob - drivers/cpufreq/cpufreq.c
cpufreq: make __cpufreq_notify_transition() static
[linux-imx.git] / drivers / cpufreq / cpufreq.c
1 /*
2  *  linux/drivers/cpufreq/cpufreq.c
3  *
4  *  Copyright (C) 2001 Russell King
5  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6  *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
7  *
8  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9  *      Added handling for CPU hotplug
10  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11  *      Fix handling for CPU hotplug -- affected CPUs
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License version 2 as
15  * published by the Free Software Foundation.
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <asm/cputime.h>
21 #include <linux/kernel.h>
22 #include <linux/kernel_stat.h>
23 #include <linux/module.h>
24 #include <linux/init.h>
25 #include <linux/notifier.h>
26 #include <linux/cpufreq.h>
27 #include <linux/delay.h>
28 #include <linux/interrupt.h>
29 #include <linux/spinlock.h>
30 #include <linux/tick.h>
31 #include <linux/device.h>
32 #include <linux/slab.h>
33 #include <linux/cpu.h>
34 #include <linux/completion.h>
35 #include <linux/mutex.h>
36 #include <linux/syscore_ops.h>
37
38 #include <trace/events/power.h>
39
40 /**
41  * The "cpufreq driver" - the arch- or hardware-dependent low
42  * level driver of CPUFreq support, and its spinlock. This lock
43  * also protects the cpufreq_cpu_data array.
44  */
45 static struct cpufreq_driver *cpufreq_driver;
46 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
47 static DEFINE_RWLOCK(cpufreq_driver_lock);
48 static DEFINE_MUTEX(cpufreq_governor_lock);
49
50 #ifdef CONFIG_HOTPLUG_CPU
51 /* This one keeps track of the previously set governor of a removed CPU */
52 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
53 #endif
54
55 /*
56  * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
57  * all cpufreq/hotplug/workqueue/etc related lock issues.
58  *
59  * The rules for this semaphore:
60  * - Any routine that wants to read from the policy structure will
61  *   do a down_read on this semaphore.
62  * - Any routine that will write to the policy structure and/or may take away
63  *   the policy altogether (eg. CPU hotplug), will hold this lock in write
64  *   mode before doing so.
65  *
66  * Additional rules:
67  * - Governor routines that can be called in cpufreq hotplug path should not
68  *   take this sem as top level hotplug notifier handler takes this.
69  * - Lock should not be held across
70  *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
71  */
72 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
73 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
74
75 #define lock_policy_rwsem(mode, cpu)                                    \
76 static int lock_policy_rwsem_##mode(int cpu)                            \
77 {                                                                       \
78         int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
79         BUG_ON(policy_cpu == -1);                                       \
80         down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));            \
81                                                                         \
82         return 0;                                                       \
83 }
84
85 lock_policy_rwsem(read, cpu);
86 lock_policy_rwsem(write, cpu);
87
88 #define unlock_policy_rwsem(mode, cpu)                                  \
89 static void unlock_policy_rwsem_##mode(int cpu)                         \
90 {                                                                       \
91         int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
92         BUG_ON(policy_cpu == -1);                                       \
93         up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));              \
94 }
95
96 unlock_policy_rwsem(read, cpu);
97 unlock_policy_rwsem(write, cpu);
98
99 /* internal prototypes */
100 static int __cpufreq_governor(struct cpufreq_policy *policy,
101                 unsigned int event);
102 static unsigned int __cpufreq_get(unsigned int cpu);
103 static void handle_update(struct work_struct *work);
104
105 /**
106  * Two notifier lists: the "policy" list is involved in the
107  * validation process for a new CPU frequency policy; the
108  * "transition" list for kernel code that needs to handle
109  * changes to devices when the CPU clock speed changes.
110  * The mutex locks both lists.
111  */
112 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
113 static struct srcu_notifier_head cpufreq_transition_notifier_list;
114
115 static bool init_cpufreq_transition_notifier_list_called;
116 static int __init init_cpufreq_transition_notifier_list(void)
117 {
118         srcu_init_notifier_head(&cpufreq_transition_notifier_list);
119         init_cpufreq_transition_notifier_list_called = true;
120         return 0;
121 }
122 pure_initcall(init_cpufreq_transition_notifier_list);
123
124 static int off __read_mostly;
125 static int cpufreq_disabled(void)
126 {
127         return off;
128 }
129 void disable_cpufreq(void)
130 {
131         off = 1;
132 }
133 static LIST_HEAD(cpufreq_governor_list);
134 static DEFINE_MUTEX(cpufreq_governor_mutex);
135
136 bool have_governor_per_policy(void)
137 {
138         return cpufreq_driver->have_governor_per_policy;
139 }
140 EXPORT_SYMBOL_GPL(have_governor_per_policy);
141
142 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
143 {
144         if (have_governor_per_policy())
145                 return &policy->kobj;
146         else
147                 return cpufreq_global_kobject;
148 }
149 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
150
151 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
152 {
153         u64 idle_time;
154         u64 cur_wall_time;
155         u64 busy_time;
156
157         cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
158
159         busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
160         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
161         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
162         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
163         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
164         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
165
166         idle_time = cur_wall_time - busy_time;
167         if (wall)
168                 *wall = cputime_to_usecs(cur_wall_time);
169
170         return cputime_to_usecs(idle_time);
171 }
172
173 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
174 {
175         u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
176
177         if (idle_time == -1ULL)
178                 return get_cpu_idle_time_jiffy(cpu, wall);
179         else if (!io_busy)
180                 idle_time += get_cpu_iowait_time_us(cpu, wall);
181
182         return idle_time;
183 }
184 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
185
186 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
187 {
188         struct cpufreq_policy *data;
189         unsigned long flags;
190
191         if (cpu >= nr_cpu_ids)
192                 goto err_out;
193
194         /* get the cpufreq driver */
195         read_lock_irqsave(&cpufreq_driver_lock, flags);
196
197         if (!cpufreq_driver)
198                 goto err_out_unlock;
199
200         if (!try_module_get(cpufreq_driver->owner))
201                 goto err_out_unlock;
202
203         /* get the CPU */
204         data = per_cpu(cpufreq_cpu_data, cpu);
205
206         if (!data)
207                 goto err_out_put_module;
208
209         if (!sysfs && !kobject_get(&data->kobj))
210                 goto err_out_put_module;
211
212         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
213         return data;
214
215 err_out_put_module:
216         module_put(cpufreq_driver->owner);
217 err_out_unlock:
218         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
219 err_out:
220         return NULL;
221 }
222
223 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
224 {
225         if (cpufreq_disabled())
226                 return NULL;
227
228         return __cpufreq_cpu_get(cpu, false);
229 }
230 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
231
232 static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
233 {
234         return __cpufreq_cpu_get(cpu, true);
235 }
236
237 static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
238 {
239         if (!sysfs)
240                 kobject_put(&data->kobj);
241         module_put(cpufreq_driver->owner);
242 }
243
244 void cpufreq_cpu_put(struct cpufreq_policy *data)
245 {
246         if (cpufreq_disabled())
247                 return;
248
249         __cpufreq_cpu_put(data, false);
250 }
251 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
252
253 static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
254 {
255         __cpufreq_cpu_put(data, true);
256 }
257
258 /*********************************************************************
259  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
260  *********************************************************************/
261
262 /**
263  * adjust_jiffies - adjust the system "loops_per_jiffy"
264  *
265  * This function alters the system "loops_per_jiffy" for the clock
266  * speed change. Note that loops_per_jiffy cannot be updated on SMP
267  * systems as each CPU might be scaled differently. So, use the arch
268  * per-CPU loops_per_jiffy value wherever possible.
269  */
270 #ifndef CONFIG_SMP
271 static unsigned long l_p_j_ref;
272 static unsigned int l_p_j_ref_freq;
273
274 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
275 {
276         if (ci->flags & CPUFREQ_CONST_LOOPS)
277                 return;
278
279         if (!l_p_j_ref_freq) {
280                 l_p_j_ref = loops_per_jiffy;
281                 l_p_j_ref_freq = ci->old;
282                 pr_debug("saving %lu as reference value for loops_per_jiffy; "
283                         "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
284         }
285         if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
286             (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
287                 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
288                                                                 ci->new);
289                 pr_debug("scaling loops_per_jiffy to %lu "
290                         "for frequency %u kHz\n", loops_per_jiffy, ci->new);
291         }
292 }
293 #else
294 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
295 {
296         return;
297 }
298 #endif
299
300 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
301                 struct cpufreq_freqs *freqs, unsigned int state)
302 {
303         BUG_ON(irqs_disabled());
304
305         if (cpufreq_disabled())
306                 return;
307
308         freqs->flags = cpufreq_driver->flags;
309         pr_debug("notification %u of frequency transition to %u kHz\n",
310                 state, freqs->new);
311
312         switch (state) {
313
314         case CPUFREQ_PRECHANGE:
315                 /* detect if the driver reported a value as "old frequency"
316                  * which is not equal to what the cpufreq core thinks is
317                  * "old frequency".
318                  */
319                 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
320                         if ((policy) && (policy->cpu == freqs->cpu) &&
321                             (policy->cur) && (policy->cur != freqs->old)) {
322                                 pr_debug("Warning: CPU frequency is"
323                                         " %u, cpufreq assumed %u kHz.\n",
324                                         freqs->old, policy->cur);
325                                 freqs->old = policy->cur;
326                         }
327                 }
328                 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
329                                 CPUFREQ_PRECHANGE, freqs);
330                 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
331                 break;
332
333         case CPUFREQ_POSTCHANGE:
334                 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
335                 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
336                         (unsigned long)freqs->cpu);
337                 trace_cpu_frequency(freqs->new, freqs->cpu);
338                 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
339                                 CPUFREQ_POSTCHANGE, freqs);
340                 if (likely(policy) && likely(policy->cpu == freqs->cpu))
341                         policy->cur = freqs->new;
342                 break;
343         }
344 }
345
346 /**
347  * cpufreq_notify_transition - call notifier chain and adjust_jiffies
348  * on frequency transition.
349  *
350  * This function calls the transition notifiers and the "adjust_jiffies"
351  * function. It is called twice on all CPU frequency changes that have
352  * external effects.
353  */
354 void cpufreq_notify_transition(struct cpufreq_policy *policy,
355                 struct cpufreq_freqs *freqs, unsigned int state)
356 {
357         for_each_cpu(freqs->cpu, policy->cpus)
358                 __cpufreq_notify_transition(policy, freqs, state);
359 }
360 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
361
362
363 /*********************************************************************
364  *                          SYSFS INTERFACE                          *
365  *********************************************************************/
366
367 static struct cpufreq_governor *__find_governor(const char *str_governor)
368 {
369         struct cpufreq_governor *t;
370
371         list_for_each_entry(t, &cpufreq_governor_list, governor_list)
372                 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
373                         return t;
374
375         return NULL;
376 }
377
378 /**
379  * cpufreq_parse_governor - parse a governor string
380  */
381 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
382                                 struct cpufreq_governor **governor)
383 {
384         int err = -EINVAL;
385
386         if (!cpufreq_driver)
387                 goto out;
388
389         if (cpufreq_driver->setpolicy) {
390                 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
391                         *policy = CPUFREQ_POLICY_PERFORMANCE;
392                         err = 0;
393                 } else if (!strnicmp(str_governor, "powersave",
394                                                 CPUFREQ_NAME_LEN)) {
395                         *policy = CPUFREQ_POLICY_POWERSAVE;
396                         err = 0;
397                 }
398         } else if (cpufreq_driver->target) {
399                 struct cpufreq_governor *t;
400
401                 mutex_lock(&cpufreq_governor_mutex);
402
403                 t = __find_governor(str_governor);
404
405                 if (t == NULL) {
406                         int ret;
407
408                         mutex_unlock(&cpufreq_governor_mutex);
409                         ret = request_module("cpufreq_%s", str_governor);
410                         mutex_lock(&cpufreq_governor_mutex);
411
412                         if (ret == 0)
413                                 t = __find_governor(str_governor);
414                 }
415
416                 if (t != NULL) {
417                         *governor = t;
418                         err = 0;
419                 }
420
421                 mutex_unlock(&cpufreq_governor_mutex);
422         }
423 out:
424         return err;
425 }
426
427 /**
428  * cpufreq_per_cpu_attr_read() / show_##file_name() -
429  * print out cpufreq information
430  *
431  * Write out information from cpufreq_driver->policy[cpu]; object must be
432  * "unsigned int".
433  */
434
435 #define show_one(file_name, object)                     \
436 static ssize_t show_##file_name                         \
437 (struct cpufreq_policy *policy, char *buf)              \
438 {                                                       \
439         return sprintf(buf, "%u\n", policy->object);    \
440 }
441
442 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
443 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
444 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
445 show_one(scaling_min_freq, min);
446 show_one(scaling_max_freq, max);
447 show_one(scaling_cur_freq, cur);
448
449 static int __cpufreq_set_policy(struct cpufreq_policy *data,
450                                 struct cpufreq_policy *policy);
451
452 /**
453  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
454  */
455 #define store_one(file_name, object)                    \
456 static ssize_t store_##file_name                                        \
457 (struct cpufreq_policy *policy, const char *buf, size_t count)          \
458 {                                                                       \
459         unsigned int ret;                                               \
460         struct cpufreq_policy new_policy;                               \
461                                                                         \
462         ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
463         if (ret)                                                        \
464                 return -EINVAL;                                         \
465                                                                         \
466         ret = sscanf(buf, "%u", &new_policy.object);                    \
467         if (ret != 1)                                                   \
468                 return -EINVAL;                                         \
469                                                                         \
470         ret = __cpufreq_set_policy(policy, &new_policy);                \
471         policy->user_policy.object = policy->object;                    \
472                                                                         \
473         return ret ? ret : count;                                       \
474 }
475
476 store_one(scaling_min_freq, min);
477 store_one(scaling_max_freq, max);
478
479 /**
480  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
481  */
482 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
483                                         char *buf)
484 {
485         unsigned int cur_freq = __cpufreq_get(policy->cpu);
486         if (!cur_freq)
487                 return sprintf(buf, "<unknown>");
488         return sprintf(buf, "%u\n", cur_freq);
489 }
490
491 /**
492  * show_scaling_governor - show the current policy for the specified CPU
493  */
494 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
495 {
496         if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
497                 return sprintf(buf, "powersave\n");
498         else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
499                 return sprintf(buf, "performance\n");
500         else if (policy->governor)
501                 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
502                                 policy->governor->name);
503         return -EINVAL;
504 }
505
506 /**
507  * store_scaling_governor - store policy for the specified CPU
508  */
509 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
510                                         const char *buf, size_t count)
511 {
512         unsigned int ret;
513         char    str_governor[16];
514         struct cpufreq_policy new_policy;
515
516         ret = cpufreq_get_policy(&new_policy, policy->cpu);
517         if (ret)
518                 return ret;
519
520         ret = sscanf(buf, "%15s", str_governor);
521         if (ret != 1)
522                 return -EINVAL;
523
524         if (cpufreq_parse_governor(str_governor, &new_policy.policy,
525                                                 &new_policy.governor))
526                 return -EINVAL;
527
528         /*
529          * Do not use cpufreq_set_policy here or the user_policy.max
530          * will be wrongly overridden
531          */
532         ret = __cpufreq_set_policy(policy, &new_policy);
533
534         policy->user_policy.policy = policy->policy;
535         policy->user_policy.governor = policy->governor;
536
537         if (ret)
538                 return ret;
539         else
540                 return count;
541 }
542
543 /**
544  * show_scaling_driver - show the cpufreq driver currently loaded
545  */
546 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
547 {
548         return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
549 }
550
551 /**
552  * show_scaling_available_governors - show the available CPUfreq governors
553  */
554 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
555                                                 char *buf)
556 {
557         ssize_t i = 0;
558         struct cpufreq_governor *t;
559
560         if (!cpufreq_driver->target) {
561                 i += sprintf(buf, "performance powersave");
562                 goto out;
563         }
564
565         list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
566                 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
567                     - (CPUFREQ_NAME_LEN + 2)))
568                         goto out;
569                 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
570         }
571 out:
572         i += sprintf(&buf[i], "\n");
573         return i;
574 }
575
576 static ssize_t show_cpus(const struct cpumask *mask, char *buf)
577 {
578         ssize_t i = 0;
579         unsigned int cpu;
580
581         for_each_cpu(cpu, mask) {
582                 if (i)
583                         i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
584                 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
585                 if (i >= (PAGE_SIZE - 5))
586                         break;
587         }
588         i += sprintf(&buf[i], "\n");
589         return i;
590 }
591
592 /**
593  * show_related_cpus - show the CPUs affected by each transition even if
594  * hw coordination is in use
595  */
596 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
597 {
598         return show_cpus(policy->related_cpus, buf);
599 }
600
601 /**
602  * show_affected_cpus - show the CPUs affected by each transition
603  */
604 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
605 {
606         return show_cpus(policy->cpus, buf);
607 }
608
609 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
610                                         const char *buf, size_t count)
611 {
612         unsigned int freq = 0;
613         unsigned int ret;
614
615         if (!policy->governor || !policy->governor->store_setspeed)
616                 return -EINVAL;
617
618         ret = sscanf(buf, "%u", &freq);
619         if (ret != 1)
620                 return -EINVAL;
621
622         policy->governor->store_setspeed(policy, freq);
623
624         return count;
625 }
626
627 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
628 {
629         if (!policy->governor || !policy->governor->show_setspeed)
630                 return sprintf(buf, "<unsupported>\n");
631
632         return policy->governor->show_setspeed(policy, buf);
633 }
634
635 /**
636  * show_bios_limit - show the current cpufreq HW/BIOS limitation
637  */
638 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
639 {
640         unsigned int limit;
641         int ret;
642         if (cpufreq_driver->bios_limit) {
643                 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
644                 if (!ret)
645                         return sprintf(buf, "%u\n", limit);
646         }
647         return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
648 }
649
650 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
651 cpufreq_freq_attr_ro(cpuinfo_min_freq);
652 cpufreq_freq_attr_ro(cpuinfo_max_freq);
653 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
654 cpufreq_freq_attr_ro(scaling_available_governors);
655 cpufreq_freq_attr_ro(scaling_driver);
656 cpufreq_freq_attr_ro(scaling_cur_freq);
657 cpufreq_freq_attr_ro(bios_limit);
658 cpufreq_freq_attr_ro(related_cpus);
659 cpufreq_freq_attr_ro(affected_cpus);
660 cpufreq_freq_attr_rw(scaling_min_freq);
661 cpufreq_freq_attr_rw(scaling_max_freq);
662 cpufreq_freq_attr_rw(scaling_governor);
663 cpufreq_freq_attr_rw(scaling_setspeed);
664
665 static struct attribute *default_attrs[] = {
666         &cpuinfo_min_freq.attr,
667         &cpuinfo_max_freq.attr,
668         &cpuinfo_transition_latency.attr,
669         &scaling_min_freq.attr,
670         &scaling_max_freq.attr,
671         &affected_cpus.attr,
672         &related_cpus.attr,
673         &scaling_governor.attr,
674         &scaling_driver.attr,
675         &scaling_available_governors.attr,
676         &scaling_setspeed.attr,
677         NULL
678 };
679
680 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
681 #define to_attr(a) container_of(a, struct freq_attr, attr)
682
683 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
684 {
685         struct cpufreq_policy *policy = to_policy(kobj);
686         struct freq_attr *fattr = to_attr(attr);
687         ssize_t ret = -EINVAL;
688         policy = cpufreq_cpu_get_sysfs(policy->cpu);
689         if (!policy)
690                 goto no_policy;
691
692         if (lock_policy_rwsem_read(policy->cpu) < 0)
693                 goto fail;
694
695         if (fattr->show)
696                 ret = fattr->show(policy, buf);
697         else
698                 ret = -EIO;
699
700         unlock_policy_rwsem_read(policy->cpu);
701 fail:
702         cpufreq_cpu_put_sysfs(policy);
703 no_policy:
704         return ret;
705 }
706
707 static ssize_t store(struct kobject *kobj, struct attribute *attr,
708                      const char *buf, size_t count)
709 {
710         struct cpufreq_policy *policy = to_policy(kobj);
711         struct freq_attr *fattr = to_attr(attr);
712         ssize_t ret = -EINVAL;
713         policy = cpufreq_cpu_get_sysfs(policy->cpu);
714         if (!policy)
715                 goto no_policy;
716
717         if (lock_policy_rwsem_write(policy->cpu) < 0)
718                 goto fail;
719
720         if (fattr->store)
721                 ret = fattr->store(policy, buf, count);
722         else
723                 ret = -EIO;
724
725         unlock_policy_rwsem_write(policy->cpu);
726 fail:
727         cpufreq_cpu_put_sysfs(policy);
728 no_policy:
729         return ret;
730 }
731
732 static void cpufreq_sysfs_release(struct kobject *kobj)
733 {
734         struct cpufreq_policy *policy = to_policy(kobj);
735         pr_debug("last reference is dropped\n");
736         complete(&policy->kobj_unregister);
737 }
738
739 static const struct sysfs_ops sysfs_ops = {
740         .show   = show,
741         .store  = store,
742 };
743
744 static struct kobj_type ktype_cpufreq = {
745         .sysfs_ops      = &sysfs_ops,
746         .default_attrs  = default_attrs,
747         .release        = cpufreq_sysfs_release,
748 };
749
750 struct kobject *cpufreq_global_kobject;
751 EXPORT_SYMBOL(cpufreq_global_kobject);
752
753 static int cpufreq_global_kobject_usage;
754
755 int cpufreq_get_global_kobject(void)
756 {
757         if (!cpufreq_global_kobject_usage++)
758                 return kobject_add(cpufreq_global_kobject,
759                                 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
760
761         return 0;
762 }
763 EXPORT_SYMBOL(cpufreq_get_global_kobject);
764
765 void cpufreq_put_global_kobject(void)
766 {
767         if (!--cpufreq_global_kobject_usage)
768                 kobject_del(cpufreq_global_kobject);
769 }
770 EXPORT_SYMBOL(cpufreq_put_global_kobject);
771
772 int cpufreq_sysfs_create_file(const struct attribute *attr)
773 {
774         int ret = cpufreq_get_global_kobject();
775
776         if (!ret) {
777                 ret = sysfs_create_file(cpufreq_global_kobject, attr);
778                 if (ret)
779                         cpufreq_put_global_kobject();
780         }
781
782         return ret;
783 }
784 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
785
786 void cpufreq_sysfs_remove_file(const struct attribute *attr)
787 {
788         sysfs_remove_file(cpufreq_global_kobject, attr);
789         cpufreq_put_global_kobject();
790 }
791 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
792
793 /* symlink affected CPUs */
794 static int cpufreq_add_dev_symlink(unsigned int cpu,
795                                    struct cpufreq_policy *policy)
796 {
797         unsigned int j;
798         int ret = 0;
799
800         for_each_cpu(j, policy->cpus) {
801                 struct cpufreq_policy *managed_policy;
802                 struct device *cpu_dev;
803
804                 if (j == cpu)
805                         continue;
806
807                 pr_debug("CPU %u already managed, adding link\n", j);
808                 managed_policy = cpufreq_cpu_get(cpu);
809                 cpu_dev = get_cpu_device(j);
810                 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
811                                         "cpufreq");
812                 if (ret) {
813                         cpufreq_cpu_put(managed_policy);
814                         return ret;
815                 }
816         }
817         return ret;
818 }
819
820 static int cpufreq_add_dev_interface(unsigned int cpu,
821                                      struct cpufreq_policy *policy,
822                                      struct device *dev)
823 {
824         struct cpufreq_policy new_policy;
825         struct freq_attr **drv_attr;
826         unsigned long flags;
827         int ret = 0;
828         unsigned int j;
829
830         /* prepare interface data */
831         ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
832                                    &dev->kobj, "cpufreq");
833         if (ret)
834                 return ret;
835
836         /* set up files for this cpu device */
837         drv_attr = cpufreq_driver->attr;
838         while ((drv_attr) && (*drv_attr)) {
839                 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
840                 if (ret)
841                         goto err_out_kobj_put;
842                 drv_attr++;
843         }
844         if (cpufreq_driver->get) {
845                 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
846                 if (ret)
847                         goto err_out_kobj_put;
848         }
849         if (cpufreq_driver->target) {
850                 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
851                 if (ret)
852                         goto err_out_kobj_put;
853         }
854         if (cpufreq_driver->bios_limit) {
855                 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
856                 if (ret)
857                         goto err_out_kobj_put;
858         }
859
860         write_lock_irqsave(&cpufreq_driver_lock, flags);
861         for_each_cpu(j, policy->cpus) {
862                 per_cpu(cpufreq_cpu_data, j) = policy;
863                 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
864         }
865         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
866
867         ret = cpufreq_add_dev_symlink(cpu, policy);
868         if (ret)
869                 goto err_out_kobj_put;
870
871         memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
872         /* assure that the starting sequence is run in __cpufreq_set_policy */
873         policy->governor = NULL;
874
875         /* set default policy */
876         ret = __cpufreq_set_policy(policy, &new_policy);
877         policy->user_policy.policy = policy->policy;
878         policy->user_policy.governor = policy->governor;
879
880         if (ret) {
881                 pr_debug("setting policy failed\n");
882                 if (cpufreq_driver->exit)
883                         cpufreq_driver->exit(policy);
884         }
885         return ret;
886
887 err_out_kobj_put:
888         kobject_put(&policy->kobj);
889         wait_for_completion(&policy->kobj_unregister);
890         return ret;
891 }
892
893 #ifdef CONFIG_HOTPLUG_CPU
894 static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
895                                   struct device *dev)
896 {
897         struct cpufreq_policy *policy;
898         int ret = 0, has_target = !!cpufreq_driver->target;
899         unsigned long flags;
900
901         policy = cpufreq_cpu_get(sibling);
902         WARN_ON(!policy);
903
904         if (has_target)
905                 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
906
907         lock_policy_rwsem_write(sibling);
908
909         write_lock_irqsave(&cpufreq_driver_lock, flags);
910
911         cpumask_set_cpu(cpu, policy->cpus);
912         per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
913         per_cpu(cpufreq_cpu_data, cpu) = policy;
914         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
915
916         unlock_policy_rwsem_write(sibling);
917
918         if (has_target) {
919                 __cpufreq_governor(policy, CPUFREQ_GOV_START);
920                 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
921         }
922
923         ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
924         if (ret) {
925                 cpufreq_cpu_put(policy);
926                 return ret;
927         }
928
929         return 0;
930 }
931 #endif
932
933 /**
934  * cpufreq_add_dev - add a CPU device
935  *
936  * Adds the cpufreq interface for a CPU device.
937  *
938  * The Oracle says: try running cpufreq registration/unregistration concurrently
939  * with with cpu hotplugging and all hell will break loose. Tried to clean this
940  * mess up, but more thorough testing is needed. - Mathieu
941  */
942 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
943 {
944         unsigned int j, cpu = dev->id;
945         int ret = -ENOMEM;
946         struct cpufreq_policy *policy;
947         unsigned long flags;
948 #ifdef CONFIG_HOTPLUG_CPU
949         struct cpufreq_governor *gov;
950         int sibling;
951 #endif
952
953         if (cpu_is_offline(cpu))
954                 return 0;
955
956         pr_debug("adding CPU %u\n", cpu);
957
958 #ifdef CONFIG_SMP
959         /* check whether a different CPU already registered this
960          * CPU because it is in the same boat. */
961         policy = cpufreq_cpu_get(cpu);
962         if (unlikely(policy)) {
963                 cpufreq_cpu_put(policy);
964                 return 0;
965         }
966
967 #ifdef CONFIG_HOTPLUG_CPU
968         /* Check if this cpu was hot-unplugged earlier and has siblings */
969         read_lock_irqsave(&cpufreq_driver_lock, flags);
970         for_each_online_cpu(sibling) {
971                 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
972                 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
973                         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
974                         return cpufreq_add_policy_cpu(cpu, sibling, dev);
975                 }
976         }
977         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
978 #endif
979 #endif
980
981         if (!try_module_get(cpufreq_driver->owner)) {
982                 ret = -EINVAL;
983                 goto module_out;
984         }
985
986         policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
987         if (!policy)
988                 goto nomem_out;
989
990         if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
991                 goto err_free_policy;
992
993         if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
994                 goto err_free_cpumask;
995
996         policy->cpu = cpu;
997         policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
998         cpumask_copy(policy->cpus, cpumask_of(cpu));
999
1000         /* Initially set CPU itself as the policy_cpu */
1001         per_cpu(cpufreq_policy_cpu, cpu) = cpu;
1002
1003         init_completion(&policy->kobj_unregister);
1004         INIT_WORK(&policy->update, handle_update);
1005
1006         /* call driver. From then on the cpufreq must be able
1007          * to accept all calls to ->verify and ->setpolicy for this CPU
1008          */
1009         ret = cpufreq_driver->init(policy);
1010         if (ret) {
1011                 pr_debug("initialization failed\n");
1012                 goto err_set_policy_cpu;
1013         }
1014
1015         /* related cpus should atleast have policy->cpus */
1016         cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1017
1018         /*
1019          * affected cpus must always be the one, which are online. We aren't
1020          * managing offline cpus here.
1021          */
1022         cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1023
1024         policy->user_policy.min = policy->min;
1025         policy->user_policy.max = policy->max;
1026
1027         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1028                                      CPUFREQ_START, policy);
1029
1030 #ifdef CONFIG_HOTPLUG_CPU
1031         gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1032         if (gov) {
1033                 policy->governor = gov;
1034                 pr_debug("Restoring governor %s for cpu %d\n",
1035                        policy->governor->name, cpu);
1036         }
1037 #endif
1038
1039         ret = cpufreq_add_dev_interface(cpu, policy, dev);
1040         if (ret)
1041                 goto err_out_unregister;
1042
1043         kobject_uevent(&policy->kobj, KOBJ_ADD);
1044         module_put(cpufreq_driver->owner);
1045         pr_debug("initialization complete\n");
1046
1047         return 0;
1048
1049 err_out_unregister:
1050         write_lock_irqsave(&cpufreq_driver_lock, flags);
1051         for_each_cpu(j, policy->cpus)
1052                 per_cpu(cpufreq_cpu_data, j) = NULL;
1053         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1054
1055         kobject_put(&policy->kobj);
1056         wait_for_completion(&policy->kobj_unregister);
1057
1058 err_set_policy_cpu:
1059         per_cpu(cpufreq_policy_cpu, cpu) = -1;
1060         free_cpumask_var(policy->related_cpus);
1061 err_free_cpumask:
1062         free_cpumask_var(policy->cpus);
1063 err_free_policy:
1064         kfree(policy);
1065 nomem_out:
1066         module_put(cpufreq_driver->owner);
1067 module_out:
1068         return ret;
1069 }
1070
1071 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1072 {
1073         int j;
1074
1075         policy->last_cpu = policy->cpu;
1076         policy->cpu = cpu;
1077
1078         for_each_cpu(j, policy->cpus)
1079                 per_cpu(cpufreq_policy_cpu, j) = cpu;
1080
1081 #ifdef CONFIG_CPU_FREQ_TABLE
1082         cpufreq_frequency_table_update_policy_cpu(policy);
1083 #endif
1084         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1085                         CPUFREQ_UPDATE_POLICY_CPU, policy);
1086 }
1087
1088 /**
1089  * __cpufreq_remove_dev - remove a CPU device
1090  *
1091  * Removes the cpufreq interface for a CPU device.
1092  * Caller should already have policy_rwsem in write mode for this CPU.
1093  * This routine frees the rwsem before returning.
1094  */
1095 static int __cpufreq_remove_dev(struct device *dev,
1096                 struct subsys_interface *sif)
1097 {
1098         unsigned int cpu = dev->id, ret, cpus;
1099         unsigned long flags;
1100         struct cpufreq_policy *data;
1101         struct kobject *kobj;
1102         struct completion *cmp;
1103         struct device *cpu_dev;
1104
1105         pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1106
1107         write_lock_irqsave(&cpufreq_driver_lock, flags);
1108
1109         data = per_cpu(cpufreq_cpu_data, cpu);
1110         per_cpu(cpufreq_cpu_data, cpu) = NULL;
1111
1112         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1113
1114         if (!data) {
1115                 pr_debug("%s: No cpu_data found\n", __func__);
1116                 return -EINVAL;
1117         }
1118
1119         if (cpufreq_driver->target)
1120                 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1121
1122 #ifdef CONFIG_HOTPLUG_CPU
1123         if (!cpufreq_driver->setpolicy)
1124                 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1125                         data->governor->name, CPUFREQ_NAME_LEN);
1126 #endif
1127
1128         WARN_ON(lock_policy_rwsem_write(cpu));
1129         cpus = cpumask_weight(data->cpus);
1130
1131         if (cpus > 1)
1132                 cpumask_clear_cpu(cpu, data->cpus);
1133         unlock_policy_rwsem_write(cpu);
1134
1135         if (cpu != data->cpu) {
1136                 sysfs_remove_link(&dev->kobj, "cpufreq");
1137         } else if (cpus > 1) {
1138                 /* first sibling now owns the new sysfs dir */
1139                 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1140                 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1141                 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1142                 if (ret) {
1143                         pr_err("%s: Failed to move kobj: %d", __func__, ret);
1144
1145                         WARN_ON(lock_policy_rwsem_write(cpu));
1146                         cpumask_set_cpu(cpu, data->cpus);
1147
1148                         write_lock_irqsave(&cpufreq_driver_lock, flags);
1149                         per_cpu(cpufreq_cpu_data, cpu) = data;
1150                         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1151
1152                         unlock_policy_rwsem_write(cpu);
1153
1154                         ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1155                                         "cpufreq");
1156                         return -EINVAL;
1157                 }
1158
1159                 WARN_ON(lock_policy_rwsem_write(cpu));
1160                 update_policy_cpu(data, cpu_dev->id);
1161                 unlock_policy_rwsem_write(cpu);
1162                 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1163                                 __func__, cpu_dev->id, cpu);
1164         }
1165
1166         if ((cpus == 1) && (cpufreq_driver->target))
1167                 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1168
1169         pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1170         cpufreq_cpu_put(data);
1171
1172         /* If cpu is last user of policy, free policy */
1173         if (cpus == 1) {
1174                 lock_policy_rwsem_read(cpu);
1175                 kobj = &data->kobj;
1176                 cmp = &data->kobj_unregister;
1177                 unlock_policy_rwsem_read(cpu);
1178                 kobject_put(kobj);
1179
1180                 /* we need to make sure that the underlying kobj is actually
1181                  * not referenced anymore by anybody before we proceed with
1182                  * unloading.
1183                  */
1184                 pr_debug("waiting for dropping of refcount\n");
1185                 wait_for_completion(cmp);
1186                 pr_debug("wait complete\n");
1187
1188                 if (cpufreq_driver->exit)
1189                         cpufreq_driver->exit(data);
1190
1191                 free_cpumask_var(data->related_cpus);
1192                 free_cpumask_var(data->cpus);
1193                 kfree(data);
1194         } else if (cpufreq_driver->target) {
1195                 __cpufreq_governor(data, CPUFREQ_GOV_START);
1196                 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1197         }
1198
1199         per_cpu(cpufreq_policy_cpu, cpu) = -1;
1200         return 0;
1201 }
1202
1203 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1204 {
1205         unsigned int cpu = dev->id;
1206         int retval;
1207
1208         if (cpu_is_offline(cpu))
1209                 return 0;
1210
1211         retval = __cpufreq_remove_dev(dev, sif);
1212         return retval;
1213 }
1214
1215 static void handle_update(struct work_struct *work)
1216 {
1217         struct cpufreq_policy *policy =
1218                 container_of(work, struct cpufreq_policy, update);
1219         unsigned int cpu = policy->cpu;
1220         pr_debug("handle_update for cpu %u called\n", cpu);
1221         cpufreq_update_policy(cpu);
1222 }
1223
1224 /**
1225  *      cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1226  *      in deep trouble.
1227  *      @cpu: cpu number
1228  *      @old_freq: CPU frequency the kernel thinks the CPU runs at
1229  *      @new_freq: CPU frequency the CPU actually runs at
1230  *
1231  *      We adjust to current frequency first, and need to clean up later.
1232  *      So either call to cpufreq_update_policy() or schedule handle_update()).
1233  */
1234 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1235                                 unsigned int new_freq)
1236 {
1237         struct cpufreq_policy *policy;
1238         struct cpufreq_freqs freqs;
1239         unsigned long flags;
1240
1241         pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1242                "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1243
1244         freqs.old = old_freq;
1245         freqs.new = new_freq;
1246
1247         read_lock_irqsave(&cpufreq_driver_lock, flags);
1248         policy = per_cpu(cpufreq_cpu_data, cpu);
1249         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1250
1251         cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1252         cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1253 }
1254
1255 /**
1256  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1257  * @cpu: CPU number
1258  *
1259  * This is the last known freq, without actually getting it from the driver.
1260  * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1261  */
1262 unsigned int cpufreq_quick_get(unsigned int cpu)
1263 {
1264         struct cpufreq_policy *policy;
1265         unsigned int ret_freq = 0;
1266
1267         if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1268                 return cpufreq_driver->get(cpu);
1269
1270         policy = cpufreq_cpu_get(cpu);
1271         if (policy) {
1272                 ret_freq = policy->cur;
1273                 cpufreq_cpu_put(policy);
1274         }
1275
1276         return ret_freq;
1277 }
1278 EXPORT_SYMBOL(cpufreq_quick_get);
1279
1280 /**
1281  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1282  * @cpu: CPU number
1283  *
1284  * Just return the max possible frequency for a given CPU.
1285  */
1286 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1287 {
1288         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1289         unsigned int ret_freq = 0;
1290
1291         if (policy) {
1292                 ret_freq = policy->max;
1293                 cpufreq_cpu_put(policy);
1294         }
1295
1296         return ret_freq;
1297 }
1298 EXPORT_SYMBOL(cpufreq_quick_get_max);
1299
1300 static unsigned int __cpufreq_get(unsigned int cpu)
1301 {
1302         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1303         unsigned int ret_freq = 0;
1304
1305         if (!cpufreq_driver->get)
1306                 return ret_freq;
1307
1308         ret_freq = cpufreq_driver->get(cpu);
1309
1310         if (ret_freq && policy->cur &&
1311                 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1312                 /* verify no discrepancy between actual and
1313                                         saved value exists */
1314                 if (unlikely(ret_freq != policy->cur)) {
1315                         cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1316                         schedule_work(&policy->update);
1317                 }
1318         }
1319
1320         return ret_freq;
1321 }
1322
1323 /**
1324  * cpufreq_get - get the current CPU frequency (in kHz)
1325  * @cpu: CPU number
1326  *
1327  * Get the CPU current (static) CPU frequency
1328  */
1329 unsigned int cpufreq_get(unsigned int cpu)
1330 {
1331         unsigned int ret_freq = 0;
1332         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1333
1334         if (!policy)
1335                 goto out;
1336
1337         if (unlikely(lock_policy_rwsem_read(cpu)))
1338                 goto out_policy;
1339
1340         ret_freq = __cpufreq_get(cpu);
1341
1342         unlock_policy_rwsem_read(cpu);
1343
1344 out_policy:
1345         cpufreq_cpu_put(policy);
1346 out:
1347         return ret_freq;
1348 }
1349 EXPORT_SYMBOL(cpufreq_get);
1350
1351 static struct subsys_interface cpufreq_interface = {
1352         .name           = "cpufreq",
1353         .subsys         = &cpu_subsys,
1354         .add_dev        = cpufreq_add_dev,
1355         .remove_dev     = cpufreq_remove_dev,
1356 };
1357
1358 /**
1359  * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1360  *
1361  * This function is only executed for the boot processor.  The other CPUs
1362  * have been put offline by means of CPU hotplug.
1363  */
1364 static int cpufreq_bp_suspend(void)
1365 {
1366         int ret = 0;
1367
1368         int cpu = smp_processor_id();
1369         struct cpufreq_policy *cpu_policy;
1370
1371         pr_debug("suspending cpu %u\n", cpu);
1372
1373         /* If there's no policy for the boot CPU, we have nothing to do. */
1374         cpu_policy = cpufreq_cpu_get(cpu);
1375         if (!cpu_policy)
1376                 return 0;
1377
1378         if (cpufreq_driver->suspend) {
1379                 ret = cpufreq_driver->suspend(cpu_policy);
1380                 if (ret)
1381                         printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1382                                         "step on CPU %u\n", cpu_policy->cpu);
1383         }
1384
1385         cpufreq_cpu_put(cpu_policy);
1386         return ret;
1387 }
1388
1389 /**
1390  * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1391  *
1392  *      1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1393  *      2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1394  *          restored. It will verify that the current freq is in sync with
1395  *          what we believe it to be. This is a bit later than when it
1396  *          should be, but nonethteless it's better than calling
1397  *          cpufreq_driver->get() here which might re-enable interrupts...
1398  *
1399  * This function is only executed for the boot CPU.  The other CPUs have not
1400  * been turned on yet.
1401  */
1402 static void cpufreq_bp_resume(void)
1403 {
1404         int ret = 0;
1405
1406         int cpu = smp_processor_id();
1407         struct cpufreq_policy *cpu_policy;
1408
1409         pr_debug("resuming cpu %u\n", cpu);
1410
1411         /* If there's no policy for the boot CPU, we have nothing to do. */
1412         cpu_policy = cpufreq_cpu_get(cpu);
1413         if (!cpu_policy)
1414                 return;
1415
1416         if (cpufreq_driver->resume) {
1417                 ret = cpufreq_driver->resume(cpu_policy);
1418                 if (ret) {
1419                         printk(KERN_ERR "cpufreq: resume failed in ->resume "
1420                                         "step on CPU %u\n", cpu_policy->cpu);
1421                         goto fail;
1422                 }
1423         }
1424
1425         schedule_work(&cpu_policy->update);
1426
1427 fail:
1428         cpufreq_cpu_put(cpu_policy);
1429 }
1430
1431 static struct syscore_ops cpufreq_syscore_ops = {
1432         .suspend        = cpufreq_bp_suspend,
1433         .resume         = cpufreq_bp_resume,
1434 };
1435
1436 /**
1437  *      cpufreq_get_current_driver - return current driver's name
1438  *
1439  *      Return the name string of the currently loaded cpufreq driver
1440  *      or NULL, if none.
1441  */
1442 const char *cpufreq_get_current_driver(void)
1443 {
1444         if (cpufreq_driver)
1445                 return cpufreq_driver->name;
1446
1447         return NULL;
1448 }
1449 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1450
1451 /*********************************************************************
1452  *                     NOTIFIER LISTS INTERFACE                      *
1453  *********************************************************************/
1454
1455 /**
1456  *      cpufreq_register_notifier - register a driver with cpufreq
1457  *      @nb: notifier function to register
1458  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1459  *
1460  *      Add a driver to one of two lists: either a list of drivers that
1461  *      are notified about clock rate changes (once before and once after
1462  *      the transition), or a list of drivers that are notified about
1463  *      changes in cpufreq policy.
1464  *
1465  *      This function may sleep, and has the same return conditions as
1466  *      blocking_notifier_chain_register.
1467  */
1468 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1469 {
1470         int ret;
1471
1472         if (cpufreq_disabled())
1473                 return -EINVAL;
1474
1475         WARN_ON(!init_cpufreq_transition_notifier_list_called);
1476
1477         switch (list) {
1478         case CPUFREQ_TRANSITION_NOTIFIER:
1479                 ret = srcu_notifier_chain_register(
1480                                 &cpufreq_transition_notifier_list, nb);
1481                 break;
1482         case CPUFREQ_POLICY_NOTIFIER:
1483                 ret = blocking_notifier_chain_register(
1484                                 &cpufreq_policy_notifier_list, nb);
1485                 break;
1486         default:
1487                 ret = -EINVAL;
1488         }
1489
1490         return ret;
1491 }
1492 EXPORT_SYMBOL(cpufreq_register_notifier);
1493
1494 /**
1495  *      cpufreq_unregister_notifier - unregister a driver with cpufreq
1496  *      @nb: notifier block to be unregistered
1497  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1498  *
1499  *      Remove a driver from the CPU frequency notifier list.
1500  *
1501  *      This function may sleep, and has the same return conditions as
1502  *      blocking_notifier_chain_unregister.
1503  */
1504 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1505 {
1506         int ret;
1507
1508         if (cpufreq_disabled())
1509                 return -EINVAL;
1510
1511         switch (list) {
1512         case CPUFREQ_TRANSITION_NOTIFIER:
1513                 ret = srcu_notifier_chain_unregister(
1514                                 &cpufreq_transition_notifier_list, nb);
1515                 break;
1516         case CPUFREQ_POLICY_NOTIFIER:
1517                 ret = blocking_notifier_chain_unregister(
1518                                 &cpufreq_policy_notifier_list, nb);
1519                 break;
1520         default:
1521                 ret = -EINVAL;
1522         }
1523
1524         return ret;
1525 }
1526 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1527
1528
1529 /*********************************************************************
1530  *                              GOVERNORS                            *
1531  *********************************************************************/
1532
1533 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1534                             unsigned int target_freq,
1535                             unsigned int relation)
1536 {
1537         int retval = -EINVAL;
1538         unsigned int old_target_freq = target_freq;
1539
1540         if (cpufreq_disabled())
1541                 return -ENODEV;
1542
1543         /* Make sure that target_freq is within supported range */
1544         if (target_freq > policy->max)
1545                 target_freq = policy->max;
1546         if (target_freq < policy->min)
1547                 target_freq = policy->min;
1548
1549         pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1550                         policy->cpu, target_freq, relation, old_target_freq);
1551
1552         if (target_freq == policy->cur)
1553                 return 0;
1554
1555         if (cpufreq_driver->target)
1556                 retval = cpufreq_driver->target(policy, target_freq, relation);
1557
1558         return retval;
1559 }
1560 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1561
1562 int cpufreq_driver_target(struct cpufreq_policy *policy,
1563                           unsigned int target_freq,
1564                           unsigned int relation)
1565 {
1566         int ret = -EINVAL;
1567
1568         if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1569                 goto fail;
1570
1571         ret = __cpufreq_driver_target(policy, target_freq, relation);
1572
1573         unlock_policy_rwsem_write(policy->cpu);
1574
1575 fail:
1576         return ret;
1577 }
1578 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1579
1580 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1581 {
1582         if (cpufreq_disabled())
1583                 return 0;
1584
1585         if (!cpufreq_driver->getavg)
1586                 return 0;
1587
1588         return cpufreq_driver->getavg(policy, cpu);
1589 }
1590 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1591
1592 /*
1593  * when "event" is CPUFREQ_GOV_LIMITS
1594  */
1595
1596 static int __cpufreq_governor(struct cpufreq_policy *policy,
1597                                         unsigned int event)
1598 {
1599         int ret;
1600
1601         /* Only must be defined when default governor is known to have latency
1602            restrictions, like e.g. conservative or ondemand.
1603            That this is the case is already ensured in Kconfig
1604         */
1605 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1606         struct cpufreq_governor *gov = &cpufreq_gov_performance;
1607 #else
1608         struct cpufreq_governor *gov = NULL;
1609 #endif
1610
1611         if (policy->governor->max_transition_latency &&
1612             policy->cpuinfo.transition_latency >
1613             policy->governor->max_transition_latency) {
1614                 if (!gov)
1615                         return -EINVAL;
1616                 else {
1617                         printk(KERN_WARNING "%s governor failed, too long"
1618                                " transition latency of HW, fallback"
1619                                " to %s governor\n",
1620                                policy->governor->name,
1621                                gov->name);
1622                         policy->governor = gov;
1623                 }
1624         }
1625
1626         if (!try_module_get(policy->governor->owner))
1627                 return -EINVAL;
1628
1629         pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1630                                                 policy->cpu, event);
1631
1632         mutex_lock(&cpufreq_governor_lock);
1633         if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1634             (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1635                 mutex_unlock(&cpufreq_governor_lock);
1636                 return -EBUSY;
1637         }
1638
1639         if (event == CPUFREQ_GOV_STOP)
1640                 policy->governor_enabled = false;
1641         else if (event == CPUFREQ_GOV_START)
1642                 policy->governor_enabled = true;
1643
1644         mutex_unlock(&cpufreq_governor_lock);
1645
1646         ret = policy->governor->governor(policy, event);
1647
1648         if (!ret) {
1649                 if (event == CPUFREQ_GOV_POLICY_INIT)
1650                         policy->governor->initialized++;
1651                 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1652                         policy->governor->initialized--;
1653         } else {
1654                 /* Restore original values */
1655                 mutex_lock(&cpufreq_governor_lock);
1656                 if (event == CPUFREQ_GOV_STOP)
1657                         policy->governor_enabled = true;
1658                 else if (event == CPUFREQ_GOV_START)
1659                         policy->governor_enabled = false;
1660                 mutex_unlock(&cpufreq_governor_lock);
1661         }
1662
1663         /* we keep one module reference alive for
1664                         each CPU governed by this CPU */
1665         if ((event != CPUFREQ_GOV_START) || ret)
1666                 module_put(policy->governor->owner);
1667         if ((event == CPUFREQ_GOV_STOP) && !ret)
1668                 module_put(policy->governor->owner);
1669
1670         return ret;
1671 }
1672
1673 int cpufreq_register_governor(struct cpufreq_governor *governor)
1674 {
1675         int err;
1676
1677         if (!governor)
1678                 return -EINVAL;
1679
1680         if (cpufreq_disabled())
1681                 return -ENODEV;
1682
1683         mutex_lock(&cpufreq_governor_mutex);
1684
1685         governor->initialized = 0;
1686         err = -EBUSY;
1687         if (__find_governor(governor->name) == NULL) {
1688                 err = 0;
1689                 list_add(&governor->governor_list, &cpufreq_governor_list);
1690         }
1691
1692         mutex_unlock(&cpufreq_governor_mutex);
1693         return err;
1694 }
1695 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1696
1697 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1698 {
1699 #ifdef CONFIG_HOTPLUG_CPU
1700         int cpu;
1701 #endif
1702
1703         if (!governor)
1704                 return;
1705
1706         if (cpufreq_disabled())
1707                 return;
1708
1709 #ifdef CONFIG_HOTPLUG_CPU
1710         for_each_present_cpu(cpu) {
1711                 if (cpu_online(cpu))
1712                         continue;
1713                 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1714                         strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1715         }
1716 #endif
1717
1718         mutex_lock(&cpufreq_governor_mutex);
1719         list_del(&governor->governor_list);
1720         mutex_unlock(&cpufreq_governor_mutex);
1721         return;
1722 }
1723 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1724
1725
1726 /*********************************************************************
1727  *                          POLICY INTERFACE                         *
1728  *********************************************************************/
1729
1730 /**
1731  * cpufreq_get_policy - get the current cpufreq_policy
1732  * @policy: struct cpufreq_policy into which the current cpufreq_policy
1733  *      is written
1734  *
1735  * Reads the current cpufreq policy.
1736  */
1737 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1738 {
1739         struct cpufreq_policy *cpu_policy;
1740         if (!policy)
1741                 return -EINVAL;
1742
1743         cpu_policy = cpufreq_cpu_get(cpu);
1744         if (!cpu_policy)
1745                 return -EINVAL;
1746
1747         memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1748
1749         cpufreq_cpu_put(cpu_policy);
1750         return 0;
1751 }
1752 EXPORT_SYMBOL(cpufreq_get_policy);
1753
1754 /*
1755  * data   : current policy.
1756  * policy : policy to be set.
1757  */
1758 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1759                                 struct cpufreq_policy *policy)
1760 {
1761         int ret = 0, failed = 1;
1762
1763         pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1764                 policy->min, policy->max);
1765
1766         memcpy(&policy->cpuinfo, &data->cpuinfo,
1767                                 sizeof(struct cpufreq_cpuinfo));
1768
1769         if (policy->min > data->max || policy->max < data->min) {
1770                 ret = -EINVAL;
1771                 goto error_out;
1772         }
1773
1774         /* verify the cpu speed can be set within this limit */
1775         ret = cpufreq_driver->verify(policy);
1776         if (ret)
1777                 goto error_out;
1778
1779         /* adjust if necessary - all reasons */
1780         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1781                         CPUFREQ_ADJUST, policy);
1782
1783         /* adjust if necessary - hardware incompatibility*/
1784         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1785                         CPUFREQ_INCOMPATIBLE, policy);
1786
1787         /*
1788          * verify the cpu speed can be set within this limit, which might be
1789          * different to the first one
1790          */
1791         ret = cpufreq_driver->verify(policy);
1792         if (ret)
1793                 goto error_out;
1794
1795         /* notification of the new policy */
1796         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1797                         CPUFREQ_NOTIFY, policy);
1798
1799         data->min = policy->min;
1800         data->max = policy->max;
1801
1802         pr_debug("new min and max freqs are %u - %u kHz\n",
1803                                         data->min, data->max);
1804
1805         if (cpufreq_driver->setpolicy) {
1806                 data->policy = policy->policy;
1807                 pr_debug("setting range\n");
1808                 ret = cpufreq_driver->setpolicy(policy);
1809         } else {
1810                 if (policy->governor != data->governor) {
1811                         /* save old, working values */
1812                         struct cpufreq_governor *old_gov = data->governor;
1813
1814                         pr_debug("governor switch\n");
1815
1816                         /* end old governor */
1817                         if (data->governor) {
1818                                 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1819                                 unlock_policy_rwsem_write(policy->cpu);
1820                                 __cpufreq_governor(data,
1821                                                 CPUFREQ_GOV_POLICY_EXIT);
1822                                 lock_policy_rwsem_write(policy->cpu);
1823                         }
1824
1825                         /* start new governor */
1826                         data->governor = policy->governor;
1827                         if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1828                                 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1829                                         failed = 0;
1830                                 } else {
1831                                         unlock_policy_rwsem_write(policy->cpu);
1832                                         __cpufreq_governor(data,
1833                                                         CPUFREQ_GOV_POLICY_EXIT);
1834                                         lock_policy_rwsem_write(policy->cpu);
1835                                 }
1836                         }
1837
1838                         if (failed) {
1839                                 /* new governor failed, so re-start old one */
1840                                 pr_debug("starting governor %s failed\n",
1841                                                         data->governor->name);
1842                                 if (old_gov) {
1843                                         data->governor = old_gov;
1844                                         __cpufreq_governor(data,
1845                                                         CPUFREQ_GOV_POLICY_INIT);
1846                                         __cpufreq_governor(data,
1847                                                            CPUFREQ_GOV_START);
1848                                 }
1849                                 ret = -EINVAL;
1850                                 goto error_out;
1851                         }
1852                         /* might be a policy change, too, so fall through */
1853                 }
1854                 pr_debug("governor: change or update limits\n");
1855                 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1856         }
1857
1858 error_out:
1859         return ret;
1860 }
1861
1862 /**
1863  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
1864  *      @cpu: CPU which shall be re-evaluated
1865  *
1866  *      Useful for policy notifiers which have different necessities
1867  *      at different times.
1868  */
1869 int cpufreq_update_policy(unsigned int cpu)
1870 {
1871         struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1872         struct cpufreq_policy policy;
1873         int ret;
1874
1875         if (!data) {
1876                 ret = -ENODEV;
1877                 goto no_policy;
1878         }
1879
1880         if (unlikely(lock_policy_rwsem_write(cpu))) {
1881                 ret = -EINVAL;
1882                 goto fail;
1883         }
1884
1885         pr_debug("updating policy for CPU %u\n", cpu);
1886         memcpy(&policy, data, sizeof(struct cpufreq_policy));
1887         policy.min = data->user_policy.min;
1888         policy.max = data->user_policy.max;
1889         policy.policy = data->user_policy.policy;
1890         policy.governor = data->user_policy.governor;
1891
1892         /*
1893          * BIOS might change freq behind our back
1894          * -> ask driver for current freq and notify governors about a change
1895          */
1896         if (cpufreq_driver->get) {
1897                 policy.cur = cpufreq_driver->get(cpu);
1898                 if (!data->cur) {
1899                         pr_debug("Driver did not initialize current freq");
1900                         data->cur = policy.cur;
1901                 } else {
1902                         if (data->cur != policy.cur && cpufreq_driver->target)
1903                                 cpufreq_out_of_sync(cpu, data->cur,
1904                                                                 policy.cur);
1905                 }
1906         }
1907
1908         ret = __cpufreq_set_policy(data, &policy);
1909
1910         unlock_policy_rwsem_write(cpu);
1911
1912 fail:
1913         cpufreq_cpu_put(data);
1914 no_policy:
1915         return ret;
1916 }
1917 EXPORT_SYMBOL(cpufreq_update_policy);
1918
1919 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1920                                         unsigned long action, void *hcpu)
1921 {
1922         unsigned int cpu = (unsigned long)hcpu;
1923         struct device *dev;
1924
1925         dev = get_cpu_device(cpu);
1926         if (dev) {
1927                 switch (action) {
1928                 case CPU_ONLINE:
1929                         cpufreq_add_dev(dev, NULL);
1930                         break;
1931                 case CPU_DOWN_PREPARE:
1932                 case CPU_UP_CANCELED_FROZEN:
1933                         __cpufreq_remove_dev(dev, NULL);
1934                         break;
1935                 case CPU_DOWN_FAILED:
1936                         cpufreq_add_dev(dev, NULL);
1937                         break;
1938                 }
1939         }
1940         return NOTIFY_OK;
1941 }
1942
1943 static struct notifier_block __refdata cpufreq_cpu_notifier = {
1944         .notifier_call = cpufreq_cpu_callback,
1945 };
1946
1947 /*********************************************************************
1948  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
1949  *********************************************************************/
1950
1951 /**
1952  * cpufreq_register_driver - register a CPU Frequency driver
1953  * @driver_data: A struct cpufreq_driver containing the values#
1954  * submitted by the CPU Frequency driver.
1955  *
1956  * Registers a CPU Frequency driver to this core code. This code
1957  * returns zero on success, -EBUSY when another driver got here first
1958  * (and isn't unregistered in the meantime).
1959  *
1960  */
1961 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1962 {
1963         unsigned long flags;
1964         int ret;
1965
1966         if (cpufreq_disabled())
1967                 return -ENODEV;
1968
1969         if (!driver_data || !driver_data->verify || !driver_data->init ||
1970             ((!driver_data->setpolicy) && (!driver_data->target)))
1971                 return -EINVAL;
1972
1973         pr_debug("trying to register driver %s\n", driver_data->name);
1974
1975         if (driver_data->setpolicy)
1976                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1977
1978         write_lock_irqsave(&cpufreq_driver_lock, flags);
1979         if (cpufreq_driver) {
1980                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1981                 return -EBUSY;
1982         }
1983         cpufreq_driver = driver_data;
1984         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1985
1986         ret = subsys_interface_register(&cpufreq_interface);
1987         if (ret)
1988                 goto err_null_driver;
1989
1990         if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1991                 int i;
1992                 ret = -ENODEV;
1993
1994                 /* check for at least one working CPU */
1995                 for (i = 0; i < nr_cpu_ids; i++)
1996                         if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1997                                 ret = 0;
1998                                 break;
1999                         }
2000
2001                 /* if all ->init() calls failed, unregister */
2002                 if (ret) {
2003                         pr_debug("no CPU initialized for driver %s\n",
2004                                                         driver_data->name);
2005                         goto err_if_unreg;
2006                 }
2007         }
2008
2009         register_hotcpu_notifier(&cpufreq_cpu_notifier);
2010         pr_debug("driver %s up and running\n", driver_data->name);
2011
2012         return 0;
2013 err_if_unreg:
2014         subsys_interface_unregister(&cpufreq_interface);
2015 err_null_driver:
2016         write_lock_irqsave(&cpufreq_driver_lock, flags);
2017         cpufreq_driver = NULL;
2018         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2019         return ret;
2020 }
2021 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2022
2023 /**
2024  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2025  *
2026  * Unregister the current CPUFreq driver. Only call this if you have
2027  * the right to do so, i.e. if you have succeeded in initialising before!
2028  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2029  * currently not initialised.
2030  */
2031 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2032 {
2033         unsigned long flags;
2034
2035         if (!cpufreq_driver || (driver != cpufreq_driver))
2036                 return -EINVAL;
2037
2038         pr_debug("unregistering driver %s\n", driver->name);
2039
2040         subsys_interface_unregister(&cpufreq_interface);
2041         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2042
2043         write_lock_irqsave(&cpufreq_driver_lock, flags);
2044         cpufreq_driver = NULL;
2045         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2046
2047         return 0;
2048 }
2049 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2050
2051 static int __init cpufreq_core_init(void)
2052 {
2053         int cpu;
2054
2055         if (cpufreq_disabled())
2056                 return -ENODEV;
2057
2058         for_each_possible_cpu(cpu) {
2059                 per_cpu(cpufreq_policy_cpu, cpu) = -1;
2060                 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2061         }
2062
2063         cpufreq_global_kobject = kobject_create();
2064         BUG_ON(!cpufreq_global_kobject);
2065         register_syscore_ops(&cpufreq_syscore_ops);
2066
2067         return 0;
2068 }
2069 core_initcall(cpufreq_core_init);