/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *      Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cputime.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>

#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and the rwlock protecting it.
 * This lock also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_RWLOCK(cpufreq_driver_lock);

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)                                    \
static int lock_policy_rwsem_##mode(int cpu)                            \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));            \
                                                                        \
        return 0;                                                       \
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)                                  \
static void unlock_policy_rwsem_##mode(int cpu)                         \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));              \
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);

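/*
 * Illustrative sketch (editor's note, not part of the original file):
 * a typical read-side user of the per-policy rwsem, following the rules
 * documented above, would look like:
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return -EINVAL;
 *	... read fields of per_cpu(cpufreq_cpu_data, cpu) ...
 *	unlock_policy_rwsem_read(cpu);
 *
 * The sysfs show()/store() handlers further down follow exactly this
 * pattern, taking the semaphore in read or write mode respectively.
 */
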
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * Each list is protected by the locking of its notifier head.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

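/*
 * Illustrative note (editor's sketch, not part of the original file):
 * when the nohz accounting in get_cpu_idle_time_us() is unavailable it
 * returns -1ULL, and the jiffies-based fallback above computes idle as
 * wall time minus all busy states. A governor sampling CPU 0 and
 * treating iowait as busy might therefore do:
 *
 *	u64 wall, idle = get_cpu_idle_time(0, &wall, 1);
 *
 * Both values are returned in microseconds.
 */
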
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
        struct cpufreq_policy *data;
        unsigned long flags;

        if (cpu >= nr_cpu_ids)
                goto err_out;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (!cpufreq_driver)
                goto err_out_unlock;

        if (!try_module_get(cpufreq_driver->owner))
                goto err_out_unlock;


        /* get the CPU */
        data = per_cpu(cpufreq_cpu_data, cpu);

        if (!data)
                goto err_out_put_module;

        if (!sysfs && !kobject_get(&data->kobj))
                goto err_out_put_module;

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return data;

err_out_put_module:
        module_put(cpufreq_driver->owner);
err_out_unlock:
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
        return NULL;
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        if (cpufreq_disabled())
                return NULL;

        return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
        return __cpufreq_cpu_get(cpu, true);
}

static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
        if (!sysfs)
                kobject_put(&data->kobj);
        module_put(cpufreq_driver->owner);
}

void cpufreq_cpu_put(struct cpufreq_policy *data)
{
        if (cpufreq_disabled())
                return;

        __cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

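/*
 * Illustrative sketch (editor's note, not part of the original file):
 * every successful cpufreq_cpu_get() must be balanced by a
 * cpufreq_cpu_put(), since the pair pins both the policy kobject and
 * the driver module:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *	if (policy) {
 *		... use policy ...
 *		cpufreq_cpu_put(policy);
 *	}
 */
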
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
        __cpufreq_cpu_put(data, true);
}

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int  l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; "
                        "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
        }
        if ((val == CPUFREQ_POSTCHANGE  && ci->old != ci->new) ||
            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu "
                        "for frequency %u kHz\n", loops_per_jiffy, ci->new);
        }
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        return;
}
#endif

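/*
 * Illustrative example (editor's note, not part of the original file):
 * cpufreq_scale() rescales the reference value proportionally to
 * frequency, new_lpj = l_p_j_ref * new_freq / l_p_j_ref_freq. With a
 * reference of 4000000 loops per jiffy captured at 800000 kHz, a
 * transition to 400000 kHz yields
 * loops_per_jiffy = 4000000 * 400000 / 800000 = 2000000,
 * keeping udelay() roughly calibrated on UP systems.
 */
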

void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is"
                                        " %u, cpufreq assumed %u kHz.\n",
                                        freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
                        (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);

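/*
 * Illustrative sketch (editor's note, not part of the original file):
 * a scaling driver brackets the actual hardware change with the two
 * notifications, so the chain fires once before and once after:
 *
 *	freqs.old = policy->cur;
 *	freqs.new = target_freq;
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	... program the new frequency into the hardware ...
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 */
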


/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        list_for_each_entry(t, &cpufreq_governor_list, governor_list)
                if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (!cpufreq_driver)
                goto out;

        if (cpufreq_driver->setpolicy) {
                if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strnicmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else if (cpufreq_driver->target) {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = __find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = __find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
out:
        return err;
}


/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

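/*
 * Illustrative expansion (editor's note, not part of the original file):
 * for example, show_one(scaling_max_freq, max) above generates:
 *
 *	static ssize_t show_scaling_max_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 *
 * which backs the scaling_max_freq sysfs attribute declared further down.
 */
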
static int __cpufreq_set_policy(struct cpufreq_policy *data,
                                struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        unsigned int ret;                                               \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        ret = __cpufreq_set_policy(policy, &new_policy);                \
        policy->user_policy.object = policy->object;                    \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy->cpu);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}


/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}


/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int ret;
        char    str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        /* Do not use cpufreq_set_policy here or the user_policy.max
           will be wrongly overridden */
        ret = __cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret)
                return ret;
        else
                return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!cpufreq_driver->target) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}

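/*
 * Illustrative note (editor's sketch, not part of the original file):
 * show_cpus() renders the mask as a space-separated decimal list, so
 * reading related_cpus on a four-core package typically yields
 * "0 1 2 3\n".
 */
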
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;
        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_read(policy->cpu) < 0)
                goto fail;

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        unlock_policy_rwsem_read(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_write(policy->cpu) < 0)
                goto fail;

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        unlock_policy_rwsem_write(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
        if (!cpufreq_global_kobject_usage++)
                return kobject_add(cpufreq_global_kobject,
                                &cpu_subsys.dev_root->kobj, "%s", "cpufreq");

        return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
        if (!--cpufreq_global_kobject_usage)
                kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
        int ret = cpufreq_get_global_kobject();

        if (!ret) {
                ret = sysfs_create_file(cpufreq_global_kobject, attr);
                if (ret)
                        cpufreq_put_global_kobject();
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
        sysfs_remove_file(cpufreq_global_kobject, attr);
        cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
                                   struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        for_each_cpu(j, policy->cpus) {
                struct cpufreq_policy *managed_policy;
                struct device *cpu_dev;

                if (j == cpu)
                        continue;

                pr_debug("CPU %u already managed, adding link\n", j);
                managed_policy = cpufreq_cpu_get(cpu);
                cpu_dev = get_cpu_device(j);
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
                if (ret) {
                        cpufreq_cpu_put(managed_policy);
                        return ret;
                }
        }
        return ret;
}

static int cpufreq_add_dev_interface(unsigned int cpu,
                                     struct cpufreq_policy *policy,
                                     struct device *dev)
{
        struct cpufreq_policy new_policy;
        struct freq_attr **drv_attr;
        unsigned long flags;
        int ret = 0;
        unsigned int j;

        /* prepare interface data */
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   &dev->kobj, "cpufreq");
        if (ret)
                return ret;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        goto err_out_kobj_put;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->target) {
                ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        goto err_out_kobj_put;
        }

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus) {
                per_cpu(cpufreq_cpu_data, j) = policy;
                per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
        }
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        ret = cpufreq_add_dev_symlink(cpu, policy);
        if (ret)
                goto err_out_kobj_put;

        memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
        /* assure that the starting sequence is run in __cpufreq_set_policy */
        policy->governor = NULL;

        /* set default policy */
        ret = __cpufreq_set_policy(policy, &new_policy);
        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
        return ret;

err_out_kobj_put:
        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);
        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
                                  struct device *dev)
{
        struct cpufreq_policy *policy;
        int ret = 0, has_target = !!cpufreq_driver->target;
        unsigned long flags;

        policy = cpufreq_cpu_get(sibling);
        WARN_ON(!policy);

        if (has_target)
                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);

        lock_policy_rwsem_write(sibling);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpumask_set_cpu(cpu, policy->cpus);
        per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        unlock_policy_rwsem_write(sibling);

        if (has_target) {
                __cpufreq_governor(policy, CPUFREQ_GOV_START);
                __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
        }

        ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
        if (ret) {
                cpufreq_cpu_put(policy);
                return ret;
        }

        return 0;
}
#endif

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with CPU hotplugging and all hell will break loose. Tried to
 * clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
        struct cpufreq_governor *gov;
        int sibling;
#endif

        if (cpu_is_offline(cpu))
                return 0;

        pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
        /* check whether a different CPU already registered this
         * CPU because it is in the same boat. */
        policy = cpufreq_cpu_get(cpu);
        if (unlikely(policy)) {
                cpufreq_cpu_put(policy);
                return 0;
        }

#ifdef CONFIG_HOTPLUG_CPU
        /* Check if this cpu was hot-unplugged earlier and has siblings */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_online_cpu(sibling) {
                struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
                if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
                        return cpufreq_add_policy_cpu(cpu, sibling, dev);
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

        if (!try_module_get(cpufreq_driver->owner)) {
                ret = -EINVAL;
                goto module_out;
        }

        policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
        if (!policy)
                goto nomem_out;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        policy->cpu = cpu;
        policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
        cpumask_copy(policy->cpus, cpumask_of(cpu));

        /* Initially set CPU itself as the policy_cpu */
        per_cpu(cpufreq_policy_cpu, cpu) = cpu;

        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        /* call driver. From then on the cpufreq must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto err_set_policy_cpu;
        }

        /* related cpus should at least have policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

        /*
         * affected cpus must always be the ones that are online. We aren't
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        policy->user_policy.min = policy->min;
        policy->user_policy.max = policy->max;

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
        gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
        if (gov) {
                policy->governor = gov;
                pr_debug("Restoring governor %s for cpu %d\n",
                       policy->governor->name, cpu);
        }
#endif

        ret = cpufreq_add_dev_interface(cpu, policy, dev);
        if (ret)
                goto err_out_unregister;

        kobject_uevent(&policy->kobj, KOBJ_ADD);
        module_put(cpufreq_driver->owner);
        pr_debug("initialization complete\n");

        return 0;

err_out_unregister:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
        per_cpu(cpufreq_policy_cpu, cpu) = -1;
        free_cpumask_var(policy->related_cpus);
err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);
nomem_out:
        module_put(cpufreq_driver->owner);
module_out:
        return ret;
}

static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        int j;

        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;

        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
        cpufreq_frequency_table_update_policy_cpu(policy);
#endif
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_UPDATE_POLICY_CPU, policy);
}

/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id, ret, cpus;
        unsigned long flags;
        struct cpufreq_policy *data;
        struct kobject *kobj;
        struct completion *cmp;
        struct device *cpu_dev;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        data = per_cpu(cpufreq_cpu_data, cpu);
        per_cpu(cpufreq_cpu_data, cpu) = NULL;

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!data) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        if (cpufreq_driver->target)
                __cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
        if (!cpufreq_driver->setpolicy)
                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
                        data->governor->name, CPUFREQ_NAME_LEN);
#endif

        WARN_ON(lock_policy_rwsem_write(cpu));
        cpus = cpumask_weight(data->cpus);

        if (cpus > 1)
                cpumask_clear_cpu(cpu, data->cpus);
        unlock_policy_rwsem_write(cpu);

        if (cpu != data->cpu) {
                sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {
                /* first sibling now owns the new sysfs dir */
                cpu_dev = get_cpu_device(cpumask_first(data->cpus));
                sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
                ret = kobject_move(&data->kobj, &cpu_dev->kobj);
                if (ret) {
                        pr_err("%s: Failed to move kobj: %d", __func__, ret);

                        WARN_ON(lock_policy_rwsem_write(cpu));
                        cpumask_set_cpu(cpu, data->cpus);

                        write_lock_irqsave(&cpufreq_driver_lock, flags);
                        per_cpu(cpufreq_cpu_data, cpu) = data;
                        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

                        unlock_policy_rwsem_write(cpu);

                        ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
                                        "cpufreq");
                        return -EINVAL;
                }

                WARN_ON(lock_policy_rwsem_write(cpu));
                update_policy_cpu(data, cpu_dev->id);
                unlock_policy_rwsem_write(cpu);
                pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
                                __func__, cpu_dev->id, cpu);
        }

        if ((cpus == 1) && (cpufreq_driver->target))
                __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

        pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
        cpufreq_cpu_put(data);

        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
                lock_policy_rwsem_read(cpu);
                kobj = &data->kobj;
                cmp = &data->kobj_unregister;
                unlock_policy_rwsem_read(cpu);
                kobject_put(kobj);

                /* we need to make sure that the underlying kobj is actually
                 * not referenced anymore by anybody before we proceed with
                 * unloading.
                 */
                pr_debug("waiting for dropping of refcount\n");
                wait_for_completion(cmp);
                pr_debug("wait complete\n");

                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(data);

                free_cpumask_var(data->related_cpus);
                free_cpumask_var(data->cpus);
                kfree(data);
        } else if (cpufreq_driver->target) {
                __cpufreq_governor(data, CPUFREQ_GOV_START);
                __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
        }

        per_cpu(cpufreq_policy_cpu, cpu) = -1;
        return 0;
}


static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        int retval;

        if (cpu_is_offline(cpu))
                return 0;

        retval = __cpufreq_remove_dev(dev, sif);
        return retval;
}


static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;
        pr_debug("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}

/**
 *      cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're in deep trouble.
 *      @cpu: cpu number
 *      @old_freq: CPU frequency the kernel thinks the CPU runs at
 *      @new_freq: CPU frequency the CPU actually runs at
 *
 *      We adjust to the current frequency first, and need to clean up later.
 *      So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
                                unsigned int new_freq)
{
        struct cpufreq_policy *policy;
        struct cpufreq_freqs freqs;
        unsigned long flags;


        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
               "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

        freqs.old = old_freq;
        freqs.new = new_freq;

        read_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}


/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                return cpufreq_driver->get(cpu);

        policy = cpufreq_cpu_get(cpu);
        if (policy) {
                ret_freq = policy->cur;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

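/*
 * Illustrative sketch (editor's note, not part of the original file):
 * code that only needs the cached frequency can avoid a potentially
 * slow hardware read:
 *
 *	unsigned int khz = cpufreq_quick_get(cpu);
 *	if (khz)
 *		pr_info("cpu%u last known freq: %u kHz\n", cpu, khz);
 *
 * A return value of 0 means no policy (or no cached value) was available.
 */
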
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                ret_freq = policy->max;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);


static unsigned int __cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
        unsigned int ret_freq = 0;

        if (!cpufreq_driver->get)
                return ret_freq;

        ret_freq = cpufreq_driver->get(cpu);

        if (ret_freq && policy->cur &&
                !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /* verify no discrepancy between actual and
                                        saved value exists */
                if (unlikely(ret_freq != policy->cur)) {
                        cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
                        schedule_work(&policy->update);
                }
        }

        return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        unsigned int ret_freq = 0;
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

        if (!policy)
                goto out;

        if (unlikely(lock_policy_rwsem_read(cpu)))
                goto out_policy;

        ret_freq = __cpufreq_get(cpu);

        unlock_policy_rwsem_read(cpu);

out_policy:
        cpufreq_cpu_put(policy);
out:
        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};


/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
        int ret = 0;

        int cpu = smp_processor_id();
        struct cpufreq_policy *cpu_policy;

        pr_debug("suspending cpu %u\n", cpu);

        /* If there's no policy for the boot CPU, we have nothing to do. */
        cpu_policy = cpufreq_cpu_get(cpu);
        if (!cpu_policy)
                return 0;

        if (cpufreq_driver->suspend) {
                ret = cpufreq_driver->suspend(cpu_policy);
                if (ret)
                        printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
                                        "step on CPU %u\n", cpu_policy->cpu);
        }

        cpufreq_cpu_put(cpu_policy);
        return ret;
}

/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *      1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *      2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
 *          restored. It will verify that the current freq is in sync with
 *          what we believe it to be. This is a bit later than when it
 *          should be, but nonetheless it's better than calling
 *          cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
        int ret = 0;

        int cpu = smp_processor_id();
        struct cpufreq_policy *cpu_policy;

        pr_debug("resuming cpu %u\n", cpu);

        /* If there's no policy for the boot CPU, we have nothing to do. */
        cpu_policy = cpufreq_cpu_get(cpu);
        if (!cpu_policy)
                return;

        if (cpufreq_driver->resume) {
                ret = cpufreq_driver->resume(cpu_policy);
                if (ret) {
                        printk(KERN_ERR "cpufreq: resume failed in ->resume "
                                        "step on CPU %u\n", cpu_policy->cpu);
                        goto fail;
                }
        }

        schedule_work(&cpu_policy->update);

fail:
        cpufreq_cpu_put(cpu_policy);
}

static struct syscore_ops cpufreq_syscore_ops = {
        .suspend        = cpufreq_bp_suspend,
        .resume         = cpufreq_bp_resume,
};

/**
 *      cpufreq_get_current_driver - return current driver's name
 *
 *      Return the name string of the currently loaded cpufreq driver
 *      or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->name;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 *      cpufreq_register_notifier - register a driver with cpufreq
 *      @nb: notifier function to register
 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *      Add a driver to one of two lists: either a list of drivers that
 *      are notified about clock rate changes (once before and once after
 *      the transition), or a list of drivers that are notified about
 *      changes in cpufreq policy.
 *
 *      This function may sleep, and has the same return conditions as
 *      blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        WARN_ON(!init_cpufreq_transition_notifier_list_called);

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_register(
                                &cpufreq_transition_notifier_list, nb);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_register(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);

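/*
 * Illustrative sketch (editor's note, not part of the original file):
 * a client that wants to react to every completed frequency change
 * registers a transition notifier and filters on CPUFREQ_POSTCHANGE:
 *
 *	static int my_freq_cb(struct notifier_block *nb,
 *			      unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u now at %u kHz\n",
 *				 freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_freq_cb };
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */
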

/**
 *      cpufreq_unregister_notifier - unregister a driver with cpufreq
 *      @nb: notifier block to be unregistered
 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *      Remove a driver from the CPU frequency notifier list.
 *
 *      This function may sleep, and has the same return conditions as
 *      blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_unregister(
                                &cpufreq_transition_notifier_list, nb);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_unregister(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/


int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
        int retval = -EINVAL;
        unsigned int old_target_freq = target_freq;

        if (cpufreq_disabled())
                return -ENODEV;

        /* Make sure that target_freq is within supported range */
        if (target_freq > policy->max)
                target_freq = policy->max;
        if (target_freq < policy->min)
                target_freq = policy->min;

        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
                        policy->cpu, target_freq, relation, old_target_freq);

        if (target_freq == policy->cur)
                return 0;

        if (cpufreq_driver->target)
                retval = cpufreq_driver->target(policy, target_freq, relation);

        return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

1569 int cpufreq_driver_target(struct cpufreq_policy *policy,
1570                           unsigned int target_freq,
1571                           unsigned int relation)
1572 {
1573         int ret = -EINVAL;
1574
1575         policy = cpufreq_cpu_get(policy->cpu);
1576         if (!policy)
1577                 goto no_policy;
1578
1579         if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1580                 goto fail;
1581
1582         ret = __cpufreq_driver_target(policy, target_freq, relation);
1583
1584         unlock_policy_rwsem_write(policy->cpu);
1585
1586 fail:
1587         cpufreq_cpu_put(policy);
1588 no_policy:
1589         return ret;
1590 }
1591 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
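
A sketch of the caller's side. cpufreq_driver_target() takes the policy rwsem itself, so it must run in process context where that lock is not already held; governors running inside their own callbacks use __cpufreq_driver_target() instead. The helper below and its load heuristic are hypothetical.

#include <linux/cpufreq.h>

/* hypothetical: map a 0..100 load figure onto the policy's freq range */
static void set_speed_for_load(struct cpufreq_policy *policy,
                               unsigned int load)
{
        unsigned int target = policy->min +
                load * (policy->max - policy->min) / 100;

        /* CPUFREQ_RELATION_L: lowest frequency at or above the target;
         * CPUFREQ_RELATION_H would pick the highest one at or below it */
        cpufreq_driver_target(policy, target, CPUFREQ_RELATION_L);
}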
1592
1593 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1594 {
1595         int ret = 0;
1596
1597         if (cpufreq_disabled())
1598                 return ret;
1599
1600         if (!cpufreq_driver->getavg)
1601                 return 0;
1602
1603         policy = cpufreq_cpu_get(policy->cpu);
1604         if (!policy)
1605                 return -EINVAL;
1606
1607         ret = cpufreq_driver->getavg(policy, cpu);
1608
1609         cpufreq_cpu_put(policy);
1610         return ret;
1611 }
1612 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1613
1614 /* Dispatch a governor event (CPUFREQ_GOV_START, _STOP, _LIMITS,
1615  * _POLICY_INIT or _POLICY_EXIT) to the policy's governor.
1616  */
1617
1618 static int __cpufreq_governor(struct cpufreq_policy *policy,
1619                                         unsigned int event)
1620 {
1621         int ret;
1622
1623         /* The fallback governor only needs to exist when the default
1624            governor may have latency restrictions, e.g. conservative or
1625            ondemand; Kconfig already ensures it is built in that case.
1626         */
1627 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1628         struct cpufreq_governor *gov = &cpufreq_gov_performance;
1629 #else
1630         struct cpufreq_governor *gov = NULL;
1631 #endif
1632
1633         if (policy->governor->max_transition_latency &&
1634             policy->cpuinfo.transition_latency >
1635             policy->governor->max_transition_latency) {
1636                 if (!gov)
1637                         return -EINVAL;
1638                 else {
1639                         printk(KERN_WARNING "governor %s failed: HW"
1640                                " transition latency too long, falling"
1641                                " back to governor %s\n",
1642                                policy->governor->name,
1643                                gov->name);
1644                         policy->governor = gov;
1645                 }
1646         }
1647
1648         if (!try_module_get(policy->governor->owner))
1649                 return -EINVAL;
1650
1651         pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1652                                                 policy->cpu, event);
1653         ret = policy->governor->governor(policy, event);
1654
1655         if (!ret) {
1656                 if (event == CPUFREQ_GOV_POLICY_INIT)
1657                         policy->governor->initialized++;
1658                 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1659                         policy->governor->initialized--;
1660         }
1661
1662         /* we keep one module reference alive for
1663            each CPU governed by this governor */
1664         if ((event != CPUFREQ_GOV_START) || ret)
1665                 module_put(policy->governor->owner);
1666         if ((event == CPUFREQ_GOV_STOP) && !ret)
1667                 module_put(policy->governor->owner);
1668
1669         return ret;
1670 }
1671
1672
1673 int cpufreq_register_governor(struct cpufreq_governor *governor)
1674 {
1675         int err;
1676
1677         if (!governor)
1678                 return -EINVAL;
1679
1680         if (cpufreq_disabled())
1681                 return -ENODEV;
1682
1683         mutex_lock(&cpufreq_governor_mutex);
1684
1685         governor->initialized = 0;
1686         err = -EBUSY;
1687         if (__find_governor(governor->name) == NULL) {
1688                 err = 0;
1689                 list_add(&governor->governor_list, &cpufreq_governor_list);
1690         }
1691
1692         mutex_unlock(&cpufreq_governor_mutex);
1693         return err;
1694 }
1695 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
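
The other half of the contract that __cpufreq_governor() above drives: a governor's ->governor() callback is fed the CPUFREQ_GOV_* events in the order POLICY_INIT, START, LIMITS..., STOP, POLICY_EXIT. A minimal sketch follows; the "noop" governor is invented and simply pins the policy to its maximum, like the stock performance governor does.

#include <linux/module.h>
#include <linux/cpufreq.h>

static int noop_governor(struct cpufreq_policy *policy, unsigned int event)
{
        switch (event) {
        case CPUFREQ_GOV_POLICY_INIT:   /* allocate per-policy state */
        case CPUFREQ_GOV_START:         /* start timers/sampling */
        case CPUFREQ_GOV_STOP:          /* stop them again */
        case CPUFREQ_GOV_POLICY_EXIT:   /* free per-policy state */
                return 0;
        case CPUFREQ_GOV_LIMITS:
                /* limits changed: re-target inside the new range */
                return __cpufreq_driver_target(policy, policy->max,
                                               CPUFREQ_RELATION_H);
        }
        return -EINVAL;
}

static struct cpufreq_governor cpufreq_gov_noop = {
        .name           = "noop",
        .governor       = noop_governor,
        .owner          = THIS_MODULE,
};

static int __init gov_noop_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_noop);
}

static void __exit gov_noop_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_noop);
}

module_init(gov_noop_init);
module_exit(gov_noop_exit);
MODULE_LICENSE("GPL");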
1696
1697
1698 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1699 {
1700 #ifdef CONFIG_HOTPLUG_CPU
1701         int cpu;
1702 #endif
1703
1704         if (!governor)
1705                 return;
1706
1707         if (cpufreq_disabled())
1708                 return;
1709
1710 #ifdef CONFIG_HOTPLUG_CPU
1711         for_each_present_cpu(cpu) {
1712                 if (cpu_online(cpu))
1713                         continue;
1714                 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1715                         strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1716         }
1717 #endif
1718
1719         mutex_lock(&cpufreq_governor_mutex);
1720         list_del(&governor->governor_list);
1721         mutex_unlock(&cpufreq_governor_mutex);
1722         return;
1723 }
1724 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1725
1726
1727
1728 /*********************************************************************
1729  *                          POLICY INTERFACE                         *
1730  *********************************************************************/
1731
1732 /**
1733  * cpufreq_get_policy - get the current cpufreq_policy
1734  * @policy: struct cpufreq_policy into which the current cpufreq_policy
1735  *      is written
1736  *
1737  * Reads the current cpufreq policy.
1738  */
1739 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1740 {
1741         struct cpufreq_policy *cpu_policy;
1742         if (!policy)
1743                 return -EINVAL;
1744
1745         cpu_policy = cpufreq_cpu_get(cpu);
1746         if (!cpu_policy)
1747                 return -EINVAL;
1748
1749         memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1750
1751         cpufreq_cpu_put(cpu_policy);
1752         return 0;
1753 }
1754 EXPORT_SYMBOL(cpufreq_get_policy);
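
Usage is a straight copy-out, as sketched below (show_limits is a made-up caller): the live policy is duplicated into a caller-provided buffer, so the caller holds no reference afterwards.

#include <linux/kernel.h>
#include <linux/cpufreq.h>

static void show_limits(unsigned int cpu)
{
        struct cpufreq_policy pol;

        /* fills 'pol' with a snapshot; no locks or refs are kept */
        if (cpufreq_get_policy(&pol, cpu))
                return;

        pr_info("cpu%u: %u-%u kHz, governor %s\n",
                cpu, pol.min, pol.max,
                pol.governor ? pol.governor->name : "none");
}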
1755
1756
1757 /*
1758  * data   : current policy.
1759  * policy : policy to be set.
1760  */
1761 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1762                                 struct cpufreq_policy *policy)
1763 {
1764         int ret = 0, failed = 1;
1765
1766         pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1767                 policy->min, policy->max);
1768
1769         memcpy(&policy->cpuinfo, &data->cpuinfo,
1770                                 sizeof(struct cpufreq_cpuinfo));
1771
1772         if (policy->min > data->max || policy->max < data->min) {
1773                 ret = -EINVAL;
1774                 goto error_out;
1775         }
1776
1777         /* verify the cpu speed can be set within this limit */
1778         ret = cpufreq_driver->verify(policy);
1779         if (ret)
1780                 goto error_out;
1781
1782         /* adjust if necessary - all reasons */
1783         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1784                         CPUFREQ_ADJUST, policy);
1785
1786         /* adjust if necessary - hardware incompatibility */
1787         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1788                         CPUFREQ_INCOMPATIBLE, policy);
1789
1790         /* verify the cpu speed can be set within this limit,
1791            which might be different from the first one */
1792         ret = cpufreq_driver->verify(policy);
1793         if (ret)
1794                 goto error_out;
1795
1796         /* notification of the new policy */
1797         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1798                         CPUFREQ_NOTIFY, policy);
1799
1800         data->min = policy->min;
1801         data->max = policy->max;
1802
1803         pr_debug("new min and max freqs are %u - %u kHz\n",
1804                                         data->min, data->max);
1805
1806         if (cpufreq_driver->setpolicy) {
1807                 data->policy = policy->policy;
1808                 pr_debug("setting range\n");
1809                 ret = cpufreq_driver->setpolicy(policy);
1810         } else {
1811                 if (policy->governor != data->governor) {
1812                         /* save old, working values */
1813                         struct cpufreq_governor *old_gov = data->governor;
1814
1815                         pr_debug("governor switch\n");
1816
1817                         /* end old governor */
1818                         if (data->governor) {
1819                                 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1820                                 unlock_policy_rwsem_write(policy->cpu);
1821                                 __cpufreq_governor(data,
1822                                                 CPUFREQ_GOV_POLICY_EXIT);
1823                                 lock_policy_rwsem_write(policy->cpu);
1824                         }
1825
1826                         /* start new governor */
1827                         data->governor = policy->governor;
1828                         if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1829                                 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1830                                         failed = 0;
1831                                 } else {
1832                                         unlock_policy_rwsem_write(policy->cpu);
1833                                         __cpufreq_governor(data,
1834                                                         CPUFREQ_GOV_POLICY_EXIT);
1835                                         lock_policy_rwsem_write(policy->cpu);
1836                                 }
1837                         }
1838
1839                         if (failed) {
1840                                 /* new governor failed, so re-start old one */
1841                                 pr_debug("starting governor %s failed\n",
1842                                                         data->governor->name);
1843                                 if (old_gov) {
1844                                         data->governor = old_gov;
1845                                         __cpufreq_governor(data,
1846                                                         CPUFREQ_GOV_POLICY_INIT);
1847                                         __cpufreq_governor(data,
1848                                                            CPUFREQ_GOV_START);
1849                                 }
1850                                 ret = -EINVAL;
1851                                 goto error_out;
1852                         }
1853                         /* might be a policy change, too, so fall through */
1854                 }
1855                 pr_debug("governor: change or update limits\n");
1856                 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1857         }
1858
1859 error_out:
1860         return ret;
1861 }
1862
1863 /**
1864  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
1865  *      @cpu: CPU which shall be re-evaluated
1866  *
1867  *      Useful for policy notifiers whose constraints may change
1868  *      over time.
1869  */
1870 int cpufreq_update_policy(unsigned int cpu)
1871 {
1872         struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1873         struct cpufreq_policy policy;
1874         int ret;
1875
1876         if (!data) {
1877                 ret = -ENODEV;
1878                 goto no_policy;
1879         }
1880
1881         if (unlikely(lock_policy_rwsem_write(cpu))) {
1882                 ret = -EINVAL;
1883                 goto fail;
1884         }
1885
1886         pr_debug("updating policy for CPU %u\n", cpu);
1887         memcpy(&policy, data, sizeof(struct cpufreq_policy));
1888         policy.min = data->user_policy.min;
1889         policy.max = data->user_policy.max;
1890         policy.policy = data->user_policy.policy;
1891         policy.governor = data->user_policy.governor;
1892
1893         /* BIOS might change freq behind our back
1894            -> ask driver for current freq and notify governors about a change */
1895         if (cpufreq_driver->get) {
1896                 policy.cur = cpufreq_driver->get(cpu);
1897                 if (!data->cur) {
1898                         pr_debug("Driver did not initialize current freq\n");
1899                         data->cur = policy.cur;
1900                 } else {
1901                         if (data->cur != policy.cur && cpufreq_driver->target)
1902                                 cpufreq_out_of_sync(cpu, data->cur,
1903                                                                 policy.cur);
1904                 }
1905         }
1906
1907         ret = __cpufreq_set_policy(data, &policy);
1908
1909         unlock_policy_rwsem_write(cpu);
1910
1911 fail:
1912         cpufreq_cpu_put(data);
1913 no_policy:
1914         return ret;
1915 }
1916 EXPORT_SYMBOL(cpufreq_update_policy);
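
A typical pairing, sketched under assumptions (the thermal-style cap and all cap_* names are invented): a CPUFREQ_POLICY_NOTIFIER clamps policy->max during the CPUFREQ_ADJUST pass of __cpufreq_set_policy(), and cpufreq_update_policy() is what forces that pass to re-run once the external constraint changes.

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>

static unsigned int cap_khz = UINT_MAX;         /* hypothetical limit */

static int cap_policy_notify(struct notifier_block *nb,
                             unsigned long val, void *data)
{
        struct cpufreq_policy *policy = data;

        if (val != CPUFREQ_ADJUST)
                return NOTIFY_DONE;

        /* clamp the range; the core re-runs ->verify() afterwards */
        cpufreq_verify_within_limits(policy, 0, cap_khz);
        return NOTIFY_OK;
}

static struct notifier_block cap_nb = {
        .notifier_call = cap_policy_notify,
};

/* registered elsewhere with:
 * cpufreq_register_notifier(&cap_nb, CPUFREQ_POLICY_NOTIFIER); */

static void cap_set(unsigned int khz)
{
        unsigned int cpu;

        cap_khz = khz;
        for_each_online_cpu(cpu)        /* re-evaluate every policy */
                cpufreq_update_policy(cpu);
}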
1917
1918 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1919                                         unsigned long action, void *hcpu)
1920 {
1921         unsigned int cpu = (unsigned long)hcpu;
1922         struct device *dev;
1923
1924         dev = get_cpu_device(cpu);
1925         if (dev) {
1926                 switch (action) {
1927                 case CPU_ONLINE:
1928                         cpufreq_add_dev(dev, NULL);
1929                         break;
1930                 case CPU_DOWN_PREPARE:
1931                 case CPU_UP_CANCELED_FROZEN:
1932                         __cpufreq_remove_dev(dev, NULL);
1933                         break;
1934                 case CPU_DOWN_FAILED:
1935                         cpufreq_add_dev(dev, NULL);
1936                         break;
1937                 }
1938         }
1939         return NOTIFY_OK;
1940 }
1941
1942 static struct notifier_block __refdata cpufreq_cpu_notifier = {
1943         .notifier_call = cpufreq_cpu_callback,
1944 };
1945
1946 /*********************************************************************
1947  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
1948  *********************************************************************/
1949
1950 /**
1951  * cpufreq_register_driver - register a CPU Frequency driver
1952  * @driver_data: A struct cpufreq_driver containing the values
1953  * submitted by the CPU Frequency driver.
1954  *
1955  *   Registers a CPU Frequency driver with this core code. Returns zero
1956  * on success, or -EBUSY if another driver was registered first (and has
1957  * not been unregistered in the meantime).
1958  *
1959  */
1960 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1961 {
1962         unsigned long flags;
1963         int ret;
1964
1965         if (cpufreq_disabled())
1966                 return -ENODEV;
1967
1968         if (!driver_data || !driver_data->verify || !driver_data->init ||
1969             ((!driver_data->setpolicy) && (!driver_data->target)))
1970                 return -EINVAL;
1971
1972         pr_debug("trying to register driver %s\n", driver_data->name);
1973
1974         if (driver_data->setpolicy)
1975                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1976
1977         write_lock_irqsave(&cpufreq_driver_lock, flags);
1978         if (cpufreq_driver) {
1979                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1980                 return -EBUSY;
1981         }
1982         cpufreq_driver = driver_data;
1983         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1984
1985         ret = subsys_interface_register(&cpufreq_interface);
1986         if (ret)
1987                 goto err_null_driver;
1988
1989         if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1990                 int i;
1991                 ret = -ENODEV;
1992
1993                 /* check for at least one working CPU */
1994                 for (i = 0; i < nr_cpu_ids; i++)
1995                         if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1996                                 ret = 0;
1997                                 break;
1998                         }
1999
2000                 /* if all ->init() calls failed, unregister */
2001                 if (ret) {
2002                         pr_debug("no CPU initialized for driver %s\n",
2003                                                         driver_data->name);
2004                         goto err_if_unreg;
2005                 }
2006         }
2007
2008         register_hotcpu_notifier(&cpufreq_cpu_notifier);
2009         pr_debug("driver %s up and running\n", driver_data->name);
2010
2011         return 0;
2012 err_if_unreg:
2013         subsys_interface_unregister(&cpufreq_interface);
2014 err_null_driver:
2015         write_lock_irqsave(&cpufreq_driver_lock, flags);
2016         cpufreq_driver = NULL;
2017         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2018         return ret;
2019 }
2020 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
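
For contrast, the driver's side of this registration, as a hedged sketch built only on the subsystem's frequency-table helpers. The SoC details (soc_* names, the two invented operating points, the latency figure) are placeholders, and a real ->target() would also wrap the hardware switch in CPUFREQ_PRECHANGE/POSTCHANGE transition notifications.

#include <linux/module.h>
#include <linux/cpufreq.h>

static struct cpufreq_frequency_table soc_freqs[] = {
        { .index = 0, .frequency = 500000 },    /* kHz, invented */
        { .index = 1, .frequency = 1000000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int soc_init(struct cpufreq_policy *policy)
{
        policy->cpuinfo.transition_latency = 100 * 1000; /* ns, invented */
        return cpufreq_frequency_table_cpuinfo(policy, soc_freqs);
}

static int soc_verify(struct cpufreq_policy *policy)
{
        return cpufreq_frequency_table_verify(policy, soc_freqs);
}

static int soc_target(struct cpufreq_policy *policy,
                      unsigned int target_freq, unsigned int relation)
{
        unsigned int idx;

        if (cpufreq_frequency_table_target(policy, soc_freqs, target_freq,
                                           relation, &idx))
                return -EINVAL;

        /* program the clock hardware here, then record the result */
        policy->cur = soc_freqs[idx].frequency;
        return 0;
}

static struct cpufreq_driver soc_cpufreq_driver = {
        .name   = "soc-demo",
        .owner  = THIS_MODULE,
        .init   = soc_init,
        .verify = soc_verify,
        .target = soc_target,
};

static int __init soc_cpufreq_init(void)
{
        return cpufreq_register_driver(&soc_cpufreq_driver);
}

static void __exit soc_cpufreq_exit(void)
{
        cpufreq_unregister_driver(&soc_cpufreq_driver);
}

module_init(soc_cpufreq_init);
module_exit(soc_cpufreq_exit);
MODULE_LICENSE("GPL");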
2021
2022
2023 /**
2024  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2025  *
2026  *    Unregister the current CPUFreq driver. Only call this if you have
2027  * the right to do so, i.e. if you registered it successfully before.
2028  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2029  * currently not initialised.
2030  */
2031 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2032 {
2033         unsigned long flags;
2034
2035         if (!cpufreq_driver || (driver != cpufreq_driver))
2036                 return -EINVAL;
2037
2038         pr_debug("unregistering driver %s\n", driver->name);
2039
2040         subsys_interface_unregister(&cpufreq_interface);
2041         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2042
2043         write_lock_irqsave(&cpufreq_driver_lock, flags);
2044         cpufreq_driver = NULL;
2045         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2046
2047         return 0;
2048 }
2049 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2050
2051 static int __init cpufreq_core_init(void)
2052 {
2053         int cpu;
2054
2055         if (cpufreq_disabled())
2056                 return -ENODEV;
2057
2058         for_each_possible_cpu(cpu) {
2059                 per_cpu(cpufreq_policy_cpu, cpu) = -1;
2060                 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2061         }
2062
2063         cpufreq_global_kobject = kobject_create();
2064         BUG_ON(!cpufreq_global_kobject);
2065         register_syscore_ops(&cpufreq_syscore_ops);
2066
2067         return 0;
2068 }
2069 core_initcall(cpufreq_core_init);