+
+/*
+ * cpufreq_add_dev_policy - fold a hot-added CPU into a sibling's live policy
+ * @cpu:     number of the CPU being added
+ * @policy:  freshly initialised policy object built for @cpu
+ * @sys_dev: sysdev of @cpu; target kobject for the "cpufreq" symlink
+ *
+ * On SMP, walk @policy->cpus looking for a sibling CPU that already owns a
+ * live policy (cpufreq_cpu_get() succeeds and takes a reference).  If one is
+ * found, @cpu is attached to that managed policy instead of keeping its own:
+ * policy_cpu is redirected, cpufreq_cpu_data is set under cpufreq_driver_lock,
+ * a sysfs link is created, and the driver-side init done on the throw-away
+ * @policy is undone via cpufreq_driver->exit().
+ *
+ * Returns 0 if no sibling owns a policy (caller continues normal setup) or if
+ * the link was created; a negative errno on failure.
+ *
+ * NOTE(review): the unlock/relock pair below implies the caller holds the
+ * policy rwsem for @cpu in write mode on entry — confirm against the caller.
+ */
+int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
+ struct sys_device *sys_dev)
+{
+ int ret = 0;
+#ifdef CONFIG_SMP
+ unsigned long flags;
+ unsigned int j;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Re-install the governor this CPU was using before it went offline. */
+ if (per_cpu(cpufreq_cpu_governor, cpu)) {
+ policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
+ dprintk("Restoring governor %s for cpu %d\n",
+ policy->governor->name, cpu);
+ }
+#endif
+
+ for_each_cpu(j, policy->cpus) {
+ struct cpufreq_policy *managed_policy;
+
+ if (cpu == j)
+ continue;
+
+ /* Check for existing affected CPUs.
+ * They may not be aware of it due to CPU Hotplug.
+ * cpufreq_cpu_put is called when the device is removed
+ * in __cpufreq_remove_dev()
+ */
+ managed_policy = cpufreq_cpu_get(j);
+ if (unlikely(managed_policy)) {
+
+ /* Set proper policy_cpu */
+ /*
+ * The write rwsem is keyed by policy_cpu, so it must be
+ * dropped under the old key and re-taken under the new
+ * one.  NOTE(review): per_cpu(policy_cpu, cpu) is
+ * written in the window between unlock and relock —
+ * presumably safe because @cpu is not yet visible in
+ * cpufreq_cpu_data; confirm.
+ */
+ unlock_policy_rwsem_write(cpu);
+ per_cpu(policy_cpu, cpu) = managed_policy->cpu;
+
+ if (lock_policy_rwsem_write(cpu) < 0) {
+ /* Should not go through policy unlock path */
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+ /* Drop the reference taken by cpufreq_cpu_get(j) above. */
+ cpufreq_cpu_put(managed_policy);
+ return -EBUSY;
+ }
+
+ /* Publish @cpu as belonging to the managed policy. */
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpumask_copy(managed_policy->cpus, policy->cpus);
+ per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ dprintk("CPU already managed, adding link\n");
+ ret = sysfs_create_link(&sys_dev->kobj,
+ &managed_policy->kobj,
+ "cpufreq");
+ /*
+ * On link failure only the get() reference is dropped;
+ * note that cpufreq_cpu_data set above is NOT rolled
+ * back here, and driver->exit() still runs below before
+ * the error is returned.
+ */
+ if (ret)
+ cpufreq_cpu_put(managed_policy);
+ /*
+ * Success. We only needed to be added to the mask.
+ * Call driver->exit() because only the cpu parent of
+ * the kobj needed to call init().
+ */
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+ return ret;
+ }
+ }
+#endif
+ return ret;
+}
+
+