2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
4 * This code is licenced under the GPL.
6 #include <linux/proc_fs.h>
8 #include <linux/init.h>
9 #include <linux/notifier.h>
10 #include <linux/sched.h>
11 #include <linux/unistd.h>
12 #include <linux/cpu.h>
13 #include <linux/oom.h>
14 #include <linux/rcupdate.h>
15 #include <linux/export.h>
16 #include <linux/bug.h>
17 #include <linux/kthread.h>
18 #include <linux/stop_machine.h>
19 #include <linux/mutex.h>
20 #include <linux/gfp.h>
21 #include <linux/suspend.h>
22 #include <linux/lockdep.h>
23 #include <trace/events/power.h>
28 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
29 static DEFINE_MUTEX(cpu_add_remove_lock);
32 * The following two APIs (cpu_maps_update_begin/done) must be used when
33 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
34 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
35 * hotplug callback (un)registration performed using __register_cpu_notifier()
36 * or __unregister_cpu_notifier().
38 void cpu_maps_update_begin(void)
40 mutex_lock(&cpu_add_remove_lock);
42 EXPORT_SYMBOL(cpu_notifier_register_begin);
44 void cpu_maps_update_done(void)
46 mutex_unlock(&cpu_add_remove_lock);
48 EXPORT_SYMBOL(cpu_notifier_register_done);
50 static RAW_NOTIFIER_HEAD(cpu_chain);
52 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
53 * Should always be manipulated under cpu_add_remove_lock
55 static int cpu_hotplug_disabled;
57 #ifdef CONFIG_HOTPLUG_CPU
60 struct task_struct *active_writer;
61 /* wait queue to wake up the active_writer */
63 /* verifies that no writer will get active while readers are active */
66 * Also blocks the new readers during
67 * an ongoing cpu hotplug operation.
71 #ifdef CONFIG_DEBUG_LOCK_ALLOC
72 struct lockdep_map dep_map;
75 .active_writer = NULL,
76 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
77 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
78 #ifdef CONFIG_DEBUG_LOCK_ALLOC
79 .dep_map = {.name = "cpu_hotplug.lock" },
83 /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
84 #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
85 #define cpuhp_lock_acquire_tryread() \
86 lock_map_acquire_tryread(&cpu_hotplug.dep_map)
87 #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
88 #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
91 * hotplug_pcp - per cpu hotplug descriptor
92 * @unplug: set when pin_current_cpu() needs to sync tasks
93 * @sync_tsk: the task that waits for tasks to finish pinned sections
94 * @refcount: counter of tasks in pinned sections
95 * @grab_lock: set when the tasks entering pinned sections should wait
96 * @synced: notifier for @sync_tsk to tell cpu_down it's finished
97 * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
98 * @mutex_init: zero if the mutex hasn't been initialized yet.
100 * Although @unplug and @sync_tsk may point to the same task, the @unplug
101 * is used as a flag and still exists after @sync_tsk has exited and
102 * @sync_tsk set to NULL.
105 struct task_struct *unplug;
106 struct task_struct *sync_tsk;
109 struct completion synced;
110 struct completion unplug_wait;
111 #ifdef CONFIG_PREEMPT_RT_FULL
113 * Note, on PREEMPT_RT, the hotplug lock must save the state of
114 * the task, otherwise the mutex will cause the task to fail
115 * to sleep when required. (Because it's called from migrate_disable())
117 * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
127 #ifdef CONFIG_PREEMPT_RT_FULL
128 # define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
129 # define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
131 # define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
132 # define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
135 static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
138 * pin_current_cpu - Prevent the current cpu from being unplugged
140 * Lightweight version of get_online_cpus() to prevent cpu from being
141 * unplugged when code runs in a migration disabled region.
143 * Must be called with preemption disabled (preempt_count = 1)!
145 void pin_current_cpu(void)
147 struct hotplug_pcp *hp;
151 hp = this_cpu_ptr(&hotplug_pcp);
153 if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
154 hp->unplug == current) {
165 * Try to push this task off of this CPU.
169 hp = this_cpu_ptr(&hotplug_pcp);
170 if (!hp->grab_lock) {
172 * Just let it continue it's already pinned
186 * unpin_current_cpu - Allow unplug of current cpu
188 * Must be called with preemption or interrupts disabled!
190 void unpin_current_cpu(void)
192 struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);
194 WARN_ON(hp->refcount <= 0);
196 /* This is safe. sync_unplug_thread is pinned to this cpu */
197 if (!--hp->refcount && hp->unplug && hp->unplug != current)
198 wake_up_process(hp->unplug);
201 static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
203 set_current_state(TASK_UNINTERRUPTIBLE);
204 while (hp->refcount) {
205 schedule_preempt_disabled();
206 set_current_state(TASK_UNINTERRUPTIBLE);
210 static int sync_unplug_thread(void *data)
212 struct hotplug_pcp *hp = data;
214 wait_for_completion(&hp->unplug_wait);
216 hp->unplug = current;
217 wait_for_pinned_cpus(hp);
220 * This thread will synchronize the cpu_down() with threads
221 * that have pinned the CPU. When the pinned CPU count reaches
222 * zero, we inform the cpu_down code to continue to the next step.
224 set_current_state(TASK_UNINTERRUPTIBLE);
226 complete(&hp->synced);
229 * If all succeeds, the next step will need tasks to wait till
230 * the CPU is offline before continuing. To do this, the grab_lock
231 * is set and tasks going into pin_current_cpu() will block on the
232 * mutex. But we still need to wait for those that are already in
233 * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
234 * will kick this thread out.
236 while (!hp->grab_lock && !kthread_should_stop()) {
238 set_current_state(TASK_UNINTERRUPTIBLE);
241 /* Make sure grab_lock is seen before we see a stale completion */
245 * Now just before cpu_down() enters stop machine, we need to make
246 * sure all tasks that are in pinned CPU sections are out, and new
247 * tasks will now grab the lock, keeping them from entering pinned
250 if (!kthread_should_stop()) {
252 wait_for_pinned_cpus(hp);
254 complete(&hp->synced);
257 set_current_state(TASK_UNINTERRUPTIBLE);
258 while (!kthread_should_stop()) {
260 set_current_state(TASK_UNINTERRUPTIBLE);
262 set_current_state(TASK_RUNNING);
265 * Force this thread off this CPU as it's going down and
266 * we don't want any more work on this CPU.
268 current->flags &= ~PF_NO_SETAFFINITY;
269 do_set_cpus_allowed(current, cpu_present_mask);
274 static void __cpu_unplug_sync(struct hotplug_pcp *hp)
276 wake_up_process(hp->sync_tsk);
277 wait_for_completion(&hp->synced);
280 static void __cpu_unplug_wait(unsigned int cpu)
282 struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
284 complete(&hp->unplug_wait);
285 wait_for_completion(&hp->synced);
289 * Start the sync_unplug_thread on the target cpu and wait for it to
292 static int cpu_unplug_begin(unsigned int cpu)
294 struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
297 /* Protected by cpu_hotplug.lock */
298 if (!hp->mutex_init) {
299 #ifdef CONFIG_PREEMPT_RT_FULL
300 spin_lock_init(&hp->lock);
302 mutex_init(&hp->mutex);
307 /* Inform the scheduler to migrate tasks off this CPU */
308 tell_sched_cpu_down_begin(cpu);
310 init_completion(&hp->synced);
311 init_completion(&hp->unplug_wait);
313 hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
314 if (IS_ERR(hp->sync_tsk)) {
315 err = PTR_ERR(hp->sync_tsk);
319 kthread_bind(hp->sync_tsk, cpu);
322 * Wait for tasks to get out of the pinned sections,
323 * it's still OK if new tasks enter. Some CPU notifiers will
324 * wait for tasks that are going to enter these sections and
325 * we must not have them block.
327 wake_up_process(hp->sync_tsk);
331 static void cpu_unplug_sync(unsigned int cpu)
333 struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
335 init_completion(&hp->synced);
336 /* The completion needs to be initialzied before setting grab_lock */
339 /* Grab the mutex before setting grab_lock */
344 * The CPU notifiers have been completed.
345 * Wait for tasks to get out of pinned CPU sections and have new
346 * tasks block until the CPU is completely down.
348 __cpu_unplug_sync(hp);
350 /* All done with the sync thread */
351 kthread_stop(hp->sync_tsk);
355 static void cpu_unplug_done(unsigned int cpu)
357 struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
360 /* Let all tasks know cpu unplug is finished before cleaning up */
364 kthread_stop(hp->sync_tsk);
368 /* protected by cpu_hotplug.lock */
371 tell_sched_cpu_down_done(cpu);
374 void get_online_cpus(void)
377 if (cpu_hotplug.active_writer == current)
379 cpuhp_lock_acquire_read();
380 mutex_lock(&cpu_hotplug.lock);
381 atomic_inc(&cpu_hotplug.refcount);
382 mutex_unlock(&cpu_hotplug.lock);
384 EXPORT_SYMBOL_GPL(get_online_cpus);
386 bool try_get_online_cpus(void)
388 if (cpu_hotplug.active_writer == current)
390 if (!mutex_trylock(&cpu_hotplug.lock))
392 cpuhp_lock_acquire_tryread();
393 atomic_inc(&cpu_hotplug.refcount);
394 mutex_unlock(&cpu_hotplug.lock);
397 EXPORT_SYMBOL_GPL(try_get_online_cpus);
399 void put_online_cpus(void)
403 if (cpu_hotplug.active_writer == current)
406 refcount = atomic_dec_return(&cpu_hotplug.refcount);
407 if (WARN_ON(refcount < 0)) /* try to fix things up */
408 atomic_inc(&cpu_hotplug.refcount);
410 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
411 wake_up(&cpu_hotplug.wq);
413 cpuhp_lock_release();
416 EXPORT_SYMBOL_GPL(put_online_cpus);
419 * This ensures that the hotplug operation can begin only when the
420 * refcount goes to zero.
422 * Note that during a cpu-hotplug operation, the new readers, if any,
423 * will be blocked by the cpu_hotplug.lock
425 * Since cpu_hotplug_begin() is always called after invoking
426 * cpu_maps_update_begin(), we can be sure that only one writer is active.
428 * Note that theoretically, there is a possibility of a livelock:
429 * - Refcount goes to zero, last reader wakes up the sleeping
431 * - Last reader unlocks the cpu_hotplug.lock.
432 * - A new reader arrives at this moment, bumps up the refcount.
433 * - The writer acquires the cpu_hotplug.lock finds the refcount
434 * non zero and goes to sleep again.
436 * However, this is very difficult to achieve in practice since
437 * get_online_cpus() not an api which is called all that often.
440 void cpu_hotplug_begin(void)
444 cpu_hotplug.active_writer = current;
445 cpuhp_lock_acquire();
448 mutex_lock(&cpu_hotplug.lock);
449 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
450 if (likely(!atomic_read(&cpu_hotplug.refcount)))
452 mutex_unlock(&cpu_hotplug.lock);
455 finish_wait(&cpu_hotplug.wq, &wait);
458 void cpu_hotplug_done(void)
460 cpu_hotplug.active_writer = NULL;
461 mutex_unlock(&cpu_hotplug.lock);
462 cpuhp_lock_release();
466 * Wait for currently running CPU hotplug operations to complete (if any) and
467 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
468 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
469 * hotplug path before performing hotplug operations. So acquiring that lock
470 * guarantees mutual exclusion from any currently running hotplug operations.
472 void cpu_hotplug_disable(void)
474 cpu_maps_update_begin();
475 cpu_hotplug_disabled = 1;
476 cpu_maps_update_done();
479 void cpu_hotplug_enable(void)
481 cpu_maps_update_begin();
482 cpu_hotplug_disabled = 0;
483 cpu_maps_update_done();
486 #endif /* CONFIG_HOTPLUG_CPU */
488 /* Need to know about CPUs going up/down? */
489 int __ref register_cpu_notifier(struct notifier_block *nb)
492 cpu_maps_update_begin();
493 ret = raw_notifier_chain_register(&cpu_chain, nb);
494 cpu_maps_update_done();
498 int __ref __register_cpu_notifier(struct notifier_block *nb)
500 return raw_notifier_chain_register(&cpu_chain, nb);
503 static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
508 ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
511 return notifier_to_errno(ret);
514 static int cpu_notify(unsigned long val, void *v)
516 return __cpu_notify(val, v, -1, NULL);
519 #ifdef CONFIG_HOTPLUG_CPU
521 static void cpu_notify_nofail(unsigned long val, void *v)
523 BUG_ON(cpu_notify(val, v));
525 EXPORT_SYMBOL(register_cpu_notifier);
526 EXPORT_SYMBOL(__register_cpu_notifier);
528 void __ref unregister_cpu_notifier(struct notifier_block *nb)
530 cpu_maps_update_begin();
531 raw_notifier_chain_unregister(&cpu_chain, nb);
532 cpu_maps_update_done();
534 EXPORT_SYMBOL(unregister_cpu_notifier);
536 void __ref __unregister_cpu_notifier(struct notifier_block *nb)
538 raw_notifier_chain_unregister(&cpu_chain, nb);
540 EXPORT_SYMBOL(__unregister_cpu_notifier);
543 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
546 * This function walks all processes, finds a valid mm struct for each one and
547 * then clears a corresponding bit in mm's cpumask. While this all sounds
548 * trivial, there are various non-obvious corner cases, which this function
549 * tries to solve in a safe manner.
551 * Also note that the function uses a somewhat relaxed locking scheme, so it may
552 * be called only for an already offlined CPU.
554 void clear_tasks_mm_cpumask(int cpu)
556 struct task_struct *p;
559 * This function is called after the cpu is taken down and marked
560 * offline, so its not like new tasks will ever get this cpu set in
561 * their mm mask. -- Peter Zijlstra
562 * Thus, we may use rcu_read_lock() here, instead of grabbing
563 * full-fledged tasklist_lock.
565 WARN_ON(cpu_online(cpu));
567 for_each_process(p) {
568 struct task_struct *t;
571 * Main thread might exit, but other threads may still have
572 * a valid mm. Find one.
574 t = find_lock_task_mm(p);
577 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
583 static inline void check_for_tasks(int dead_cpu)
585 struct task_struct *g, *p;
587 read_lock_irq(&tasklist_lock);
588 do_each_thread(g, p) {
592 * We do the check with unlocked task_rq(p)->lock.
593 * Order the reading to do not warn about a task,
594 * which was running on this cpu in the past, and
595 * it's just been woken on another cpu.
598 if (task_cpu(p) != dead_cpu)
601 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
602 p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
603 } while_each_thread(g, p);
604 read_unlock_irq(&tasklist_lock);
607 struct take_cpu_down_param {
612 /* Take this CPU down. */
613 static int __ref take_cpu_down(void *_param)
615 struct take_cpu_down_param *param = _param;
618 /* Ensure this CPU doesn't handle any more interrupts. */
619 err = __cpu_disable();
623 cpu_notify(CPU_DYING | param->mod, param->hcpu);
624 /* Park the stopper thread */
625 kthread_park(current);
629 /* Requires cpu_add_remove_lock to be held */
630 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
632 int mycpu, err, nr_calls = 0;
633 void *hcpu = (void *)(long)cpu;
634 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
635 struct take_cpu_down_param tcd_param = {
639 cpumask_var_t cpumask;
640 cpumask_var_t cpumask_org;
642 if (num_online_cpus() == 1)
645 if (!cpu_online(cpu))
648 /* Move the downtaker off the unplug cpu */
649 if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
651 if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
652 free_cpumask_var(cpumask);
656 cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
657 cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
658 set_cpus_allowed_ptr(current, cpumask);
659 free_cpumask_var(cpumask);
661 mycpu = smp_processor_id();
663 printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
671 err = cpu_unplug_begin(cpu);
673 printk("cpu_unplug_begin(%d) failed\n", cpu);
677 err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
680 __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
681 pr_warn("%s: attempt to take down CPU %u failed\n",
687 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
688 * and RCU users of this state to go away such that all new such users
691 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
692 * not imply sync_sched(), so explicitly call both.
694 * Do sync before park smpboot threads to take care the rcu boost case.
696 #ifdef CONFIG_PREEMPT
701 __cpu_unplug_wait(cpu);
702 smpboot_park_threads(cpu);
704 /* Notifiers are done. Don't let any more tasks pin this CPU. */
705 cpu_unplug_sync(cpu);
708 * So now all preempt/rcu users must observe !cpu_active().
711 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
713 /* CPU didn't die: tell everyone. Can't complain. */
714 smpboot_unpark_threads(cpu);
715 cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
718 BUG_ON(cpu_online(cpu));
721 * The migration_call() CPU_DYING callback will have removed all
722 * runnable tasks from the cpu, there's only the idle task left now
723 * that the migration thread is done doing the stop_machine thing.
725 * Wait for the stop thread to go away.
727 while (!idle_cpu(cpu))
730 /* This actually kills the CPU. */
733 /* CPU is completely dead: tell everyone. Too late to complain. */
734 cpu_notify_nofail(CPU_DEAD | mod, hcpu);
736 check_for_tasks(cpu);
739 cpu_unplug_done(cpu);
743 cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
745 set_cpus_allowed_ptr(current, cpumask_org);
746 free_cpumask_var(cpumask_org);
750 int __ref cpu_down(unsigned int cpu)
754 cpu_maps_update_begin();
756 if (cpu_hotplug_disabled) {
761 err = _cpu_down(cpu, 0);
764 cpu_maps_update_done();
767 EXPORT_SYMBOL(cpu_down);
768 #endif /*CONFIG_HOTPLUG_CPU*/
770 /* Requires cpu_add_remove_lock to be held */
771 static int _cpu_up(unsigned int cpu, int tasks_frozen)
773 int ret, nr_calls = 0;
774 void *hcpu = (void *)(long)cpu;
775 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
776 struct task_struct *idle;
780 if (cpu_online(cpu) || !cpu_present(cpu)) {
785 idle = idle_thread_get(cpu);
791 ret = smpboot_create_threads(cpu);
795 ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
798 pr_warn("%s: attempt to bring up CPU %u failed\n",
803 /* Arch-specific enabling code. */
804 ret = __cpu_up(cpu, idle);
807 BUG_ON(!cpu_online(cpu));
809 /* Wake the per cpu threads */
810 smpboot_unpark_threads(cpu);
812 /* Now call notifier in preparation. */
813 cpu_notify(CPU_ONLINE | mod, hcpu);
817 __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
824 int cpu_up(unsigned int cpu)
828 if (!cpu_possible(cpu)) {
829 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
831 #if defined(CONFIG_IA64)
832 pr_err("please check additional_cpus= boot parameter\n");
837 err = try_online_node(cpu_to_node(cpu));
841 cpu_maps_update_begin();
843 if (cpu_hotplug_disabled) {
848 err = _cpu_up(cpu, 0);
851 cpu_maps_update_done();
854 EXPORT_SYMBOL_GPL(cpu_up);
856 #ifdef CONFIG_PM_SLEEP_SMP
857 static cpumask_var_t frozen_cpus;
859 int disable_nonboot_cpus(void)
861 int cpu, first_cpu, error = 0;
863 cpu_maps_update_begin();
864 first_cpu = cpumask_first(cpu_online_mask);
866 * We take down all of the non-boot CPUs in one shot to avoid races
867 * with the userspace trying to use the CPU hotplug at the same time
869 cpumask_clear(frozen_cpus);
871 pr_info("Disabling non-boot CPUs ...\n");
872 for_each_online_cpu(cpu) {
873 if (cpu == first_cpu)
875 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
876 error = _cpu_down(cpu, 1);
877 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
879 cpumask_set_cpu(cpu, frozen_cpus);
881 pr_err("Error taking CPU%d down: %d\n", cpu, error);
887 BUG_ON(num_online_cpus() > 1);
888 /* Make sure the CPUs won't be enabled by someone else */
889 cpu_hotplug_disabled = 1;
891 pr_err("Non-boot CPUs are not disabled\n");
893 cpu_maps_update_done();
897 void __weak arch_enable_nonboot_cpus_begin(void)
901 void __weak arch_enable_nonboot_cpus_end(void)
905 void __ref enable_nonboot_cpus(void)
909 /* Allow everyone to use the CPU hotplug again */
910 cpu_maps_update_begin();
911 cpu_hotplug_disabled = 0;
912 if (cpumask_empty(frozen_cpus))
915 pr_info("Enabling non-boot CPUs ...\n");
917 arch_enable_nonboot_cpus_begin();
919 for_each_cpu(cpu, frozen_cpus) {
920 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
921 error = _cpu_up(cpu, 1);
922 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
924 pr_info("CPU%d is up\n", cpu);
927 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
930 arch_enable_nonboot_cpus_end();
932 cpumask_clear(frozen_cpus);
934 cpu_maps_update_done();
937 static int __init alloc_frozen_cpus(void)
939 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
943 core_initcall(alloc_frozen_cpus);
946 * When callbacks for CPU hotplug notifications are being executed, we must
947 * ensure that the state of the system with respect to the tasks being frozen
948 * or not, as reported by the notification, remains unchanged *throughout the
949 * duration* of the execution of the callbacks.
950 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
952 * This synchronization is implemented by mutually excluding regular CPU
953 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
954 * Hibernate notifications.
957 cpu_hotplug_pm_callback(struct notifier_block *nb,
958 unsigned long action, void *ptr)
962 case PM_SUSPEND_PREPARE:
963 case PM_HIBERNATION_PREPARE:
964 cpu_hotplug_disable();
967 case PM_POST_SUSPEND:
968 case PM_POST_HIBERNATION:
969 cpu_hotplug_enable();
980 static int __init cpu_hotplug_pm_sync_init(void)
983 * cpu_hotplug_pm_callback has higher priority than x86
984 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
985 * to disable cpu hotplug to avoid cpu hotplug race.
987 pm_notifier(cpu_hotplug_pm_callback, 0);
990 core_initcall(cpu_hotplug_pm_sync_init);
992 #endif /* CONFIG_PM_SLEEP_SMP */
995 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
996 * @cpu: cpu that just started
998 * This function calls the cpu_chain notifiers with CPU_STARTING.
999 * It must be called by the arch code on the new cpu, before the new cpu
1000 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1002 void notify_cpu_starting(unsigned int cpu)
1004 unsigned long val = CPU_STARTING;
1006 #ifdef CONFIG_PM_SLEEP_SMP
1007 if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
1008 val = CPU_STARTING_FROZEN;
1009 #endif /* CONFIG_PM_SLEEP_SMP */
1010 cpu_notify(val, (void *)(long)cpu);
1013 #endif /* CONFIG_SMP */
1016 * cpu_bit_bitmap[] is a special, "compressed" data structure that
1017 * represents all NR_CPUS bits binary values of 1<<nr.
1019 * It is used by cpumask_of() to get a constant address to a CPU
1020 * mask value that has a single bit set only.
1023 /* cpu_bit_bitmap[0] is empty - so we can back into it */
1024 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
1025 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
1026 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
1027 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
1029 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
1031 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
1032 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
1033 #if BITS_PER_LONG > 32
1034 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
1035 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
1038 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
1040 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
1041 EXPORT_SYMBOL(cpu_all_bits);
1043 #ifdef CONFIG_INIT_ALL_POSSIBLE
1044 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
1047 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
1049 const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
1050 EXPORT_SYMBOL(cpu_possible_mask);
1052 static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
1053 const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
1054 EXPORT_SYMBOL(cpu_online_mask);
1056 static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
1057 const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
1058 EXPORT_SYMBOL(cpu_present_mask);
1060 static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
1061 const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
1062 EXPORT_SYMBOL(cpu_active_mask);
1064 void set_cpu_possible(unsigned int cpu, bool possible)
1067 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
1069 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
1072 void set_cpu_present(unsigned int cpu, bool present)
1075 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
1077 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
1080 void set_cpu_online(unsigned int cpu, bool online)
1083 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
1084 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
1086 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
1090 void set_cpu_active(unsigned int cpu, bool active)
1093 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
1095 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
1098 void init_cpu_present(const struct cpumask *src)
1100 cpumask_copy(to_cpumask(cpu_present_bits), src);
1103 void init_cpu_possible(const struct cpumask *src)
1105 cpumask_copy(to_cpumask(cpu_possible_bits), src);
1108 void init_cpu_online(const struct cpumask *src)
1110 cpumask_copy(to_cpumask(cpu_online_bits), src);