/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
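
/*
 * Illustrative sketch (not part of this file): a subsystem that wants to set
 * up per-cpu state and register a hotplug notifier without a race window
 * would typically bracket the lockless registration helper with the
 * begin/done calls described above, e.g.:
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		init_my_percpu_state(cpu);	// hypothetical per-cpu setup
 *	__register_cpu_notifier(&my_cpu_nb);	// hypothetical notifier_block
 *	cpu_notifier_register_done();
 */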
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
/**
 * hotplug_pcp - per cpu hotplug descriptor
 * @unplug:	set when pin_current_cpu() needs to sync tasks
 * @sync_tsk:	the task that waits for tasks to finish pinned sections
 * @refcount:	counter of tasks in pinned sections
 * @grab_lock:	set when the tasks entering pinned sections should wait
 * @synced:	notifier for @sync_tsk to tell cpu_down it's finished
 * @mutex:	the mutex to make tasks wait (used when @grab_lock is true)
 * @mutex_init:	zero if the mutex hasn't been initialized yet.
 *
 * Although @unplug and @sync_tsk may point to the same task, the @unplug
 * is used as a flag and still exists after @sync_tsk has exited and
 * @sync_tsk set to NULL.
 */
struct hotplug_pcp {
	struct task_struct *unplug;
	struct task_struct *sync_tsk;
	int refcount;
	int grab_lock;
	struct completion synced;
#ifdef CONFIG_PREEMPT_RT_FULL
	/*
	 * Note, on PREEMPT_RT, the hotplug lock must save the state of
	 * the task, otherwise the mutex will cause the task to fail
	 * to sleep when required. (Because it's called from migrate_disable())
	 *
	 * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
	 * state, so it can be used where the task may need to sleep.
	 */
	spinlock_t lock;
#else
	struct mutex mutex;
#endif
	int mutex_init;
};

#ifdef CONFIG_PREEMPT_RT_FULL
# define hotplug_lock(hp)	rt_spin_lock(&(hp)->lock)
# define hotplug_unlock(hp)	rt_spin_unlock(&(hp)->lock)
#else
# define hotplug_lock(hp)	mutex_lock(&(hp)->mutex)
# define hotplug_unlock(hp)	mutex_unlock(&(hp)->mutex)
#endif

static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
/**
 * pin_current_cpu - Prevent the current cpu from being unplugged
 *
 * Lightweight version of get_online_cpus() to prevent cpu from being
 * unplugged when code runs in a migration disabled region.
 *
 * Must be called with preemption disabled (preempt_count = 1)!
 */
void pin_current_cpu(void)
	struct hotplug_pcp *hp;

	hp = this_cpu_ptr(&hotplug_pcp);

	if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
	    hp->unplug == current) {

	/* Try to push this task off of this CPU. */

	hp = this_cpu_ptr(&hotplug_pcp);
	if (!hp->grab_lock) {
		/* Just let it continue; it's already pinned */
/**
 * unpin_current_cpu - Allow unplug of current cpu
 *
 * Must be called with preemption or interrupts disabled!
 */
void unpin_current_cpu(void)
{
	struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);

	WARN_ON(hp->refcount <= 0);

	/* This is safe. sync_unplug_thread is pinned to this cpu */
	if (!--hp->refcount && hp->unplug && hp->unplug != current)
		wake_up_process(hp->unplug);
}
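
/*
 * Illustrative sketch (not part of this file): the pin/unpin helpers above
 * are intended for code that already runs with preemption disabled and must
 * keep the current CPU from being unplugged, e.g.:
 *
 *	preempt_disable();
 *	pin_current_cpu();
 *	... touch strictly per-cpu state that cpu_down() must not rip away ...
 *	unpin_current_cpu();
 *	preempt_enable();
 *
 * On PREEMPT_RT the main caller is migrate_disable()/migrate_enable(), which
 * pins the CPU for the duration of a migrate-disabled region.
 */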
static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (hp->refcount) {
		schedule_preempt_disabled();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
}
static int sync_unplug_thread(void *data)
	struct hotplug_pcp *hp = data;

	hp->unplug = current;
	wait_for_pinned_cpus(hp);

	/*
	 * This thread will synchronize the cpu_down() with threads
	 * that have pinned the CPU. When the pinned CPU count reaches
	 * zero, we inform the cpu_down code to continue to the next step.
	 */
	set_current_state(TASK_UNINTERRUPTIBLE);
	complete(&hp->synced);

	/*
	 * If all succeeds, the next step will need tasks to wait till
	 * the CPU is offline before continuing. To do this, the grab_lock
	 * is set and tasks going into pin_current_cpu() will block on the
	 * mutex. But we still need to wait for those that are already in
	 * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
	 * will kick this thread out.
	 */
	while (!hp->grab_lock && !kthread_should_stop()) {
		set_current_state(TASK_UNINTERRUPTIBLE);

	/* Make sure grab_lock is seen before we see a stale completion */

	/*
	 * Now just before cpu_down() enters stop machine, we need to make
	 * sure all tasks that are in pinned CPU sections are out, and new
	 * tasks will now grab the lock, keeping them from entering pinned
	 * CPU sections.
	 */
	if (!kthread_should_stop()) {
		wait_for_pinned_cpus(hp);
		complete(&hp->synced);

	set_current_state(TASK_UNINTERRUPTIBLE);
	while (!kthread_should_stop()) {
		set_current_state(TASK_UNINTERRUPTIBLE);
	set_current_state(TASK_RUNNING);

	/*
	 * Force this thread off this CPU as it's going down and
	 * we don't want any more work on this CPU.
	 */
	current->flags &= ~PF_NO_SETAFFINITY;
	do_set_cpus_allowed(current, cpu_present_mask);
static void __cpu_unplug_sync(struct hotplug_pcp *hp)
{
	wake_up_process(hp->sync_tsk);
	wait_for_completion(&hp->synced);
}
/*
 * Start the sync_unplug_thread on the target cpu and wait for it to
 * complete.
 */
static int cpu_unplug_begin(unsigned int cpu)
	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);

	/* Protected by cpu_hotplug.lock */
	if (!hp->mutex_init) {
#ifdef CONFIG_PREEMPT_RT_FULL
		spin_lock_init(&hp->lock);
#else
		mutex_init(&hp->mutex);
#endif
		hp->mutex_init = 1;
	}

	/* Inform the scheduler to migrate tasks off this CPU */
	tell_sched_cpu_down_begin(cpu);

	init_completion(&hp->synced);

	hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
	if (IS_ERR(hp->sync_tsk)) {
		err = PTR_ERR(hp->sync_tsk);
	kthread_bind(hp->sync_tsk, cpu);

	/*
	 * Wait for tasks to get out of the pinned sections,
	 * it's still OK if new tasks enter. Some CPU notifiers will
	 * wait for tasks that are going to enter these sections and
	 * we must not have them block.
	 */
	__cpu_unplug_sync(hp);
static void cpu_unplug_sync(unsigned int cpu)
	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);

	init_completion(&hp->synced);
	/* The completion needs to be initialized before setting grab_lock */

	/* Grab the mutex before setting grab_lock */

	/*
	 * The CPU notifiers have been completed.
	 * Wait for tasks to get out of pinned CPU sections and have new
	 * tasks block until the CPU is completely down.
	 */
	__cpu_unplug_sync(hp);

	/* All done with the sync thread */
	kthread_stop(hp->sync_tsk);
static void cpu_unplug_done(unsigned int cpu)
	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);

	/* Let all tasks know cpu unplug is finished before cleaning up */

	kthread_stop(hp->sync_tsk);

	/* protected by cpu_hotplug.lock */

	tell_sched_cpu_down_done(cpu);
void get_online_cpus(void)
{
	might_sleep();

	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);
void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
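
/*
 * Illustrative sketch (not part of this file): a typical reader-side user
 * holds the hotplug reference across a walk of the online CPUs so the set
 * cannot change underneath it:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something_per_cpu(cpu);	// hypothetical helper
 *	put_online_cpus();
 */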
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}
void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
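
/*
 * Illustrative sketch (not part of this file): the writer side pairs the two
 * locking layers, taking the add/remove mutex first and then waiting for the
 * reader refcount to drain, as the comment above describes:
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	... perform the actual hotplug operation ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */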
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);
	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
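
/*
 * Illustrative sketch (not part of this file): a notifier callback receives
 * the hotplug phase (CPU_UP_PREPARE, CPU_ONLINE, CPU_DOWN_PREPARE, CPU_DEAD,
 * possibly OR'ed with CPU_TASKS_FROZEN) and the cpu number packed into hcpu:
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			... set up per-cpu resources for @cpu ...
 *			break;
 *		case CPU_DEAD:
 *			... tear them down again ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 * where my_cpu_callback is a hypothetical name; it would be wired up from a
 * notifier_block via register_cpu_notifier().
 */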
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so we do not warn about a task
		 * which was running on this cpu in the past, and
		 * has just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
	int mycpu, err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};
	cpumask_var_t cpumask;

	if (num_online_cpus() == 1)
	if (!cpu_online(cpu))

	/* Move the downtaker off the unplug cpu */
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
	set_cpus_allowed_ptr(current, cpumask);
	free_cpumask_var(cpumask);

	mycpu = smp_processor_id();
		printk(KERN_ERR "Yuck! Still on unplug CPU!\n");

	err = cpu_unplug_begin(cpu);
		printk("cpu_unplug_begin(%d) failed\n", cpu);

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do sync before park smpboot threads to take care of the rcu boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/* Notifiers are done. Don't let any more tasks pin this CPU. */
	cpu_unplug_sync(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
		/* CPU didn't die: tell everyone. Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone. Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

	cpu_unplug_done(cpu);

	cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();
	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	err = _cpu_down(cpu, 0);
out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	if (cpu_online(cpu) || !cpu_present(cpu)) {

	idle = idle_thread_get(cpu);

	ret = smpboot_create_threads(cpu);

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

	__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
int cpu_up(unsigned int cpu)
	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");

	err = try_online_node(cpu_to_node(cpu));

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {

	err = _cpu_up(cpu, 0);

	cpu_maps_update_done();
EXPORT_SYMBOL_GPL(cpu_up);
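
/*
 * Illustrative sketch (not part of this file): cpu_up()/cpu_down() are the
 * kernel-internal entry points behind the sysfs online attribute, so a
 * caller with a valid, present cpu id could do e.g.:
 *
 *	ret = cpu_down(1);	// take CPU 1 offline, -EBUSY if hotplug is disabled
 *	...
 *	ret = cpu_up(1);	// bring it back
 *
 * which mirrors "echo 0/1 > /sys/devices/system/cpu/cpu1/online" from
 * user space.
 */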
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug and avoid a cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */
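
/*
 * Illustrative sketch (not part of this file): architecture secondary-CPU
 * startup code is expected to call notify_cpu_starting() from the new CPU
 * itself, early in its bringup path, roughly:
 *
 *	void secondary_start_kernel(void)	// hypothetical arch entry point
 *	{
 *		...
 *		notify_cpu_starting(smp_processor_id());
 *		set_cpu_online(smp_processor_id(), true);
 *		local_irq_enable();
 *		...
 *	}
 *
 * i.e. before interrupts are enabled and before the boot CPU returns from
 * __cpu_up(), as the comment above requires.
 */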
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */
/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
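
/*
 * Illustrative sketch (not part of this file): cpumask_of(cpu) exploits the
 * empty row 0 noted above by indexing with (1 + cpu % BITS_PER_LONG) and then
 * stepping the pointer *backwards* by cpu / BITS_PER_LONG longs, so the word
 * that holds the bit lands at the right offset within the mask. Roughly
 * (see get_cpu_mask() in <linux/cpumask.h>):
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 */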
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
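
/*
 * Illustrative sketch (not part of this file): architecture setup code seeds
 * these masks during early boot, along the lines of:
 *
 *	init_cpu_possible(cpumask_of(smp_processor_id()));
 *	for_each_possible_cpu(cpu)
 *		if (platform_cpu_exists(cpu))	// hypothetical platform check
 *			set_cpu_present(cpu, true);
 *
 * while set_cpu_online() is called from the CPU bringup/teardown paths as a
 * CPU actually comes and goes.
 */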