/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        /* wait queue to wake up the active_writer */
        wait_queue_head_t wq;
#ifdef CONFIG_PREEMPT_RT_FULL
        /* Makes the lock keep the task's state */
        spinlock_t lock;
#else
        /* verifies that no writer will get active while readers are active */
        struct mutex lock;
#endif
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
#ifdef CONFIG_PREEMPT_RT_FULL
        .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
#else
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
#ifdef CONFIG_PREEMPT_RT_FULL
# define hotplug_lock()         rt_spin_lock(&cpu_hotplug.lock)
# define hotplug_trylock()      rt_spin_trylock(&cpu_hotplug.lock)
# define hotplug_unlock()       rt_spin_unlock(&cpu_hotplug.lock)
#else
# define hotplug_lock()         mutex_lock(&cpu_hotplug.lock)
# define hotplug_trylock()      mutex_trylock(&cpu_hotplug.lock)
# define hotplug_unlock()       mutex_unlock(&cpu_hotplug.lock)
#endif
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
                                  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
struct hotplug_pcp {
        struct task_struct *unplug;
        int refcount;
        struct completion synced;
};

static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
/**
 * pin_current_cpu - Prevent the current cpu from being unplugged
 *
 * Lightweight version of get_online_cpus() to prevent cpu from being
 * unplugged when code runs in a migration disabled region.
 *
 * Must be called with preemption disabled (preempt_count = 1)!
 */
void pin_current_cpu(void)
{
        struct hotplug_pcp *hp;

retry:
        hp = this_cpu_ptr(&hotplug_pcp);

        if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
            hp->unplug == current) {
                hp->refcount++;
                return;
        }
        /* An unplug is in progress: wait for it to finish, then retry. */
        preempt_enable();
        hotplug_lock();
        hotplug_unlock();
        preempt_disable();
        goto retry;
}
/**
 * unpin_current_cpu - Allow unplug of current cpu
 *
 * Must be called with preemption or interrupts disabled!
 */
void unpin_current_cpu(void)
{
        struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);

        WARN_ON(hp->refcount <= 0);

        /* This is safe. sync_unplug_thread is pinned to this cpu */
        if (!--hp->refcount && hp->unplug && hp->unplug != current)
                wake_up_process(hp->unplug);
}
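/*
 * Illustrative sketch (not part of the original file): how a
 * migration-disabled region would pair pin_current_cpu() with
 * unpin_current_cpu(). The function name and body are hypothetical; on RT
 * kernels migrate_disable()/migrate_enable() invoke these hooks
 * internally, so callers normally never use them directly.
 */
#if 0
static void example_pinned_region(void)
{
        preempt_disable();              /* required: preempt_count = 1 */
        pin_current_cpu();              /* this CPU can no longer be unplugged */
        /* ... work that must complete on this CPU ... */
        unpin_current_cpu();            /* may wake a waiting sync_unplug thread */
        preempt_enable();
}
#endif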
/*
 * FIXME: Is this really correct under all circumstances ?
 */
static int sync_unplug_thread(void *data)
{
        struct hotplug_pcp *hp = data;

        preempt_disable();
        hp->unplug = current;
        set_current_state(TASK_UNINTERRUPTIBLE);
        while (hp->refcount) {
                schedule_preempt_disabled();
                set_current_state(TASK_UNINTERRUPTIBLE);
        }
        set_current_state(TASK_RUNNING);
        preempt_enable();
        complete(&hp->synced);
        return 0;
}
/*
 * Start the sync_unplug_thread on the target cpu and wait for it to
 * complete.
 */
static int cpu_unplug_begin(unsigned int cpu)
{
        struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
        struct task_struct *tsk;

        init_completion(&hp->synced);
        tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);
        kthread_bind(tsk, cpu);
        wake_up_process(tsk);
        wait_for_completion(&hp->synced);
        return 0;
}
static void cpu_unplug_done(unsigned int cpu)
{
        struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);

        hp->unplug = NULL;
}
void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        hotplug_lock();
        atomic_inc(&cpu_hotplug.refcount);
        hotplug_unlock();
}
EXPORT_SYMBOL_GPL(get_online_cpus);
bool try_get_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return true;
        if (!hotplug_trylock())
                return false;
        cpuhp_lock_acquire_tryread();
        atomic_inc(&cpu_hotplug.refcount);
        hotplug_unlock();
        return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);
void put_online_cpus(void)
{
        int refcount;

        if (cpu_hotplug.active_writer == current)
                return;

        refcount = atomic_dec_return(&cpu_hotplug.refcount);
        if (WARN_ON(refcount < 0)) /* try to fix things up */
                atomic_inc(&cpu_hotplug.refcount);

        if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
                wake_up(&cpu_hotplug.wq);

        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
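/*
 * Illustrative sketch (not from this file): the reader-side pattern the
 * refcount above serializes. The function name and loop body are
 * hypothetical; get_online_cpus()/put_online_cpus() are the APIs defined
 * above.
 */
#if 0
static void example_walk_online_cpus(void)
{
        int cpu;

        get_online_cpus();              /* block cpu_hotplug_begin() writers */
        for_each_online_cpu(cpu)
                pr_info("CPU%d is online\n", cpu);
        put_online_cpus();              /* drop reference, maybe wake writer */
}
#endif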
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
        DEFINE_WAIT(wait);

        cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();

        for (;;) {
                hotplug_lock();
                prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (likely(!atomic_read(&cpu_hotplug.refcount)))
                        break;
                hotplug_unlock();
                schedule();
        }
        finish_wait(&cpu_hotplug.wq, &wait);
}
void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        hotplug_unlock();
        cpuhp_lock_release();
}
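/*
 * Illustrative sketch (not from this file): the writer-side sequence, as
 * used by _cpu_down()/_cpu_up() below, shown only to make the
 * lock/refcount ordering explicit.
 */
#if 0
        cpu_maps_update_begin();        /* one writer at a time */
        cpu_hotplug_begin();            /* take hotplug lock, drain readers */
        /* ... update cpu_online_mask / cpu_present_mask ... */
        cpu_hotplug_done();
        cpu_maps_update_done();
#endif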
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 1;
        cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        cpu_maps_update_done();
}

#endif  /* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}
#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
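/*
 * Illustrative sketch (not from this file): a minimal hotplug notifier
 * using the chain above. The callback, block name and printouts are
 * hypothetical; the CPU_ONLINE/CPU_DEAD actions and
 * register_cpu_notifier() are the interfaces defined in this file.
 */
#if 0
static int example_cpu_callback(struct notifier_block *nb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                pr_info("CPU%u came up\n", cpu);
                break;
        case CPU_DEAD:
                pr_info("CPU%u went down\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
        .notifier_call = example_cpu_callback,
};

/* somewhere in init code: register_cpu_notifier(&example_cpu_nb); */
#endif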
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}
static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock_irq(&tasklist_lock);
        do_each_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with unlocked task_rq(p)->lock.
                 * Order the reading so that we do not warn about a task
                 * which was running on this cpu in the past but has just
                 * been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        } while_each_thread(g, p);
        read_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        /* Park the stopper thread */
        kthread_park(current);
        return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int mycpu, err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };
        cpumask_var_t cpumask;

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        /* Move the downtaker off the unplug cpu */
        if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
                return -ENOMEM;
        cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
        set_cpus_allowed_ptr(current, cpumask);
        free_cpumask_var(cpumask);
        migrate_disable();
        mycpu = smp_processor_id();
        if (mycpu == cpu) {
                printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
                migrate_enable();
                return -EBUSY;
        }

        cpu_hotplug_begin();
        err = cpu_unplug_begin(cpu);
        if (err) {
                printk("cpu_unplug_begin(%d) failed\n", cpu);
                goto out_cancel;
        }

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
                goto out_release;
        }

        /*
         * By now we've cleared cpu_active_mask, wait for all preempt-disabled
         * and RCU users of this state to go away such that all new such users
         * will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
         * not imply sync_sched(), so explicitly call both.
         *
         * Do the sync before parking the smpboot threads to take care of the
         * RCU boost case.
         */
#ifdef CONFIG_PREEMPT
        synchronize_sched();
#endif
        synchronize_rcu();

        smpboot_park_threads(cpu);

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */
        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone. Can't complain. */
                smpboot_unpark_threads(cpu);
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone. Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_unplug_done(cpu);
out_cancel:
        migrate_enable();
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}
int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct task_struct *idle;

        cpu_hotplug_begin();

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = smpboot_create_threads(cpu);
        if (ret)
                goto out;

        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                pr_warn("%s: attempt to bring up CPU %u failed\n",
                        __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Wake the per cpu threads */
        smpboot_unpark_threads(cpu);

        /* Now call notifier in preparation. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
        cpu_hotplug_done();
        return ret;
}
int cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
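/*
 * Usage note (not from this file): cpu_up()/cpu_down() back the sysfs
 * "online" attribute, so the userspace equivalent of cpu_down(1) followed
 * by cpu_up(1) is:
 *
 *      echo 0 > /sys/devices/system/cpu/cpu1/online
 *      echo 1 > /sys/devices/system/cpu/cpu1/online
 */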
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time.
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                pr_err("Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
         * to disable cpu hotplug to avoid cpu hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
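/*
 * Worked example (not from this file): get_cpu_mask() in <linux/cpumask.h>
 * evaluates
 *
 *      const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *      p -= cpu / BITS_PER_LONG;
 *
 * so for cpu = 67 on a 64-bit kernel it picks the row whose word 0 is
 * (1UL << 3) and steps back one long: word 1 of the returned mask is that
 * row's word 0, i.e. bit 67 of the mask. Every other word reads as zero
 * because only word 0 of each row is initialized, and row 0 is left empty
 * precisely so that backing into it is safe.
 */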
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online) {
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        } else {
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
        }
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}