2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
4 * This code is licensed under the GPL.
6 #include <linux/proc_fs.h>
8 #include <linux/init.h>
9 #include <linux/notifier.h>
10 #include <linux/sched.h>
11 #include <linux/unistd.h>
12 #include <linux/cpu.h>
13 #include <linux/oom.h>
14 #include <linux/rcupdate.h>
15 #include <linux/export.h>
16 #include <linux/bug.h>
17 #include <linux/kthread.h>
18 #include <linux/stop_machine.h>
19 #include <linux/mutex.h>
20 #include <linux/gfp.h>
21 #include <linux/suspend.h>
22 #include <linux/lockdep.h>
23 #include <linux/tick.h>
24 #include <linux/irq.h>
25 #include <linux/smpboot.h>
26 #include <linux/relay.h>
27 #include <linux/slab.h>
29 #include <trace/events/power.h>
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/cpuhp.h>
36 * cpuhp_cpu_state - Per cpu hotplug state storage
37 * @state: The current cpu state
38 * @target: The target state
39 * @thread: Pointer to the hotplug thread
40 * @should_run: Thread should execute
41 * @rollback: Perform a rollback
42 * @single: Single callback invocation
43 * @bringup: Single callback bringup or teardown selector
44 * @cb_state: The state for a single callback (install/uninstall)
45 * @result: Result of the operation
46 * @done: Signal completion to the issuer of the task
48 struct cpuhp_cpu_state {
49 enum cpuhp_state state;
50 enum cpuhp_state target;
52 struct task_struct *thread;
57 struct hlist_node *node;
58 enum cpuhp_state cb_state;
60 struct completion done;
64 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
67 * cpuhp_step - Hotplug state machine step
68 * @name: Name of the step
69 * @startup: Startup function of the step
70 * @teardown: Teardown function of the step
71 * @skip_onerr: Do not invoke the functions on error rollback
72 * Will go away once the notifiers are gone
73 * @cant_stop: Bringup/teardown can't be stopped at this step
78 int (*single)(unsigned int cpu);
79 int (*multi)(unsigned int cpu,
80 struct hlist_node *node);
83 int (*single)(unsigned int cpu);
84 int (*multi)(unsigned int cpu,
85 struct hlist_node *node);
87 struct hlist_head list;
93 static DEFINE_MUTEX(cpuhp_state_mutex);
94 static struct cpuhp_step cpuhp_bp_states[];
95 static struct cpuhp_step cpuhp_ap_states[];
97 static bool cpuhp_is_ap_state(enum cpuhp_state state)
100 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
101 * purposes as that state is handled explicitly in cpu_down.
103 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
106 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
108 struct cpuhp_step *sp;
110 sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
115 * cpuhp_invoke_callback - Invoke the callbacks for a given state
116 * @cpu: The cpu for which the callback should be invoked
117 * @step: The step in the state machine
118 * @bringup: True if the bringup callback should be invoked
120 * Called from cpu hotplug and from the state register machinery.
122 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
123 bool bringup, struct hlist_node *node)
125 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
126 struct cpuhp_step *step = cpuhp_get_step(state);
127 int (*cbm)(unsigned int cpu, struct hlist_node *node);
128 int (*cb)(unsigned int cpu);
131 if (!step->multi_instance) {
132 cb = bringup ? step->startup.single : step->teardown.single;
135 trace_cpuhp_enter(cpu, st->target, state, cb);
137 trace_cpuhp_exit(cpu, st->state, state, ret);
140 cbm = bringup ? step->startup.multi : step->teardown.multi;
144 /* Single invocation for instance add/remove */
146 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
147 ret = cbm(cpu, node);
148 trace_cpuhp_exit(cpu, st->state, state, ret);
152 /* State transition. Invoke on all instances */
154 hlist_for_each(node, &step->list) {
155 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
156 ret = cbm(cpu, node);
157 trace_cpuhp_exit(cpu, st->state, state, ret);
164 /* Rollback the instances if one failed */
165 cbm = !bringup ? step->startup.multi : step->teardown.multi;
169 hlist_for_each(node, &step->list) {
178 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
179 static DEFINE_MUTEX(cpu_add_remove_lock);
180 bool cpuhp_tasks_frozen;
181 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
184 * The following two APIs (cpu_maps_update_begin/done) must be used when
185 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
186 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
187 * hotplug callback (un)registration performed using __register_cpu_notifier()
188 * or __unregister_cpu_notifier().
190 void cpu_maps_update_begin(void)
192 mutex_lock(&cpu_add_remove_lock);
194 EXPORT_SYMBOL(cpu_notifier_register_begin);
196 void cpu_maps_update_done(void)
198 mutex_unlock(&cpu_add_remove_lock);
200 EXPORT_SYMBOL(cpu_notifier_register_done);
202 static RAW_NOTIFIER_HEAD(cpu_chain);
204 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
205 * Should always be manipulated under cpu_add_remove_lock
207 static int cpu_hotplug_disabled;
209 #ifdef CONFIG_HOTPLUG_CPU
212 struct task_struct *active_writer;
213 /* wait queue to wake up the active_writer */
214 wait_queue_head_t wq;
215 /* verifies that no writer will get active while readers are active */
218 * Also blocks the new readers during
219 * an ongoing cpu hotplug operation.
223 #ifdef CONFIG_DEBUG_LOCK_ALLOC
224 struct lockdep_map dep_map;
227 .active_writer = NULL,
228 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
229 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
230 #ifdef CONFIG_DEBUG_LOCK_ALLOC
231 .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
235 /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
236 #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
237 #define cpuhp_lock_acquire_tryread() \
238 lock_map_acquire_tryread(&cpu_hotplug.dep_map)
239 #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
240 #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
243 * hotplug_pcp - per cpu hotplug descriptor
244 * @unplug: set when pin_current_cpu() needs to sync tasks
245 * @sync_tsk: the task that waits for tasks to finish pinned sections
246 * @refcount: counter of tasks in pinned sections
247 * @grab_lock: set when the tasks entering pinned sections should wait
248 * @synced: notifier for @sync_tsk to tell cpu_down it's finished
249 * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
250 * @mutex_init: zero if the mutex hasn't been initialized yet.
252 * Although @unplug and @sync_tsk may point to the same task, the @unplug
253 * is used as a flag and still exists after @sync_tsk has exited and
254 * @sync_tsk has been set to NULL.
257 struct task_struct *unplug;
258 struct task_struct *sync_tsk;
261 struct completion synced;
262 struct completion unplug_wait;
263 #ifdef CONFIG_PREEMPT_RT_FULL
265 * Note, on PREEMPT_RT, the hotplug lock must save the state of
266 * the task, otherwise the mutex will cause the task to fail
267 * to sleep when required. (Because it's called from migrate_disable())
269 * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
279 #ifdef CONFIG_PREEMPT_RT_FULL
280 # define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock)
281 # define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock)
283 # define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
284 # define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
287 static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
290 * pin_current_cpu - Prevent the current cpu from being unplugged
292 * Lightweight version of get_online_cpus() to prevent cpu from being
293 * unplugged when code runs in a migration disabled region.
295 * Must be called with preemption disabled (preempt_count = 1)!
297 void pin_current_cpu(void)
299 struct hotplug_pcp *hp;
303 hp = this_cpu_ptr(&hotplug_pcp);
305 if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
306 hp->unplug == current) {
317 * Try to push this task off of this CPU.
321 hp = this_cpu_ptr(&hotplug_pcp);
322 if (!hp->grab_lock) {
324 * Just let it continue, it's already pinned
338 * unpin_current_cpu - Allow unplug of current cpu
340 * Must be called with preemption or interrupts disabled!
342 void unpin_current_cpu(void)
344 struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);
346 WARN_ON(hp->refcount <= 0);
348 /* This is safe. sync_unplug_thread is pinned to this cpu */
349 if (!--hp->refcount && hp->unplug && hp->unplug != current)
350 wake_up_process(hp->unplug);
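/*
 * Illustrative sketch (not part of this file): how a caller would use
 * pin_current_cpu()/unpin_current_cpu() to keep the current CPU from being
 * unplugged across a short critical section. In the RT patch these are the
 * hooks used by migrate_disable()/migrate_enable(); the function name below
 * is hypothetical.
 */
static void example_pinned_section(void)
{
	preempt_disable();
	pin_current_cpu();		/* current CPU cannot be unplugged now */

	/* ... per-CPU work that must not race with cpu_down() ... */

	unpin_current_cpu();		/* may wake the sync_unplug thread */
	preempt_enable();
}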
353 static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
355 set_current_state(TASK_UNINTERRUPTIBLE);
356 while (hp->refcount) {
357 schedule_preempt_disabled();
358 set_current_state(TASK_UNINTERRUPTIBLE);
362 static int sync_unplug_thread(void *data)
364 struct hotplug_pcp *hp = data;
366 wait_for_completion(&hp->unplug_wait);
368 hp->unplug = current;
369 wait_for_pinned_cpus(hp);
372 * This thread will synchronize the cpu_down() with threads
373 * that have pinned the CPU. When the pinned CPU count reaches
374 * zero, we inform the cpu_down code to continue to the next step.
376 set_current_state(TASK_UNINTERRUPTIBLE);
378 complete(&hp->synced);
381 * If all succeeds, the next step will need tasks to wait till
382 * the CPU is offline before continuing. To do this, the grab_lock
383 * is set and tasks going into pin_current_cpu() will block on the
384 * mutex. But we still need to wait for those that are already in
385 * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
386 * will kick this thread out.
388 while (!hp->grab_lock && !kthread_should_stop()) {
390 set_current_state(TASK_UNINTERRUPTIBLE);
393 /* Make sure grab_lock is seen before we see a stale completion */
397 * Now just before cpu_down() enters stop machine, we need to make
398 * sure all tasks that are in pinned CPU sections are out, and new
399 * tasks will now grab the lock, keeping them from entering pinned
402 if (!kthread_should_stop()) {
404 wait_for_pinned_cpus(hp);
406 complete(&hp->synced);
409 set_current_state(TASK_UNINTERRUPTIBLE);
410 while (!kthread_should_stop()) {
412 set_current_state(TASK_UNINTERRUPTIBLE);
414 set_current_state(TASK_RUNNING);
417 * Force this thread off this CPU as it's going down and
418 * we don't want any more work on this CPU.
420 current->flags &= ~PF_NO_SETAFFINITY;
421 set_cpus_allowed_ptr(current, cpu_present_mask);
426 static void __cpu_unplug_sync(struct hotplug_pcp *hp)
428 wake_up_process(hp->sync_tsk);
429 wait_for_completion(&hp->synced);
432 static void __cpu_unplug_wait(unsigned int cpu)
434 struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
436 complete(&hp->unplug_wait);
437 wait_for_completion(&hp->synced);
441 * Start the sync_unplug_thread on the target cpu and wait for it to
444 static int cpu_unplug_begin(unsigned int cpu)
446 struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
449 /* Protected by cpu_hotplug.lock */
450 if (!hp->mutex_init) {
451 #ifdef CONFIG_PREEMPT_RT_FULL
452 spin_lock_init(&hp->lock);
454 mutex_init(&hp->mutex);
459 /* Inform the scheduler to migrate tasks off this CPU */
460 tell_sched_cpu_down_begin(cpu);
462 init_completion(&hp->synced);
463 init_completion(&hp->unplug_wait);
465 hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
466 if (IS_ERR(hp->sync_tsk)) {
467 err = PTR_ERR(hp->sync_tsk);
471 kthread_bind(hp->sync_tsk, cpu);
474 * Wait for tasks to get out of the pinned sections,
475 * it's still OK if new tasks enter. Some CPU notifiers will
476 * wait for tasks that are going to enter these sections and
477 * we must not have them block.
479 wake_up_process(hp->sync_tsk);
483 static void cpu_unplug_sync(unsigned int cpu)
485 struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
487 init_completion(&hp->synced);
488 /* The completion needs to be initialized before setting grab_lock */
491 /* Grab the mutex before setting grab_lock */
496 * The CPU notifiers have been completed.
497 * Wait for tasks to get out of pinned CPU sections and have new
498 * tasks block until the CPU is completely down.
500 __cpu_unplug_sync(hp);
502 /* All done with the sync thread */
503 kthread_stop(hp->sync_tsk);
507 static void cpu_unplug_done(unsigned int cpu)
509 struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
512 /* Let all tasks know cpu unplug is finished before cleaning up */
516 kthread_stop(hp->sync_tsk);
520 /* protected by cpu_hotplug.lock */
523 tell_sched_cpu_down_done(cpu);
526 void get_online_cpus(void)
529 if (cpu_hotplug.active_writer == current)
531 cpuhp_lock_acquire_read();
532 mutex_lock(&cpu_hotplug.lock);
533 atomic_inc(&cpu_hotplug.refcount);
534 mutex_unlock(&cpu_hotplug.lock);
536 EXPORT_SYMBOL_GPL(get_online_cpus);
538 void put_online_cpus(void)
542 if (cpu_hotplug.active_writer == current)
545 refcount = atomic_dec_return(&cpu_hotplug.refcount);
546 if (WARN_ON(refcount < 0)) /* try to fix things up */
547 atomic_inc(&cpu_hotplug.refcount);
549 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
550 wake_up(&cpu_hotplug.wq);
552 cpuhp_lock_release();
555 EXPORT_SYMBOL_GPL(put_online_cpus);
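/*
 * Illustrative sketch (not part of this file): a typical reader-side use of
 * get_online_cpus()/put_online_cpus(). The mask walk cannot race with a CPU
 * going away because the elevated refcount holds off cpu_hotplug_begin().
 * The function name is hypothetical.
 */
static void example_walk_online_cpus(void)
{
	unsigned int cpu;

	get_online_cpus();		/* hold off CPU unplug */
	for_each_online_cpu(cpu)
		pr_info("cpu%u is online\n", cpu);
	put_online_cpus();		/* may wake a waiting writer */
}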
558 * This ensures that the hotplug operation can begin only when the
559 * refcount goes to zero.
561 * Note that during a cpu-hotplug operation, the new readers, if any,
562 * will be blocked by the cpu_hotplug.lock
564 * Since cpu_hotplug_begin() is always called after invoking
565 * cpu_maps_update_begin(), we can be sure that only one writer is active.
567 * Note that theoretically, there is a possibility of a livelock:
568 * - Refcount goes to zero, last reader wakes up the sleeping
570 * - Last reader unlocks the cpu_hotplug.lock.
571 * - A new reader arrives at this moment, bumps up the refcount.
572 * - The writer acquires the cpu_hotplug.lock, finds the refcount
573 * non-zero and goes to sleep again.
575 * However, this is very difficult to achieve in practice since
576 * get_online_cpus() is not an API which is called all that often.
579 void cpu_hotplug_begin(void)
583 cpu_hotplug.active_writer = current;
584 cpuhp_lock_acquire();
587 mutex_lock(&cpu_hotplug.lock);
588 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
589 if (likely(!atomic_read(&cpu_hotplug.refcount)))
591 mutex_unlock(&cpu_hotplug.lock);
594 finish_wait(&cpu_hotplug.wq, &wait);
597 void cpu_hotplug_done(void)
599 cpu_hotplug.active_writer = NULL;
600 mutex_unlock(&cpu_hotplug.lock);
601 cpuhp_lock_release();
605 * Wait for currently running CPU hotplug operations to complete (if any) and
606 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
607 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
608 * hotplug path before performing hotplug operations. So acquiring that lock
609 * guarantees mutual exclusion from any currently running hotplug operations.
611 void cpu_hotplug_disable(void)
613 cpu_maps_update_begin();
614 cpu_hotplug_disabled++;
615 cpu_maps_update_done();
617 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
619 static void __cpu_hotplug_enable(void)
621 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
623 cpu_hotplug_disabled--;
626 void cpu_hotplug_enable(void)
628 cpu_maps_update_begin();
629 __cpu_hotplug_enable();
630 cpu_maps_update_done();
632 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
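/*
 * Illustrative sketch (not part of this file): pairing cpu_hotplug_disable()
 * and cpu_hotplug_enable() around an operation that must not observe CPUs
 * coming or going. While disabled, cpu_up() and cpu_down() fail with -EBUSY.
 * The function name is hypothetical.
 */
static void example_no_hotplug_window(void)
{
	cpu_hotplug_disable();		/* bumps cpu_hotplug_disabled */

	/* ... work that must not race with sysfs-driven hotplug ... */

	cpu_hotplug_enable();		/* balanced decrement, warns if unbalanced */
}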
633 #endif /* CONFIG_HOTPLUG_CPU */
635 /* Need to know about CPUs going up/down? */
636 int register_cpu_notifier(struct notifier_block *nb)
639 cpu_maps_update_begin();
640 ret = raw_notifier_chain_register(&cpu_chain, nb);
641 cpu_maps_update_done();
645 int __register_cpu_notifier(struct notifier_block *nb)
647 return raw_notifier_chain_register(&cpu_chain, nb);
650 static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
653 unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
654 void *hcpu = (void *)(long)cpu;
658 ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
661 return notifier_to_errno(ret);
664 static int cpu_notify(unsigned long val, unsigned int cpu)
666 return __cpu_notify(val, cpu, -1, NULL);
669 static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
671 BUG_ON(cpu_notify(val, cpu));
674 /* Notifier wrappers for transitioning to state machine */
675 static int notify_prepare(unsigned int cpu)
680 ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
683 printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
685 __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
690 static int notify_online(unsigned int cpu)
692 cpu_notify(CPU_ONLINE, cpu);
696 static int bringup_wait_for_ap(unsigned int cpu)
698 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
700 wait_for_completion(&st->done);
704 static int bringup_cpu(unsigned int cpu)
706 struct task_struct *idle = idle_thread_get(cpu);
710 * Some architectures have to walk the irq descriptors to
711 * setup the vector space for the cpu which comes online.
712 * Prevent irq alloc/free across the bringup.
716 /* Arch-specific enabling code. */
717 ret = __cpu_up(cpu, idle);
720 cpu_notify(CPU_UP_CANCELED, cpu);
723 ret = bringup_wait_for_ap(cpu);
724 BUG_ON(!cpu_online(cpu));
729 * Hotplug state machine related functions
731 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
733 for (st->state++; st->state < st->target; st->state++) {
734 struct cpuhp_step *step = cpuhp_get_step(st->state);
736 if (!step->skip_onerr)
737 cpuhp_invoke_callback(cpu, st->state, true, NULL);
741 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
742 enum cpuhp_state target)
744 enum cpuhp_state prev_state = st->state;
747 for (; st->state > target; st->state--) {
748 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
750 st->target = prev_state;
751 undo_cpu_down(cpu, st);
758 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
760 for (st->state--; st->state > st->target; st->state--) {
761 struct cpuhp_step *step = cpuhp_get_step(st->state);
763 if (!step->skip_onerr)
764 cpuhp_invoke_callback(cpu, st->state, false, NULL);
768 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
769 enum cpuhp_state target)
771 enum cpuhp_state prev_state = st->state;
774 while (st->state < target) {
776 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
778 st->target = prev_state;
779 undo_cpu_up(cpu, st);
787 * The cpu hotplug threads manage the bringup and teardown of the cpus
789 static void cpuhp_create(unsigned int cpu)
791 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
793 init_completion(&st->done);
796 static int cpuhp_should_run(unsigned int cpu)
798 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
800 return st->should_run;
803 /* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
804 static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
806 enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
808 return cpuhp_down_callbacks(cpu, st, target);
811 /* Execute the online startup callbacks. Used to be CPU_ONLINE */
812 static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
814 return cpuhp_up_callbacks(cpu, st, st->target);
818 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
819 * callbacks when a state gets [un]installed at runtime.
821 static void cpuhp_thread_fun(unsigned int cpu)
823 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
827 * Paired with the mb() in cpuhp_kick_ap_work and
828 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
834 st->should_run = false;
836 /* Single callback invocation for [un]install ? */
838 if (st->cb_state < CPUHP_AP_ONLINE) {
840 ret = cpuhp_invoke_callback(cpu, st->cb_state,
841 st->bringup, st->node);
844 ret = cpuhp_invoke_callback(cpu, st->cb_state,
845 st->bringup, st->node);
847 } else if (st->rollback) {
848 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
850 undo_cpu_down(cpu, st);
852 * This is a momentary workaround to keep the notifier users
853 * happy. Will go away once we get rid of the notifiers.
855 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
856 st->rollback = false;
858 /* Cannot happen .... */
859 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
861 /* Regular hotplug work */
862 if (st->state < st->target)
863 ret = cpuhp_ap_online(cpu, st);
864 else if (st->state > st->target)
865 ret = cpuhp_ap_offline(cpu, st);
871 /* Invoke a single callback on a remote cpu */
873 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
874 struct hlist_node *node)
876 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
878 if (!cpu_online(cpu))
882 * If we are up and running, use the hotplug thread. For early calls
883 * we invoke the thread function directly.
886 return cpuhp_invoke_callback(cpu, state, bringup, node);
888 st->cb_state = state;
890 st->bringup = bringup;
894 * Make sure the above stores are visible before should_run becomes
895 * true. Paired with the mb() above in cpuhp_thread_fun()
898 st->should_run = true;
899 wake_up_process(st->thread);
900 wait_for_completion(&st->done);
904 /* Regular hotplug invocation of the AP hotplug thread */
905 static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
910 * Make sure the above stores are visible before should_run becomes
911 * true. Paired with the mb() above in cpuhp_thread_fun()
914 st->should_run = true;
915 wake_up_process(st->thread);
918 static int cpuhp_kick_ap_work(unsigned int cpu)
920 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
921 enum cpuhp_state state = st->state;
923 trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
924 __cpuhp_kick_ap_work(st);
925 wait_for_completion(&st->done);
926 trace_cpuhp_exit(cpu, st->state, state, st->result);
930 static struct smp_hotplug_thread cpuhp_threads = {
931 .store = &cpuhp_state.thread,
932 .create = &cpuhp_create,
933 .thread_should_run = cpuhp_should_run,
934 .thread_fn = cpuhp_thread_fun,
935 .thread_comm = "cpuhp/%u",
939 void __init cpuhp_threads_init(void)
941 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
942 kthread_unpark(this_cpu_read(cpuhp_state.thread));
945 #ifdef CONFIG_HOTPLUG_CPU
946 EXPORT_SYMBOL(register_cpu_notifier);
947 EXPORT_SYMBOL(__register_cpu_notifier);
948 void unregister_cpu_notifier(struct notifier_block *nb)
950 cpu_maps_update_begin();
951 raw_notifier_chain_unregister(&cpu_chain, nb);
952 cpu_maps_update_done();
954 EXPORT_SYMBOL(unregister_cpu_notifier);
956 void __unregister_cpu_notifier(struct notifier_block *nb)
958 raw_notifier_chain_unregister(&cpu_chain, nb);
960 EXPORT_SYMBOL(__unregister_cpu_notifier);
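/*
 * Illustrative sketch (not part of this file): the legacy notifier interface
 * that the state machine above is replacing. The callback and notifier_block
 * names are hypothetical; register_cpu_notifier() and
 * unregister_cpu_notifier() are the functions exported here.
 */
static int example_cpu_notify(struct notifier_block *nb,
			      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("example: cpu%u is online\n", cpu);
		break;
	case CPU_DOWN_PREPARE:
		pr_info("example: cpu%u is about to go down\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_notify,
};

/* In module init/exit (sketch):
 *	register_cpu_notifier(&example_cpu_nb);
 *	...
 *	unregister_cpu_notifier(&example_cpu_nb);
 */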
963 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
966 * This function walks all processes, finds a valid mm struct for each one and
967 * then clears a corresponding bit in mm's cpumask. While this all sounds
968 * trivial, there are various non-obvious corner cases, which this function
969 * tries to solve in a safe manner.
971 * Also note that the function uses a somewhat relaxed locking scheme, so it may
972 * be called only for an already offlined CPU.
974 void clear_tasks_mm_cpumask(int cpu)
976 struct task_struct *p;
979 * This function is called after the cpu is taken down and marked
980 * offline, so it's not like new tasks will ever get this cpu set in
981 * their mm mask. -- Peter Zijlstra
982 * Thus, we may use rcu_read_lock() here, instead of grabbing
983 * full-fledged tasklist_lock.
985 WARN_ON(cpu_online(cpu));
987 for_each_process(p) {
988 struct task_struct *t;
991 * Main thread might exit, but other threads may still have
992 * a valid mm. Find one.
994 t = find_lock_task_mm(p);
997 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
1003 static inline void check_for_tasks(int dead_cpu)
1005 struct task_struct *g, *p;
1007 read_lock(&tasklist_lock);
1008 for_each_process_thread(g, p) {
1012 * We do the check with unlocked task_rq(p)->lock.
1013 * Order the reads so that we do not warn about a task
1014 * which was running on this cpu in the past and has
1015 * just been woken on another cpu.
1018 if (task_cpu(p) != dead_cpu)
1021 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
1022 p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
1024 read_unlock(&tasklist_lock);
1027 static int notify_down_prepare(unsigned int cpu)
1029 int err, nr_calls = 0;
1031 err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
1034 __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
1035 pr_warn("%s: attempt to take down CPU %u failed\n",
1041 /* Take this CPU down. */
1042 static int take_cpu_down(void *_param)
1044 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1045 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
1046 int err, cpu = smp_processor_id();
1048 /* Ensure this CPU doesn't handle any more interrupts. */
1049 err = __cpu_disable();
1054 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
1055 * do this step again.
1057 WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
1059 /* Invoke the former CPU_DYING callbacks */
1060 for (; st->state > target; st->state--)
1061 cpuhp_invoke_callback(cpu, st->state, false, NULL);
1063 /* Give up timekeeping duties */
1064 tick_handover_do_timer();
1065 /* Park the stopper thread */
1066 stop_machine_park(cpu);
1070 static int takedown_cpu(unsigned int cpu)
1072 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1075 __cpu_unplug_wait(cpu);
1076 /* Park the smpboot threads */
1077 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
1078 smpboot_park_threads(cpu);
1080 /* Notifiers are done. Don't let any more tasks pin this CPU. */
1081 cpu_unplug_sync(cpu);
1084 * Prevent irq alloc/free while the dying cpu reorganizes the
1085 * interrupt affinities.
1090 * So now all preempt/rcu users must observe !cpu_active().
1092 err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
1094 /* CPU refused to die */
1095 irq_unlock_sparse();
1096 /* Unpark the hotplug thread so we can rollback there */
1097 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
1100 BUG_ON(cpu_online(cpu));
1103 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
1104 * runnable tasks from the cpu, there's only the idle task left now
1105 * that the migration thread is done doing the stop_machine thing.
1107 * Wait for the stop thread to go away.
1109 wait_for_completion(&st->done);
1110 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1112 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
1113 irq_unlock_sparse();
1115 hotplug_cpu__broadcast_tick_pull(cpu);
1116 /* This actually kills the CPU. */
1119 tick_cleanup_dead_cpu(cpu);
1123 static int notify_dead(unsigned int cpu)
1125 cpu_notify_nofail(CPU_DEAD, cpu);
1126 check_for_tasks(cpu);
1130 static void cpuhp_complete_idle_dead(void *arg)
1132 struct cpuhp_cpu_state *st = arg;
1134 complete(&st->done);
1137 void cpuhp_report_idle_dead(void)
1139 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1141 BUG_ON(st->state != CPUHP_AP_OFFLINE);
1142 rcu_report_dead(smp_processor_id());
1143 st->state = CPUHP_AP_IDLE_DEAD;
1145 * We cannot call complete after rcu_report_dead() so we delegate it
1148 smp_call_function_single(cpumask_first(cpu_online_mask),
1149 cpuhp_complete_idle_dead, st, 0);
1153 #define notify_down_prepare NULL
1154 #define takedown_cpu NULL
1155 #define notify_dead NULL
1158 #ifdef CONFIG_HOTPLUG_CPU
1160 /* Requires cpu_add_remove_lock to be held */
1161 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1162 enum cpuhp_state target)
1164 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1165 int prev_state, ret = 0;
1166 bool hasdied = false;
1168 cpumask_var_t cpumask;
1169 cpumask_var_t cpumask_org;
1171 if (num_online_cpus() == 1)
1174 if (!cpu_present(cpu))
1177 /* Move the downtaker off the unplug cpu */
1178 if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
1180 if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
1181 free_cpumask_var(cpumask);
1185 cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
1186 cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
1187 set_cpus_allowed_ptr(current, cpumask);
1188 free_cpumask_var(cpumask);
1190 mycpu = smp_processor_id();
1192 printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
1199 cpu_hotplug_begin();
1200 ret = cpu_unplug_begin(cpu);
1202 printk("cpu_unplug_begin(%d) failed\n", cpu);
1206 cpuhp_tasks_frozen = tasks_frozen;
1208 prev_state = st->state;
1209 st->target = target;
1211 * If the current CPU state is in the range of the AP hotplug thread,
1212 * then we need to kick the thread.
1214 if (st->state > CPUHP_TEARDOWN_CPU) {
1215 ret = cpuhp_kick_ap_work(cpu);
1217 * The AP side has done the error rollback already. Just
1218 * return the error code.
1224 * We might have stopped still in the range of the AP hotplug
1225 * thread. Nothing to do anymore.
1227 if (st->state > CPUHP_TEARDOWN_CPU)
1231 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1232 * to do the further cleanups.
1234 ret = cpuhp_down_callbacks(cpu, st, target);
1235 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
1236 st->target = prev_state;
1237 st->rollback = true;
1238 cpuhp_kick_ap_work(cpu);
1241 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
1243 cpu_unplug_done(cpu);
1246 /* This post dead nonsense must die */
1247 if (!ret && hasdied)
1248 cpu_notify_nofail(CPU_POST_DEAD, cpu);
1250 set_cpus_allowed_ptr(current, cpumask_org);
1251 free_cpumask_var(cpumask_org);
1255 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
1259 cpu_maps_update_begin();
1261 if (cpu_hotplug_disabled) {
1266 err = _cpu_down(cpu, 0, target);
1269 cpu_maps_update_done();
1272 int cpu_down(unsigned int cpu)
1274 return do_cpu_down(cpu, CPUHP_OFFLINE);
1276 EXPORT_SYMBOL(cpu_down);
1277 #endif /*CONFIG_HOTPLUG_CPU*/
1280 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1281 * @cpu: cpu that just started
1283 * It must be called by the arch code on the new cpu, before the new cpu
1284 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1286 void notify_cpu_starting(unsigned int cpu)
1288 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1289 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1291 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
1292 while (st->state < target) {
1294 cpuhp_invoke_callback(cpu, st->state, true, NULL);
1299 * Called from the idle task. We need to set active here, so we can kick off
1300 * the stopper thread and unpark the smpboot threads. If the target state is
1301 * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
1304 void cpuhp_online_idle(enum cpuhp_state state)
1306 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1307 unsigned int cpu = smp_processor_id();
1309 /* Happens for the boot cpu */
1310 if (state != CPUHP_AP_ONLINE_IDLE)
1313 st->state = CPUHP_AP_ONLINE_IDLE;
1315 /* Unpark the stopper thread and the hotplug thread of this cpu */
1316 stop_machine_unpark(cpu);
1317 kthread_unpark(st->thread);
1319 /* Should we go further up ? */
1320 if (st->target > CPUHP_AP_ONLINE_IDLE)
1321 __cpuhp_kick_ap_work(st);
1323 complete(&st->done);
1326 /* Requires cpu_add_remove_lock to be held */
1327 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1329 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1330 struct task_struct *idle;
1333 cpu_hotplug_begin();
1335 if (!cpu_present(cpu)) {
1341 * The caller of do_cpu_up might have raced with another
1342 * caller. Ignore it for now.
1344 if (st->state >= target)
1347 if (st->state == CPUHP_OFFLINE) {
1348 /* Let it fail before we try to bring the cpu up */
1349 idle = idle_thread_get(cpu);
1351 ret = PTR_ERR(idle);
1356 cpuhp_tasks_frozen = tasks_frozen;
1358 st->target = target;
1360 * If the current CPU state is in the range of the AP hotplug thread,
1361 * then we need to kick the thread once more.
1363 if (st->state > CPUHP_BRINGUP_CPU) {
1364 ret = cpuhp_kick_ap_work(cpu);
1366 * The AP side has done the error rollback already. Just
1367 * return the error code.
1374 * Try to reach the target state. We max out on the BP at
1375 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1376 * responsible for bringing it up to the target state.
1378 target = min((int)target, CPUHP_BRINGUP_CPU);
1379 ret = cpuhp_up_callbacks(cpu, st, target);
1385 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
1389 if (!cpu_possible(cpu)) {
1390 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1392 #if defined(CONFIG_IA64)
1393 pr_err("please check additional_cpus= boot parameter\n");
1398 err = try_online_node(cpu_to_node(cpu));
1402 cpu_maps_update_begin();
1404 if (cpu_hotplug_disabled) {
1409 err = _cpu_up(cpu, 0, target);
1411 cpu_maps_update_done();
1415 int cpu_up(unsigned int cpu)
1417 return do_cpu_up(cpu, CPUHP_ONLINE);
1419 EXPORT_SYMBOL_GPL(cpu_up);
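/*
 * Illustrative sketch (not part of this file): offlining and re-onlining a
 * CPU from kernel code via the exported cpu_down()/cpu_up() helpers, which
 * drive the state machine to CPUHP_OFFLINE and back to CPUHP_ONLINE. The
 * function name is hypothetical.
 */
static int example_cycle_cpu(unsigned int cpu)
{
	int ret;

	ret = cpu_down(cpu);		/* returns -EBUSY if hotplug is disabled */
	if (ret)
		return ret;

	return cpu_up(cpu);
}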
1421 #ifdef CONFIG_PM_SLEEP_SMP
1422 static cpumask_var_t frozen_cpus;
1424 int freeze_secondary_cpus(int primary)
1428 cpu_maps_update_begin();
1429 if (!cpu_online(primary))
1430 primary = cpumask_first(cpu_online_mask);
1432 * We take down all of the non-boot CPUs in one shot to avoid races
1433 * with the userspace trying to use the CPU hotplug at the same time
1435 cpumask_clear(frozen_cpus);
1437 pr_info("Disabling non-boot CPUs ...\n");
1438 for_each_online_cpu(cpu) {
1441 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1442 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1443 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1445 cpumask_set_cpu(cpu, frozen_cpus);
1447 pr_err("Error taking CPU%d down: %d\n", cpu, error);
1453 BUG_ON(num_online_cpus() > 1);
1455 pr_err("Non-boot CPUs are not disabled\n");
1458 * Make sure the CPUs won't be enabled by someone else. We need to do
1459 * this even in case of failure as all disable_nonboot_cpus() users are
1460 * supposed to do enable_nonboot_cpus() on the failure path.
1462 cpu_hotplug_disabled++;
1464 cpu_maps_update_done();
1468 void __weak arch_enable_nonboot_cpus_begin(void)
1472 void __weak arch_enable_nonboot_cpus_end(void)
1476 void enable_nonboot_cpus(void)
1480 /* Allow everyone to use the CPU hotplug again */
1481 cpu_maps_update_begin();
1482 __cpu_hotplug_enable();
1483 if (cpumask_empty(frozen_cpus))
1486 pr_info("Enabling non-boot CPUs ...\n");
1488 arch_enable_nonboot_cpus_begin();
1490 for_each_cpu(cpu, frozen_cpus) {
1491 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1492 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1493 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1495 pr_info("CPU%d is up\n", cpu);
1498 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1501 arch_enable_nonboot_cpus_end();
1503 cpumask_clear(frozen_cpus);
1505 cpu_maps_update_done();
1508 static int __init alloc_frozen_cpus(void)
1510 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1514 core_initcall(alloc_frozen_cpus);
1517 * When callbacks for CPU hotplug notifications are being executed, we must
1518 * ensure that the state of the system with respect to the tasks being frozen
1519 * or not, as reported by the notification, remains unchanged *throughout the
1520 * duration* of the execution of the callbacks.
1521 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1523 * This synchronization is implemented by mutually excluding regular CPU
1524 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1525 * Hibernate notifications.
1528 cpu_hotplug_pm_callback(struct notifier_block *nb,
1529 unsigned long action, void *ptr)
1533 case PM_SUSPEND_PREPARE:
1534 case PM_HIBERNATION_PREPARE:
1535 cpu_hotplug_disable();
1538 case PM_POST_SUSPEND:
1539 case PM_POST_HIBERNATION:
1540 cpu_hotplug_enable();
1551 static int __init cpu_hotplug_pm_sync_init(void)
1554 * cpu_hotplug_pm_callback has higher priority than x86
1555 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1556 * to disable cpu hotplug to avoid cpu hotplug race.
1558 pm_notifier(cpu_hotplug_pm_callback, 0);
1561 core_initcall(cpu_hotplug_pm_sync_init);
1563 #endif /* CONFIG_PM_SLEEP_SMP */
1565 #endif /* CONFIG_SMP */
1567 /* Boot processor state steps */
1568 static struct cpuhp_step cpuhp_bp_states[] = {
1571 .startup.single = NULL,
1572 .teardown.single = NULL,
1575 [CPUHP_CREATE_THREADS]= {
1576 .name = "threads:prepare",
1577 .startup.single = smpboot_create_threads,
1578 .teardown.single = NULL,
1581 [CPUHP_PERF_PREPARE] = {
1582 .name = "perf:prepare",
1583 .startup.single = perf_event_init_cpu,
1584 .teardown.single = perf_event_exit_cpu,
1586 [CPUHP_WORKQUEUE_PREP] = {
1587 .name = "workqueue:prepare",
1588 .startup.single = workqueue_prepare_cpu,
1589 .teardown.single = NULL,
1591 [CPUHP_HRTIMERS_PREPARE] = {
1592 .name = "hrtimers:prepare",
1593 .startup.single = hrtimers_prepare_cpu,
1594 .teardown.single = hrtimers_dead_cpu,
1596 [CPUHP_SMPCFD_PREPARE] = {
1597 .name = "smpcfd:prepare",
1598 .startup.single = smpcfd_prepare_cpu,
1599 .teardown.single = smpcfd_dead_cpu,
1601 [CPUHP_RELAY_PREPARE] = {
1602 .name = "relay:prepare",
1603 .startup.single = relay_prepare_cpu,
1604 .teardown.single = NULL,
1606 [CPUHP_SLAB_PREPARE] = {
1607 .name = "slab:prepare",
1608 .startup.single = slab_prepare_cpu,
1609 .teardown.single = slab_dead_cpu,
1611 [CPUHP_RCUTREE_PREP] = {
1612 .name = "RCU/tree:prepare",
1613 .startup.single = rcutree_prepare_cpu,
1614 .teardown.single = rcutree_dead_cpu,
1617 * Preparatory and dead notifiers. Will be replaced once the notifiers
1618 * are converted to states.
1620 [CPUHP_NOTIFY_PREPARE] = {
1621 .name = "notify:prepare",
1622 .startup.single = notify_prepare,
1623 .teardown.single = notify_dead,
1628 * On the tear-down path, timers_dead_cpu() must be invoked
1629 * before blk_mq_queue_reinit_notify() from notify_dead(),
1630 * otherwise an RCU stall occurs.
1632 [CPUHP_TIMERS_DEAD] = {
1633 .name = "timers:dead",
1634 .startup.single = NULL,
1635 .teardown.single = timers_dead_cpu,
1637 /* Kicks the plugged cpu into life */
1638 [CPUHP_BRINGUP_CPU] = {
1639 .name = "cpu:bringup",
1640 .startup.single = bringup_cpu,
1641 .teardown.single = NULL,
1644 [CPUHP_AP_SMPCFD_DYING] = {
1645 .name = "smpcfd:dying",
1646 .startup.single = NULL,
1647 .teardown.single = smpcfd_dying_cpu,
1650 * Handled on the control processor until the plugged processor manages
1653 [CPUHP_TEARDOWN_CPU] = {
1654 .name = "cpu:teardown",
1655 .startup.single = NULL,
1656 .teardown.single = takedown_cpu,
1660 [CPUHP_BRINGUP_CPU] = { },
1664 /* Application processor state steps */
1665 static struct cpuhp_step cpuhp_ap_states[] = {
1667 /* Final state before CPU kills itself */
1668 [CPUHP_AP_IDLE_DEAD] = {
1669 .name = "idle:dead",
1672 * Last state before CPU enters the idle loop to die. Transient state
1673 * for synchronization.
1675 [CPUHP_AP_OFFLINE] = {
1676 .name = "ap:offline",
1679 /* First state is scheduler control. Interrupts are disabled */
1680 [CPUHP_AP_SCHED_STARTING] = {
1681 .name = "sched:starting",
1682 .startup.single = sched_cpu_starting,
1683 .teardown.single = sched_cpu_dying,
1685 [CPUHP_AP_RCUTREE_DYING] = {
1686 .name = "RCU/tree:dying",
1687 .startup.single = NULL,
1688 .teardown.single = rcutree_dying_cpu,
1690 /* Entry state on starting. Interrupts enabled from here on. Transient
1691 * state for synchronization */
1692 [CPUHP_AP_ONLINE] = {
1693 .name = "ap:online",
1695 /* Handle smpboot threads park/unpark */
1696 [CPUHP_AP_SMPBOOT_THREADS] = {
1697 .name = "smpboot/threads:online",
1698 .startup.single = smpboot_unpark_threads,
1699 .teardown.single = NULL,
1701 [CPUHP_AP_PERF_ONLINE] = {
1702 .name = "perf:online",
1703 .startup.single = perf_event_init_cpu,
1704 .teardown.single = perf_event_exit_cpu,
1706 [CPUHP_AP_WORKQUEUE_ONLINE] = {
1707 .name = "workqueue:online",
1708 .startup.single = workqueue_online_cpu,
1709 .teardown.single = workqueue_offline_cpu,
1711 [CPUHP_AP_RCUTREE_ONLINE] = {
1712 .name = "RCU/tree:online",
1713 .startup.single = rcutree_online_cpu,
1714 .teardown.single = rcutree_offline_cpu,
1718 * Online/down_prepare notifiers. Will be removed once the notifiers
1719 * are converted to states.
1721 [CPUHP_AP_NOTIFY_ONLINE] = {
1722 .name = "notify:online",
1723 .startup.single = notify_online,
1724 .teardown.single = notify_down_prepare,
1729 * The dynamically registered state space is here
1733 /* Last state is scheduler control setting the cpu active */
1734 [CPUHP_AP_ACTIVE] = {
1735 .name = "sched:active",
1736 .startup.single = sched_cpu_activate,
1737 .teardown.single = sched_cpu_deactivate,
1741 /* CPU is fully up and running. */
1744 .startup.single = NULL,
1745 .teardown.single = NULL,
1749 /* Sanity check for callbacks */
1750 static int cpuhp_cb_check(enum cpuhp_state state)
1752 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1757 static void cpuhp_store_callbacks(enum cpuhp_state state,
1759 int (*startup)(unsigned int cpu),
1760 int (*teardown)(unsigned int cpu),
1761 bool multi_instance)
1763 /* (Un)Install the callbacks for further cpu hotplug operations */
1764 struct cpuhp_step *sp;
1766 mutex_lock(&cpuhp_state_mutex);
1767 sp = cpuhp_get_step(state);
1768 sp->startup.single = startup;
1769 sp->teardown.single = teardown;
1771 sp->multi_instance = multi_instance;
1772 INIT_HLIST_HEAD(&sp->list);
1773 mutex_unlock(&cpuhp_state_mutex);
1776 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1778 return cpuhp_get_step(state)->teardown.single;
1782 * Call the startup/teardown function for a step either on the AP or
1783 * on the current CPU.
1785 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1786 struct hlist_node *node)
1788 struct cpuhp_step *sp = cpuhp_get_step(state);
1791 if ((bringup && !sp->startup.single) ||
1792 (!bringup && !sp->teardown.single))
1795 * The non-AP-bound callbacks can fail on bringup. On teardown,
1796 * e.g. during module removal, we crash for now.
1799 if (cpuhp_is_ap_state(state))
1800 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1802 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1804 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1806 BUG_ON(ret && !bringup);
1811 * Called from __cpuhp_setup_state on a recoverable failure.
1813 * Note: The teardown callbacks for rollback are not allowed to fail!
1815 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1816 struct hlist_node *node)
1820 /* Roll back the already executed steps on the other cpus */
1821 for_each_present_cpu(cpu) {
1822 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1823 int cpustate = st->state;
1825 if (cpu >= failedcpu)
1828 /* Did we invoke the startup call on that cpu ? */
1829 if (cpustate >= state)
1830 cpuhp_issue_call(cpu, state, false, node);
1835 * Returns a free slot for dynamic assignment in the Online state space. The
1836 * states are protected by the cpuhp_state_mutex and an empty slot is identified
1837 * by having no name assigned.
1839 static int cpuhp_reserve_state(enum cpuhp_state state)
1843 mutex_lock(&cpuhp_state_mutex);
1844 for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
1845 if (cpuhp_ap_states[i].name)
1848 cpuhp_ap_states[i].name = "Reserved";
1849 mutex_unlock(&cpuhp_state_mutex);
1852 mutex_unlock(&cpuhp_state_mutex);
1853 WARN(1, "No more dynamic states available for CPU hotplug\n");
1857 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1860 struct cpuhp_step *sp;
1864 sp = cpuhp_get_step(state);
1865 if (sp->multi_instance == false)
1870 if (!invoke || !sp->startup.multi)
1874 * Try to call the startup callback for each present cpu
1875 * depending on the hotplug state of the cpu.
1877 for_each_present_cpu(cpu) {
1878 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1879 int cpustate = st->state;
1881 if (cpustate < state)
1884 ret = cpuhp_issue_call(cpu, state, true, node);
1886 if (sp->teardown.multi)
1887 cpuhp_rollback_install(cpu, state, node);
1893 mutex_lock(&cpuhp_state_mutex);
1894 hlist_add_head(node, &sp->list);
1895 mutex_unlock(&cpuhp_state_mutex);
1901 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
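/*
 * Illustrative sketch (not part of this file): multi-instance usage as seen
 * from a driver, assuming the cpuhp_setup_state_multi() and
 * cpuhp_state_add_instance() wrappers from <linux/cpuhotplug.h>. Each
 * instance embeds an hlist_node that is passed back to the callbacks. All
 * example names are hypothetical.
 */
struct example_instance {
	struct hlist_node node;
	int id;
};

static int example_inst_online(unsigned int cpu, struct hlist_node *node)
{
	struct example_instance *inst =
		hlist_entry(node, struct example_instance, node);

	/* ... enable the per-CPU part of this instance ... */
	(void)inst;
	return 0;
}

/* Sketch of the registration sequence:
 *	state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "example:online",
 *					example_inst_online, NULL);
 *	...
 *	cpuhp_state_add_instance(state, &inst->node);
 */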
1904 * __cpuhp_setup_state - Set up the callbacks for a hotplug machine state
1905 * @state: The state to setup
1906 * @invoke: If true, the startup function is invoked for cpus where
1907 * cpu state >= @state
1908 * @startup: startup callback function
1909 * @teardown: teardown callback function
1911 * Returns 0 if successful, otherwise a proper error code
1913 int __cpuhp_setup_state(enum cpuhp_state state,
1914 const char *name, bool invoke,
1915 int (*startup)(unsigned int cpu),
1916 int (*teardown)(unsigned int cpu),
1917 bool multi_instance)
1922 if (cpuhp_cb_check(state) || !name)
1927 /* currently assignments for the ONLINE state are possible */
1928 if (state == CPUHP_AP_ONLINE_DYN) {
1930 ret = cpuhp_reserve_state(state);
1936 cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
1938 if (!invoke || !startup)
1942 * Try to call the startup callback for each present cpu
1943 * depending on the hotplug state of the cpu.
1945 for_each_present_cpu(cpu) {
1946 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1947 int cpustate = st->state;
1949 if (cpustate < state)
1952 ret = cpuhp_issue_call(cpu, state, true, NULL);
1955 cpuhp_rollback_install(cpu, state, NULL);
1956 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1962 if (!ret && dyn_state)
1966 EXPORT_SYMBOL(__cpuhp_setup_state);
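/*
 * Illustrative sketch (not part of this file): registering a dynamic online
 * state from a driver, assuming the cpuhp_setup_state() and
 * cpuhp_remove_state() wrappers from <linux/cpuhotplug.h>. With
 * CPUHP_AP_ONLINE_DYN a successful setup returns the reserved state number,
 * which is needed later for removal. All names are hypothetical.
 */
static int example_online(unsigned int cpu)
{
	pr_info("example: cpu%u reached the online state\n", cpu);
	return 0;
}

static int example_prep_down(unsigned int cpu)
{
	pr_info("example: cpu%u is going down\n", cpu);
	return 0;
}

static int example_hp_state;

static int __init example_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_online, example_prep_down);
	if (ret < 0)
		return ret;
	example_hp_state = ret;
	return 0;
}

/* On module exit (sketch): cpuhp_remove_state(example_hp_state); */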
1968 int __cpuhp_state_remove_instance(enum cpuhp_state state,
1969 struct hlist_node *node, bool invoke)
1971 struct cpuhp_step *sp = cpuhp_get_step(state);
1974 BUG_ON(cpuhp_cb_check(state));
1976 if (!sp->multi_instance)
1980 if (!invoke || !cpuhp_get_teardown_cb(state))
1983 * Call the teardown callback for each present cpu depending
1984 * on the hotplug state of the cpu. This function is not
1985 * allowed to fail currently!
1987 for_each_present_cpu(cpu) {
1988 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1989 int cpustate = st->state;
1991 if (cpustate >= state)
1992 cpuhp_issue_call(cpu, state, false, node);
1996 mutex_lock(&cpuhp_state_mutex);
1998 mutex_unlock(&cpuhp_state_mutex);
2003 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
2005 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
2006 * @state: The state to remove
2007 * @invoke: If true, the teardown function is invoked for cpus where
2008 * cpu state >= @state
2010 * The teardown callback is currently not allowed to fail. Think
2011 * about module removal!
2013 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2015 struct cpuhp_step *sp = cpuhp_get_step(state);
2018 BUG_ON(cpuhp_cb_check(state));
2022 if (sp->multi_instance) {
2023 WARN(!hlist_empty(&sp->list),
2024 "Error: Removing state %d which has instances left.\n",
2029 if (!invoke || !cpuhp_get_teardown_cb(state))
2033 * Call the teardown callback for each present cpu depending
2034 * on the hotplug state of the cpu. This function is not
2035 * allowed to fail currently!
2037 for_each_present_cpu(cpu) {
2038 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2039 int cpustate = st->state;
2041 if (cpustate >= state)
2042 cpuhp_issue_call(cpu, state, false, NULL);
2045 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2048 EXPORT_SYMBOL(__cpuhp_remove_state);
2050 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2051 static ssize_t show_cpuhp_state(struct device *dev,
2052 struct device_attribute *attr, char *buf)
2054 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2056 return sprintf(buf, "%d\n", st->state);
2058 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
2060 static ssize_t write_cpuhp_target(struct device *dev,
2061 struct device_attribute *attr,
2062 const char *buf, size_t count)
2064 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2065 struct cpuhp_step *sp;
2068 ret = kstrtoint(buf, 10, &target);
2072 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2073 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2076 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2080 ret = lock_device_hotplug_sysfs();
2084 mutex_lock(&cpuhp_state_mutex);
2085 sp = cpuhp_get_step(target);
2086 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2087 mutex_unlock(&cpuhp_state_mutex);
2091 if (st->state < target)
2092 ret = do_cpu_up(dev->id, target);
2094 ret = do_cpu_down(dev->id, target);
2096 unlock_device_hotplug();
2097 return ret ? ret : count;
2100 static ssize_t show_cpuhp_target(struct device *dev,
2101 struct device_attribute *attr, char *buf)
2103 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2105 return sprintf(buf, "%d\n", st->target);
2107 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
2109 static struct attribute *cpuhp_cpu_attrs[] = {
2110 &dev_attr_state.attr,
2111 &dev_attr_target.attr,
2115 static struct attribute_group cpuhp_cpu_attr_group = {
2116 .attrs = cpuhp_cpu_attrs,
2121 static ssize_t show_cpuhp_states(struct device *dev,
2122 struct device_attribute *attr, char *buf)
2124 ssize_t cur, res = 0;
2127 mutex_lock(&cpuhp_state_mutex);
2128 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2129 struct cpuhp_step *sp = cpuhp_get_step(i);
2132 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2137 mutex_unlock(&cpuhp_state_mutex);
2140 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
2142 static struct attribute *cpuhp_cpu_root_attrs[] = {
2143 &dev_attr_states.attr,
2147 static struct attribute_group cpuhp_cpu_root_attr_group = {
2148 .attrs = cpuhp_cpu_root_attrs,
2153 static int __init cpuhp_sysfs_init(void)
2157 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2158 &cpuhp_cpu_root_attr_group);
2162 for_each_possible_cpu(cpu) {
2163 struct device *dev = get_cpu_device(cpu);
2167 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2173 device_initcall(cpuhp_sysfs_init);
2177 * cpu_bit_bitmap[] is a special, "compressed" data structure that
2178 * represents the binary values 1<<nr for all NR_CPUS bit positions.
2180 * It is used by cpumask_of() to get a constant address to a CPU
2181 * mask value that has a single bit set only.
2184 /* cpu_bit_bitmap[0] is empty - so we can back into it */
2185 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
2186 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2187 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2188 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2190 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2192 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
2193 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
2194 #if BITS_PER_LONG > 32
2195 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
2196 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
2199 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
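/*
 * Illustrative sketch (not part of this file): roughly how cpumask_of() in
 * <linux/cpumask.h> turns a CPU number into a pointer inside
 * cpu_bit_bitmap[]. Row (1 + cpu % BITS_PER_LONG) has exactly the bit
 * (cpu % BITS_PER_LONG) set in word 0; backing the pointer up by
 * (cpu / BITS_PER_LONG) words places that bit at position cpu of the
 * resulting mask. The function name is hypothetical.
 */
static const struct cpumask *example_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}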
2201 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2202 EXPORT_SYMBOL(cpu_all_bits);
2204 #ifdef CONFIG_INIT_ALL_POSSIBLE
2205 struct cpumask __cpu_possible_mask __read_mostly
2208 struct cpumask __cpu_possible_mask __read_mostly;
2210 EXPORT_SYMBOL(__cpu_possible_mask);
2212 struct cpumask __cpu_online_mask __read_mostly;
2213 EXPORT_SYMBOL(__cpu_online_mask);
2215 struct cpumask __cpu_present_mask __read_mostly;
2216 EXPORT_SYMBOL(__cpu_present_mask);
2218 struct cpumask __cpu_active_mask __read_mostly;
2219 EXPORT_SYMBOL(__cpu_active_mask);
2221 void init_cpu_present(const struct cpumask *src)
2223 cpumask_copy(&__cpu_present_mask, src);
2226 void init_cpu_possible(const struct cpumask *src)
2228 cpumask_copy(&__cpu_possible_mask, src);
2231 void init_cpu_online(const struct cpumask *src)
2233 cpumask_copy(&__cpu_online_mask, src);
2237 * Activate the first processor.
2239 void __init boot_cpu_init(void)
2241 int cpu = smp_processor_id();
2243 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
2244 set_cpu_online(cpu, true);
2245 set_cpu_active(cpu, true);
2246 set_cpu_present(cpu, true);
2247 set_cpu_possible(cpu, true);
2251 * Must be called _AFTER_ setting up the per_cpu areas
2253 void __init boot_cpu_state_init(void)
2255 per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;