/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
	int rt;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}
static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}
static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}
static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
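
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * defines a handler, binds it to a work item, then queues the item on a
 * workqueue it created earlier. "my_wq" and "my_work_fn" are hypothetical
 * names, not kernel APIs.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);
 */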
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
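
/*
 * Usage sketch (illustrative): queue a work item to run roughly one
 * second from now. "my_dwork_fn" and "my_wq" are hypothetical.
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		...
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);
 */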
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
#ifndef DDE_LINUX
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
#endif /* DDE_LINUX */
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();
	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);
		try_to_freeze();
		if (kthread_should_stop())
			break;
		run_workqueue(cwq);
	}
	return 0;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};
static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

	complete(&barr->done);
}
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, &cwq->worklist);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);
		if (active)
			wait_for_completion(&barr.done);
	}
	return active;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu_mask_nr(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
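
/*
 * Usage sketch (illustrative): the typical driver-shutdown ordering,
 * assuming the caller has already stopped everything that could queue
 * new work. "my_irq" and "my_wq" are hypothetical.
 *
 *	disable_irq(my_irq);		(hypothetical work source)
 *	flush_workqueue(my_wq);		(wait for already-queued works)
 *	destroy_workqueue(my_wq);	(flushes again, then stops threads)
 */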
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}
static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
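
/*
 * Usage sketch (illustrative): tearing down a work item that may re-arm
 * itself from its own handler. After this returns the handler is neither
 * queued nor running, and the pending timer (if any) has been deleted.
 *
 *	cancel_delayed_work_sync(&my_dwork);	(for delayed work)
 *	cancel_work_sync(&my_work);		(for plain work)
 */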
static struct workqueue_struct *keventd_wq __read_mostly;
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
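
/*
 * Usage sketch (illustrative): a periodic poll that re-arms itself from
 * its own handler; teardown pairs with cancel_delayed_work_sync() above.
 * "my_poll_fn" is a hypothetical handler.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		(do the periodic check)
 *		schedule_delayed_work(dwork, HZ);
 *	}
 */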
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}
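
/*
 * Usage sketch (illustrative): run a hypothetical handler once on every
 * online CPU and wait for all instances to finish.
 *
 *	static void drain_local_state(struct work_struct *unused)
 *	{
 *		(per-CPU side effect, runs on each CPU in turn)
 *	}
 *
 *	int err = schedule_on_each_cpu(drain_local_state);
 */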
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
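
/*
 * Usage sketch (illustrative): deferring a release routine that can be
 * reached from interrupt context. @ew must stay valid until the handler
 * has run, so it is usually embedded in the object being released;
 * "my_release" and "struct my_obj" are hypothetical.
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *		...
 *	};
 *
 *	execute_in_process_context(my_release, &obj->ew);
 */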
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (cwq->wq->rt)
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	return 0;
}
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
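
/*
 * Usage sketch (illustrative): callers normally reach this function
 * through the wrapper macros in workqueue.h rather than directly.
 *
 *	struct workqueue_struct *my_wq, *my_st_wq;
 *
 *	my_wq = create_workqueue("my_wq");	(one thread per CPU)
 *	my_st_wq = create_singlethread_workqueue("my_st_wq");
 *	...
 *	destroy_workqueue(my_wq);
 */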
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return ret;
}
#ifdef CONFIG_SMP
static struct workqueue_struct *work_on_cpu_wq __read_mostly;

struct work_for_cpu {
	struct work_struct work;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static void do_work_for_cpu(struct work_struct *w)
{
	struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);

	wfc->ret = wfc->fn(wfc->arg);
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct work_for_cpu wfc;

	INIT_WORK(&wfc.work, do_work_for_cpu);
	wfc.fn = fn;
	wfc.arg = arg;
	queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
	flush_work(&wfc.work);

	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
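
/*
 * Usage sketch (illustrative): run a hypothetical function on CPU 2 and
 * collect its return value; the caller is responsible for keeping the
 * CPU online, e.g. under get_online_cpus().
 *
 *	static long read_chip_id(void *arg)
 *	{
 *		(runs on the target CPU)
 *		return 0;
 *	}
 *
 *	long id = work_on_cpu(2, read_chip_id, NULL);
 */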
#endif /* CONFIG_SMP */
int __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
#ifdef CONFIG_SMP
	work_on_cpu_wq = create_workqueue("work_on_cpu");
	BUG_ON(!work_on_cpu_wq);
#endif
	return 0;
}
core_initcall(init_workqueues);