kernel/workqueue.c

1/*
2 * kernel/workqueue.c - generic async execution with shared worker pool
3 *
4 * Copyright (C) 2002 Ingo Molnar
5 *
6 * Derived from the taskqueue/keventd code by:
7 * David Woodhouse <dwmw2@infradead.org>
8 * Andrew Morton
9 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
10 * Theodore Ts'o <tytso@mit.edu>
11 *
12 * Made to use alloc_percpu by Christoph Lameter.
13 *
14 * Copyright (C) 2010 SUSE Linux Products GmbH
15 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
16 *
17 * This is the generic async execution mechanism. Work items are
18 * executed in process context. The worker pool is shared and
19 * automatically managed. There is one worker pool for each CPU and
20 * one extra for works which are better served by workers which are
21 * not bound to any specific CPU.
22 *
23 * Please read Documentation/workqueue.txt for details.
24 */
25
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/sched.h>
29#include <linux/init.h>
30#include <linux/signal.h>
31#include <linux/completion.h>
32#include <linux/workqueue.h>
33#include <linux/slab.h>
34#include <linux/cpu.h>
35#include <linux/notifier.h>
36#include <linux/kthread.h>
37#include <linux/hardirq.h>
38#include <linux/mempolicy.h>
39#include <linux/freezer.h>
40#include <linux/kallsyms.h>
41#include <linux/debug_locks.h>
42#include <linux/lockdep.h>
43#include <linux/idr.h>
44
45#define CREATE_TRACE_POINTS
46#include <trace/events/workqueue.h>
47
48#include "workqueue_sched.h"
49
50enum {
51    /* global_cwq flags */
52    GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
53    GCWQ_MANAGING_WORKERS = 1 << 1, /* managing workers */
54    GCWQ_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
55    GCWQ_FREEZING = 1 << 3, /* freeze in progress */
56    GCWQ_HIGHPRI_PENDING = 1 << 4, /* highpri works on queue */
57
58    /* worker flags */
59    WORKER_STARTED = 1 << 0, /* started */
60    WORKER_DIE = 1 << 1, /* die die die */
61    WORKER_IDLE = 1 << 2, /* is idle */
62    WORKER_PREP = 1 << 3, /* preparing to run works */
63    WORKER_ROGUE = 1 << 4, /* not bound to any cpu */
64    WORKER_REBIND = 1 << 5, /* mom is home, come back */
65    WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
66    WORKER_UNBOUND = 1 << 7, /* worker is unbound */
67
68    WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
69                  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
70
71    /* gcwq->trustee_state */
72    TRUSTEE_START = 0, /* start */
73    TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */
74    TRUSTEE_BUTCHER = 2, /* butcher workers */
75    TRUSTEE_RELEASE = 3, /* release workers */
76    TRUSTEE_DONE = 4, /* trustee is done */
77
78    BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
79    BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
80    BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
81
82    MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
83    IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
84
85    MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */
86    MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
87    CREATE_COOLDOWN = HZ, /* time to breathe after fail */
88    TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
89
90    /*
91     * Rescue workers are used only in emergencies and are shared by
92     * all cpus. Give them nice level -20.
93     */
94    RESCUER_NICE_LEVEL = -20,
95};
96
97/*
98 * Structure fields follow one of the following exclusion rules.
99 *
100 * I: Modifiable by initialization/destruction paths and read-only for
101 * everyone else.
102 *
103 * P: Preemption protected. Disabling preemption is enough and should
104 * only be modified and accessed from the local cpu.
105 *
106 * L: gcwq->lock protected. Access with gcwq->lock held.
107 *
108 * X: During normal operation, modification requires gcwq->lock and
109 * should be done only from local cpu. Either disabling preemption
110 * on local cpu or grabbing gcwq->lock is enough for read access.
111 * If GCWQ_DISASSOCIATED is set, it's identical to L.
112 *
113 * F: wq->flush_mutex protected.
114 *
115 * W: workqueue_lock protected.
116 */
117
118struct global_cwq;
119
120/*
121 * The poor guys doing the actual heavy lifting. All on-duty workers
122 * are either serving the manager role, on idle list or on busy hash.
123 */
124struct worker {
125    /* on idle list while idle, on busy hash table while busy */
126    union {
127        struct list_head entry; /* L: while idle */
128        struct hlist_node hentry; /* L: while busy */
129    };
130
131    struct work_struct *current_work; /* L: work being processed */
132    struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
133    struct list_head scheduled; /* L: scheduled works */
134    struct task_struct *task; /* I: worker task */
135    struct global_cwq *gcwq; /* I: the associated gcwq */
136    /* 64 bytes boundary on 64bit, 32 on 32bit */
137    unsigned long last_active; /* L: last active timestamp */
138    unsigned int flags; /* X: flags */
139    int id; /* I: worker id */
140    struct work_struct rebind_work; /* L: rebind worker to cpu */
141};
142
143/*
144 * Global per-cpu workqueue. There's one and only one for each cpu
145 * and all works are queued and processed here regardless of their
146 * target workqueues.
147 */
148struct global_cwq {
149    spinlock_t lock; /* the gcwq lock */
150    struct list_head worklist; /* L: list of pending works */
151    unsigned int cpu; /* I: the associated cpu */
152    unsigned int flags; /* L: GCWQ_* flags */
153
154    int nr_workers; /* L: total number of workers */
155    int nr_idle; /* L: currently idle ones */
156
157    /* workers are chained either in the idle_list or busy_hash */
158    struct list_head idle_list; /* X: list of idle workers */
159    struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
160                        /* L: hash of busy workers */
161
162    struct timer_list idle_timer; /* L: worker idle timeout */
163    struct timer_list mayday_timer; /* L: SOS timer for workers */
164
165    struct ida worker_ida; /* L: for worker IDs */
166
167    struct task_struct *trustee; /* L: for gcwq shutdown */
168    unsigned int trustee_state; /* L: trustee state */
169    wait_queue_head_t trustee_wait; /* trustee wait */
170    struct worker *first_idle; /* L: first idle worker */
171} ____cacheline_aligned_in_smp;
172
173/*
174 * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
175 * work_struct->data are used for flags and thus cwqs need to be
176 * aligned on a (1 << WORK_STRUCT_FLAG_BITS) byte boundary.
177 */
178struct cpu_workqueue_struct {
179    struct global_cwq *gcwq; /* I: the associated gcwq */
180    struct workqueue_struct *wq; /* I: the owning workqueue */
181    int work_color; /* L: current color */
182    int flush_color; /* L: flushing color */
183    int nr_in_flight[WORK_NR_COLORS];
184                        /* L: nr of in_flight works */
185    int nr_active; /* L: nr of active works */
186    int max_active; /* L: max active works */
187    struct list_head delayed_works; /* L: delayed works */
188};
189
190/*
191 * Structure used to wait for workqueue flush.
192 */
193struct wq_flusher {
194    struct list_head list; /* F: list of flushers */
195    int flush_color; /* F: flush color waiting for */
196    struct completion done; /* flush completion */
197};
198
199/*
200 * All cpumasks are assumed to be always set on UP and thus can't be
201 * used to determine whether there's something to be done.
202 */
203#ifdef CONFIG_SMP
204typedef cpumask_var_t mayday_mask_t;
205#define mayday_test_and_set_cpu(cpu, mask) \
206    cpumask_test_and_set_cpu((cpu), (mask))
207#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
208#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
209#define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
210#define free_mayday_mask(mask) free_cpumask_var((mask))
211#else
212typedef unsigned long mayday_mask_t;
213#define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask))
214#define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask))
215#define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask))
216#define alloc_mayday_mask(maskp, gfp) true
217#define free_mayday_mask(mask) do { } while (0)
218#endif
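/*
 * Illustrative sketch (not lifted from this file): a rescuer typically
 * consumes the mayday mask roughly as follows, with the real logic
 * living in rescuer_thread() elsewhere in this file.  wq here is a
 * hypothetical workqueue created with WQ_RESCUER.
 *
 *    unsigned int cpu;
 *
 *    for_each_mayday_cpu(cpu, wq->mayday_mask) {
 *        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 *
 *        mayday_clear_cpu(cpu, wq->mayday_mask);
 *        (migrate to cwq's gcwq and process its works)
 *    }
 */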
219
220/*
221 * The externally visible workqueue abstraction is an array of
222 * per-CPU workqueues:
223 */
224struct workqueue_struct {
225    unsigned int flags; /* I: WQ_* flags */
226    union {
227        struct cpu_workqueue_struct __percpu *pcpu;
228        struct cpu_workqueue_struct *single;
229        unsigned long v;
230    } cpu_wq; /* I: cwq's */
231    struct list_head list; /* W: list of all workqueues */
232
233    struct mutex flush_mutex; /* protects wq flushing */
234    int work_color; /* F: current work color */
235    int flush_color; /* F: current flush color */
236    atomic_t nr_cwqs_to_flush; /* flush in progress */
237    struct wq_flusher *first_flusher; /* F: first flusher */
238    struct list_head flusher_queue; /* F: flush waiters */
239    struct list_head flusher_overflow; /* F: flush overflow list */
240
241    mayday_mask_t mayday_mask; /* cpus requesting rescue */
242    struct worker *rescuer; /* I: rescue worker */
243
244    int saved_max_active; /* W: saved cwq max_active */
245    const char *name; /* I: workqueue name */
246#ifdef CONFIG_LOCKDEP
247    struct lockdep_map lockdep_map;
248#endif
249};
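/*
 * Example (illustrative only): a driver-private workqueue backed by a
 * rescuer and limited to one in-flight work per cpu could be created
 * with
 *
 *    struct workqueue_struct *wq = alloc_workqueue("my_wq", WQ_RESCUER, 1);
 *
 *    if (!wq)
 *        return -ENOMEM;
 *
 * "my_wq" is a made-up name; alloc_workqueue() populates the fields
 * above (cpu_wq, rescuer, mayday_mask, ...).
 */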
250
251struct workqueue_struct *system_wq __read_mostly;
252struct workqueue_struct *system_long_wq __read_mostly;
253struct workqueue_struct *system_nrt_wq __read_mostly;
254struct workqueue_struct *system_unbound_wq __read_mostly;
255EXPORT_SYMBOL_GPL(system_wq);
256EXPORT_SYMBOL_GPL(system_long_wq);
257EXPORT_SYMBOL_GPL(system_nrt_wq);
258EXPORT_SYMBOL_GPL(system_unbound_wq);
259
260#define for_each_busy_worker(worker, i, pos, gcwq) \
261    for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
262        hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
263
264static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
265                  unsigned int sw)
266{
267    if (cpu < nr_cpu_ids) {
268        if (sw & 1) {
269            cpu = cpumask_next(cpu, mask);
270            if (cpu < nr_cpu_ids)
271                return cpu;
272        }
273        if (sw & 2)
274            return WORK_CPU_UNBOUND;
275    }
276    return WORK_CPU_NONE;
277}
278
279static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
280                struct workqueue_struct *wq)
281{
282    return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
283}
284
285/*
286 * CPU iterators
287 *
288 * An extra gcwq is defined for an invalid cpu number
289 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
290 * specific CPU. The following iterators are similar to
291 * for_each_*_cpu() iterators but also considers the unbound gcwq.
292 *
293 * for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND
294 * for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND
295 * for_each_cwq_cpu() : possible CPUs for bound workqueues,
296 * WORK_CPU_UNBOUND for unbound workqueues
297 */
298#define for_each_gcwq_cpu(cpu) \
299    for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \
300         (cpu) < WORK_CPU_NONE; \
301         (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
302
303#define for_each_online_gcwq_cpu(cpu) \
304    for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \
305         (cpu) < WORK_CPU_NONE; \
306         (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
307
308#define for_each_cwq_cpu(cpu, wq) \
309    for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \
310         (cpu) < WORK_CPU_NONE; \
311         (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
312
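/*
 * Example (illustrative, not part of this file): walking every gcwq
 * including the unbound one, e.g. to dump pool state:
 *
 *    unsigned int cpu;
 *
 *    for_each_gcwq_cpu(cpu) {
 *        struct global_cwq *gcwq = get_gcwq(cpu);
 *
 *        printk(KERN_DEBUG "gcwq %u: %d workers, %d idle\n",
 *               gcwq->cpu, gcwq->nr_workers, gcwq->nr_idle);
 *    }
 */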
313#ifdef CONFIG_LOCKDEP
314/**
315 * in_workqueue_context() - in context of specified workqueue?
316 * @wq: the workqueue of interest
317 *
318 * Checks lockdep state to see if the current task is executing from
319 * within a workqueue item. This function exists only if lockdep is
320 * enabled.
321 */
322int in_workqueue_context(struct workqueue_struct *wq)
323{
324    return lock_is_held(&wq->lockdep_map);
325}
326#endif
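/*
 * Example (illustrative): with lockdep enabled, a subsystem could use
 * this to assert that a helper only runs from its own workqueue.
 * my_wq is a hypothetical workqueue owned by the caller.
 *
 *    #ifdef CONFIG_LOCKDEP
 *        WARN_ON_ONCE(!in_workqueue_context(my_wq));
 *    #endif
 */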
327
328#ifdef CONFIG_DEBUG_OBJECTS_WORK
329
330static struct debug_obj_descr work_debug_descr;
331
332/*
333 * fixup_init is called when:
334 * - an active object is initialized
335 */
336static int work_fixup_init(void *addr, enum debug_obj_state state)
337{
338    struct work_struct *work = addr;
339
340    switch (state) {
341    case ODEBUG_STATE_ACTIVE:
342        cancel_work_sync(work);
343        debug_object_init(work, &work_debug_descr);
344        return 1;
345    default:
346        return 0;
347    }
348}
349
350/*
351 * fixup_activate is called when:
352 * - an active object is activated
353 * - an unknown object is activated (might be a statically initialized object)
354 */
355static int work_fixup_activate(void *addr, enum debug_obj_state state)
356{
357    struct work_struct *work = addr;
358
359    switch (state) {
360
361    case ODEBUG_STATE_NOTAVAILABLE:
362        /*
363         * This is not really a fixup. The work struct was
364         * statically initialized. We just make sure that it
365         * is tracked in the object tracker.
366         */
367        if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
368            debug_object_init(work, &work_debug_descr);
369            debug_object_activate(work, &work_debug_descr);
370            return 0;
371        }
372        WARN_ON_ONCE(1);
373        return 0;
374
375    case ODEBUG_STATE_ACTIVE:
376        WARN_ON(1);
377
378    default:
379        return 0;
380    }
381}
382
383/*
384 * fixup_free is called when:
385 * - an active object is freed
386 */
387static int work_fixup_free(void *addr, enum debug_obj_state state)
388{
389    struct work_struct *work = addr;
390
391    switch (state) {
392    case ODEBUG_STATE_ACTIVE:
393        cancel_work_sync(work);
394        debug_object_free(work, &work_debug_descr);
395        return 1;
396    default:
397        return 0;
398    }
399}
400
401static struct debug_obj_descr work_debug_descr = {
402    .name = "work_struct",
403    .fixup_init = work_fixup_init,
404    .fixup_activate = work_fixup_activate,
405    .fixup_free = work_fixup_free,
406};
407
408static inline void debug_work_activate(struct work_struct *work)
409{
410    debug_object_activate(work, &work_debug_descr);
411}
412
413static inline void debug_work_deactivate(struct work_struct *work)
414{
415    debug_object_deactivate(work, &work_debug_descr);
416}
417
418void __init_work(struct work_struct *work, int onstack)
419{
420    if (onstack)
421        debug_object_init_on_stack(work, &work_debug_descr);
422    else
423        debug_object_init(work, &work_debug_descr);
424}
425EXPORT_SYMBOL_GPL(__init_work);
426
427void destroy_work_on_stack(struct work_struct *work)
428{
429    debug_object_free(work, &work_debug_descr);
430}
431EXPORT_SYMBOL_GPL(destroy_work_on_stack);
432
433#else
434static inline void debug_work_activate(struct work_struct *work) { }
435static inline void debug_work_deactivate(struct work_struct *work) { }
436#endif
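/*
 * Illustrative usage of the on-stack hooks above (my_fn is a made-up
 * handler): an on-stack work item must be initialized with
 * INIT_WORK_ONSTACK() (which reaches __init_work(work, 1)) and torn
 * down with destroy_work_on_stack() before the stack frame goes away.
 *
 *    struct work_struct work;
 *
 *    INIT_WORK_ONSTACK(&work, my_fn);
 *    schedule_work(&work);
 *    flush_work(&work);
 *    destroy_work_on_stack(&work);
 */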
437
438/* Serializes the accesses to the list of workqueues. */
439static DEFINE_SPINLOCK(workqueue_lock);
440static LIST_HEAD(workqueues);
441static bool workqueue_freezing; /* W: have wqs started freezing? */
442
443/*
444 * The almighty global cpu workqueues. nr_running is the only field
445 * which is expected to be used frequently by other cpus via
446 * try_to_wake_up(). Put it in a separate cacheline.
447 */
448static DEFINE_PER_CPU(struct global_cwq, global_cwq);
449static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
450
451/*
452 * Global cpu workqueue and nr_running counter for unbound gcwq. The
453 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
454 * workers have WORKER_UNBOUND set.
455 */
456static struct global_cwq unbound_global_cwq;
457static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0); /* always 0 */
458
459static int worker_thread(void *__worker);
460
461static struct global_cwq *get_gcwq(unsigned int cpu)
462{
463    if (cpu != WORK_CPU_UNBOUND)
464        return &per_cpu(global_cwq, cpu);
465    else
466        return &unbound_global_cwq;
467}
468
469static atomic_t *get_gcwq_nr_running(unsigned int cpu)
470{
471    if (cpu != WORK_CPU_UNBOUND)
472        return &per_cpu(gcwq_nr_running, cpu);
473    else
474        return &unbound_gcwq_nr_running;
475}
476
477static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
478                        struct workqueue_struct *wq)
479{
480    if (!(wq->flags & WQ_UNBOUND)) {
481        if (likely(cpu < nr_cpu_ids)) {
482#ifdef CONFIG_SMP
483            return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
484#else
485            return wq->cpu_wq.single;
486#endif
487        }
488    } else if (likely(cpu == WORK_CPU_UNBOUND))
489        return wq->cpu_wq.single;
490    return NULL;
491}
492
493static unsigned int work_color_to_flags(int color)
494{
495    return color << WORK_STRUCT_COLOR_SHIFT;
496}
497
498static int get_work_color(struct work_struct *work)
499{
500    return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
501        ((1 << WORK_STRUCT_COLOR_BITS) - 1);
502}
503
504static int work_next_color(int color)
505{
506    return (color + 1) % WORK_NR_COLORS;
507}
508
509/*
510 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
511 * work is on queue. Once execution starts, WORK_STRUCT_CWQ is
512 * cleared and the work data contains the cpu number it was last on.
513 *
514 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
515 * cwq, cpu or clear work->data. These functions should only be
516 * called while the work is owned - ie. while the PENDING bit is set.
517 *
518 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
519 * corresponding to a work. gcwq is available once the work has been
520 * queued anywhere after initialization. cwq is available only from
521 * queueing until execution starts.
522 */
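/*
 * For example (illustrative values): while queued, a work might carry
 *
 *    work->data == (unsigned long)cwq | WORK_STRUCT_CWQ | WORK_STRUCT_PENDING
 *
 * and once execution starts it is rewritten by set_work_cpu() to
 *
 *    work->data == (cpu << WORK_STRUCT_FLAG_BITS) | WORK_STRUCT_PENDING
 *
 * which is what get_work_cwq() and get_work_gcwq() below decode.
 */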
523static inline void set_work_data(struct work_struct *work, unsigned long data,
524                 unsigned long flags)
525{
526    BUG_ON(!work_pending(work));
527    atomic_long_set(&work->data, data | flags | work_static(work));
528}
529
530static void set_work_cwq(struct work_struct *work,
531             struct cpu_workqueue_struct *cwq,
532             unsigned long extra_flags)
533{
534    set_work_data(work, (unsigned long)cwq,
535              WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
536}
537
538static void set_work_cpu(struct work_struct *work, unsigned int cpu)
539{
540    set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
541}
542
543static void clear_work_data(struct work_struct *work)
544{
545    set_work_data(work, WORK_STRUCT_NO_CPU, 0);
546}
547
548static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
549{
550    unsigned long data = atomic_long_read(&work->data);
551
552    if (data & WORK_STRUCT_CWQ)
553        return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
554    else
555        return NULL;
556}
557
558static struct global_cwq *get_work_gcwq(struct work_struct *work)
559{
560    unsigned long data = atomic_long_read(&work->data);
561    unsigned int cpu;
562
563    if (data & WORK_STRUCT_CWQ)
564        return ((struct cpu_workqueue_struct *)
565            (data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
566
567    cpu = data >> WORK_STRUCT_FLAG_BITS;
568    if (cpu == WORK_CPU_NONE)
569        return NULL;
570
571    BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
572    return get_gcwq(cpu);
573}
574
575/*
576 * Policy functions. These define the policies on how the global
577 * worker pool is managed. Unless noted otherwise, these functions
578 * assume that they're being called with gcwq->lock held.
579 */
580
581static bool __need_more_worker(struct global_cwq *gcwq)
582{
583    return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
584        gcwq->flags & GCWQ_HIGHPRI_PENDING;
585}
586
587/*
588 * Need to wake up a worker? Called from anything but currently
589 * running workers.
590 */
591static bool need_more_worker(struct global_cwq *gcwq)
592{
593    return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
594}
595
596/* Can I start working? Called from busy but !running workers. */
597static bool may_start_working(struct global_cwq *gcwq)
598{
599    return gcwq->nr_idle;
600}
601
602/* Do I need to keep working? Called from currently running workers. */
603static bool keep_working(struct global_cwq *gcwq)
604{
605    atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
606
607    return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
608}
609
610/* Do we need a new worker? Called from manager. */
611static bool need_to_create_worker(struct global_cwq *gcwq)
612{
613    return need_more_worker(gcwq) && !may_start_working(gcwq);
614}
615
616/* Do I need to be the manager? */
617static bool need_to_manage_workers(struct global_cwq *gcwq)
618{
619    return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
620}
621
622/* Do we have too many workers and should some go away? */
623static bool too_many_workers(struct global_cwq *gcwq)
624{
625    bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
626    int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
627    int nr_busy = gcwq->nr_workers - nr_idle;
628
629    return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
630}
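/*
 * Worked example (illustrative): with MAX_IDLE_WORKERS_RATIO == 4 and
 * 16 busy workers, too_many_workers() stays false while up to five
 * workers are idle and turns true once a sixth goes idle, since
 * (6 - 2) * 4 >= 16.
 */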
631
632/*
633 * Wake up functions.
634 */
635
636/* Return the first worker. Safe with preemption disabled */
637static struct worker *first_worker(struct global_cwq *gcwq)
638{
639    if (unlikely(list_empty(&gcwq->idle_list)))
640        return NULL;
641
642    return list_first_entry(&gcwq->idle_list, struct worker, entry);
643}
644
645/**
646 * wake_up_worker - wake up an idle worker
647 * @gcwq: gcwq to wake worker for
648 *
649 * Wake up the first idle worker of @gcwq.
650 *
651 * CONTEXT:
652 * spin_lock_irq(gcwq->lock).
653 */
654static void wake_up_worker(struct global_cwq *gcwq)
655{
656    struct worker *worker = first_worker(gcwq);
657
658    if (likely(worker))
659        wake_up_process(worker->task);
660}
661
662/**
663 * wq_worker_waking_up - a worker is waking up
664 * @task: task waking up
665 * @cpu: CPU @task is waking up to
666 *
667 * This function is called during try_to_wake_up() when a worker is
668 * being awoken.
669 *
670 * CONTEXT:
671 * spin_lock_irq(rq->lock)
672 */
673void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
674{
675    struct worker *worker = kthread_data(task);
676
677    if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
678        atomic_inc(get_gcwq_nr_running(cpu));
679}
680
681/**
682 * wq_worker_sleeping - a worker is going to sleep
683 * @task: task going to sleep
684 * @cpu: CPU in question, must be the current CPU number
685 *
686 * This function is called during schedule() when a busy worker is
687 * going to sleep. A worker on the same cpu can be woken up by
688 * returning a pointer to its task.
689 *
690 * CONTEXT:
691 * spin_lock_irq(rq->lock)
692 *
693 * RETURNS:
694 * Worker task on @cpu to wake up, %NULL if none.
695 */
696struct task_struct *wq_worker_sleeping(struct task_struct *task,
697                       unsigned int cpu)
698{
699    struct worker *worker = kthread_data(task), *to_wakeup = NULL;
700    struct global_cwq *gcwq = get_gcwq(cpu);
701    atomic_t *nr_running = get_gcwq_nr_running(cpu);
702
703    if (unlikely(worker->flags & WORKER_NOT_RUNNING))
704        return NULL;
705
706    /* this can only happen on the local cpu */
707    BUG_ON(cpu != raw_smp_processor_id());
708
709    /*
710     * The counterpart of the following dec_and_test, implied mb,
711     * worklist not empty test sequence is in insert_work().
712     * Please read comment there.
713     *
714     * NOT_RUNNING is clear. This means that trustee is not in
715     * charge and we're running on the local cpu w/ rq lock held
716     * and preemption disabled, which in turn means that no one else
717     * could be manipulating idle_list, so dereferencing idle_list
718     * without gcwq lock is safe.
719     */
720    if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
721        to_wakeup = first_worker(gcwq);
722    return to_wakeup ? to_wakeup->task : NULL;
723}
724
725/**
726 * worker_set_flags - set worker flags and adjust nr_running accordingly
727 * @worker: self
728 * @flags: flags to set
729 * @wakeup: wakeup an idle worker if necessary
730 *
731 * Set @flags in @worker->flags and adjust nr_running accordingly. If
732 * nr_running becomes zero and @wakeup is %true, an idle worker is
733 * woken up.
734 *
735 * CONTEXT:
736 * spin_lock_irq(gcwq->lock)
737 */
738static inline void worker_set_flags(struct worker *worker, unsigned int flags,
739                    bool wakeup)
740{
741    struct global_cwq *gcwq = worker->gcwq;
742
743    WARN_ON_ONCE(worker->task != current);
744
745    /*
746     * If transitioning into NOT_RUNNING, adjust nr_running and
747     * wake up an idle worker as necessary if requested by
748     * @wakeup.
749     */
750    if ((flags & WORKER_NOT_RUNNING) &&
751        !(worker->flags & WORKER_NOT_RUNNING)) {
752        atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
753
754        if (wakeup) {
755            if (atomic_dec_and_test(nr_running) &&
756                !list_empty(&gcwq->worklist))
757                wake_up_worker(gcwq);
758        } else
759            atomic_dec(nr_running);
760    }
761
762    worker->flags |= flags;
763}
764
765/**
766 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
767 * @worker: self
768 * @flags: flags to clear
769 *
770 * Clear @flags in @worker->flags and adjust nr_running accordingly.
771 *
772 * CONTEXT:
773 * spin_lock_irq(gcwq->lock)
774 */
775static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
776{
777    struct global_cwq *gcwq = worker->gcwq;
778    unsigned int oflags = worker->flags;
779
780    WARN_ON_ONCE(worker->task != current);
781
782    worker->flags &= ~flags;
783
784    /* if transitioning out of NOT_RUNNING, increment nr_running */
785    if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
786        if (!(worker->flags & WORKER_NOT_RUNNING))
787            atomic_inc(get_gcwq_nr_running(gcwq->cpu));
788}
789
790/**
791 * busy_worker_head - return the busy hash head for a work
792 * @gcwq: gcwq of interest
793 * @work: work to be hashed
794 *
795 * Return hash head of @gcwq for @work.
796 *
797 * CONTEXT:
798 * spin_lock_irq(gcwq->lock).
799 *
800 * RETURNS:
801 * Pointer to the hash head.
802 */
803static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
804                       struct work_struct *work)
805{
806    const int base_shift = ilog2(sizeof(struct work_struct));
807    unsigned long v = (unsigned long)work;
808
809    /* simple shift and fold hash, do we need something better? */
810    v >>= base_shift;
811    v += v >> BUSY_WORKER_HASH_ORDER;
812    v &= BUSY_WORKER_HASH_MASK;
813
814    return &gcwq->busy_hash[v];
815}
816
817/**
818 * __find_worker_executing_work - find worker which is executing a work
819 * @gcwq: gcwq of interest
820 * @bwh: hash head as returned by busy_worker_head()
821 * @work: work to find worker for
822 *
823 * Find a worker which is executing @work on @gcwq. @bwh should be
824 * the hash head obtained by calling busy_worker_head() with the same
825 * work.
826 *
827 * CONTEXT:
828 * spin_lock_irq(gcwq->lock).
829 *
830 * RETURNS:
831 * Pointer to worker which is executing @work if found, NULL
832 * otherwise.
833 */
834static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
835                           struct hlist_head *bwh,
836                           struct work_struct *work)
837{
838    struct worker *worker;
839    struct hlist_node *tmp;
840
841    hlist_for_each_entry(worker, tmp, bwh, hentry)
842        if (worker->current_work == work)
843            return worker;
844    return NULL;
845}
846
847/**
848 * find_worker_executing_work - find worker which is executing a work
849 * @gcwq: gcwq of interest
850 * @work: work to find worker for
851 *
852 * Find a worker which is executing @work on @gcwq. This function is
853 * identical to __find_worker_executing_work() except that this
854 * function calculates @bwh itself.
855 *
856 * CONTEXT:
857 * spin_lock_irq(gcwq->lock).
858 *
859 * RETURNS:
860 * Pointer to worker which is executing @work if found, NULL
861 * otherwise.
862 */
863static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
864                         struct work_struct *work)
865{
866    return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
867                        work);
868}
869
870/**
871 * gcwq_determine_ins_pos - find insertion position
872 * @gcwq: gcwq of interest
873 * @cwq: cwq a work is being queued for
874 *
875 * A work for @cwq is about to be queued on @gcwq, determine insertion
876 * position for the work. If @cwq is for HIGHPRI wq, the work is
877 * queued at the head of the queue but in FIFO order with respect to
878 * other HIGHPRI works; otherwise, at the end of the queue. This
879 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
880 * there are HIGHPRI works pending.
881 *
882 * CONTEXT:
883 * spin_lock_irq(gcwq->lock).
884 *
885 * RETURNS:
886 * Pointer to insertion position.
887 */
888static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
889                           struct cpu_workqueue_struct *cwq)
890{
891    struct work_struct *twork;
892
893    if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
894        return &gcwq->worklist;
895
896    list_for_each_entry(twork, &gcwq->worklist, entry) {
897        struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
898
899        if (!(tcwq->wq->flags & WQ_HIGHPRI))
900            break;
901    }
902
903    gcwq->flags |= GCWQ_HIGHPRI_PENDING;
904    return &twork->entry;
905}
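/*
 * Example (illustrative): if the worklist currently holds
 * H1 -> H2 -> N1 -> N2 (H* from HIGHPRI wqs, N* normal) and another
 * HIGHPRI work H3 is queued, the loop above stops at N1 and returns
 * &N1->entry; insert_work()'s list_add_tail() then places H3 right in
 * front of N1, giving H1 -> H2 -> H3 -> N1 -> N2.
 */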
906
907/**
908 * insert_work - insert a work into gcwq
909 * @cwq: cwq @work belongs to
910 * @work: work to insert
911 * @head: insertion point
912 * @extra_flags: extra WORK_STRUCT_* flags to set
913 *
914 * Insert @work which belongs to @cwq into @gcwq after @head.
915 * @extra_flags is or'd to work_struct flags.
916 *
917 * CONTEXT:
918 * spin_lock_irq(gcwq->lock).
919 */
920static void insert_work(struct cpu_workqueue_struct *cwq,
921            struct work_struct *work, struct list_head *head,
922            unsigned int extra_flags)
923{
924    struct global_cwq *gcwq = cwq->gcwq;
925
926    /* we own @work, set data and link */
927    set_work_cwq(work, cwq, extra_flags);
928
929    /*
930     * Ensure that we get the right work->data if we see the
931     * result of list_add() below, see try_to_grab_pending().
932     */
933    smp_wmb();
934
935    list_add_tail(&work->entry, head);
936
937    /*
938     * Ensure either wq_worker_sleeping() sees the above
939     * list_add_tail() or we see zero nr_running to avoid workers
940     * lying around lazily while there are works to be processed.
941     */
942    smp_mb();
943
944    if (__need_more_worker(gcwq))
945        wake_up_worker(gcwq);
946}
947
948static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
949             struct work_struct *work)
950{
951    struct global_cwq *gcwq;
952    struct cpu_workqueue_struct *cwq;
953    struct list_head *worklist;
954    unsigned int work_flags;
955    unsigned long flags;
956
957    debug_work_activate(work);
958
959    if (WARN_ON_ONCE(wq->flags & WQ_DYING))
960        return;
961
962    /* determine gcwq to use */
963    if (!(wq->flags & WQ_UNBOUND)) {
964        struct global_cwq *last_gcwq;
965
966        if (unlikely(cpu == WORK_CPU_UNBOUND))
967            cpu = raw_smp_processor_id();
968
969        /*
970         * It's multi cpu. If @wq is non-reentrant and @work
971         * was previously on a different cpu, it might still
972         * be running there, in which case the work needs to
973         * be queued on that cpu to guarantee non-reentrance.
974         */
975        gcwq = get_gcwq(cpu);
976        if (wq->flags & WQ_NON_REENTRANT &&
977            (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
978            struct worker *worker;
979
980            spin_lock_irqsave(&last_gcwq->lock, flags);
981
982            worker = find_worker_executing_work(last_gcwq, work);
983
984            if (worker && worker->current_cwq->wq == wq)
985                gcwq = last_gcwq;
986            else {
987                /* meh... not running there, queue here */
988                spin_unlock_irqrestore(&last_gcwq->lock, flags);
989                spin_lock_irqsave(&gcwq->lock, flags);
990            }
991        } else
992            spin_lock_irqsave(&gcwq->lock, flags);
993    } else {
994        gcwq = get_gcwq(WORK_CPU_UNBOUND);
995        spin_lock_irqsave(&gcwq->lock, flags);
996    }
997
998    /* gcwq determined, get cwq and queue */
999    cwq = get_cwq(gcwq->cpu, wq);
1000
1001    BUG_ON(!list_empty(&work->entry));
1002
1003    cwq->nr_in_flight[cwq->work_color]++;
1004    work_flags = work_color_to_flags(cwq->work_color);
1005
1006    if (likely(cwq->nr_active < cwq->max_active)) {
1007        cwq->nr_active++;
1008        worklist = gcwq_determine_ins_pos(gcwq, cwq);
1009    } else {
1010        work_flags |= WORK_STRUCT_DELAYED;
1011        worklist = &cwq->delayed_works;
1012    }
1013
1014    insert_work(cwq, work, worklist, work_flags);
1015
1016    spin_unlock_irqrestore(&gcwq->lock, flags);
1017}
1018
1019/**
1020 * queue_work - queue work on a workqueue
1021 * @wq: workqueue to use
1022 * @work: work to queue
1023 *
1024 * Returns 0 if @work was already on a queue, non-zero otherwise.
1025 *
1026 * We queue the work to the CPU on which it was submitted, but if the CPU dies
1027 * it can be processed by another CPU.
1028 */
1029int queue_work(struct workqueue_struct *wq, struct work_struct *work)
1030{
1031    int ret;
1032
1033    ret = queue_work_on(get_cpu(), wq, work);
1034    put_cpu();
1035
1036    return ret;
1037}
1038EXPORT_SYMBOL_GPL(queue_work);
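/*
 * Example usage (illustrative, names are made up):
 *
 *    static void my_work_fn(struct work_struct *work)
 *    {
 *        pr_info("running in process context\n");
 *    }
 *    static DECLARE_WORK(my_work, my_work_fn);
 *
 *    if (!queue_work(system_wq, &my_work))
 *        pr_debug("my_work was already pending\n");
 */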
1039
1040/**
1041 * queue_work_on - queue work on specific cpu
1042 * @cpu: CPU number to execute work on
1043 * @wq: workqueue to use
1044 * @work: work to queue
1045 *
1046 * Returns 0 if @work was already on a queue, non-zero otherwise.
1047 *
1048 * We queue the work to a specific CPU, the caller must ensure it
1049 * can't go away.
1050 */
1051int
1052queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
1053{
1054    int ret = 0;
1055
1056    if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1057        __queue_work(cpu, wq, work);
1058        ret = 1;
1059    }
1060    return ret;
1061}
1062EXPORT_SYMBOL_GPL(queue_work_on);
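/*
 * Example (illustrative): kicking a per-cpu work item on every online
 * cpu, assuming a DEFINE_PER_CPU(struct work_struct, my_cpu_work)
 * initialized elsewhere.  get_online_cpus() keeps the cpus from going
 * away while they are being targeted.
 *
 *    unsigned int cpu;
 *
 *    get_online_cpus();
 *    for_each_online_cpu(cpu)
 *        queue_work_on(cpu, system_wq, &per_cpu(my_cpu_work, cpu));
 *    put_online_cpus();
 */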
1063
1064static void delayed_work_timer_fn(unsigned long __data)
1065{
1066    struct delayed_work *dwork = (struct delayed_work *)__data;
1067    struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1068
1069    __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
1070}
1071
1072/**
1073 * queue_delayed_work - queue work on a workqueue after delay
1074 * @wq: workqueue to use
1075 * @dwork: delayable work to queue
1076 * @delay: number of jiffies to wait before queueing
1077 *
1078 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
1079 */
1080int queue_delayed_work(struct workqueue_struct *wq,
1081            struct delayed_work *dwork, unsigned long delay)
1082{
1083    if (delay == 0)
1084        return queue_work(wq, &dwork->work);
1085
1086    return queue_delayed_work_on(-1, wq, dwork, delay);
1087}
1088EXPORT_SYMBOL_GPL(queue_delayed_work);
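/*
 * Example (illustrative): arming a poll roughly 500ms out and letting
 * the workqueue pick the cpu; my_poll_fn is a made-up handler.
 *
 *    static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);
 *
 *    queue_delayed_work(system_wq, &my_poll, msecs_to_jiffies(500));
 *
 * Re-arming from my_poll_fn() itself gives a simple periodic poll;
 * cancel_delayed_work_sync(&my_poll) stops it.
 */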
1089
1090/**
1091 * queue_delayed_work_on - queue work on specific CPU after delay
1092 * @cpu: CPU number to execute work on
1093 * @wq: workqueue to use
1094 * @dwork: work to queue
1095 * @delay: number of jiffies to wait before queueing
1096 *
1097 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
1098 */
1099int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1100            struct delayed_work *dwork, unsigned long delay)
1101{
1102    int ret = 0;
1103    struct timer_list *timer = &dwork->timer;
1104    struct work_struct *work = &dwork->work;
1105
1106    if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1107        unsigned int lcpu;
1108
1109        BUG_ON(timer_pending(timer));
1110        BUG_ON(!list_empty(&work->entry));
1111
1112        timer_stats_timer_set_start_info(&dwork->timer);
1113
1114        /*
1115         * This stores the cwq for delayed_work_timer_fn() to pick up.
1116         * Note that the work's gcwq is preserved to allow
1117         * reentrance detection for delayed works.
1118         */
1119        if (!(wq->flags & WQ_UNBOUND)) {
1120            struct global_cwq *gcwq = get_work_gcwq(work);
1121
1122            if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1123                lcpu = gcwq->cpu;
1124            else
1125                lcpu = raw_smp_processor_id();
1126        } else
1127            lcpu = WORK_CPU_UNBOUND;
1128
1129        set_work_cwq(work, get_cwq(lcpu, wq), 0);
1130
1131        timer->expires = jiffies + delay;
1132        timer->data = (unsigned long)dwork;
1133        timer->function = delayed_work_timer_fn;
1134
1135        if (unlikely(cpu >= 0))
1136            add_timer_on(timer, cpu);
1137        else
1138            add_timer(timer);
1139        ret = 1;
1140    }
1141    return ret;
1142}
1143EXPORT_SYMBOL_GPL(queue_delayed_work_on);
1144
1145/**
1146 * worker_enter_idle - enter idle state
1147 * @worker: worker which is entering idle state
1148 *
1149 * @worker is entering idle state. Update stats and idle timer if
1150 * necessary.
1151 *
1152 * LOCKING:
1153 * spin_lock_irq(gcwq->lock).
1154 */
1155static void worker_enter_idle(struct worker *worker)
1156{
1157    struct global_cwq *gcwq = worker->gcwq;
1158
1159    BUG_ON(worker->flags & WORKER_IDLE);
1160    BUG_ON(!list_empty(&worker->entry) &&
1161           (worker->hentry.next || worker->hentry.pprev));
1162
1163    /* can't use worker_set_flags(), also called from start_worker() */
1164    worker->flags |= WORKER_IDLE;
1165    gcwq->nr_idle++;
1166    worker->last_active = jiffies;
1167
1168    /* idle_list is LIFO */
1169    list_add(&worker->entry, &gcwq->idle_list);
1170
1171    if (likely(!(worker->flags & WORKER_ROGUE))) {
1172        if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1173            mod_timer(&gcwq->idle_timer,
1174                  jiffies + IDLE_WORKER_TIMEOUT);
1175    } else
1176        wake_up_all(&gcwq->trustee_wait);
1177
1178    /* sanity check nr_running */
1179    WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
1180             atomic_read(get_gcwq_nr_running(gcwq->cpu)));
1181}
1182
1183/**
1184 * worker_leave_idle - leave idle state
1185 * @worker: worker which is leaving idle state
1186 *
1187 * @worker is leaving idle state. Update stats.
1188 *
1189 * LOCKING:
1190 * spin_lock_irq(gcwq->lock).
1191 */
1192static void worker_leave_idle(struct worker *worker)
1193{
1194    struct global_cwq *gcwq = worker->gcwq;
1195
1196    BUG_ON(!(worker->flags & WORKER_IDLE));
1197    worker_clr_flags(worker, WORKER_IDLE);
1198    gcwq->nr_idle--;
1199    list_del_init(&worker->entry);
1200}
1201
1202/**
1203 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1204 * @worker: self
1205 *
1206 * Works which are scheduled while the cpu is online must at least be
1207 * scheduled to a worker which is bound to the cpu so that if they are
1208 * flushed from cpu callbacks while cpu is going down, they are
1209 * guaranteed to execute on the cpu.
1210 *
1211 * This function is to be used by rogue workers and rescuers to bind
1212 * themselves to the target cpu and may race with cpu going down or
1213 * coming online. kthread_bind() can't be used because it may put the
1214 * worker to an already dead cpu and set_cpus_allowed_ptr() can't be used
1215 * verbatim as it's best effort and blocking and gcwq may be
1216 * [dis]associated in the meantime.
1217 *
1218 * This function tries set_cpus_allowed() and locks gcwq and verifies
1219 * the binding against GCWQ_DISASSOCIATED which is set during
1220 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1221 * idle state or fetches works without dropping lock, it can guarantee
1222 * the scheduling requirement described in the first paragraph.
1223 *
1224 * CONTEXT:
1225 * Might sleep. Called without any lock but returns with gcwq->lock
1226 * held.
1227 *
1228 * RETURNS:
1229 * %true if the associated gcwq is online (@worker is successfully
1230 * bound), %false if offline.
1231 */
1232static bool worker_maybe_bind_and_lock(struct worker *worker)
1233__acquires(&gcwq->lock)
1234{
1235    struct global_cwq *gcwq = worker->gcwq;
1236    struct task_struct *task = worker->task;
1237
1238    while (true) {
1239        /*
1240         * The following call may fail, succeed or succeed
1241         * without actually migrating the task to the cpu if
1242         * it races with cpu hotunplug operation. Verify
1243         * against GCWQ_DISASSOCIATED.
1244         */
1245        if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1246            set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1247
1248        spin_lock_irq(&gcwq->lock);
1249        if (gcwq->flags & GCWQ_DISASSOCIATED)
1250            return false;
1251        if (task_cpu(task) == gcwq->cpu &&
1252            cpumask_equal(&current->cpus_allowed,
1253                  get_cpu_mask(gcwq->cpu)))
1254            return true;
1255        spin_unlock_irq(&gcwq->lock);
1256
1257        /* CPU has come up in between, retry migration */
1258        cpu_relax();
1259    }
1260}
1261
1262/*
1263 * Function for worker->rebind_work used to rebind rogue busy workers
1264 * to the associated cpu which is coming back online. This is
1265 * scheduled by cpu up but can race with other cpu hotplug operations
1266 * and may be executed twice without intervening cpu down.
1267 */
1268static void worker_rebind_fn(struct work_struct *work)
1269{
1270    struct worker *worker = container_of(work, struct worker, rebind_work);
1271    struct global_cwq *gcwq = worker->gcwq;
1272
1273    if (worker_maybe_bind_and_lock(worker))
1274        worker_clr_flags(worker, WORKER_REBIND);
1275
1276    spin_unlock_irq(&gcwq->lock);
1277}
1278
1279static struct worker *alloc_worker(void)
1280{
1281    struct worker *worker;
1282
1283    worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1284    if (worker) {
1285        INIT_LIST_HEAD(&worker->entry);
1286        INIT_LIST_HEAD(&worker->scheduled);
1287        INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1288        /* on creation a worker is in !idle && prep state */
1289        worker->flags = WORKER_PREP;
1290    }
1291    return worker;
1292}
1293
1294/**
1295 * create_worker - create a new workqueue worker
1296 * @gcwq: gcwq the new worker will belong to
1297 * @bind: whether to bind the new worker to @gcwq's cpu or not
1298 *
1299 * Create a new worker which is bound to @gcwq. The returned worker
1300 * can be started by calling start_worker() or destroyed using
1301 * destroy_worker().
1302 *
1303 * CONTEXT:
1304 * Might sleep. Does GFP_KERNEL allocations.
1305 *
1306 * RETURNS:
1307 * Pointer to the newly created worker.
1308 */
1309static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1310{
1311    bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
1312    struct worker *worker = NULL;
1313    int id = -1;
1314
1315    spin_lock_irq(&gcwq->lock);
1316    while (ida_get_new(&gcwq->worker_ida, &id)) {
1317        spin_unlock_irq(&gcwq->lock);
1318        if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1319            goto fail;
1320        spin_lock_irq(&gcwq->lock);
1321    }
1322    spin_unlock_irq(&gcwq->lock);
1323
1324    worker = alloc_worker();
1325    if (!worker)
1326        goto fail;
1327
1328    worker->gcwq = gcwq;
1329    worker->id = id;
1330
1331    if (!on_unbound_cpu)
1332        worker->task = kthread_create(worker_thread, worker,
1333                          "kworker/%u:%d", gcwq->cpu, id);
1334    else
1335        worker->task = kthread_create(worker_thread, worker,
1336                          "kworker/u:%d", id);
1337    if (IS_ERR(worker->task))
1338        goto fail;
1339
1340    /*
1341     * A rogue worker will become a regular one if CPU comes
1342     * online later on. Make sure every worker has
1343     * PF_THREAD_BOUND set.
1344     */
1345    if (bind && !on_unbound_cpu)
1346        kthread_bind(worker->task, gcwq->cpu);
1347    else {
1348        worker->task->flags |= PF_THREAD_BOUND;
1349        if (on_unbound_cpu)
1350            worker->flags |= WORKER_UNBOUND;
1351    }
1352
1353    return worker;
1354fail:
1355    if (id >= 0) {
1356        spin_lock_irq(&gcwq->lock);
1357        ida_remove(&gcwq->worker_ida, id);
1358        spin_unlock_irq(&gcwq->lock);
1359    }
1360    kfree(worker);
1361    return NULL;
1362}
1363
1364/**
1365 * start_worker - start a newly created worker
1366 * @worker: worker to start
1367 *
1368 * Make the gcwq aware of @worker and start it.
1369 *
1370 * CONTEXT:
1371 * spin_lock_irq(gcwq->lock).
1372 */
1373static void start_worker(struct worker *worker)
1374{
1375    worker->flags |= WORKER_STARTED;
1376    worker->gcwq->nr_workers++;
1377    worker_enter_idle(worker);
1378    wake_up_process(worker->task);
1379}
1380
1381/**
1382 * destroy_worker - destroy a workqueue worker
1383 * @worker: worker to be destroyed
1384 *
1385 * Destroy @worker and adjust @gcwq stats accordingly.
1386 *
1387 * CONTEXT:
1388 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1389 */
1390static void destroy_worker(struct worker *worker)
1391{
1392    struct global_cwq *gcwq = worker->gcwq;
1393    int id = worker->id;
1394
1395    /* sanity check frenzy */
1396    BUG_ON(worker->current_work);
1397    BUG_ON(!list_empty(&worker->scheduled));
1398
1399    if (worker->flags & WORKER_STARTED)
1400        gcwq->nr_workers--;
1401    if (worker->flags & WORKER_IDLE)
1402        gcwq->nr_idle--;
1403
1404    list_del_init(&worker->entry);
1405    worker->flags |= WORKER_DIE;
1406
1407    spin_unlock_irq(&gcwq->lock);
1408
1409    kthread_stop(worker->task);
1410    kfree(worker);
1411
1412    spin_lock_irq(&gcwq->lock);
1413    ida_remove(&gcwq->worker_ida, id);
1414}
1415
1416static void idle_worker_timeout(unsigned long __gcwq)
1417{
1418    struct global_cwq *gcwq = (void *)__gcwq;
1419
1420    spin_lock_irq(&gcwq->lock);
1421
1422    if (too_many_workers(gcwq)) {
1423        struct worker *worker;
1424        unsigned long expires;
1425
1426        /* idle_list is kept in LIFO order, check the last one */
1427        worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1428        expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1429
1430        if (time_before(jiffies, expires))
1431            mod_timer(&gcwq->idle_timer, expires);
1432        else {
1433            /* it's been idle for too long, wake up manager */
1434            gcwq->flags |= GCWQ_MANAGE_WORKERS;
1435            wake_up_worker(gcwq);
1436        }
1437    }
1438
1439    spin_unlock_irq(&gcwq->lock);
1440}
1441
1442static bool send_mayday(struct work_struct *work)
1443{
1444    struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1445    struct workqueue_struct *wq = cwq->wq;
1446    unsigned int cpu;
1447
1448    if (!(wq->flags & WQ_RESCUER))
1449        return false;
1450
1451    /* mayday mayday mayday */
1452    cpu = cwq->gcwq->cpu;
1453    /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1454    if (cpu == WORK_CPU_UNBOUND)
1455        cpu = 0;
1456    if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1457        wake_up_process(wq->rescuer->task);
1458    return true;
1459}
1460
1461static void gcwq_mayday_timeout(unsigned long __gcwq)
1462{
1463    struct global_cwq *gcwq = (void *)__gcwq;
1464    struct work_struct *work;
1465
1466    spin_lock_irq(&gcwq->lock);
1467
1468    if (need_to_create_worker(gcwq)) {
1469        /*
1470         * We've been trying to create a new worker but
1471         * haven't been successful. We might be hitting an
1472         * allocation deadlock. Send distress signals to
1473         * rescuers.
1474         */
1475        list_for_each_entry(work, &gcwq->worklist, entry)
1476            send_mayday(work);
1477    }
1478
1479    spin_unlock_irq(&gcwq->lock);
1480
1481    mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1482}
1483
1484/**
1485 * maybe_create_worker - create a new worker if necessary
1486 * @gcwq: gcwq to create a new worker for
1487 *
1488 * Create a new worker for @gcwq if necessary. @gcwq is guaranteed to
1489 * have at least one idle worker on return from this function. If
1490 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1491 * sent to all rescuers with works scheduled on @gcwq to resolve
1492 * possible allocation deadlock.
1493 *
1494 * On return, need_to_create_worker() is guaranteed to be false and
1495 * may_start_working() true.
1496 *
1497 * LOCKING:
1498 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1499 * multiple times. Does GFP_KERNEL allocations. Called only from
1500 * manager.
1501 *
1502 * RETURNS:
1503 * false if no action was taken and gcwq->lock stayed locked, true
1504 * otherwise.
1505 */
1506static bool maybe_create_worker(struct global_cwq *gcwq)
1507__releases(&gcwq->lock)
1508__acquires(&gcwq->lock)
1509{
1510    if (!need_to_create_worker(gcwq))
1511        return false;
1512restart:
1513    spin_unlock_irq(&gcwq->lock);
1514
1515    /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1516    mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1517
1518    while (true) {
1519        struct worker *worker;
1520
1521        worker = create_worker(gcwq, true);
1522        if (worker) {
1523            del_timer_sync(&gcwq->mayday_timer);
1524            spin_lock_irq(&gcwq->lock);
1525            start_worker(worker);
1526            BUG_ON(need_to_create_worker(gcwq));
1527            return true;
1528        }
1529
1530        if (!need_to_create_worker(gcwq))
1531            break;
1532
1533        __set_current_state(TASK_INTERRUPTIBLE);
1534        schedule_timeout(CREATE_COOLDOWN);
1535
1536        if (!need_to_create_worker(gcwq))
1537            break;
1538    }
1539
1540    del_timer_sync(&gcwq->mayday_timer);
1541    spin_lock_irq(&gcwq->lock);
1542    if (need_to_create_worker(gcwq))
1543        goto restart;
1544    return true;
1545}
1546
1547/**
1548 * maybe_destroy_workers - destroy workers which have been idle for a while
1549 * @gcwq: gcwq to destroy workers for
1550 *
1551 * Destroy @gcwq workers which have been idle for longer than
1552 * IDLE_WORKER_TIMEOUT.
1553 *
1554 * LOCKING:
1555 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1556 * multiple times. Called only from manager.
1557 *
1558 * RETURNS:
1559 * false if no action was taken and gcwq->lock stayed locked, true
1560 * otherwise.
1561 */
1562static bool maybe_destroy_workers(struct global_cwq *gcwq)
1563{
1564    bool ret = false;
1565
1566    while (too_many_workers(gcwq)) {
1567        struct worker *worker;
1568        unsigned long expires;
1569
1570        worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1571        expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1572
1573        if (time_before(jiffies, expires)) {
1574            mod_timer(&gcwq->idle_timer, expires);
1575            break;
1576        }
1577
1578        destroy_worker(worker);
1579        ret = true;
1580    }
1581
1582    return ret;
1583}
1584
1585/**
1586 * manage_workers - manage worker pool
1587 * @worker: self
1588 *
1589 * Assume the manager role and manage gcwq worker pool @worker belongs
1590 * to. At any given time, there can be only zero or one manager per
1591 * gcwq. The exclusion is handled automatically by this function.
1592 *
1593 * The caller can safely start processing works on false return. On
1594 * true return, it's guaranteed that need_to_create_worker() is false
1595 * and may_start_working() is true.
1596 *
1597 * CONTEXT:
1598 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1599 * multiple times. Does GFP_KERNEL allocations.
1600 *
1601 * RETURNS:
1602 * false if no action was taken and gcwq->lock stayed locked, true if
1603 * some action was taken.
1604 */
1605static bool manage_workers(struct worker *worker)
1606{
1607    struct global_cwq *gcwq = worker->gcwq;
1608    bool ret = false;
1609
1610    if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1611        return ret;
1612
1613    gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1614    gcwq->flags |= GCWQ_MANAGING_WORKERS;
1615
1616    /*
1617     * Destroy and then create so that may_start_working() is true
1618     * on return.
1619     */
1620    ret |= maybe_destroy_workers(gcwq);
1621    ret |= maybe_create_worker(gcwq);
1622
1623    gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1624
1625    /*
1626     * The trustee might be waiting to take over the manager
1627     * position, tell it we're done.
1628     */
1629    if (unlikely(gcwq->trustee))
1630        wake_up_all(&gcwq->trustee_wait);
1631
1632    return ret;
1633}
1634
1635/**
1636 * move_linked_works - move linked works to a list
1637 * @work: start of series of works to be scheduled
1638 * @head: target list to append @work to
1639 * @nextp: out parameter for nested worklist walking
1640 *
1641 * Schedule linked works starting from @work to @head. Work series to
1642 * be scheduled starts at @work and includes any consecutive work with
1643 * WORK_STRUCT_LINKED set in its predecessor.
1644 *
1645 * If @nextp is not NULL, it's updated to point to the next work of
1646 * the last scheduled work. This allows move_linked_works() to be
1647 * nested inside outer list_for_each_entry_safe().
1648 *
1649 * CONTEXT:
1650 * spin_lock_irq(gcwq->lock).
1651 */
1652static void move_linked_works(struct work_struct *work, struct list_head *head,
1653                  struct work_struct **nextp)
1654{
1655    struct work_struct *n;
1656
1657    /*
1658     * Linked worklist will always end before the end of the list,
1659     * use NULL for list head.
1660     */
1661    list_for_each_entry_safe_from(work, n, NULL, entry) {
1662        list_move_tail(&work->entry, head);
1663        if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1664            break;
1665    }
1666
1667    /*
1668     * If we're already inside safe list traversal and have moved
1669     * multiple works to the scheduled queue, the next position
1670     * needs to be updated.
1671     */
1672    if (nextp)
1673        *nextp = n;
1674}
1675
1676static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1677{
1678    struct work_struct *work = list_first_entry(&cwq->delayed_works,
1679                            struct work_struct, entry);
1680    struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1681
1682    move_linked_works(work, pos, NULL);
1683    __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1684    cwq->nr_active++;
1685}
1686
1687/**
1688 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1689 * @cwq: cwq of interest
1690 * @color: color of work which left the queue
1691 * @delayed: for a delayed work
1692 *
1693 * A work either has completed or is removed from pending queue,
1694 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1695 *
1696 * CONTEXT:
1697 * spin_lock_irq(gcwq->lock).
1698 */
1699static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1700                 bool delayed)
1701{
1702    /* ignore uncolored works */
1703    if (color == WORK_NO_COLOR)
1704        return;
1705
1706    cwq->nr_in_flight[color]--;
1707
1708    if (!delayed) {
1709        cwq->nr_active--;
1710        if (!list_empty(&cwq->delayed_works)) {
1711            /* one down, submit a delayed one */
1712            if (cwq->nr_active < cwq->max_active)
1713                cwq_activate_first_delayed(cwq);
1714        }
1715    }
1716
1717    /* is flush in progress and are we at the flushing tip? */
1718    if (likely(cwq->flush_color != color))
1719        return;
1720
1721    /* are there still in-flight works? */
1722    if (cwq->nr_in_flight[color])
1723        return;
1724
1725    /* this cwq is done, clear flush_color */
1726    cwq->flush_color = -1;
1727
1728    /*
1729     * If this was the last cwq, wake up the first flusher. It
1730     * will handle the rest.
1731     */
1732    if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1733        complete(&cwq->wq->first_flusher->done);
1734}
1735
1736/**
1737 * process_one_work - process single work
1738 * @worker: self
1739 * @work: work to process
1740 *
1741 * Process @work. This function contains all the logic necessary to
1742 * process a single work, including synchronization against and
1743 * interaction with other workers on the same cpu, queueing and
1744 * flushing. As long as the context requirement is met, any worker can
1745 * call this function to process a work.
1746 *
1747 * CONTEXT:
1748 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1749 */
1750static void process_one_work(struct worker *worker, struct work_struct *work)
1751__releases(&gcwq->lock)
1752__acquires(&gcwq->lock)
1753{
1754    struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1755    struct global_cwq *gcwq = cwq->gcwq;
1756    struct hlist_head *bwh = busy_worker_head(gcwq, work);
1757    bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
1758    work_func_t f = work->func;
1759    int work_color;
1760    struct worker *collision;
1761#ifdef CONFIG_LOCKDEP
1762    /*
1763     * It is permissible to free the struct work_struct from
1764     * inside the function that is called from it, this we need to
1765     * take into account for lockdep too. To avoid bogus "held
1766     * lock freed" warnings as well as problems when looking into
1767     * work->lockdep_map, make a copy and use that here.
1768     */
1769    struct lockdep_map lockdep_map = work->lockdep_map;
1770#endif
1771    /*
1772     * A single work shouldn't be executed concurrently by
1773     * multiple workers on a single cpu. Check whether anyone is
1774     * already processing the work. If so, defer the work to the
1775     * currently executing one.
1776     */
1777    collision = __find_worker_executing_work(gcwq, bwh, work);
1778    if (unlikely(collision)) {
1779        move_linked_works(work, &collision->scheduled, NULL);
1780        return;
1781    }
1782
1783    /* claim and process */
1784    debug_work_deactivate(work);
1785    hlist_add_head(&worker->hentry, bwh);
1786    worker->current_work = work;
1787    worker->current_cwq = cwq;
1788    work_color = get_work_color(work);
1789
1790    /* record the current cpu number in the work data and dequeue */
1791    set_work_cpu(work, gcwq->cpu);
1792    list_del_init(&work->entry);
1793
1794    /*
1795     * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1796     * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1797     */
1798    if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1799        struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1800                        struct work_struct, entry);
1801
1802        if (!list_empty(&gcwq->worklist) &&
1803            get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1804            wake_up_worker(gcwq);
1805        else
1806            gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1807    }
1808
1809    /*
1810     * CPU intensive works don't participate in concurrency
1811     * management. They're the scheduler's responsibility.
1812     */
1813    if (unlikely(cpu_intensive))
1814        worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1815
1816    spin_unlock_irq(&gcwq->lock);
1817
1818    work_clear_pending(work);
1819    lock_map_acquire(&cwq->wq->lockdep_map);
1820    lock_map_acquire(&lockdep_map);
1821    trace_workqueue_execute_start(work);
1822    f(work);
1823    /*
1824     * While we must be careful to not use "work" after this, the trace
1825     * point will only record its address.
1826     */
1827    trace_workqueue_execute_end(work);
1828    lock_map_release(&lockdep_map);
1829    lock_map_release(&cwq->wq->lockdep_map);
1830
1831    if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1832        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1833               "%s/0x%08x/%d\n",
1834               current->comm, preempt_count(), task_pid_nr(current));
1835        printk(KERN_ERR " last function: ");
1836        print_symbol("%s\n", (unsigned long)f);
1837        debug_show_held_locks(current);
1838        dump_stack();
1839    }
1840
1841    spin_lock_irq(&gcwq->lock);
1842
1843    /* clear cpu intensive status */
1844    if (unlikely(cpu_intensive))
1845        worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1846
1847    /* we're done with it, release */
1848    hlist_del_init(&worker->hentry);
1849    worker->current_work = NULL;
1850    worker->current_cwq = NULL;
1851    cwq_dec_nr_in_flight(cwq, work_color, false);
1852}
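
/*
 * The cpu_intensive handling above is driven purely by the workqueue's
 * WQ_CPU_INTENSIVE flag. A minimal sketch of how a user would opt out of
 * concurrency management for long-running works (the "crypto" name and
 * long_running_work are only illustrative):
 *
 *	struct workqueue_struct *crypto_wq;
 *
 *	crypto_wq = alloc_workqueue("crypto", WQ_CPU_INTENSIVE, 0);
 *	if (!crypto_wq)
 *		return -ENOMEM;
 *	queue_work(crypto_wq, &long_running_work);
 */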
1853
1854/**
1855 * process_scheduled_works - process scheduled works
1856 * @worker: self
1857 *
1858 * Process all scheduled works. Please note that the scheduled list
1859 * may change while processing a work, so this function repeatedly
1860 * fetches a work from the top and executes it.
1861 *
1862 * CONTEXT:
1863 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1864 * multiple times.
1865 */
1866static void process_scheduled_works(struct worker *worker)
1867{
1868    while (!list_empty(&worker->scheduled)) {
1869        struct work_struct *work = list_first_entry(&worker->scheduled,
1870                        struct work_struct, entry);
1871        process_one_work(worker, work);
1872    }
1873}
1874
1875/**
1876 * worker_thread - the worker thread function
1877 * @__worker: self
1878 *
1879 * The gcwq worker thread function. There's a single dynamic pool of
1880 * these per each cpu. These workers process all works regardless of
1881 * their specific target workqueue. The only exception is works which
1882 * belong to workqueues with a rescuer, which will be explained in
1883 * rescuer_thread().
1884 */
1885static int worker_thread(void *__worker)
1886{
1887    struct worker *worker = __worker;
1888    struct global_cwq *gcwq = worker->gcwq;
1889
1890    /* tell the scheduler that this is a workqueue worker */
1891    worker->task->flags |= PF_WQ_WORKER;
1892woke_up:
1893    spin_lock_irq(&gcwq->lock);
1894
1895    /* DIE can be set only while we're idle, checking here is enough */
1896    if (worker->flags & WORKER_DIE) {
1897        spin_unlock_irq(&gcwq->lock);
1898        worker->task->flags &= ~PF_WQ_WORKER;
1899        return 0;
1900    }
1901
1902    worker_leave_idle(worker);
1903recheck:
1904    /* no more worker necessary? */
1905    if (!need_more_worker(gcwq))
1906        goto sleep;
1907
1908    /* do we need to manage? */
1909    if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1910        goto recheck;
1911
1912    /*
1913     * ->scheduled list can only be filled while a worker is
1914     * preparing to process a work or actually processing it.
1915     * Make sure nobody diddled with it while I was sleeping.
1916     */
1917    BUG_ON(!list_empty(&worker->scheduled));
1918
1919    /*
1920     * When control reaches this point, we're guaranteed to have
1921     * at least one idle worker or that someone else has already
1922     * assumed the manager role.
1923     */
1924    worker_clr_flags(worker, WORKER_PREP);
1925
1926    do {
1927        struct work_struct *work =
1928            list_first_entry(&gcwq->worklist,
1929                     struct work_struct, entry);
1930
1931        if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1932            /* optimization path, not strictly necessary */
1933            process_one_work(worker, work);
1934            if (unlikely(!list_empty(&worker->scheduled)))
1935                process_scheduled_works(worker);
1936        } else {
1937            move_linked_works(work, &worker->scheduled, NULL);
1938            process_scheduled_works(worker);
1939        }
1940    } while (keep_working(gcwq));
1941
1942    worker_set_flags(worker, WORKER_PREP, false);
1943sleep:
1944    if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1945        goto recheck;
1946
1947    /*
1948     * gcwq->lock is held and there's no work to process and no
1949     * need to manage, sleep. Workers are woken up only while
1950     * holding gcwq->lock or from local cpu, so setting the
1951     * current state before releasing gcwq->lock is enough to
1952     * prevent losing any event.
1953     */
1954    worker_enter_idle(worker);
1955    __set_current_state(TASK_INTERRUPTIBLE);
1956    spin_unlock_irq(&gcwq->lock);
1957    schedule();
1958    goto woke_up;
1959}
1960
1961/**
1962 * rescuer_thread - the rescuer thread function
1963 * @__wq: the associated workqueue
1964 *
1965 * Workqueue rescuer thread function. There's one rescuer for each
1966 * workqueue which has WQ_RESCUER set.
1967 *
1968 * Regular work processing on a gcwq may block trying to create a new
1969 * worker, which uses a GFP_KERNEL allocation and thus has a slight
1970 * chance of developing into a deadlock if some works currently on the
1971 * same queue need to be processed to satisfy that allocation. This is
1972 * the problem the rescuer solves.
1973 *
1974 * When such a condition is possible, the gcwq summons the rescuers of
1975 * all workqueues which have works queued on the gcwq and lets them
1976 * process those works so that forward progress can be guaranteed.
1977 *
1978 * This should happen rarely.
1979 */
1980static int rescuer_thread(void *__wq)
1981{
1982    struct workqueue_struct *wq = __wq;
1983    struct worker *rescuer = wq->rescuer;
1984    struct list_head *scheduled = &rescuer->scheduled;
1985    bool is_unbound = wq->flags & WQ_UNBOUND;
1986    unsigned int cpu;
1987
1988    set_user_nice(current, RESCUER_NICE_LEVEL);
1989repeat:
1990    set_current_state(TASK_INTERRUPTIBLE);
1991
1992    if (kthread_should_stop())
1993        return 0;
1994
1995    /*
1996     * See whether any cpu is asking for help. Unbound
1997     * workqueues use cpu 0 in mayday_mask for WORK_CPU_UNBOUND.
1998     */
1999    for_each_mayday_cpu(cpu, wq->mayday_mask) {
2000        unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2001        struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
2002        struct global_cwq *gcwq = cwq->gcwq;
2003        struct work_struct *work, *n;
2004
2005        __set_current_state(TASK_RUNNING);
2006        mayday_clear_cpu(cpu, wq->mayday_mask);
2007
2008        /* migrate to the target cpu if possible */
2009        rescuer->gcwq = gcwq;
2010        worker_maybe_bind_and_lock(rescuer);
2011
2012        /*
2013         * Slurp in all works issued via this workqueue and
2014         * process'em.
2015         */
2016        BUG_ON(!list_empty(&rescuer->scheduled));
2017        list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
2018            if (get_work_cwq(work) == cwq)
2019                move_linked_works(work, scheduled, &n);
2020
2021        process_scheduled_works(rescuer);
2022        spin_unlock_irq(&gcwq->lock);
2023    }
2024
2025    schedule();
2026    goto repeat;
2027}
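
/*
 * A workqueue only gets a rescuer when it is created with WQ_RESCUER.
 * A minimal sketch for a queue whose works sit in the memory reclaim
 * path and therefore must make forward progress even when worker
 * creation can't allocate (the "my_reclaim" name is only illustrative):
 *
 *	struct workqueue_struct *reclaim_wq;
 *
 *	reclaim_wq = alloc_workqueue("my_reclaim", WQ_RESCUER, 1);
 *	if (!reclaim_wq)
 *		return -ENOMEM;
 */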
2028
2029struct wq_barrier {
2030    struct work_struct work;
2031    struct completion done;
2032};
2033
2034static void wq_barrier_func(struct work_struct *work)
2035{
2036    struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2037    complete(&barr->done);
2038}
2039
2040/**
2041 * insert_wq_barrier - insert a barrier work
2042 * @cwq: cwq to insert barrier into
2043 * @barr: wq_barrier to insert
2044 * @target: target work to attach @barr to
2045 * @worker: worker currently executing @target, NULL if @target is not executing
2046 *
2047 * @barr is linked to @target such that @barr is completed only after
2048 * @target finishes execution. Please note that the ordering
2049 * guarantee is observed only with respect to @target and on the local
2050 * cpu.
2051 *
2052 * Currently, a queued barrier can't be canceled. This is because
2053 * try_to_grab_pending() can't determine whether the work to be
2054 * grabbed is at the head of the queue and thus can't clear the
2055 * LINKED flag of the previous work, while a work with the LINKED
2056 * flag set must always be followed by a valid next work.
2057 *
2058 * Note that when @worker is non-NULL, @target may be modified
2059 * underneath us, so we can't reliably determine cwq from @target.
2060 *
2061 * CONTEXT:
2062 * spin_lock_irq(gcwq->lock).
2063 */
2064static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2065                  struct wq_barrier *barr,
2066                  struct work_struct *target, struct worker *worker)
2067{
2068    struct list_head *head;
2069    unsigned int linked = 0;
2070
2071    /*
2072     * debugobject calls are safe here even with gcwq->lock locked
2073     * as we know for sure that this will not trigger any of the
2074     * checks and call back into the fixup functions where we
2075     * might deadlock.
2076     */
2077    INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
2078    __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2079    init_completion(&barr->done);
2080
2081    /*
2082     * If @target is currently being executed, schedule the
2083     * barrier to the worker; otherwise, put it after @target.
2084     */
2085    if (worker)
2086        head = worker->scheduled.next;
2087    else {
2088        unsigned long *bits = work_data_bits(target);
2089
2090        head = target->entry.next;
2091        /* there can already be other linked works, inherit and set */
2092        linked = *bits & WORK_STRUCT_LINKED;
2093        __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2094    }
2095
2096    debug_work_activate(&barr->work);
2097    insert_work(cwq, &barr->work, head,
2098            work_color_to_flags(WORK_NO_COLOR) | linked);
2099}
2100
2101/**
2102 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2103 * @wq: workqueue being flushed
2104 * @flush_color: new flush color, < 0 for no-op
2105 * @work_color: new work color, < 0 for no-op
2106 *
2107 * Prepare cwqs for workqueue flushing.
2108 *
2109 * If @flush_color is non-negative, flush_color on all cwqs should be
2110 * -1. If no cwq has in-flight commands at the specified color, all
2111 * cwq->flush_color's stay at -1 and %false is returned. If any cwq
2112 * has in-flight commands, its cwq->flush_color is set to
2113 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2114 * wakeup logic is armed and %true is returned.
2115 *
2116 * The caller should have initialized @wq->first_flusher prior to
2117 * calling this function with non-negative @flush_color. If
2118 * @flush_color is negative, no flush color update is done and %false
2119 * is returned.
2120 *
2121 * If @work_color is non-negative, all cwqs should have the same
2122 * work_color which is previous to @work_color and all will be
2123 * advanced to @work_color.
2124 *
2125 * CONTEXT:
2126 * mutex_lock(wq->flush_mutex).
2127 *
2128 * RETURNS:
2129 * %true if @flush_color >= 0 and there's something to flush. %false
2130 * otherwise.
2131 */
2132static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2133                      int flush_color, int work_color)
2134{
2135    bool wait = false;
2136    unsigned int cpu;
2137
2138    if (flush_color >= 0) {
2139        BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2140        atomic_set(&wq->nr_cwqs_to_flush, 1);
2141    }
2142
2143    for_each_cwq_cpu(cpu, wq) {
2144        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2145        struct global_cwq *gcwq = cwq->gcwq;
2146
2147        spin_lock_irq(&gcwq->lock);
2148
2149        if (flush_color >= 0) {
2150            BUG_ON(cwq->flush_color != -1);
2151
2152            if (cwq->nr_in_flight[flush_color]) {
2153                cwq->flush_color = flush_color;
2154                atomic_inc(&wq->nr_cwqs_to_flush);
2155                wait = true;
2156            }
2157        }
2158
2159        if (work_color >= 0) {
2160            BUG_ON(work_color != work_next_color(cwq->work_color));
2161            cwq->work_color = work_color;
2162        }
2163
2164        spin_unlock_irq(&gcwq->lock);
2165    }
2166
2167    if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2168        complete(&wq->first_flusher->done);
2169
2170    return wait;
2171}
2172
2173/**
2174 * flush_workqueue - ensure that any scheduled work has run to completion.
2175 * @wq: workqueue to flush
2176 *
2177 * Forces execution of the workqueue and blocks until its completion.
2178 * This is typically used in driver shutdown handlers.
2179 *
2180 * We sleep until all works which were queued on entry have been handled,
2181 * but we are not livelocked by new incoming ones.
2182 */
2183void flush_workqueue(struct workqueue_struct *wq)
2184{
2185    struct wq_flusher this_flusher = {
2186        .list = LIST_HEAD_INIT(this_flusher.list),
2187        .flush_color = -1,
2188        .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2189    };
2190    int next_color;
2191
2192    lock_map_acquire(&wq->lockdep_map);
2193    lock_map_release(&wq->lockdep_map);
2194
2195    mutex_lock(&wq->flush_mutex);
2196
2197    /*
2198     * Start-to-wait phase
2199     */
2200    next_color = work_next_color(wq->work_color);
2201
2202    if (next_color != wq->flush_color) {
2203        /*
2204         * Color space is not full. The current work_color
2205         * becomes our flush_color and work_color is advanced
2206         * by one.
2207         */
2208        BUG_ON(!list_empty(&wq->flusher_overflow));
2209        this_flusher.flush_color = wq->work_color;
2210        wq->work_color = next_color;
2211
2212        if (!wq->first_flusher) {
2213            /* no flush in progress, become the first flusher */
2214            BUG_ON(wq->flush_color != this_flusher.flush_color);
2215
2216            wq->first_flusher = &this_flusher;
2217
2218            if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2219                               wq->work_color)) {
2220                /* nothing to flush, done */
2221                wq->flush_color = next_color;
2222                wq->first_flusher = NULL;
2223                goto out_unlock;
2224            }
2225        } else {
2226            /* wait in queue */
2227            BUG_ON(wq->flush_color == this_flusher.flush_color);
2228            list_add_tail(&this_flusher.list, &wq->flusher_queue);
2229            flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2230        }
2231    } else {
2232        /*
2233         * Oops, color space is full, wait on overflow queue.
2234         * The next flush completion will assign us
2235         * flush_color and transfer to flusher_queue.
2236         */
2237        list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2238    }
2239
2240    mutex_unlock(&wq->flush_mutex);
2241
2242    wait_for_completion(&this_flusher.done);
2243
2244    /*
2245     * Wake-up-and-cascade phase
2246     *
2247     * First flushers are responsible for cascading flushes and
2248     * handling overflow. Non-first flushers can simply return.
2249     */
2250    if (wq->first_flusher != &this_flusher)
2251        return;
2252
2253    mutex_lock(&wq->flush_mutex);
2254
2255    /* we might have raced, check again with mutex held */
2256    if (wq->first_flusher != &this_flusher)
2257        goto out_unlock;
2258
2259    wq->first_flusher = NULL;
2260
2261    BUG_ON(!list_empty(&this_flusher.list));
2262    BUG_ON(wq->flush_color != this_flusher.flush_color);
2263
2264    while (true) {
2265        struct wq_flusher *next, *tmp;
2266
2267        /* complete all the flushers sharing the current flush color */
2268        list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2269            if (next->flush_color != wq->flush_color)
2270                break;
2271            list_del_init(&next->list);
2272            complete(&next->done);
2273        }
2274
2275        BUG_ON(!list_empty(&wq->flusher_overflow) &&
2276               wq->flush_color != work_next_color(wq->work_color));
2277
2278        /* this flush_color is finished, advance by one */
2279        wq->flush_color = work_next_color(wq->flush_color);
2280
2281        /* one color has been freed, handle overflow queue */
2282        if (!list_empty(&wq->flusher_overflow)) {
2283            /*
2284             * Assign the same color to all overflowed
2285             * flushers, advance work_color and append to
2286             * flusher_queue. This is the start-to-wait
2287             * phase for these overflowed flushers.
2288             */
2289            list_for_each_entry(tmp, &wq->flusher_overflow, list)
2290                tmp->flush_color = wq->work_color;
2291
2292            wq->work_color = work_next_color(wq->work_color);
2293
2294            list_splice_tail_init(&wq->flusher_overflow,
2295                          &wq->flusher_queue);
2296            flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2297        }
2298
2299        if (list_empty(&wq->flusher_queue)) {
2300            BUG_ON(wq->flush_color != wq->work_color);
2301            break;
2302        }
2303
2304        /*
2305         * Need to flush more colors. Make the next flusher
2306         * the new first flusher and arm cwqs.
2307         */
2308        BUG_ON(wq->flush_color == wq->work_color);
2309        BUG_ON(wq->flush_color != next->flush_color);
2310
2311        list_del_init(&next->list);
2312        wq->first_flusher = next;
2313
2314        if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2315            break;
2316
2317        /*
2318         * Meh... this color is already done, clear first
2319         * flusher and repeat cascading.
2320         */
2321        wq->first_flusher = NULL;
2322    }
2323
2324out_unlock:
2325    mutex_unlock(&wq->flush_mutex);
2326}
2327EXPORT_SYMBOL_GPL(flush_workqueue);
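
/*
 * A minimal sketch of the typical shutdown/suspend use mentioned above,
 * assuming a hypothetical my_dev structure that owns its own workqueue
 * and has already stopped queueing new works:
 *
 *	static void my_dev_quiesce(struct my_dev *dev)
 *	{
 *		flush_workqueue(dev->wq);
 *	}
 */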
2328
2329/**
2330 * flush_work - block until a work_struct's callback has terminated
2331 * @work: the work which is to be flushed
2332 *
2333 * Returns false if @work has already terminated.
2334 *
2335 * It is expected that, prior to calling flush_work(), the caller has
2336 * arranged for the work to not be requeued, otherwise it doesn't make
2337 * sense to use this function.
2338 */
2339int flush_work(struct work_struct *work)
2340{
2341    struct worker *worker = NULL;
2342    struct global_cwq *gcwq;
2343    struct cpu_workqueue_struct *cwq;
2344    struct wq_barrier barr;
2345
2346    might_sleep();
2347    gcwq = get_work_gcwq(work);
2348    if (!gcwq)
2349        return 0;
2350
2351    spin_lock_irq(&gcwq->lock);
2352    if (!list_empty(&work->entry)) {
2353        /*
2354         * See the comment near try_to_grab_pending()->smp_rmb().
2355         * If it was re-queued to a different gcwq under us, we
2356         * are not going to wait.
2357         */
2358        smp_rmb();
2359        cwq = get_work_cwq(work);
2360        if (unlikely(!cwq || gcwq != cwq->gcwq))
2361            goto already_gone;
2362    } else {
2363        worker = find_worker_executing_work(gcwq, work);
2364        if (!worker)
2365            goto already_gone;
2366        cwq = worker->current_cwq;
2367    }
2368
2369    insert_wq_barrier(cwq, &barr, work, worker);
2370    spin_unlock_irq(&gcwq->lock);
2371
2372    lock_map_acquire(&cwq->wq->lockdep_map);
2373    lock_map_release(&cwq->wq->lockdep_map);
2374
2375    wait_for_completion(&barr.done);
2376    destroy_work_on_stack(&barr.work);
2377    return 1;
2378already_gone:
2379    spin_unlock_irq(&gcwq->lock);
2380    return 0;
2381}
2382EXPORT_SYMBOL_GPL(flush_work);
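
/*
 * Unlike flush_workqueue(), this waits for one specific work item. A
 * minimal sketch, assuming the caller has already prevented dev->io_work
 * from being requeued (my_dev and io_work are only illustrative):
 *
 *	if (!flush_work(&dev->io_work))
 *		pr_debug("io_work had already finished\n");
 */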
2383
2384/*
2385 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2386 * so this work can't be re-armed in any way.
2387 */
2388static int try_to_grab_pending(struct work_struct *work)
2389{
2390    struct global_cwq *gcwq;
2391    int ret = -1;
2392
2393    if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2394        return 0;
2395
2396    /*
2397     * The queueing is in progress, or it is already queued. Try to
2398     * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2399     */
2400    gcwq = get_work_gcwq(work);
2401    if (!gcwq)
2402        return ret;
2403
2404    spin_lock_irq(&gcwq->lock);
2405    if (!list_empty(&work->entry)) {
2406        /*
2407         * This work is queued, but perhaps we locked the wrong gcwq.
2408         * In that case we must see the new value after rmb(), see
2409         * insert_work()->wmb().
2410         */
2411        smp_rmb();
2412        if (gcwq == get_work_gcwq(work)) {
2413            debug_work_deactivate(work);
2414            list_del_init(&work->entry);
2415            cwq_dec_nr_in_flight(get_work_cwq(work),
2416                get_work_color(work),
2417                *work_data_bits(work) & WORK_STRUCT_DELAYED);
2418            ret = 1;
2419        }
2420    }
2421    spin_unlock_irq(&gcwq->lock);
2422
2423    return ret;
2424}
2425
2426static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2427{
2428    struct wq_barrier barr;
2429    struct worker *worker;
2430
2431    spin_lock_irq(&gcwq->lock);
2432
2433    worker = find_worker_executing_work(gcwq, work);
2434    if (unlikely(worker))
2435        insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2436
2437    spin_unlock_irq(&gcwq->lock);
2438
2439    if (unlikely(worker)) {
2440        wait_for_completion(&barr.done);
2441        destroy_work_on_stack(&barr.work);
2442    }
2443}
2444
2445static void wait_on_work(struct work_struct *work)
2446{
2447    int cpu;
2448
2449    might_sleep();
2450
2451    lock_map_acquire(&work->lockdep_map);
2452    lock_map_release(&work->lockdep_map);
2453
2454    for_each_gcwq_cpu(cpu)
2455        wait_on_cpu_work(get_gcwq(cpu), work);
2456}
2457
2458static int __cancel_work_timer(struct work_struct *work,
2459                struct timer_list *timer)
2460{
2461    int ret;
2462
2463    do {
2464        ret = (timer && likely(del_timer(timer)));
2465        if (!ret)
2466            ret = try_to_grab_pending(work);
2467        wait_on_work(work);
2468    } while (unlikely(ret < 0));
2469
2470    clear_work_data(work);
2471    return ret;
2472}
2473
2474/**
2475 * cancel_work_sync - block until a work_struct's callback has terminated
2476 * @work: the work which is to be flushed
2477 *
2478 * Returns true if @work was pending.
2479 *
2480 * cancel_work_sync() will cancel the work if it is queued. If the work's
2481 * callback appears to be running, cancel_work_sync() will block until it
2482 * has completed.
2483 *
2484 * It is possible to use this function if the work re-queues itself. It can
2485 * cancel the work even if it migrates to another workqueue; however, in that
2486 * case it only guarantees that work->func() has completed on the last queued
2487 * workqueue.
2488 *
2489 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
2490 * pending, otherwise it goes into a busy-wait loop until the timer expires.
2491 *
2492 * The caller must ensure that workqueue_struct on which this work was last
2493 * queued can't be destroyed before this function returns.
2494 */
2495int cancel_work_sync(struct work_struct *work)
2496{
2497    return __cancel_work_timer(work, NULL);
2498}
2499EXPORT_SYMBOL_GPL(cancel_work_sync);
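
/*
 * A minimal teardown sketch, assuming a hypothetical device whose irq
 * handler queues dev->event_work: once the interrupt source is gone and
 * cancel_work_sync() has returned, the callback is neither queued nor
 * running and the device structure may be freed.
 *
 *	free_irq(dev->irq, dev);
 *	cancel_work_sync(&dev->event_work);
 *	kfree(dev);
 */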
2500
2501/**
2502 * cancel_delayed_work_sync - reliably kill off a delayed work.
2503 * @dwork: the delayed work struct
2504 *
2505 * Returns true if @dwork was pending.
2506 *
2507 * It is possible to use this function if @dwork rearms itself via queue_work()
2508 * or queue_delayed_work(). See also the comment for cancel_work_sync().
2509 */
2510int cancel_delayed_work_sync(struct delayed_work *dwork)
2511{
2512    return __cancel_work_timer(&dwork->work, &dwork->timer);
2513}
2514EXPORT_SYMBOL(cancel_delayed_work_sync);
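
/*
 * A minimal sketch for stopping a self-rearming poll routine, assuming a
 * hypothetical dev->poll_work which requeues itself from its own
 * callback; the rearming is handled as described above.
 *
 *	dev->stop_polling = true;
 *	cancel_delayed_work_sync(&dev->poll_work);
 */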
2515
2516/**
2517 * schedule_work - put work task in global workqueue
2518 * @work: job to be done
2519 *
2520 * Returns zero if @work was already on the kernel-global workqueue and
2521 * non-zero otherwise.
2522 *
2523 * This puts a job in the kernel-global workqueue if it was not already
2524 * queued and leaves it in the same position on the kernel-global
2525 * workqueue otherwise.
2526 */
2527int schedule_work(struct work_struct *work)
2528{
2529    return queue_work(system_wq, work);
2530}
2531EXPORT_SYMBOL(schedule_work);
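
/*
 * Minimal usage sketch: a statically declared work item pushed onto the
 * kernel-global workqueue (names are only illustrative).
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);
 */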
2532
2533/**
2534 * schedule_work_on - put work task on a specific cpu
2535 * @cpu: cpu to put the work task on
2536 * @work: job to be done
2537 *
2538 * This puts a job on a specific cpu.
2539 */
2540int schedule_work_on(int cpu, struct work_struct *work)
2541{
2542    return queue_work_on(cpu, system_wq, work);
2543}
2544EXPORT_SYMBOL(schedule_work_on);
2545
2546/**
2547 * schedule_delayed_work - put work task in global workqueue after delay
2548 * @dwork: job to be done
2549 * @delay: number of jiffies to wait or 0 for immediate execution
2550 *
2551 * After waiting for a given time this puts a job in the kernel-global
2552 * workqueue.
2553 */
2554int schedule_delayed_work(struct delayed_work *dwork,
2555                    unsigned long delay)
2556{
2557    return queue_delayed_work(system_wq, dwork, delay);
2558}
2559EXPORT_SYMBOL(schedule_delayed_work);
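
/*
 * Minimal usage sketch, assuming a hypothetical my_dev embedding a
 * delayed_work; the callback recovers the container with
 * to_delayed_work() and container_of().
 *
 *	static void poll_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(to_delayed_work(work),
 *						  struct my_dev, poll_work);
 *
 *		my_dev_poll_hardware(dev);
 *	}
 *
 *	INIT_DELAYED_WORK(&dev->poll_work, poll_fn);
 *	schedule_delayed_work(&dev->poll_work, msecs_to_jiffies(100));
 */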
2560
2561/**
2562 * flush_delayed_work - block until a dwork_struct's callback has terminated
2563 * @dwork: the delayed work which is to be flushed
2564 *
2565 * Any timeout is cancelled, and any pending work is run immediately.
2566 */
2567void flush_delayed_work(struct delayed_work *dwork)
2568{
2569    if (del_timer_sync(&dwork->timer)) {
2570        __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
2571                 &dwork->work);
2572        put_cpu();
2573    }
2574    flush_work(&dwork->work);
2575}
2576EXPORT_SYMBOL(flush_delayed_work);
2577
2578/**
2579 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2580 * @cpu: cpu to use
2581 * @dwork: job to be done
2582 * @delay: number of jiffies to wait
2583 *
2584 * After waiting for a given time this puts a job in the kernel-global
2585 * workqueue on the specified CPU.
2586 */
2587int schedule_delayed_work_on(int cpu,
2588            struct delayed_work *dwork, unsigned long delay)
2589{
2590    return queue_delayed_work_on(cpu, system_wq, dwork, delay);
2591}
2592EXPORT_SYMBOL(schedule_delayed_work_on);
2593
2594/**
2595 * schedule_on_each_cpu - call a function on each online CPU from keventd
2596 * @func: the function to call
2597 *
2598 * Returns zero on success.
2599 * Returns a negative errno on failure.
2600 *
2601 * schedule_on_each_cpu() is very slow.
2602 */
2603int schedule_on_each_cpu(work_func_t func)
2604{
2605    int cpu;
2606    struct work_struct __percpu *works;
2607
2608    works = alloc_percpu(struct work_struct);
2609    if (!works)
2610        return -ENOMEM;
2611
2612    get_online_cpus();
2613
2614    for_each_online_cpu(cpu) {
2615        struct work_struct *work = per_cpu_ptr(works, cpu);
2616
2617        INIT_WORK(work, func);
2618        schedule_work_on(cpu, work);
2619    }
2620
2621    for_each_online_cpu(cpu)
2622        flush_work(per_cpu_ptr(works, cpu));
2623
2624    put_online_cpus();
2625    free_percpu(works);
2626    return 0;
2627}
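
/*
 * Minimal usage sketch: run a callback once on every online cpu and wait
 * for all of them to finish (the function name is only illustrative).
 *
 *	static void drain_local_state(struct work_struct *unused)
 *	{
 *		pr_info("draining per-cpu state\n");
 *	}
 *
 *	int err = schedule_on_each_cpu(drain_local_state);
 */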
2628
2629/**
2630 * flush_scheduled_work - ensure that any scheduled work has run to completion.
2631 *
2632 * Forces execution of the kernel-global workqueue and blocks until its
2633 * completion.
2634 *
2635 * Think twice before calling this function! It's very easy to get into
2636 * trouble if you don't take great care. Either of the following situations
2637 * will lead to deadlock:
2638 *
2639 * One of the work items currently on the workqueue needs to acquire
2640 * a lock held by your code or its caller.
2641 *
2642 * Your code is running in the context of a work routine.
2643 *
2644 * They will be detected by lockdep when they occur, but the first might not
2645 * occur very often. It depends on what work items are on the workqueue and
2646 * what locks they need, which you have no control over.
2647 *
2648 * In most situations flushing the entire workqueue is overkill; you merely
2649 * need to know that a particular work item isn't queued and isn't running.
2650 * In such cases you should use cancel_delayed_work_sync() or
2651 * cancel_work_sync() instead.
2652 */
2653void flush_scheduled_work(void)
2654{
2655    flush_workqueue(system_wq);
2656}
2657EXPORT_SYMBOL(flush_scheduled_work);
2658
2659/**
2660 * execute_in_process_context - reliably execute the routine with user context
2661 * @fn: the function to execute
2662 * @ew: guaranteed storage for the execute work structure (must
2663 * be available when the work executes)
2664 *
2665 * Executes the function immediately if process context is available,
2666 * otherwise schedules the function for delayed execution.
2667 *
2668 * Returns: 0 - function was executed
2669 * 1 - function was scheduled for execution
2670 */
2671int execute_in_process_context(work_func_t fn, struct execute_work *ew)
2672{
2673    if (!in_interrupt()) {
2674        fn(&ew->work);
2675        return 0;
2676    }
2677
2678    INIT_WORK(&ew->work, fn);
2679    schedule_work(&ew->work);
2680
2681    return 1;
2682}
2683EXPORT_SYMBOL_GPL(execute_in_process_context);
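
/*
 * Minimal usage sketch, assuming a hypothetical my_dev that reserves an
 * execute_work for its release path, which may be reached from either
 * process or interrupt context:
 *
 *	static void my_dev_release(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  release_ew.work);
 *		kfree(dev);
 *	}
 *
 *	execute_in_process_context(my_dev_release, &dev->release_ew);
 */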
2684
2685int keventd_up(void)
2686{
2687    return system_wq != NULL;
2688}
2689
2690static int alloc_cwqs(struct workqueue_struct *wq)
2691{
2692    /*
2693     * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
2694     * Make sure that the alignment isn't lower than that of
2695     * unsigned long long.
2696     */
2697    const size_t size = sizeof(struct cpu_workqueue_struct);
2698    const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
2699                   __alignof__(unsigned long long));
2700#ifdef CONFIG_SMP
2701    bool percpu = !(wq->flags & WQ_UNBOUND);
2702#else
2703    bool percpu = false;
2704#endif
2705
2706    if (percpu)
2707        wq->cpu_wq.pcpu = __alloc_percpu(size, align);
2708    else {
2709        void *ptr;
2710
2711        /*
2712         * Allocate enough room to align cwq and put an extra
2713         * pointer at the end pointing back to the originally
2714         * allocated pointer which will be used for free.
2715         */
2716        ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2717        if (ptr) {
2718            wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2719            *(void **)(wq->cpu_wq.single + 1) = ptr;
2720        }
2721    }
2722
2723    /* just in case, make sure it's actually aligned */
2724    BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2725    return wq->cpu_wq.v ? 0 : -ENOMEM;
2726}
2727
2728static void free_cwqs(struct workqueue_struct *wq)
2729{
2730#ifdef CONFIG_SMP
2731    bool percpu = !(wq->flags & WQ_UNBOUND);
2732#else
2733    bool percpu = false;
2734#endif
2735
2736    if (percpu)
2737        free_percpu(wq->cpu_wq.pcpu);
2738    else if (wq->cpu_wq.single) {
2739        /* the pointer to free is stored right after the cwq */
2740        kfree(*(void **)(wq->cpu_wq.single + 1));
2741    }
2742}
2743
2744static int wq_clamp_max_active(int max_active, unsigned int flags,
2745                   const char *name)
2746{
2747    int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2748
2749    if (max_active < 1 || max_active > lim)
2750        printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2751               "is out of range, clamping between %d and %d\n",
2752               max_active, name, 1, lim);
2753
2754    return clamp_val(max_active, 1, lim);
2755}
2756
2757struct workqueue_struct *__alloc_workqueue_key(const char *name,
2758                           unsigned int flags,
2759                           int max_active,
2760                           struct lock_class_key *key,
2761                           const char *lock_name)
2762{
2763    struct workqueue_struct *wq;
2764    unsigned int cpu;
2765
2766    /*
2767     * Unbound workqueues aren't concurrency managed and should be
2768     * dispatched to workers immediately.
2769     */
2770    if (flags & WQ_UNBOUND)
2771        flags |= WQ_HIGHPRI;
2772
2773    max_active = max_active ?: WQ_DFL_ACTIVE;
2774    max_active = wq_clamp_max_active(max_active, flags, name);
2775
2776    wq = kzalloc(sizeof(*wq), GFP_KERNEL);
2777    if (!wq)
2778        goto err;
2779
2780    wq->flags = flags;
2781    wq->saved_max_active = max_active;
2782    mutex_init(&wq->flush_mutex);
2783    atomic_set(&wq->nr_cwqs_to_flush, 0);
2784    INIT_LIST_HEAD(&wq->flusher_queue);
2785    INIT_LIST_HEAD(&wq->flusher_overflow);
2786
2787    wq->name = name;
2788    lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
2789    INIT_LIST_HEAD(&wq->list);
2790
2791    if (alloc_cwqs(wq) < 0)
2792        goto err;
2793
2794    for_each_cwq_cpu(cpu, wq) {
2795        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2796        struct global_cwq *gcwq = get_gcwq(cpu);
2797
2798        BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
2799        cwq->gcwq = gcwq;
2800        cwq->wq = wq;
2801        cwq->flush_color = -1;
2802        cwq->max_active = max_active;
2803        INIT_LIST_HEAD(&cwq->delayed_works);
2804    }
2805
2806    if (flags & WQ_RESCUER) {
2807        struct worker *rescuer;
2808
2809        if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
2810            goto err;
2811
2812        wq->rescuer = rescuer = alloc_worker();
2813        if (!rescuer)
2814            goto err;
2815
2816        rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
2817        if (IS_ERR(rescuer->task))
2818            goto err;
2819
2820        rescuer->task->flags |= PF_THREAD_BOUND;
2821        wake_up_process(rescuer->task);
2822    }
2823
2824    /*
2825     * workqueue_lock protects global freeze state and workqueues
2826     * list. Grab it, set max_active accordingly and add the new
2827     * workqueue to workqueues list.
2828     */
2829    spin_lock(&workqueue_lock);
2830
2831    if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
2832        for_each_cwq_cpu(cpu, wq)
2833            get_cwq(cpu, wq)->max_active = 0;
2834
2835    list_add(&wq->list, &workqueues);
2836
2837    spin_unlock(&workqueue_lock);
2838
2839    return wq;
2840err:
2841    if (wq) {
2842        free_cwqs(wq);
2843        free_mayday_mask(wq->mayday_mask);
2844        kfree(wq->rescuer);
2845        kfree(wq);
2846    }
2847    return NULL;
2848}
2849EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
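
/*
 * Callers normally reach this through the alloc_workqueue() wrapper,
 * which supplies the lockdep key. A minimal sketch combining a couple of
 * the flags handled above (the names are only illustrative; 0 selects
 * the default max_active):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_FREEZEABLE | WQ_RESCUER, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &dev->work);
 */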
2850
2851/**
2852 * destroy_workqueue - safely terminate a workqueue
2853 * @wq: target workqueue
2854 *
2855 * Safely destroy a workqueue. All work currently pending will be done first.
2856 */
2857void destroy_workqueue(struct workqueue_struct *wq)
2858{
2859    unsigned int cpu;
2860
2861    wq->flags |= WQ_DYING;
2862    flush_workqueue(wq);
2863
2864    /*
2865     * wq list is used to freeze wq, remove from list after
2866     * flushing is complete in case freeze races us.
2867     */
2868    spin_lock(&workqueue_lock);
2869    list_del(&wq->list);
2870    spin_unlock(&workqueue_lock);
2871
2872    /* sanity check */
2873    for_each_cwq_cpu(cpu, wq) {
2874        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2875        int i;
2876
2877        for (i = 0; i < WORK_NR_COLORS; i++)
2878            BUG_ON(cwq->nr_in_flight[i]);
2879        BUG_ON(cwq->nr_active);
2880        BUG_ON(!list_empty(&cwq->delayed_works));
2881    }
2882
2883    if (wq->flags & WQ_RESCUER) {
2884        kthread_stop(wq->rescuer->task);
2885        free_mayday_mask(wq->mayday_mask);
2886        kfree(wq->rescuer);
2887    }
2888
2889    free_cwqs(wq);
2890    kfree(wq);
2891}
2892EXPORT_SYMBOL_GPL(destroy_workqueue);
2893
2894/**
2895 * workqueue_set_max_active - adjust max_active of a workqueue
2896 * @wq: target workqueue
2897 * @max_active: new max_active value.
2898 *
2899 * Set max_active of @wq to @max_active.
2900 *
2901 * CONTEXT:
2902 * Don't call from IRQ context.
2903 */
2904void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
2905{
2906    unsigned int cpu;
2907
2908    max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
2909
2910    spin_lock(&workqueue_lock);
2911
2912    wq->saved_max_active = max_active;
2913
2914    for_each_cwq_cpu(cpu, wq) {
2915        struct global_cwq *gcwq = get_gcwq(cpu);
2916
2917        spin_lock_irq(&gcwq->lock);
2918
2919        if (!(wq->flags & WQ_FREEZEABLE) ||
2920            !(gcwq->flags & GCWQ_FREEZING))
2921            get_cwq(gcwq->cpu, wq)->max_active = max_active;
2922
2923        spin_unlock_irq(&gcwq->lock);
2924    }
2925
2926    spin_unlock(&workqueue_lock);
2927}
2928EXPORT_SYMBOL_GPL(workqueue_set_max_active);
2929
2930/**
2931 * workqueue_congested - test whether a workqueue is congested
2932 * @cpu: CPU in question
2933 * @wq: target workqueue
2934 *
2935 * Test whether @wq's cpu workqueue for @cpu is congested. There is
2936 * no synchronization around this function and the test result is
2937 * unreliable and only useful as advisory hints or for debugging.
2938 *
2939 * RETURNS:
2940 * %true if congested, %false otherwise.
2941 */
2942bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
2943{
2944    struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2945
2946    return !list_empty(&cwq->delayed_works);
2947}
2948EXPORT_SYMBOL_GPL(workqueue_congested);
2949
2950/**
2951 * work_cpu - return the last known associated cpu for @work
2952 * @work: the work of interest
2953 *
2954 * RETURNS:
2955 * CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
2956 */
2957unsigned int work_cpu(struct work_struct *work)
2958{
2959    struct global_cwq *gcwq = get_work_gcwq(work);
2960
2961    return gcwq ? gcwq->cpu : WORK_CPU_NONE;
2962}
2963EXPORT_SYMBOL_GPL(work_cpu);
2964
2965/**
2966 * work_busy - test whether a work is currently pending or running
2967 * @work: the work to be tested
2968 *
2969 * Test whether @work is currently pending or running. There is no
2970 * synchronization around this function and the test result is
2971 * unreliable and only useful as advisory hints or for debugging.
2972 * Especially for reentrant wqs, the pending state might hide the
2973 * running state.
2974 *
2975 * RETURNS:
2976 * OR'd bitmask of WORK_BUSY_* bits.
2977 */
2978unsigned int work_busy(struct work_struct *work)
2979{
2980    struct global_cwq *gcwq = get_work_gcwq(work);
2981    unsigned long flags;
2982    unsigned int ret = 0;
2983
2984    if (!gcwq)
2985        return false;
2986
2987    spin_lock_irqsave(&gcwq->lock, flags);
2988
2989    if (work_pending(work))
2990        ret |= WORK_BUSY_PENDING;
2991    if (find_worker_executing_work(gcwq, work))
2992        ret |= WORK_BUSY_RUNNING;
2993
2994    spin_unlock_irqrestore(&gcwq->lock, flags);
2995
2996    return ret;
2997}
2998EXPORT_SYMBOL_GPL(work_busy);
2999
3000/*
3001 * CPU hotplug.
3002 *
3003 * There are two challenges in supporting CPU hotplug. Firstly, there
3004 * are a lot of assumptions on strong associations among work, cwq and
3005 * gcwq which make migrating pending and scheduled works very
3006 * difficult to implement without impacting hot paths. Secondly,
3007 * gcwqs serve a mix of short, long and very long running works, making
3008 * blocked draining impractical.
3009 *
3010 * This is solved by allowing a gcwq to be detached from CPU, running
3011 * it with unbound (rogue) workers and allowing it to be reattached
3012 * later if the cpu comes back online. A separate thread is created
3013 * to govern a gcwq in such state and is called the trustee of the
3014 * gcwq.
3015 *
3016 * Trustee states and their descriptions.
3017 *
3018 * START      Command state used on startup. On CPU_DOWN_PREPARE, a
3019 *            new trustee is started with this state.
3020 *
3021 * IN_CHARGE  Once started, trustee will enter this state after
3022 *            assuming the manager role and making all existing
3023 *            workers rogue. DOWN_PREPARE waits for trustee to
3024 *            enter this state. After reaching IN_CHARGE, trustee
3025 *            tries to execute the pending worklist until it's empty
3026 *            and the state is set to BUTCHER, or the state is set
3027 *            to RELEASE.
3028 *
3029 * BUTCHER    Command state which is set by the cpu callback after
3030 *            the cpu has gone down. Once this state is set, the trustee
3031 *            knows that there will be no new works on the worklist
3032 *            and once the worklist is empty it can proceed to
3033 *            killing idle workers.
3034 *
3035 * RELEASE    Command state which is set by the cpu callback if the
3036 *            cpu down has been canceled or it has come online
3037 *            again. After recognizing this state, trustee stops
3038 *            trying to drain or butcher and clears ROGUE, rebinds
3039 *            all remaining workers back to the cpu and releases
3040 *            manager role.
3041 *
3042 * DONE       Trustee will enter this state after BUTCHER or RELEASE
3043 *            is complete.
3044 *
3045 *          trustee                 CPU                draining
3046 *          took over               down               complete
3047 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3048 *                        |                     |                  ^
3049 *                        | CPU is back online  v   return workers |
3050 *                         ----------------> RELEASE --------------
3051 */
3052
3053/**
3054 * trustee_wait_event_timeout - timed event wait for trustee
3055 * @cond: condition to wait for
3056 * @timeout: timeout in jiffies
3057 *
3058 * wait_event_timeout() for trustee to use. Handles locking and
3059 * checks for RELEASE request.
3060 *
3061 * CONTEXT:
3062 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3063 * multiple times. To be used by trustee.
3064 *
3065 * RETURNS:
3066 * Positive indicating left time if @cond is satisfied, 0 if timed
3067 * out, -1 if canceled.
3068 */
3069#define trustee_wait_event_timeout(cond, timeout) ({ \
3070    long __ret = (timeout); \
3071    while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
3072           __ret) { \
3073        spin_unlock_irq(&gcwq->lock); \
3074        __wait_event_timeout(gcwq->trustee_wait, (cond) || \
3075            (gcwq->trustee_state == TRUSTEE_RELEASE), \
3076            __ret); \
3077        spin_lock_irq(&gcwq->lock); \
3078    } \
3079    gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
3080})
3081
3082/**
3083 * trustee_wait_event - event wait for trustee
3084 * @cond: condition to wait for
3085 *
3086 * wait_event() for trustee to use. Automatically handles locking and
3087 * checks for CANCEL request.
3088 *
3089 * CONTEXT:
3090 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3091 * multiple times. To be used by trustee.
3092 *
3093 * RETURNS:
3094 * 0 if @cond is satisfied, -1 if canceled.
3095 */
3096#define trustee_wait_event(cond) ({ \
3097    long __ret1; \
3098    __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3099    __ret1 < 0 ? -1 : 0; \
3100})
3101
3102static int __cpuinit trustee_thread(void *__gcwq)
3103{
3104    struct global_cwq *gcwq = __gcwq;
3105    struct worker *worker;
3106    struct work_struct *work;
3107    struct hlist_node *pos;
3108    long rc;
3109    int i;
3110
3111    BUG_ON(gcwq->cpu != smp_processor_id());
3112
3113    spin_lock_irq(&gcwq->lock);
3114    /*
3115     * Claim the manager position and make all workers rogue.
3116     * Trustee must be bound to the target cpu and can't be
3117     * cancelled.
3118     */
3119    BUG_ON(gcwq->cpu != smp_processor_id());
3120    rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3121    BUG_ON(rc < 0);
3122
3123    gcwq->flags |= GCWQ_MANAGING_WORKERS;
3124
3125    list_for_each_entry(worker, &gcwq->idle_list, entry)
3126        worker->flags |= WORKER_ROGUE;
3127
3128    for_each_busy_worker(worker, i, pos, gcwq)
3129        worker->flags |= WORKER_ROGUE;
3130
3131    /*
3132     * Call schedule() so that we cross rq->lock and thus can
3133     * guarantee sched callbacks see the rogue flag. This is
3134     * necessary as scheduler callbacks may be invoked from other
3135     * cpus.
3136     */
3137    spin_unlock_irq(&gcwq->lock);
3138    schedule();
3139    spin_lock_irq(&gcwq->lock);
3140
3141    /*
3142     * Sched callbacks are disabled now. Zap nr_running. After
3143     * this, nr_running stays zero and need_more_worker() and
3144     * keep_working() are always true as long as the worklist is
3145     * not empty.
3146     */
3147    atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3148
3149    spin_unlock_irq(&gcwq->lock);
3150    del_timer_sync(&gcwq->idle_timer);
3151    spin_lock_irq(&gcwq->lock);
3152
3153    /*
3154     * We're now in charge. Notify and proceed to drain. We need
3155     * to keep the gcwq running during the whole CPU down
3156     * procedure as other cpu hotunplug callbacks may need to
3157     * flush currently running tasks.
3158     */
3159    gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3160    wake_up_all(&gcwq->trustee_wait);
3161
3162    /*
3163     * The original cpu is in the process of dying and may go away
3164     * anytime now. When that happens, we and all workers would
3165     * be migrated to other cpus. Try draining any left work. We
3166     * want to get it over with ASAP - spam rescuers, wake up as
3167     * many idlers as necessary and create new ones till the
3168     * worklist is empty. Note that if the gcwq is frozen, there
3169     * may be frozen works in freezeable cwqs. Don't declare
3170     * completion while frozen.
3171     */
3172    while (gcwq->nr_workers != gcwq->nr_idle ||
3173           gcwq->flags & GCWQ_FREEZING ||
3174           gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3175        int nr_works = 0;
3176
3177        list_for_each_entry(work, &gcwq->worklist, entry) {
3178            send_mayday(work);
3179            nr_works++;
3180        }
3181
3182        list_for_each_entry(worker, &gcwq->idle_list, entry) {
3183            if (!nr_works--)
3184                break;
3185            wake_up_process(worker->task);
3186        }
3187
3188        if (need_to_create_worker(gcwq)) {
3189            spin_unlock_irq(&gcwq->lock);
3190            worker = create_worker(gcwq, false);
3191            spin_lock_irq(&gcwq->lock);
3192            if (worker) {
3193                worker->flags |= WORKER_ROGUE;
3194                start_worker(worker);
3195            }
3196        }
3197
3198        /* give a breather */
3199        if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3200            break;
3201    }
3202
3203    /*
3204     * Either all works have been scheduled and cpu is down, or
3205     * cpu down has already been canceled. Wait for and butcher
3206     * all workers till we're canceled.
3207     */
3208    do {
3209        rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3210        while (!list_empty(&gcwq->idle_list))
3211            destroy_worker(list_first_entry(&gcwq->idle_list,
3212                            struct worker, entry));
3213    } while (gcwq->nr_workers && rc >= 0);
3214
3215    /*
3216     * At this point, either draining has completed and no worker
3217     * is left, or cpu down has been canceled or the cpu is being
3218     * brought back up. There shouldn't be any idle one left.
3219 * Tell the remaining busy ones to rebind once they finish their
3220 * currently scheduled works by scheduling the rebind_work.
3221     */
3222    WARN_ON(!list_empty(&gcwq->idle_list));
3223
3224    for_each_busy_worker(worker, i, pos, gcwq) {
3225        struct work_struct *rebind_work = &worker->rebind_work;
3226
3227        /*
3228         * Rebind_work may race with future cpu hotplug
3229         * operations. Use a separate flag to mark that
3230         * rebinding is scheduled.
3231         */
3232        worker->flags |= WORKER_REBIND;
3233        worker->flags &= ~WORKER_ROGUE;
3234
3235        /* queue rebind_work, wq doesn't matter, use the default one */
3236        if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3237                     work_data_bits(rebind_work)))
3238            continue;
3239
3240        debug_work_activate(rebind_work);
3241        insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3242                worker->scheduled.next,
3243                work_color_to_flags(WORK_NO_COLOR));
3244    }
3245
3246    /* relinquish manager role */
3247    gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3248
3249    /* notify completion */
3250    gcwq->trustee = NULL;
3251    gcwq->trustee_state = TRUSTEE_DONE;
3252    wake_up_all(&gcwq->trustee_wait);
3253    spin_unlock_irq(&gcwq->lock);
3254    return 0;
3255}
3256
3257/**
3258 * wait_trustee_state - wait for trustee to enter the specified state
3259 * @gcwq: gcwq the trustee of interest belongs to
3260 * @state: target state to wait for
3261 *
3262 * Wait for the trustee to reach @state. DONE is already matched.
3263 *
3264 * CONTEXT:
3265 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3266 * multiple times. To be used by cpu_callback.
3267 */
3268static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3269__releases(&gcwq->lock)
3270__acquires(&gcwq->lock)
3271{
3272    if (!(gcwq->trustee_state == state ||
3273          gcwq->trustee_state == TRUSTEE_DONE)) {
3274        spin_unlock_irq(&gcwq->lock);
3275        __wait_event(gcwq->trustee_wait,
3276                 gcwq->trustee_state == state ||
3277                 gcwq->trustee_state == TRUSTEE_DONE);
3278        spin_lock_irq(&gcwq->lock);
3279    }
3280}
3281
3282static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
3283                        unsigned long action,
3284                        void *hcpu)
3285{
3286    unsigned int cpu = (unsigned long)hcpu;
3287    struct global_cwq *gcwq = get_gcwq(cpu);
3288    struct task_struct *new_trustee = NULL;
3289    struct worker *uninitialized_var(new_worker);
3290    unsigned long flags;
3291
3292    action &= ~CPU_TASKS_FROZEN;
3293
3294    switch (action) {
3295    case CPU_DOWN_PREPARE:
3296        new_trustee = kthread_create(trustee_thread, gcwq,
3297                         "workqueue_trustee/%d", cpu);
3298        if (IS_ERR(new_trustee))
3299            return notifier_from_errno(PTR_ERR(new_trustee));
3300        kthread_bind(new_trustee, cpu);
3301        /* fall through */
3302    case CPU_UP_PREPARE:
3303        BUG_ON(gcwq->first_idle);
3304        new_worker = create_worker(gcwq, false);
3305        if (!new_worker) {
3306            if (new_trustee)
3307                kthread_stop(new_trustee);
3308            return NOTIFY_BAD;
3309        }
3310    }
3311
3312    /* some are called w/ irq disabled, don't disturb irq status */
3313    spin_lock_irqsave(&gcwq->lock, flags);
3314
3315    switch (action) {
3316    case CPU_DOWN_PREPARE:
3317        /* initialize trustee and tell it to acquire the gcwq */
3318        BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3319        gcwq->trustee = new_trustee;
3320        gcwq->trustee_state = TRUSTEE_START;
3321        wake_up_process(gcwq->trustee);
3322        wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3323        /* fall through */
3324    case CPU_UP_PREPARE:
3325        BUG_ON(gcwq->first_idle);
3326        gcwq->first_idle = new_worker;
3327        break;
3328
3329    case CPU_DYING:
3330        /*
3331         * Before this, the trustee and all workers except for
3332         * the ones which are still executing works from
3333         * before the last CPU down must be on the cpu. After
3334         * this, they'll all be diasporas.
3335         */
3336        gcwq->flags |= GCWQ_DISASSOCIATED;
3337        break;
3338
3339    case CPU_POST_DEAD:
3340        gcwq->trustee_state = TRUSTEE_BUTCHER;
3341        /* fall through */
3342    case CPU_UP_CANCELED:
3343        destroy_worker(gcwq->first_idle);
3344        gcwq->first_idle = NULL;
3345        break;
3346
3347    case CPU_DOWN_FAILED:
3348    case CPU_ONLINE:
3349        gcwq->flags &= ~GCWQ_DISASSOCIATED;
3350        if (gcwq->trustee_state != TRUSTEE_DONE) {
3351            gcwq->trustee_state = TRUSTEE_RELEASE;
3352            wake_up_process(gcwq->trustee);
3353            wait_trustee_state(gcwq, TRUSTEE_DONE);
3354        }
3355
3356        /*
3357         * Trustee is done and there might be no worker left.
3358         * Put the first_idle in and request a real manager to
3359         * take a look.
3360         */
3361        spin_unlock_irq(&gcwq->lock);
3362        kthread_bind(gcwq->first_idle->task, cpu);
3363        spin_lock_irq(&gcwq->lock);
3364        gcwq->flags |= GCWQ_MANAGE_WORKERS;
3365        start_worker(gcwq->first_idle);
3366        gcwq->first_idle = NULL;
3367        break;
3368    }
3369
3370    spin_unlock_irqrestore(&gcwq->lock, flags);
3371
3372    return notifier_from_errno(0);
3373}
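/*
 * Illustrative sketch (not part of this file): a driver-side CPU
 * hotplug notifier following the same conventions as the callback
 * above -- mask off CPU_TASKS_FROZEN and report failures through
 * notifier_from_errno().  my_percpu_setup()/my_percpu_teardown() are
 * hypothetical helpers.
 *
 *    static int __cpuinit my_cpu_callback(struct notifier_block *nfb,
 *                                         unsigned long action, void *hcpu)
 *    {
 *        unsigned int cpu = (unsigned long)hcpu;
 *        int err = 0;
 *
 *        switch (action & ~CPU_TASKS_FROZEN) {
 *        case CPU_UP_PREPARE:
 *            err = my_percpu_setup(cpu);
 *            break;
 *        case CPU_UP_CANCELED:
 *        case CPU_DEAD:
 *            my_percpu_teardown(cpu);
 *            break;
 *        }
 *        return notifier_from_errno(err);
 *    }
 *
 *    hotcpu_notifier(my_cpu_callback, 0);
 */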
3374
3375#ifdef CONFIG_SMP
3376
3377struct work_for_cpu {
3378    struct completion completion;
3379    long (*fn)(void *);
3380    void *arg;
3381    long ret;
3382};
3383
3384static int do_work_for_cpu(void *_wfc)
3385{
3386    struct work_for_cpu *wfc = _wfc;
3387    wfc->ret = wfc->fn(wfc->arg);
3388    complete(&wfc->completion);
3389    return 0;
3390}
3391
3392/**
3393 * work_on_cpu - run a function in user context on a particular cpu
3394 * @cpu: the cpu to run on
3395 * @fn: the function to run
3396 * @arg: the function arg
3397 *
3398 * This will return the value @fn returns.
3399 * It is up to the caller to ensure that the cpu doesn't go offline.
3400 * The caller must not hold any locks which would prevent @fn from completing.
3401 */
3402long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3403{
3404    struct task_struct *sub_thread;
3405    struct work_for_cpu wfc = {
3406        .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
3407        .fn = fn,
3408        .arg = arg,
3409    };
3410
3411    sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
3412    if (IS_ERR(sub_thread))
3413        return PTR_ERR(sub_thread);
3414    kthread_bind(sub_thread, cpu);
3415    wake_up_process(sub_thread);
3416    wait_for_completion(&wfc.completion);
3417    return wfc.ret;
3418}
3419EXPORT_SYMBOL_GPL(work_on_cpu);
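/*
 * Illustrative sketch (not part of this file): collecting a per-cpu
 * value with work_on_cpu().  read_node_id() is a hypothetical callee;
 * the caller pins CPU hotplug with get_online_cpus() so that @cpu
 * cannot go away while the helper thread runs.
 *
 *    static long read_node_id(void *unused)
 *    {
 *        return numa_node_id();
 *    }
 *
 *    long node = -1;
 *
 *    get_online_cpus();
 *    if (cpu_online(cpu))
 *        node = work_on_cpu(cpu, read_node_id, NULL);
 *    put_online_cpus();
 */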
3420#endif /* CONFIG_SMP */
3421
3422#ifdef CONFIG_FREEZER
3423
3424/**
3425 * freeze_workqueues_begin - begin freezing workqueues
3426 *
3427 * Start freezing workqueues. After this function returns, all
3428 * freezeable workqueues will queue new works to their delayed_works
3429 * list instead of gcwq->worklist.
3430 *
3431 * CONTEXT:
3432 * Grabs and releases workqueue_lock and gcwq->lock's.
3433 */
3434void freeze_workqueues_begin(void)
3435{
3436    unsigned int cpu;
3437
3438    spin_lock(&workqueue_lock);
3439
3440    BUG_ON(workqueue_freezing);
3441    workqueue_freezing = true;
3442
3443    for_each_gcwq_cpu(cpu) {
3444        struct global_cwq *gcwq = get_gcwq(cpu);
3445        struct workqueue_struct *wq;
3446
3447        spin_lock_irq(&gcwq->lock);
3448
3449        BUG_ON(gcwq->flags & GCWQ_FREEZING);
3450        gcwq->flags |= GCWQ_FREEZING;
3451
3452        list_for_each_entry(wq, &workqueues, list) {
3453            struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3454
3455            if (cwq && wq->flags & WQ_FREEZEABLE)
3456                cwq->max_active = 0;
3457        }
3458
3459        spin_unlock_irq(&gcwq->lock);
3460    }
3461
3462    spin_unlock(&workqueue_lock);
3463}
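/*
 * Illustrative sketch (not part of this file): only workqueues created
 * with WQ_FREEZEABLE take part in the freeze above.  Work queued to
 * such a workqueue after freeze_workqueues_begin() sits on the
 * delayed_works list until thaw_workqueues().  my_fs_wq and
 * my_sync_work are hypothetical.
 *
 *    struct workqueue_struct *my_fs_wq;
 *
 *    my_fs_wq = alloc_workqueue("my_fs", WQ_FREEZEABLE, 0);
 *    if (!my_fs_wq)
 *        return -ENOMEM;
 *    queue_work(my_fs_wq, &my_sync_work);
 */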
3464
3465/**
3466 * freeze_workqueues_busy - are freezeable workqueues still busy?
3467 *
3468 * Check whether freezing is complete. This function must be called
3469 * between freeze_workqueues_begin() and thaw_workqueues().
3470 *
3471 * CONTEXT:
3472 * Grabs and releases workqueue_lock.
3473 *
3474 * RETURNS:
3475 * %true if some freezeable workqueues are still busy. %false if
3476 * freezing is complete.
3477 */
3478bool freeze_workqueues_busy(void)
3479{
3480    unsigned int cpu;
3481    bool busy = false;
3482
3483    spin_lock(&workqueue_lock);
3484
3485    BUG_ON(!workqueue_freezing);
3486
3487    for_each_gcwq_cpu(cpu) {
3488        struct workqueue_struct *wq;
3489        /*
3490         * nr_active is monotonically decreasing. It's safe
3491         * to peek without lock.
3492         */
3493        list_for_each_entry(wq, &workqueues, list) {
3494            struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3495
3496            if (!cwq || !(wq->flags & WQ_FREEZEABLE))
3497                continue;
3498
3499            BUG_ON(cwq->nr_active < 0);
3500            if (cwq->nr_active) {
3501                busy = true;
3502                goto out_unlock;
3503            }
3504        }
3505    }
3506out_unlock:
3507    spin_unlock(&workqueue_lock);
3508    return busy;
3509}
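/*
 * Illustrative sketch (not part of this file): how the freezer entry
 * points fit together.  This loosely mirrors the way the suspend
 * freezer drives them; the polling loop and the missing timeout are
 * simplifications.
 *
 *    freeze_workqueues_begin();
 *
 *    while (freeze_workqueues_busy())
 *        msleep(10);
 *
 *    ... system image is saved / devices are suspended ...
 *
 *    thaw_workqueues();
 */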
3510
3511/**
3512 * thaw_workqueues - thaw workqueues
3513 *
3514 * Thaw workqueues. Normal queueing is restored and all collected
3515 * frozen works are transferred to their respective gcwq worklists.
3516 *
3517 * CONTEXT:
3518 * Grabs and releases workqueue_lock and gcwq->lock's.
3519 */
3520void thaw_workqueues(void)
3521{
3522    unsigned int cpu;
3523
3524    spin_lock(&workqueue_lock);
3525
3526    if (!workqueue_freezing)
3527        goto out_unlock;
3528
3529    for_each_gcwq_cpu(cpu) {
3530        struct global_cwq *gcwq = get_gcwq(cpu);
3531        struct workqueue_struct *wq;
3532
3533        spin_lock_irq(&gcwq->lock);
3534
3535        BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3536        gcwq->flags &= ~GCWQ_FREEZING;
3537
3538        list_for_each_entry(wq, &workqueues, list) {
3539            struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3540
3541            if (!cwq || !(wq->flags & WQ_FREEZEABLE))
3542                continue;
3543
3544            /* restore max_active and repopulate worklist */
3545            cwq->max_active = wq->saved_max_active;
3546
3547            while (!list_empty(&cwq->delayed_works) &&
3548                   cwq->nr_active < cwq->max_active)
3549                cwq_activate_first_delayed(cwq);
3550        }
3551
3552        wake_up_worker(gcwq);
3553
3554        spin_unlock_irq(&gcwq->lock);
3555    }
3556
3557    workqueue_freezing = false;
3558out_unlock:
3559    spin_unlock(&workqueue_lock);
3560}
3561#endif /* CONFIG_FREEZER */
3562
3563static int __init init_workqueues(void)
3564{
3565    unsigned int cpu;
3566    int i;
3567
3568    cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
3569
3570    /* initialize gcwqs */
3571    for_each_gcwq_cpu(cpu) {
3572        struct global_cwq *gcwq = get_gcwq(cpu);
3573
3574        spin_lock_init(&gcwq->lock);
3575        INIT_LIST_HEAD(&gcwq->worklist);
3576        gcwq->cpu = cpu;
3577        gcwq->flags |= GCWQ_DISASSOCIATED;
3578
3579        INIT_LIST_HEAD(&gcwq->idle_list);
3580        for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3581            INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3582
3583        init_timer_deferrable(&gcwq->idle_timer);
3584        gcwq->idle_timer.function = idle_worker_timeout;
3585        gcwq->idle_timer.data = (unsigned long)gcwq;
3586
3587        setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3588                (unsigned long)gcwq);
3589
3590        ida_init(&gcwq->worker_ida);
3591
3592        gcwq->trustee_state = TRUSTEE_DONE;
3593        init_waitqueue_head(&gcwq->trustee_wait);
3594    }
3595
3596    /* create the initial worker */
3597    for_each_online_gcwq_cpu(cpu) {
3598        struct global_cwq *gcwq = get_gcwq(cpu);
3599        struct worker *worker;
3600
3601        if (cpu != WORK_CPU_UNBOUND)
3602            gcwq->flags &= ~GCWQ_DISASSOCIATED;
3603        worker = create_worker(gcwq, true);
3604        BUG_ON(!worker);
3605        spin_lock_irq(&gcwq->lock);
3606        start_worker(worker);
3607        spin_unlock_irq(&gcwq->lock);
3608    }
3609
3610    system_wq = alloc_workqueue("events", 0, 0);
3611    system_long_wq = alloc_workqueue("events_long", 0, 0);
3612    system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3613    system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3614                        WQ_UNBOUND_MAX_ACTIVE);
3615    BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq || !system_unbound_wq);
3616    return 0;
3617}
3618early_initcall(init_workqueues);
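/*
 * Illustrative sketch (not part of this file): using the system
 * workqueues created above.  my_work_fn is a hypothetical handler;
 * schedule_work() queues on system_wq, while work with no CPU
 * affinity requirement can go to system_unbound_wq instead.
 *
 *    static void my_work_fn(struct work_struct *work)
 *    {
 *        pr_info("my_work running in process context\n");
 *    }
 *    static DECLARE_WORK(my_work, my_work_fn);
 *
 *    schedule_work(&my_work);
 *    flush_work(&my_work);
 *
 *    queue_work(system_unbound_wq, &my_work);
 */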
3619
