kernel/irq_work.c

/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}          : free to be used
 * claimed   NULL, 3 -> {pending}          : claimed to be enqueued
 * pending   next, 3 -> {busy}             : queued, pending callback
 * busy      NULL, 2 -> {free, claimed}    : callback in progress, can be claimed
 *
 * We use the lower two bits of the next pointer to keep PENDING and BUSY
 * flags.
 */

#define IRQ_WORK_PENDING    1UL
#define IRQ_WORK_BUSY       2UL
#define IRQ_WORK_FLAGS      3UL

/* Test whether any of @flags are set in the entry's tagged next pointer. */
static inline bool irq_work_is_set(struct irq_work *entry, int flags)
{
    return (unsigned long)entry->next & flags;
}

/* Strip the flag bits to recover the true next pointer. */
static inline struct irq_work *irq_work_next(struct irq_work *entry)
{
    unsigned long next = (unsigned long)entry->next;
    next &= ~IRQ_WORK_FLAGS;
    return (struct irq_work *)next;
}

/* Pack @flags into the low bits of the pointer @entry. */
static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
{
    unsigned long next = (unsigned long)entry;
    next |= flags;
    return (struct irq_work *)next;
}
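
/*
 * Illustration (a sketch, not part of the original file): a pack and
 * unpack round-trip. The low two pointer bits are usable because a
 * struct irq_work is at least word aligned:
 *
 *    struct irq_work *p, *tagged;
 *
 *    tagged = next_flags(p, IRQ_WORK_FLAGS);
 *    // (unsigned long)tagged & IRQ_WORK_PENDING  is now non-zero
 *    // (unsigned long)tagged & IRQ_WORK_BUSY     is now non-zero
 *    // masking with ~IRQ_WORK_FLAGS, as irq_work_next() does on
 *    // ->next, recovers the original pointer p
 */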

static DEFINE_PER_CPU(struct irq_work *, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *entry)
{
    struct irq_work *next, *nflags;

    do {
        next = entry->next;
        /* Somebody else already queued it; let them handle it. */
        if ((unsigned long)next & IRQ_WORK_PENDING)
            return false;
        /* Set PENDING and BUSY on top of whatever next holds. */
        nflags = next_flags(next, IRQ_WORK_FLAGS);
    } while (cmpxchg(&entry->next, next, nflags) != next);

    return true;
}
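
/*
 * Illustration (a sketch, not part of the original file): per the state
 * table above, claiming succeeds from the free and busy states but
 * fails once PENDING is set:
 *
 *    entry->next == NULL                      // free: claim succeeds
 *    entry->next == (void *)IRQ_WORK_BUSY     // busy: claim succeeds,
 *                                             //   re-enqueue during callback
 *    entry->next == (void *)IRQ_WORK_FLAGS    // claimed/pending: claim fails
 */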


void __weak arch_irq_work_raise(void)
{
    /*
     * Lame architectures will get the timer tick callback
     */
}
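
/*
 * Example (a sketch, not part of this file): an architecture override
 * typically raises a self-IPI so the queued work runs from a real
 * hardirq as soon as possible. On x86 of this era it looks roughly
 * like the following (exact details vary by kernel version):
 *
 *    void arch_irq_work_raise(void)
 *    {
 *        if (cpu_has_apic)
 *            apic->send_IPI_self(IRQ_WORK_VECTOR);
 *    }
 */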

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *entry)
{
    struct irq_work *next;

    preempt_disable();

    do {
        next = __this_cpu_read(irq_work_list);
        /* Can assign non-atomically because we keep the flags set. */
        entry->next = next_flags(next, IRQ_WORK_FLAGS);
    } while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);

    /* The list was empty, raise self-interrupt to start processing. */
    if (!irq_work_next(entry))
        arch_irq_work_raise();

    preempt_enable();
}
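
/*
 * Illustration (a sketch, not part of the original file): the per-cpu
 * list is a lock-free LIFO stack. Pushing @b onto a list that already
 * holds @a:
 *
 *    irq_work_list -> a -> NULL
 *    b->next = next_flags(a, IRQ_WORK_FLAGS);   // b tagged, points at a
 *    this_cpu_cmpxchg(irq_work_list, a, b);     // now: b -> a -> NULL
 *
 * Only the push that finds the list empty raises the interrupt; later
 * pushes know one is already on its way.
 */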

/*
 * Enqueue the irq_work @entry; returns true on success, false when
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *entry)
{
    if (!irq_work_claim(entry)) {
        /*
         * Already enqueued, can't do!
         */
        return false;
    }

    __irq_work_queue(entry);
    return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
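
/*
 * Usage example (a sketch, not part of this file): irq_work exists to
 * defer work out of NMI context, where almost nothing else is safe to
 * call. The names below are hypothetical:
 *
 *    static void my_nmi_func(struct irq_work *work)
 *    {
 *        printk(KERN_INFO "deferred out of NMI context\n");
 *    }
 *
 *    static struct irq_work my_nmi_work = {
 *        .func = my_nmi_func,
 *    };
 *
 *    // from an NMI handler:
 *    irq_work_queue(&my_nmi_work);    // runs at the next hardirq/tick
 */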

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
    struct irq_work *list;

    if (this_cpu_read(irq_work_list) == NULL)
        return;

    BUG_ON(!in_irq());
    BUG_ON(!irqs_disabled());

    /* Atomically take the whole list; new entries can queue up meanwhile. */
    list = this_cpu_xchg(irq_work_list, NULL);

    while (list != NULL) {
        struct irq_work *entry = list;

        list = irq_work_next(list);

        /*
         * Clear the PENDING bit, after this point the @entry
         * can be re-used.
         */
        entry->next = next_flags(NULL, IRQ_WORK_BUSY);
        entry->func(entry);
        /*
         * Clear the BUSY bit and return to the free state if
         * no-one else claimed it meanwhile.
         */
        (void)cmpxchg(&entry->next,
                      next_flags(NULL, IRQ_WORK_BUSY),
                      NULL);
    }
}
EXPORT_SYMBOL_GPL(irq_work_run);
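
/*
 * Example (a sketch, not part of this file): architectures that raise
 * a self-interrupt call irq_work_run() from its handler. On x86 of
 * this era, roughly:
 *
 *    void smp_irq_work_interrupt(struct pt_regs *regs)
 *    {
 *        irq_enter();
 *        ack_APIC_irq();
 *        irq_work_run();
 *        irq_exit();
 *    }
 *
 * Architectures without arch_irq_work_raise() instead rely on
 * irq_work_run() being called from the timer tick.
 */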

/*
 * Synchronize against the irq_work @entry; ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *entry)
{
    WARN_ON_ONCE(irqs_disabled());

    while (irq_work_is_set(entry, IRQ_WORK_BUSY))
        cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
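
/*
 * Usage example (a sketch, not part of this file): wait for a possibly
 * running callback before freeing the object that embeds the irq_work.
 * The my_obj type is hypothetical:
 *
 *    struct my_obj {
 *        struct irq_work work;
 *        // ...
 *    };
 *
 *    void my_obj_free(struct my_obj *obj)
 *    {
 *        irq_work_sync(&obj->work);    // callback finished, entry idle
 *        kfree(obj);
 *    }
 */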