lib/rwsem-spinlock.c

/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>

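/*
 * sem->activity tracks ownership: it is the number of granted read locks
 * while readers hold the semaphore, -1 while a writer holds it and 0 when
 * it is unowned.
 *
 * One of these waiter blocks lives on the stack of each task blocked on
 * the semaphore and is queued on sem->wait_list.  The waker clears ->task
 * (behind a memory barrier) to hand the lock over; the sleeper re-checks
 * that field after each schedule() to see whether it has been granted.
 */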
struct rwsem_waiter {
    struct list_head list;
    struct task_struct *task;
    unsigned int flags;
#define RWSEM_WAITING_FOR_READ 0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};

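/*
 * If the wait_lock cannot be taken, somebody else is in the middle of
 * manipulating the semaphore, so it is conservatively reported as locked.
 */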
int rwsem_is_locked(struct rw_semaphore *sem)
{
    int ret = 1;
    unsigned long flags;

    if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
        ret = (sem->activity != 0);
        spin_unlock_irqrestore(&sem->wait_lock, flags);
    }
    return ret;
}
EXPORT_SYMBOL(rwsem_is_locked);

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
          struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
    /*
     * Make sure we are not reinitializing a held semaphore:
     */
    debug_check_no_locks_freed((void *)sem, sizeof(*sem));
    lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
    sem->activity = 0;
    spin_lock_init(&sem->wait_lock);
    INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
    struct rwsem_waiter *waiter;
    struct task_struct *tsk;
    int woken;

    waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

    if (!wakewrite) {
        if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
            goto out;
        goto dont_wake_writers;
    }

    /* if we are allowed to wake writers, try to grant a single write lock
     * if there's a writer at the front of the queue
     * - we leave the 'waiting count' incremented to signify potential
     *   contention
     */
    if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
        sem->activity = -1;
        list_del(&waiter->list);
        tsk = waiter->task;
        /* Don't touch waiter after ->task has been NULLed */
        smp_mb();
        waiter->task = NULL;
        wake_up_process(tsk);
        put_task_struct(tsk);
        goto out;
    }

    /* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
    woken = 0;
    while (waiter->flags & RWSEM_WAITING_FOR_READ) {
        struct list_head *next = waiter->list.next;

        list_del(&waiter->list);
        tsk = waiter->task;
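        /* as above, don't touch waiter once ->task has been NULLed */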
        smp_mb();
        waiter->task = NULL;
        wake_up_process(tsk);
        put_task_struct(tsk);
        woken++;
        if (list_empty(&sem->wait_list))
            break;
        waiter = list_entry(next, struct rwsem_waiter, list);
    }

    sem->activity += woken;

 out:
    return sem;
}

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
    struct rwsem_waiter *waiter;
    struct task_struct *tsk;

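    /* mark the semaphore as write-owned on the waiter's behalf before
     * waking it */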
    sem->activity = -1;

    waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
    list_del(&waiter->list);

    tsk = waiter->task;
    smp_mb();
    waiter->task = NULL;
    wake_up_process(tsk);
    put_task_struct(tsk);
    return sem;
}

/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
    struct rwsem_waiter waiter;
    struct task_struct *tsk;
    unsigned long flags;

    spin_lock_irqsave(&sem->wait_lock, flags);

    if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
        /* granted */
        sem->activity++;
        spin_unlock_irqrestore(&sem->wait_lock, flags);
        goto out;
    }

    tsk = current;
    set_task_state(tsk, TASK_UNINTERRUPTIBLE);

    /* set up my own style of waitqueue */
    waiter.task = tsk;
    waiter.flags = RWSEM_WAITING_FOR_READ;
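    /* take a reference on ourselves so the waker may safely call
     * wake_up_process() even after we've been granted the lock and
     * moved on */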
    get_task_struct(tsk);

    list_add_tail(&waiter.list, &sem->wait_list);

    /* we don't need to touch the semaphore struct anymore */
    spin_unlock_irqrestore(&sem->wait_lock, flags);

    /* wait to be given the lock */
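    /* (a wake-up arriving between the unlock above and schedule() below
     *  isn't lost: our state is already TASK_UNINTERRUPTIBLE, so the
     *  wake-up simply makes schedule() return straight away) */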
    for (;;) {
        if (!waiter.task)
            break;
        schedule();
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
    }

    tsk->state = TASK_RUNNING;
 out:
    ;
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
    unsigned long flags;
    int ret = 0;

    spin_lock_irqsave(&sem->wait_lock, flags);

    if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
        /* granted */
        sem->activity++;
        ret = 1;
    }

    spin_unlock_irqrestore(&sem->wait_lock, flags);

    return ret;
}

/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
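    /* the lock subclass is only of interest to lockdep; this generic
     * implementation makes no use of it */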
    struct rwsem_waiter waiter;
    struct task_struct *tsk;
    unsigned long flags;

    spin_lock_irqsave(&sem->wait_lock, flags);

    if (sem->activity == 0 && list_empty(&sem->wait_list)) {
        /* granted */
        sem->activity = -1;
        spin_unlock_irqrestore(&sem->wait_lock, flags);
        goto out;
    }

    tsk = current;
    set_task_state(tsk, TASK_UNINTERRUPTIBLE);

    /* set up my own style of waitqueue */
    waiter.task = tsk;
    waiter.flags = RWSEM_WAITING_FOR_WRITE;
    get_task_struct(tsk);

    list_add_tail(&waiter.list, &sem->wait_list);

    /* we don't need to touch the semaphore struct anymore */
    spin_unlock_irqrestore(&sem->wait_lock, flags);

    /* wait to be given the lock */
    for (;;) {
        if (!waiter.task)
            break;
        schedule();
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
    }

    tsk->state = TASK_RUNNING;
 out:
    ;
}

void __sched __down_write(struct rw_semaphore *sem)
{
    __down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
    unsigned long flags;
    int ret = 0;

    spin_lock_irqsave(&sem->wait_lock, flags);

    if (sem->activity == 0 && list_empty(&sem->wait_list)) {
        /* granted */
        sem->activity = -1;
        ret = 1;
    }

    spin_unlock_irqrestore(&sem->wait_lock, flags);

    return ret;
}

/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
    unsigned long flags;

    spin_lock_irqsave(&sem->wait_lock, flags);

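    /* when the last reader leaves and somebody is queued, the waiter at
     * the head can only be a writer: any readers ahead of it would have
     * been granted the lock in a batch when the last writer released */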
    if (--sem->activity == 0 && !list_empty(&sem->wait_list))
        sem = __rwsem_wake_one_writer(sem);

    spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
    unsigned long flags;

    spin_lock_irqsave(&sem->wait_lock, flags);

    sem->activity = 0;
    if (!list_empty(&sem->wait_list))
        sem = __rwsem_do_wake(sem, 1);

    spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
    unsigned long flags;

    spin_lock_irqsave(&sem->wait_lock, flags);

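    /* trade the write lock for a single read lock held by ourselves */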
    sem->activity = 1;
    if (!list_empty(&sem->wait_list))
        sem = __rwsem_do_wake(sem, 0);

    spin_unlock_irqrestore(&sem->wait_lock, flags);
}
