net/unix/garbage.c

/*
 * NET3: Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *    Copyright (C) Barak A. Pearlmutter.
 *    Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket
 * problem. If it doesn't work, blame me; it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree;
 *    the stack is just a path from the root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push the entire root set; process in place
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *    Alan Cox          07 Sept 1997
 *        Vmalloc internal stack as needed. Cope with changing
 *        max_files.
 *    Al Viro           11 Oct 1998
 *        Graph may have cycles. That is, we can send the descriptor
 *        of foo to bar and vice versa. Current code chokes on that.
 *        Fix: move the SCM_RIGHTS ones into a separate list and then
 *        skb_free() them all instead of doing explicit fput's.
 *        Another problem: since fput() may block, somebody may
 *        create a new unix_socket while we are in the middle of the
 *        sweep phase. Fix: invert the logic wrt MARKED. Mark
 *        everything at the beginning and unmark the non-junk ones.
 *
 *        [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *        sent to connect()'ed but still not accept()'ed sockets.
 *        Fixed. Old code had a slightly different problem here:
 *        an extra fput() when we passed the descriptor via such a
 *        socket and then closed it (the descriptor). That would
 *        happen on each unix_gc() until the accept(). Since the
 *        struct file in question would go to the free list and might
 *        be reused... that might be the reason for the random oopses
 *        in filp_close() in unrelated processes.
 *
 *    AV                28 Feb 1999
 *        Kill the explicit allocation of the stack. Now we keep the
 *        tree with its root in a dummy node, plus a pointer
 *        (gc_current) to one of the nodes. The stack is represented
 *        as the path from gc_current to the dummy. Unmark now means
 *        "add to tree"; push == "make it a son of gc_current";
 *        pop == "move gc_current to its parent". We keep only the
 *        pointers to parents (->gc_tree).
 *    AV                1 Mar 1999
 *        Damn. Added missing check for ->dead in listen queues
 *        scanning.
 *
 *    Miklos Szeredi    25 Jun 2007
 *        Reimplement with a cycle collecting algorithm. This should
 *        solve several problems with the previous code, like being
 *        racy wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

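/*
 * Total number of AF_UNIX socket files currently sitting in
 * SCM_RIGHTS messages, across all sockets; maintained under
 * unix_gc_lock by unix_inflight()/unix_notinflight() below.
 */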
unsigned int unix_tot_inflight;

static struct sock *unix_get_socket(struct file *filp)
{
    struct sock *u_sock = NULL;
    struct inode *inode = filp->f_path.dentry->d_inode;

    /*
     * Is this a socket inode at all?
     */
    if (S_ISSOCK(inode->i_mode)) {
        struct socket *sock = SOCKET_I(inode);
        struct sock *s = sock->sk;

        /*
         * Is it a PF_UNIX socket?
         */
        if (s && sock->ops && sock->ops->family == PF_UNIX)
            u_sock = s;
    }
    return u_sock;
}
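
/*
 * Illustrative user-space sketch (not kernel code): garbage cycles
 * that only this collector can reclaim are built by sending sockets
 * over themselves, e.g.:
 *
 *    int sv[2];
 *    socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *    send_fd(sv[0], sv[0]);    // queued on sv[1]'s receive queue
 *    send_fd(sv[1], sv[1]);    // queued on sv[0]'s receive queue
 *    close(sv[0]);
 *    close(sv[1]);
 *
 * (send_fd() is a stand-in for the usual sendmsg()/SCM_RIGHTS
 * boilerplate.) After the close() calls, each file is referenced only
 * by the skb queued on the other socket, so for both sockets
 * file_count() equals the inflight count: they are unreachable from
 * user space, yet never freed by plain reference counting.
 */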

/*
 * Keep a count of how many times a file is in flight, provided it
 * refers to an AF_UNIX socket.
 */

void unix_inflight(struct file *fp)
{
    struct sock *s = unix_get_socket(fp);
    if (s) {
        struct unix_sock *u = unix_sk(s);
        spin_lock(&unix_gc_lock);
        if (atomic_long_inc_return(&u->inflight) == 1) {
            BUG_ON(!list_empty(&u->link));
            list_add_tail(&u->link, &gc_inflight_list);
        } else {
            BUG_ON(list_empty(&u->link));
        }
        unix_tot_inflight++;
        spin_unlock(&unix_gc_lock);
    }
}

void unix_notinflight(struct file *fp)
{
    struct sock *s = unix_get_socket(fp);
    if (s) {
        struct unix_sock *u = unix_sk(s);
        spin_lock(&unix_gc_lock);
        BUG_ON(list_empty(&u->link));
        if (atomic_long_dec_and_test(&u->inflight))
            list_del_init(&u->link);
        unix_tot_inflight--;
        spin_unlock(&unix_gc_lock);
    }
}
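
/*
 * Invariant maintained by the two functions above: a unix_sock is on
 * gc_inflight_list exactly when its inflight count is non-zero, i.e.
 * at least one copy of its file is queued in some SCM_RIGHTS message.
 */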

static inline struct sk_buff *sock_queue_head(struct sock *sk)
{
    return (struct sk_buff *)&sk->sk_receive_queue;
}

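/*
 * The receive queue head is cast to an sk_buff so that it can act as
 * the sentinel node of the circular list. The iterator below caches
 * ->next before the body runs, so the current skb may safely be
 * unlinked while walking the queue.
 */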
#define receive_queue_for_each_skb(sk, next, skb) \
    for (skb = sock_queue_head(sk)->next, next = skb->next; \
         skb != sock_queue_head(sk); skb = next, next = skb->next)

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
              struct sk_buff_head *hitlist)
{
    struct sk_buff *skb;
    struct sk_buff *next;

    spin_lock(&x->sk_receive_queue.lock);
    receive_queue_for_each_skb(x, next, skb) {
        /*
         * Does this skb carry file descriptors?
         */
        if (UNIXCB(skb).fp) {
            bool hit = false;
            /*
             * Process the descriptors attached to this skb
             */
            int nfd = UNIXCB(skb).fp->count;
            struct file **fp = UNIXCB(skb).fp->fp;
            while (nfd--) {
                /*
                 * Get the socket this descriptor refers
                 * to, if it is an AF_UNIX socket at all
                 */
                struct sock *sk = unix_get_socket(*fp++);
                if (sk) {
                    struct unix_sock *u = unix_sk(sk);

                    /*
                     * Ignore non-candidates: they could
                     * have been added to the queues after
                     * the garbage collection started
                     */
                    if (u->gc_candidate) {
                        hit = true;
                        func(u);
                    }
                }
            }
            if (hit && hitlist != NULL) {
                __skb_unlink(skb, &x->sk_receive_queue);
                __skb_queue_tail(hitlist, skb);
            }
        }
    }
    spin_unlock(&x->sk_receive_queue.lock);
}
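
/*
 * scan_children() applies func to every candidate reachable in one
 * hop from x: for a connected socket that is everything referenced by
 * SCM_RIGHTS skbs on its receive queue; for a listening socket the
 * queue holds embryo connections instead, so their queues are
 * scanned in turn.
 */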

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
              struct sk_buff_head *hitlist)
{
    if (x->sk_state != TCP_LISTEN)
        scan_inflight(x, func, hitlist);
    else {
        struct sk_buff *skb;
        struct sk_buff *next;
        struct unix_sock *u;
        LIST_HEAD(embryos);

        /*
         * For a listening socket collect the queued embryos
         * and perform a scan on them as well.
         */
        spin_lock(&x->sk_receive_queue.lock);
        receive_queue_for_each_skb(x, next, skb) {
            u = unix_sk(skb->sk);

            /*
             * An embryo cannot be in-flight, so it's safe
             * to use the list link.
             */
            BUG_ON(!list_empty(&u->link));
            list_add_tail(&u->link, &embryos);
        }
        spin_unlock(&x->sk_receive_queue.lock);

        while (!list_empty(&embryos)) {
            u = list_entry(embryos.next, struct unix_sock, link);
            scan_inflight(&u->sk, func, hitlist);
            list_del_init(&u->link);
        }
    }
}
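
/*
 * Callbacks passed to scan_children() by unix_gc() below.
 * dec_inflight() subtracts the in-queue references during the first
 * pass; the inc variants add them back, either because a socket
 * turned out to be reachable, or at the end to restore the counters
 * of true garbage before the hitlist is purged.
 */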

static void dec_inflight(struct unix_sock *usk)
{
    atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
    atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
    atomic_long_inc(&u->inflight);
    /*
     * If this still might be part of a cycle, move it to the end
     * of the list, so that it's checked even if it was already
     * passed over.
     */
    if (u->gc_maybe_cycle)
        list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress = false;

void wait_for_unix_gc(void)
{
    wait_event(unix_gc_wait, gc_in_progress == false);
}
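
/*
 * wait_for_unix_gc() lets callers block until a collection in
 * progress has finished. Presumably the af_unix sendmsg paths use it
 * so that senders do not keep growing the in-flight graph while the
 * collector is running; that caller lives outside this file, though.
 */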

/* The external entry point: unix_gc() */
void unix_gc(void)
{
    struct unix_sock *u;
    struct unix_sock *next;
    struct sk_buff_head hitlist;
    struct list_head cursor;
    LIST_HEAD(not_cycle_list);

    spin_lock(&unix_gc_lock);

    /* Avoid a recursive GC. */
    if (gc_in_progress)
        goto out;

    gc_in_progress = true;
    /*
     * First, select candidates for garbage collection. Only
     * in-flight sockets are considered, and of those only the
     * ones which don't have any external reference.
     *
     * Holding unix_gc_lock will protect these candidates from
     * being detached, and hence from gaining an external
     * reference. Since there are no possible receivers, all
     * buffers currently on the candidates' queues stay there
     * during the garbage collection.
     *
     * We also know that no new candidate can be added onto the
     * receive queues. Other, non-candidate sockets _can_ be
     * added to these queues, so we must make sure we only touch
     * candidates.
     */
    list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
        long total_refs;
        long inflight_refs;

        total_refs = file_count(u->sk.sk_socket->file);
        inflight_refs = atomic_long_read(&u->inflight);

        BUG_ON(inflight_refs < 1);
        BUG_ON(total_refs < inflight_refs);
        if (total_refs == inflight_refs) {
            list_move_tail(&u->link, &gc_candidates);
            u->gc_candidate = 1;
            u->gc_maybe_cycle = 1;
        }
    }
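
    /*
     * Worked example for the test above: a socket whose file has
     * file_count() == 2, because one descriptor is open in a process
     * and one copy sits in an SCM_RIGHTS queue, has inflight == 1 <
     * total == 2 and is skipped. Once the process closes its
     * descriptor, only the in-flight reference remains, the counts
     * match, and the socket becomes a candidate.
     */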

    /*
     * Now remove all internal in-flight references to children of
     * the candidates.
     */
    list_for_each_entry(u, &gc_candidates, link)
        scan_children(&u->sk, dec_inflight, NULL);

    /*
     * Restore the references for those children of the candidates
     * which still have remaining references. Do this recursively,
     * so that only the ones forming cyclic references remain.
     *
     * Use a "cursor" link to make the list traversal safe, even
     * though elements might be moved about.
     */
    list_add(&cursor, &gc_candidates);
    while (cursor.next != &gc_candidates) {
        u = list_entry(cursor.next, struct unix_sock, link);

        /* Move cursor to after the current position. */
        list_move(&cursor, &u->link);

        if (atomic_long_read(&u->inflight) > 0) {
            list_move_tail(&u->link, &not_cycle_list);
            u->gc_maybe_cycle = 0;
            scan_children(&u->sk, inc_inflight_move_tail, NULL);
        }
    }
    list_del(&cursor);
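
    /*
     * Every socket still on gc_candidates at this point has all of
     * its remaining references coming from other candidates' queues:
     * the candidates form cycles reachable only from in-flight
     * messages, i.e. garbage.
     */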

    /*
     * not_cycle_list contains those sockets which do not make up a
     * cycle. Restore these to the inflight list.
     */
    while (!list_empty(&not_cycle_list)) {
        u = list_entry(not_cycle_list.next, struct unix_sock, link);
        u->gc_candidate = 0;
        list_move_tail(&u->link, &gc_inflight_list);
    }

    /*
     * Now gc_candidates contains only garbage. Restore the original
     * inflight counters for these as well, and remove the skbuffs
     * which are creating the cycle(s).
     */
    skb_queue_head_init(&hitlist);
    list_for_each_entry(u, &gc_candidates, link)
        scan_children(&u->sk, inc_inflight, &hitlist);

    spin_unlock(&unix_gc_lock);

    /* Here we are. Hitlist is filled. Die. */
    __skb_queue_purge(&hitlist);
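
    /*
     * Purging the hitlist is what actually releases the garbage: each
     * skb's destructor drops the attached files, calling
     * unix_notinflight() and fput() on them, which in turn detaches
     * and destroys the candidate sockets. Note that this runs outside
     * unix_gc_lock, since fput() may block.
     */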

    spin_lock(&unix_gc_lock);

    /* All candidates should have been detached by now. */
    BUG_ON(!list_empty(&gc_candidates));
    gc_in_progress = false;
    wake_up(&unix_gc_wait);

 out:
    spin_unlock(&unix_gc_lock);
}
