fs/pnode.c at commit cdde9cf73945d547acd3e96f9508c79e84ad0bf1

/*
 *  linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *      Released under GPL v2.
 *      Author : Ram Pai (linuxram@us.ibm.com)
 *
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
        return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

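/* return the first mount on @p's list of slave mounts */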
static inline struct mount *first_slave(struct mount *p)
{
        return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}

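/* return the mount that follows @p on its master's list of slaves */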
static inline struct mount *next_slave(struct mount *p)
{
        return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

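/*
 * walk the peer group of @mnt and return the first peer that belongs to
 * namespace @ns and whose root is reachable from @root, or NULL if the
 * group has no such member
 */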
static struct mount *get_peer_under_root(struct mount *mnt,
                                         struct mnt_namespace *ns,
                                         const struct path *root)
{
        struct mount *m = mnt;

        do {
                /* Check the namespace first for optimization */
                if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
                        return m;

                m = next_peer(m);
        } while (m != mnt);

        return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
        struct mount *m;

        for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
                struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
                if (d)
                        return d->mnt_group_id;
        }

        return 0;
}

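/*
 * turn @mnt into a slave: pick a new master (preferably a peer with the
 * same root dentry, otherwise any remaining peer, otherwise the old
 * master), drop @mnt out of its peer group and hand its own slaves over
 * to the new master; if no master is left, the slaves are set free
 */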
static int do_make_slave(struct mount *mnt)
{
        struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
        struct mount *slave_mnt;

        /*
         * slave 'mnt' to a peer mount that has the
         * same root dentry. If none is available then
         * slave it to anything that is available.
         */
        while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
               peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;

        if (peer_mnt == mnt) {
                peer_mnt = next_peer(mnt);
                if (peer_mnt == mnt)
                        peer_mnt = NULL;
        }
        if (IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share))
                mnt_release_group_id(mnt);

        list_del_init(&mnt->mnt_share);
        mnt->mnt_group_id = 0;

        if (peer_mnt)
                master = peer_mnt;

        if (master) {
                list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
                        slave_mnt->mnt_master = master;
                list_move(&mnt->mnt_slave, &master->mnt_slave_list);
                list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
                INIT_LIST_HEAD(&mnt->mnt_slave_list);
        } else {
                struct list_head *p = &mnt->mnt_slave_list;
                while (!list_empty(p)) {
                        slave_mnt = list_first_entry(p,
                                        struct mount, mnt_slave);
                        list_del_init(&slave_mnt->mnt_slave);
                        slave_mnt->mnt_master = NULL;
                }
        }
        mnt->mnt_master = master;
        CLEAR_MNT_SHARED(mnt);
        return 0;
}

/*
 * vfsmount lock must be held for write
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
        if (type == MS_SHARED) {
                set_mnt_shared(mnt);
                return;
        }
        do_make_slave(mnt);
        if (type != MS_SLAVE) {
                list_del_init(&mnt->mnt_slave);
                mnt->mnt_master = NULL;
                if (type == MS_UNBINDABLE)
                        mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
                else
                        mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
        }
}

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in get_source() to be able to find out if
 * vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
static struct mount *propagation_next(struct mount *m,
                                      struct mount *origin)
{
        /* are there any slaves of this mount? */
        if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
                return first_slave(m);

        while (1) {
                struct mount *master = m->mnt_master;

                if (master == origin->mnt_master) {
                        struct mount *next = next_peer(m);
                        return (next == origin) ? NULL : next;
                } else if (m->mnt_slave.next != &master->mnt_slave_list)
                        return next_slave(m);

                /* back at master */
                m = master;
        }
}

/*
 * return the source mount to be used for cloning
 *
 * @dest       the current destination mount
 * @last_dest  the last seen destination mount
 * @last_src   the last seen source mount
 * @type       return CL_SLAVE if the new mount has to be
 *             cloned as a slave.
 */
static struct mount *get_source(struct mount *dest,
                                struct mount *last_dest,
                                struct mount *last_src,
                                int *type)
{
        struct mount *p_last_src = NULL;
        struct mount *p_last_dest = NULL;

        while (last_dest != dest->mnt_master) {
                p_last_dest = last_dest;
                p_last_src = last_src;
                last_dest = last_dest->mnt_master;
                last_src = last_src->mnt_master;
        }

        if (p_last_dest) {
                do {
                        p_last_dest = next_peer(p_last_dest);
                } while (IS_MNT_NEW(p_last_dest));
                /* is that a peer of the earlier? */
                if (dest == p_last_dest) {
                        *type = CL_MAKE_SHARED;
                        return p_last_src;
                }
        }
        /* slave of the earlier, then */
        *type = CL_SLAVE;
        /* beginning of peer group among the slaves? */
        if (IS_MNT_SHARED(dest))
                *type |= CL_MAKE_SHARED;
        return last_src;
}

/*
 * mount 'source_mnt' under the destination 'dest_mnt' at
 * dentry 'dest_dentry'. And propagate that mount to
 * all the peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_dentry: destination dentry.
 * @source_mnt: source mount.
 * @tree_list : list of heads of trees to be attached.
 */
int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
                  struct mount *source_mnt, struct list_head *tree_list)
{
        struct mount *m, *child;
        int ret = 0;
        struct mount *prev_dest_mnt = dest_mnt;
        struct mount *prev_src_mnt = source_mnt;
        LIST_HEAD(tmp_list);
        LIST_HEAD(umount_list);

        for (m = propagation_next(dest_mnt, dest_mnt); m;
                        m = propagation_next(m, dest_mnt)) {
                int type;
                struct mount *source;

                if (IS_MNT_NEW(m))
                        continue;

                source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);

                child = copy_tree(source, source->mnt.mnt_root, type);
                if (IS_ERR(child)) {
                        ret = PTR_ERR(child);
                        list_splice(tree_list, tmp_list.prev);
                        goto out;
                }

                if (is_subdir(dest_dentry, m->mnt.mnt_root)) {
                        mnt_set_mountpoint(m, dest_dentry, child);
                        list_add_tail(&child->mnt_hash, tree_list);
                } else {
                        /*
                         * This can happen if the parent mount was bind mounted
                         * on some subdirectory of a shared/slave mount.
                         */
                        list_add_tail(&child->mnt_hash, &tmp_list);
                }
                prev_dest_mnt = m;
                prev_src_mnt = child;
        }
out:
        br_write_lock(&vfsmount_lock);
        while (!list_empty(&tmp_list)) {
                child = list_first_entry(&tmp_list, struct mount, mnt_hash);
                umount_tree(child, 0, &umount_list);
        }
        br_write_unlock(&vfsmount_lock);
        release_mounts(&umount_list);
        return ret;
}

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
        int mycount = mnt_get_count(mnt) - mnt->mnt_ghosts;
        return (mycount > count);
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
        struct mount *m, *child;
        struct mount *parent = mnt->mnt_parent;
        int ret = 0;

        if (mnt == parent)
                return do_refcount_check(mnt, refcnt);

        /*
         * quickly check if the current mount can be unmounted.
         * If not, we don't have to go checking for all other
         * mounts
         */
        if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
                return 1;

        for (m = propagation_next(parent, parent); m;
                        m = propagation_next(m, parent)) {
                child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint, 0);
                if (child && list_empty(&child->mnt_mounts) &&
                    (ret = do_refcount_check(child, 1)))
                        break;
        }
        return ret;
}

/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
static void __propagate_umount(struct mount *mnt)
{
        struct mount *parent = mnt->mnt_parent;
        struct mount *m;

        BUG_ON(parent == mnt);

        for (m = propagation_next(parent, parent); m;
                        m = propagation_next(m, parent)) {

                struct mount *child = __lookup_mnt(&m->mnt,
                                                mnt->mnt_mountpoint, 0);
                /*
                 * umount the child only if the child has no
                 * other children
                 */
                if (child && list_empty(&child->mnt_mounts))
                        list_move_tail(&child->mnt_hash, &mnt->mnt_hash);
        }
}

/*
 * collect all mounts that receive propagation from the mount in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct list_head *list)
{
        struct mount *mnt;

        list_for_each_entry(mnt, list, mnt_hash)
                __propagate_umount(mnt);
        return 0;
}
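The propagation types handled above are set from userspace through mount(2). The sketch below is illustrative only and is not part of pnode.c; it assumes a glibc whose <sys/mount.h> defines MS_SHARED, MS_SLAVE and MS_REC, a target path ("/mnt" here) that is already a mount point, and a caller with CAP_SYS_ADMIN.

/* illustrative userspace sketch, not kernel code */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /*
         * Mark /mnt as shared; inside the kernel this reaches
         * change_mnt_propagation(mnt, MS_SHARED).
         */
        if (mount(NULL, "/mnt", NULL, MS_SHARED, NULL) == -1)
                perror("make-shared");

        /*
         * Turn /mnt (and, with MS_REC, everything mounted below it)
         * into a slave of its former peer group; the per-mount work
         * is what do_make_slave() above performs.
         */
        if (mount(NULL, "/mnt", NULL, MS_SLAVE | MS_REC, NULL) == -1)
                perror("make-slave");

        return 0;
}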