#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>

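/*
 * Default no-op unplug callback, for backing devices that have no
 * device-specific way to kick off queued I/O.
 */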
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

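/*
 * The fallback backing_dev_info. Its writeback thread doubles as the
 * forker task that creates per-bdi flusher threads on demand (see
 * bdi_forker_task() and the bdi_cap_flush_forker() check in
 * bdi_register()).
 */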
struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);
static void arm_supers_timer(void);

static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
	struct inode *inode;

	/*
	 * inode lock is enough here, the bdi->wb_list is protected by
	 * RCU on the reader side
	 */
	nr_wb = nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&inode_lock);
	list_for_each_entry(wb, &bdi->wb_list, list) {
		nr_wb++;
		list_for_each_entry(inode, &wb->b_dirty, i_list)
			nr_dirty++;
		list_for_each_entry(inode, &wb->b_io, i_list)
			nr_io++;
		list_for_each_entry(inode, &wb->b_more_io, i_list)
			nr_more_io++;
	}
	spin_unlock(&inode_lock);

	get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:     %8lu kB\n"
		   "BdiReclaimable:   %8lu kB\n"
		   "BdiDirtyThresh:   %8lu kB\n"
		   "DirtyThresh:      %8lu kB\n"
		   "BackgroundThresh: %8lu kB\n"
		   "WritebackThreads: %8lu\n"
		   "b_dirty:          %8lu\n"
		   "b_io:             %8lu\n"
		   "b_more_io:        %8lu\n"
		   "bdi_list:         %8u\n"
		   "state:            %8lx\n"
		   "wb_mask:          %8lx\n"
		   "wb_list:          %8u\n"
		   "wb_cnt:           %8u\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh), K(dirty_thresh),
		   K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
		   !list_empty(&bdi->wb_list), bdi->wb_cnt);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

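/*
 * sysfs store handler for the per-bdi read_ahead_kb attribute
 * (/sys/class/bdi/<name>/read_ahead_kb). Accepts a value in kilobytes
 * and converts it to pages for bdi->ra_pages; anything other than a
 * plain decimal number (with optional trailing newline) is rejected.
 */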
static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

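/*
 * BDI_SHOW(name, expr) generates the matching sysfs show method: it
 * expands to name##_show(), which prints the given expression for the
 * bdi behind the device. BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
 * below is the read side of read_ahead_kb_store() above.
 */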
#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
	BUG_ON(IS_ERR(sync_supers_tsk));

	init_timer(&sync_supers_timer);
	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
	arm_supers_timer();

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");

	return err;
}
subsys_initcall(default_bdi_init);

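/*
 * Initialize a writeback context: zero it, tie it to its bdi, and set up
 * the three dirty inode lists (dirty, under I/O, and "more I/O" retry).
 */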
static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
}

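/*
 * Common setup for a writeback thread: link the wb onto the bdi's
 * RCU-protected wb_list and mark the current task as a freezable
 * flusher running at normal priority.
 */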
static void bdi_task_init(struct backing_dev_info *bdi,
			  struct bdi_writeback *wb)
{
	struct task_struct *tsk = current;

	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&wb->list, &bdi->wb_list);
	spin_unlock(&bdi->wb_lock);

	tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(tsk, 0);
}

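/*
 * Main entry point for a per-bdi flusher thread, started by
 * bdi_forker_task(). Puts the bdi on the active list, clears BDI_pending
 * to let waiters proceed, then runs bdi_writeback_task() until the
 * thread is stopped.
 */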
static int bdi_start_fn(void *ptr)
{
	struct bdi_writeback *wb = ptr;
	struct backing_dev_info *bdi = wb->bdi;
	int ret;

	/*
	 * Add us to the active bdi_list
	 */
	spin_lock_bh(&bdi_lock);
	list_add_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	bdi_task_init(bdi, wb);

	/*
	 * Clear pending bit and wakeup anybody waiting to tear us down
	 */
	clear_bit(BDI_pending, &bdi->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bdi->state, BDI_pending);

	ret = bdi_writeback_task(wb);

	/*
	 * Remove us from the list
	 */
	spin_lock(&bdi->wb_lock);
	list_del_rcu(&wb->list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * Flush any work that raced with us exiting. No new work
	 * will be added, since this bdi isn't discoverable anymore.
	 */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	wb->task = NULL;
	return ret;
}

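/*
 * Returns non-zero if any of the bdi's writeback lists contain inodes
 * waiting for writeout.
 */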
int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}

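/*
 * Emergency writeout path: push out up to 1024 pages for this bdi
 * directly from the caller's context. Used by the forker thread when
 * flusher thread creation fails, to free some memory so a retry can
 * succeed.
 */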
static void bdi_flush_io(struct backing_dev_info *bdi)
{
	struct writeback_control wbc = {
		.bdi			= bdi,
		.sync_mode		= WB_SYNC_NONE,
		.older_than_this	= NULL,
		.range_cyclic		= 1,
		.nr_to_write		= 1024,
	};

	writeback_inodes_wbc(&wbc);
}

/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_task()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback tasks individually.
 */
static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/*
		 * Do this periodically, like kupdated() did before.
		 */
		sync_supers();
	}

	return 0;
}

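/*
 * (Re)arm the superblock sync timer. dirty_writeback_interval is kept in
 * centiseconds (the dirty_writeback_centisecs sysctl), so multiplying by
 * 10 yields milliseconds; round_jiffies_up() rounds the expiry up so the
 * wakeup can batch with other timers.
 */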
static void arm_supers_timer(void)
{
	unsigned long next;

	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
	mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
	wake_up_process(sync_supers_tsk);
	arm_supers_timer();
}

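/*
 * The forker thread, run on behalf of default_backing_dev_info. It
 * periodically scans bdi_list for devices with dirty data but no flusher
 * thread, moves them to bdi_pending_list via
 * bdi_add_default_flusher_task(), and then creates a "flush-<name>"
 * kthread for each pending bdi.
 */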
static int bdi_forker_task(void *ptr)
{
	struct bdi_writeback *me = ptr;

	bdi_task_init(me->bdi, me);

	for (;;) {
		struct backing_dev_info *bdi, *tmp;
		struct bdi_writeback *wb;

		/*
		 * Temporary measure, we want to make sure we don't see
		 * dirty data on the default backing_dev_info
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
			wb_do_writeback(me, 0);

		spin_lock_bh(&bdi_lock);

		/*
		 * Check if any existing bdi's have dirty data without
		 * a thread registered. If so, set that up.
		 */
		list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
			if (bdi->wb.task)
				continue;
			if (list_empty(&bdi->work_list) &&
			    !bdi_has_dirty_io(bdi))
				continue;

			bdi_add_default_flusher_task(bdi);
		}

		set_current_state(TASK_INTERRUPTIBLE);

		if (list_empty(&bdi_pending_list)) {
			unsigned long wait;

			spin_unlock_bh(&bdi_lock);
			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
			schedule_timeout(wait);
			try_to_freeze();
			continue;
		}

		__set_current_state(TASK_RUNNING);

		/*
		 * This is our real job - check for pending entries in
		 * bdi_pending_list, and create the tasks that got added
		 */
		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
				 bdi_list);
		list_del_init(&bdi->bdi_list);
		spin_unlock_bh(&bdi_lock);

		wb = &bdi->wb;
		wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
				       dev_name(bdi->dev));
		/*
		 * If task creation fails, then re-add the bdi to
		 * the pending list and force writeout of the bdi
		 * from this forker thread. That will free some memory
		 * and we can try again.
		 */
		if (IS_ERR(wb->task)) {
			wb->task = NULL;

			/*
			 * Add this 'bdi' to the back, so we get
			 * a chance to flush other bdi's to free
			 * memory.
			 */
			spin_lock_bh(&bdi_lock);
			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
			spin_unlock_bh(&bdi_lock);

			bdi_flush_io(bdi);
		}
	}

	return 0;
}

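/*
 * RCU callback: by the time this runs, a grace period has elapsed since
 * the bdi was removed from bdi_list, so it is safe to reuse bdi_list as
 * the link into bdi_pending_list and wake the forker thread.
 */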
static void bdi_add_to_pending(struct rcu_head *head)
{
	struct backing_dev_info *bdi;

	bdi = container_of(head, struct backing_dev_info, rcu_head);
	INIT_LIST_HEAD(&bdi->bdi_list);

	spin_lock(&bdi_lock);
	list_add_tail(&bdi->bdi_list, &bdi_pending_list);
	spin_unlock(&bdi_lock);

	/*
	 * We are now on the pending list, wake up bdi_forker_task()
	 * to finish the job and add us back to the active bdi_list
	 */
	wake_up_process(default_backing_dev_info.wb.task);
}

/*
 * Add the default flusher task that gets created for any bdi
 * that has dirty data pending writeout
 */
static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
		printk(KERN_ERR "bdi %p/%s is not registered!\n",
		       bdi, bdi->name);
		return;
	}

	/*
	 * Check with the helper whether to proceed adding a task. It will
	 * only abort if two or more simultaneous calls to
	 * bdi_add_default_flusher_task() occurred; further additions will
	 * block waiting for previous additions to finish.
	 */
	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
		list_del_rcu(&bdi->bdi_list);

		/*
		 * We must wait for the current RCU period to end before
		 * moving to the pending list. So schedule that operation
		 * from an RCU callback.
		 */
		call_rcu(&bdi->rcu_head, bdi_add_to_pending);
	}
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu();
}

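/**
 * bdi_register - add a bdi to the bdi class and the global bdi_list
 * @bdi: the backing_dev_info to register
 * @parent: parent device, or NULL
 * @fmt: printf-style format for the device name
 *
 * A caller would typically register once per device queue; a
 * hypothetical driver might do:
 *
 *	err = bdi_register(&mydev->bdi, NULL, "%s", mydev->name);
 *	if (err)
 *		goto out_free;
 *
 * Returns 0 on success, or a negative errno on failure (including
 * -ENOMEM if the forker thread cannot be started for the default bdi).
 */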
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	int ret = 0;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		goto exit;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto exit;
	}

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdi's to the list. They will get a thread created
	 * on-demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
				       dev_name(dev));
		if (IS_ERR(wb->task)) {
			wb->task = NULL;
			ret = -ENOMEM;

			bdi_remove_from_list(bdi);
			goto exit;
		}
	}

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);
exit:
	return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;

	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * If setup is pending, wait for that to complete first
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * Finally, kill the kernel threads. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility. Force
	 * unfreeze of the thread before calling kthread_stop(), otherwise
	 * it would never exit if it is currently stuck in the refrigerator.
	 */
	list_for_each_entry(wb, &bdi->wb_list, list) {
		thaw_process(wb->task);
		kthread_stop(wb->task);
	}
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = NULL;
	}
	spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	if (bdi->dev) {
		bdi_prune_sb(bdi);

		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);

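/*
 * Initialize a backing_dev_info before first use: ratios, locks, list
 * heads, the embedded writeback context, and the per-cpu statistics
 * counters. A minimal life-cycle sketch, with 'mydev' as a hypothetical
 * embedding structure:
 *
 *	err = bdi_init(&mydev->bdi);
 *	if (!err)
 *		err = bdi_register(&mydev->bdi, NULL, "mydev");
 *	...
 *	bdi_destroy(&mydev->bdi);
 *
 * bdi_destroy() unregisters the device and releases the counters again.
 */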
int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = PROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_RCU_HEAD(&bdi->rcu_head);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	/*
	 * Just one thread support for now, hard code mask and count
	 */
	bdi->wb_mask = 1;
	bdi->wb_cnt = 1;

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;
	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		spin_lock(&inode_lock);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&inode_lock);
	}

	bdi_unregister(bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

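/*
 * Wait queues for tasks throttled on congested backing devices, indexed
 * by the 'sync' flag: [0] for async write congestion, [1] for sync.
 */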
static wait_queue_head_t congestion_wqh[2] = {
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	clear_bit(bit, &bdi->state);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);
	return ret;
}
EXPORT_SYMBOL(congestion_wait);