Root/
1 | #ifndef BLK_IOPOLL_H |
2 | #define BLK_IOPOLL_H |
3 | |
struct blk_iopoll;
/* Driver poll callback: (iop, budget) -> number of completions processed — TODO confirm return contract against callers */
typedef int (blk_iopoll_fn)(struct blk_iopoll *, int);
6 | |
/*
 * Per-device I/O polling context. Layout and field order are part of the
 * kernel ABI for this interface — do not reorder.
 */
struct blk_iopoll {
	struct list_head list;	/* list linkage; presumably a per-CPU poll list — verify in blk_iopoll_sched() */
	unsigned long state;	/* IOPOLL_F_* bit flags, manipulated with atomic bitops */
	unsigned long data;	/* opaque cookie for the driver's use — TODO confirm against users */
	int weight;		/* NOTE(review): looks like the poll budget per invocation — confirm */
	int max;		/* assumed upper bound on work per poll — verify against implementation */
	blk_iopoll_fn *poll;	/* driver callback invoked to reap completions */
};
15 | |
/* Bit positions within blk_iopoll.state. */
enum {
	IOPOLL_F_SCHED = 0,	/* set while the iopoll is claimed/scheduled for polling */
	IOPOLL_F_DISABLE = 1,	/* set while disabled; blk_iopoll_sched_prep() refuses new scheduling */
};
20 | |
21 | /* |
22 | * Returns 0 if we successfully set the IOPOLL_F_SCHED bit, indicating |
23 | * that we were the first to acquire this iop for scheduling. If this iop |
24 | * is currently disabled, return "failure". |
25 | */ |
26 | static inline int blk_iopoll_sched_prep(struct blk_iopoll *iop) |
27 | { |
28 | if (!test_bit(IOPOLL_F_DISABLE, &iop->state)) |
29 | return test_and_set_bit(IOPOLL_F_SCHED, &iop->state); |
30 | |
31 | return 1; |
32 | } |
33 | |
34 | static inline int blk_iopoll_disable_pending(struct blk_iopoll *iop) |
35 | { |
36 | return test_bit(IOPOLL_F_DISABLE, &iop->state); |
37 | } |
38 | |
/* Schedule @iop for polling; call only after blk_iopoll_sched_prep() returned 0 — TODO confirm */
extern void blk_iopoll_sched(struct blk_iopoll *);
/* Initialize @iop with a poll weight and driver callback before first use. */
extern void blk_iopoll_init(struct blk_iopoll *, int, blk_iopoll_fn *);
/* Complete a poll cycle; presumably clears IOPOLL_F_SCHED — verify in implementation. */
extern void blk_iopoll_complete(struct blk_iopoll *);
/* Internal completion variant; NOTE(review): likely assumes caller context (e.g. irqs off) — confirm. */
extern void __blk_iopoll_complete(struct blk_iopoll *);
/* Re-enable a previously disabled iopoll. */
extern void blk_iopoll_enable(struct blk_iopoll *);
/* Disable @iop; pairs with IOPOLL_F_DISABLE checked by the inline helpers above. */
extern void blk_iopoll_disable(struct blk_iopoll *);

/* Global switch for the iopoll mechanism — assumed sysctl-controlled; verify where it is defined. */
extern int blk_iopoll_enabled;
47 | |
48 | #endif |
49 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9