#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED	(0)

static __always_inline int preempt_count(void)
{
	return current_thread_info()->preempt_count;
}

static __always_inline int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}

/*
 * must be macros to avoid header recursion hell
 */
#define task_preempt_count(p) \
	(task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED)

#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)

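/*
 * PREEMPT_NEED_RESCHED is only folded into the preempt count on
 * architectures that can update that count atomically (x86 keeps an
 * inverted copy of the bit in a per-cpu preempt_count). This generic
 * version tests TIF_NEED_RESCHED explicitly instead, so the three
 * folding helpers below are deliberately empty.
 */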
static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	*preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	*preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	/*
	 * Because load-store architectures cannot do per-cpu atomic
	 * operations, we cannot use PREEMPT_NEED_RESCHED here; it might
	 * get lost across the non-atomic update of the count.
	 */
	return !--*preempt_count_ptr() && tif_need_resched();
}
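
/*
 * For context, a sketch of how the core of this era consumes the helper
 * above: <linux/preempt.h> wraps it as preempt_count_dec_and_test() and,
 * under CONFIG_PREEMPT, preempt_enable() expands roughly to:
 *
 *	#define preempt_enable() \
 *	do { \
 *		barrier(); \
 *		if (unlikely(preempt_count_dec_and_test())) \
 *			__preempt_schedule(); \
 *	} while (0)
 */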

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(void)
{
	return unlikely(!preempt_count() && tif_need_resched());
}
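
/*
 * A sketch of a consumer: under CONFIG_PREEMPT the core defines
 * preempt_check_resched() along these lines, so a reschedule only
 * happens when the count is zero and TIF_NEED_RESCHED is set:
 *
 *	#define preempt_check_resched() \
 *	do { \
 *		if (should_resched()) \
 *			__preempt_schedule(); \
 *	} while (0)
 */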

#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()

#ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void preempt_schedule_context(void);
#define __preempt_schedule_context() preempt_schedule_context()
#endif
#endif /* CONFIG_PREEMPT */

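/*
 * An architecture opts into this generic implementation from its asm
 * Kbuild file; an illustrative snippet (the arch path is a placeholder):
 *
 *	# arch/<arch>/include/asm/Kbuild
 *	generic-y += preempt.h
 */
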
#endif /* __ASM_PREEMPT_H */