#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in the system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

/*
 * Fast path: with no cpuset other than the root, every node is allowed,
 * so the slower __cpuset_node_allowed_*() checks are skipped entirely.
 */
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
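
/*
 * Illustrative sketch, not part of this header: a zonelist scan in a
 * hypothetical page allocator might use the softwall check to skip
 * zones on nodes outside the current task's cpuset, roughly:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx)
 *		if (cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			... consider allocating from this zone ...
 *
 * The hardwall variant is the stricter check, for allocations that must
 * not fall back to nodes of a hardwalled ancestor cpuset.
 */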

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
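
/*
 * Hedged example of the intended call site (which lives in the page
 * allocator, not in this header): the per-cpuset meter is bumped when
 * the current task begins direct reclaim, e.g.
 *
 *	cpuset_memory_pressure_bump();
 *	progress = try_to_free_pages(...);
 *
 * Wrapping the enabled test in a macro keeps the common disabled case
 * down to one global load and no function call.
 */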

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}
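
/*
 * Illustrative sketch, an approximation of a page-cache caller rather
 * than code defined here: with per-task page spreading enabled, an
 * allocator round-robins pages over the cpuset's memory nodes, roughly:
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int n = cpuset_mem_spread_node();
 *		return alloc_pages_exact_node(n, gfp, 0);
 *	}
 *	return alloc_pages(gfp, 0);
 */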

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * Reading current's mems_allowed and mempolicy in the fast path must be
 * protected by get_mems_allowed().
 */
static inline void get_mems_allowed(void)
{
	current->mems_allowed_change_disable++;

	/*
	 * Ensure that reading mems_allowed and mempolicy happens after
	 * the update of ->mems_allowed_change_disable.
	 *
	 * When the write-side task finds ->mems_allowed_change_disable
	 * non-zero, it knows the read-side task is reading mems_allowed
	 * or mempolicy, so it clears the old bits lazily.
	 */
	smp_mb();
}

static inline void put_mems_allowed(void)
{
	/*
	 * Ensure that the reads of mems_allowed and mempolicy complete
	 * before ->mems_allowed_change_disable is decremented.
	 *
	 * The write-side task then knows that the read-side task is
	 * still reading mems_allowed or mempolicy and will not clear
	 * the old bits in the nodemask.
	 */
	smp_mb();
	--ACCESS_ONCE(current->mems_allowed_change_disable);
}

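/*
 * Illustrative read-side pairing (the reader below is hypothetical):
 *
 *	get_mems_allowed();
 *	nodes = current->mems_allowed;	... or a mempolicy lookup ...
 *	put_mems_allowed();
 *
 * The barriers above guarantee that a concurrent cpuset rebind sees the
 * elevated mems_allowed_change_disable and clears old bits lazily, so
 * the reader never observes an empty nodemask mid-read.
 */
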
static inline void set_mems_allowed(nodemask_t nodemask)
{
	/* task_lock() serializes writers of current->mems_allowed */
	task_lock(current);
	current->mems_allowed = nodemask;
	task_unlock(current);
}
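
/*
 * Hedged usage example, assuming the early-boot caller of this era:
 * kernel_init() grants the init task all memory nodes with
 *
 *	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 */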

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
	return cpumask_any(cpu_active_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline void get_mems_allowed(void)
{
}

static inline void put_mems_allowed(void)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */