/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1


#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
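
/*
 * Illustrative sketch (not part of the original header): how the unions
 * above are interpreted for the different MPOL_* modes defined in
 * uapi/linux/mempolicy.h. The switch below is a hypothetical consumer.
 *
 *	switch (pol->mode) {
 *	case MPOL_PREFERRED:
 *		nid = pol->v.preferred_node;	// a single preferred node
 *		break;
 *	case MPOL_BIND:
 *	case MPOL_INTERLEAVE:
 *		nodes = pol->v.nodes;		// the allowed/interleave set
 *		break;
 *	default:
 *		// MPOL_DEFAULT: v is undefined, fall back to process policy
 *		break;
 *	}
 */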

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
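
/*
 * Illustrative pairing (a sketch, not from the original header): lookups in
 * a shared policy tree return a policy with MPOL_F_SHARED set and an extra
 * reference taken, so the matching release is mpol_cond_put(), which is a
 * no-op for non-shared policies. mpol_shared_policy_lookup() is declared
 * further down in this header; "sp" and "pgoff" are hypothetical.
 *
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, pgoff);
 *
 *	if (pol) {
 *		... use pol for one allocation decision ...
 *		mpol_cond_put(pol);	// unrefs only if MPOL_F_SHARED
 *	}
 */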

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
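
/*
 * Illustrative sketch: duplicating a vma's policy, e.g. when a vma is
 * copied or split. In the matching mm/mempolicy.c, __mpol_dup() reports
 * allocation failure via ERR_PTR(), hence the IS_ERR() check; "new" is a
 * hypothetical destination vma.
 *
 *	struct mempolicy *npol = mpol_dup(vma_policy(vma));
 *
 *	if (IS_ERR(npol))
 *		return PTR_ERR(npol);
 *	new->vm_policy = npol;	// owns the reference mpol_dup() initialized
 */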

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
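
/*
 * Illustrative sketch of the page-index convention described above: a
 * shmem-style caller looks up the policy for a file offset by page index,
 * not byte offset, so segments larger than unsigned long bytes still work.
 * "info" and "offset" are hypothetical.
 *
 *	pgoff_t idx = offset >> PAGE_SHIFT;	// index in pages, not bytes
 *	struct mempolicy *pol;
 *
 *	pol = mpol_shared_policy_lookup(&info->policy, idx);
 *	... allocate according to pol ...
 *	mpol_cond_put(pol);
 */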

struct mempolicy *get_vma_policy(struct task_struct *tsk,
		struct vm_area_struct *vma, unsigned long addr);
bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
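
/*
 * Illustrative sketch (mirrors the hugetlb caller; names other than the
 * declarations in this header are hypothetical): huge_zonelist() resolves
 * the vma's policy at addr into a zonelist plus an optional nodemask, and
 * the policy reference it hands back is dropped with mpol_cond_put() once
 * the allocation is done.
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	struct zonelist *zl;
 *
 *	zl = huge_zonelist(vma, addr, gfp_mask, &mpol, &nodemask);
 *	... allocate a huge page from zl, constrained by nodemask ...
 *	mpol_cond_put(mpol);
 */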
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
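
/*
 * Illustrative sketch: a tmpfs-style round trip of a policy through its
 * string form. The "mode:nodelist" syntax shown ("interleave:0-3") is an
 * assumption about what mpol_parse_str() accepts; per the !CONFIG_NUMA stub
 * below, nonzero means error. A writable buffer is used since parsing may
 * modify the string in place.
 *
 *	char opt[] = "interleave:0-3";
 *	char buf[64];
 *	struct mempolicy *mpol;
 *
 *	if (!mpol_parse_str(opt, &mpol)) {
 *		mpol_to_str(buf, sizeof(buf), mpol);	// format it back
 *		mpol_put(mpol);
 *	}
 */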

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return 0;
	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
			return 0;
	return 1;
}
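
/*
 * Illustrative sketch: a page-migration walk consults vma_migratable()
 * before touching a vma; the loop below is hypothetical.
 *
 *	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 *		if (!vma_migratable(vma))
 *			continue;	// skips VM_IO/VM_PFNMAP and low-zone files
 *		... queue this vma's pages for migration ...
 *	}
 */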

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
			const nodemask_t *mask)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
}

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

#endif /* CONFIG_NUMA */
#endif