/*
 * Linux VM pressure
 *
 * Copyright 2012 Linaro Ltd.
 *                Anton Vorontsov <anton.vorontsov@linaro.org>
 *
 * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro,
 * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>

/*
 * The window size (vmpressure_win) is the number of scanned pages before
 * we try to analyze the scanned/reclaimed ratio. So the window is used as
 * a rate-limit tunable for the "low" level notification, and also for
 * averaging the ratio for the medium/critical levels. A small window can
 * cause many false positives, but a window that is too big will delay the
 * notifications.
 *
 * As the vmscan reclaimer logic works with chunks which are multiples of
 * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well.
 *
 * TODO: Make the window size depend on machine size, as we do for vmstat
 * thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
 */
static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
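
/* With SWAP_CLUSTER_MAX at 32 pages, this works out to the 512 pages
 * mentioned in the TODO above. */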

/*
 * These thresholds are used when we account memory pressure through
 * scanned/reclaimed ratio. The current values were chosen empirically. In
 * essence, they are percents: the higher the value, the more unsuccessful
 * reclaims there were.
 */
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;

/*
 * When there are too few pages left to scan, vmpressure() may miss the
 * critical pressure as the number of pages scanned will be less than the
 * "window size". However, in that case the vmscan priority will rise
 * quickly as the reclaimer tries to scan the LRUs more deeply.
 *
 * The vmscan logic considers these special priorities:
 *
 * prio == DEF_PRIORITY (12): reclaimer starts with that value
 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
 * prio == 0                : close to OOM, kernel scans every page in an lru
 *
 * Any value in this range is acceptable for this tunable (i.e. from 12 to
 * 0). The current value of vmpressure_level_critical_prio was chosen
 * empirically; in essence, it means that we consider the level critical
 * when the scanning depth is ~10% of the LRU size (vmscan scans
 * 'lru_size >> prio' pages, so it is actually 12.5%, or one eighth).
 * With ilog2(100 / 10) == 3, that threshold is reached once prio drops
 * to 3 or below.
 */
static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);

static struct vmpressure *work_to_vmpressure(struct work_struct *work)
{
        return container_of(work, struct vmpressure, work);
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
        struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);

        memcg = parent_mem_cgroup(memcg);
        if (!memcg)
                return NULL;
        return memcg_to_vmpressure(memcg);
}

enum vmpressure_levels {
        VMPRESSURE_LOW = 0,
        VMPRESSURE_MEDIUM,
        VMPRESSURE_CRITICAL,
        VMPRESSURE_NUM_LEVELS,
};

static const char * const vmpressure_str_levels[] = {
        [VMPRESSURE_LOW] = "low",
        [VMPRESSURE_MEDIUM] = "medium",
        [VMPRESSURE_CRITICAL] = "critical",
};

static enum vmpressure_levels vmpressure_level(unsigned long pressure)
{
        if (pressure >= vmpressure_level_critical)
                return VMPRESSURE_CRITICAL;
        else if (pressure >= vmpressure_level_med)
                return VMPRESSURE_MEDIUM;
        return VMPRESSURE_LOW;
}

static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
                                                    unsigned long reclaimed)
{
        unsigned long scale = scanned + reclaimed;
        unsigned long pressure;

        /*
         * We calculate the ratio (in percents) of how many pages were
         * scanned vs. reclaimed in a given time frame (window). Note that
         * time is in VM reclaimer's "ticks", i.e. number of pages
         * scanned. This makes it possible to set desired reaction time
         * and serves as a ratelimit.
         */
        pressure = scale - (reclaimed * scale / scanned);
        pressure = pressure * 100 / scale;

        pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure,
                 scanned, reclaimed);

        return vmpressure_level(pressure);
}
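
/*
 * Worked example (illustrative numbers): for scanned = 512 and
 * reclaimed = 128, scale = 640 and the raw value is
 * 640 - (128 * 640 / 512) = 480, which scales to 480 * 100 / 640 = 75,
 * i.e. the same 75% given by (1 - reclaimed/scanned) * 100; that falls
 * in the VMPRESSURE_MEDIUM band (60 <= 75 < 95).
 */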

struct vmpressure_event {
        struct eventfd_ctx *efd;
        enum vmpressure_levels level;
        struct list_head node;
};

static bool vmpressure_event(struct vmpressure *vmpr,
                             unsigned long scanned, unsigned long reclaimed)
{
        struct vmpressure_event *ev;
        enum vmpressure_levels level;
        bool signalled = false;

        level = vmpressure_calc_level(scanned, reclaimed);

        mutex_lock(&vmpr->events_lock);

        list_for_each_entry(ev, &vmpr->events, node) {
                if (level >= ev->level) {
                        eventfd_signal(ev->efd, 1);
                        signalled = true;
                }
        }

        mutex_unlock(&vmpr->events_lock);

        return signalled;
}

static void vmpressure_work_fn(struct work_struct *work)
{
        struct vmpressure *vmpr = work_to_vmpressure(work);
        unsigned long scanned;
        unsigned long reclaimed;

        /*
         * Several contexts might be calling vmpressure(), so it is
         * possible that the work was rescheduled again before the old
         * work context cleared the counters. In that case we will run
         * just after the old work returns, but then scanned might be zero
         * here. No need for any locks here since we don't care if
         * vmpr->reclaimed is in sync.
         */
        if (!vmpr->scanned)
                return;

        spin_lock(&vmpr->sr_lock);
        scanned = vmpr->scanned;
        reclaimed = vmpr->reclaimed;
        vmpr->scanned = 0;
        vmpr->reclaimed = 0;
        spin_unlock(&vmpr->sr_lock);

        do {
                if (vmpressure_event(vmpr, scanned, reclaimed))
                        break;
                /*
                 * If not handled, propagate the event upward into the
                 * hierarchy.
                 */
        } while ((vmpr = vmpressure_parent(vmpr)));
}

/**
 * vmpressure() - Account memory pressure through scanned/reclaimed ratio
 * @gfp: reclaimer's gfp mask
 * @memcg: cgroup memory controller handle
 * @scanned: number of pages scanned
 * @reclaimed: number of pages reclaimed
 *
 * This function should be called from the vmscan reclaim path to account
 * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
 * pressure index is then further refined and averaged over time.
 *
 * This function does not return any value.
 */
void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
                unsigned long scanned, unsigned long reclaimed)
{
        struct vmpressure *vmpr = memcg_to_vmpressure(memcg);

        /*
         * Here we only want to account pressure that userland is able to
         * help us with. For example, suppose that DMA zone is under
         * pressure; if we notify userland about that kind of pressure,
         * then it will be mostly a waste as it will trigger unnecessary
         * freeing of memory by userland (since userland is more likely to
         * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That
         * is why we include only movable, highmem and FS/IO pages.
         * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so
         * we account it too.
         */
        if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
                return;

        /*
         * If we got here with no pages scanned, that is an indicator that
         * the reclaimer was unable to find any shrinkable LRUs at the
         * current scanning depth. It does not mean that we should report
         * critical pressure yet, though. If the scanning priority
         * (scanning depth) goes too high (deep), we will be notified
         * through vmpressure_prio(). But so far, keep calm.
         */
        if (!scanned)
                return;

        spin_lock(&vmpr->sr_lock);
        vmpr->scanned += scanned;
        vmpr->reclaimed += reclaimed;
        scanned = vmpr->scanned;
        spin_unlock(&vmpr->sr_lock);

        if (scanned < vmpressure_win)
                return;
        schedule_work(&vmpr->work);
}
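
/*
 * Illustrative walk-through: with vmpressure_win at 512 pages, five reclaim
 * passes that each scan 100 pages only accumulate the counters (100, 200,
 * ... 500, all below the window); the sixth pass crosses the window, the
 * work item is scheduled, and vmpressure_work_fn() then consumes and resets
 * the accumulated scanned/reclaimed totals.
 */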

/**
 * vmpressure_prio() - Account memory pressure through reclaimer priority level
 * @gfp: reclaimer's gfp mask
 * @memcg: cgroup memory controller handle
 * @prio: reclaimer's priority
 *
 * This function should be called from the reclaim path every time the
 * vmscan reclaim priority (scanning depth) changes.
 *
 * This function does not return any value.
 */
void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
{
        /*
         * We only use prio for accounting the critical level. For more
         * info see the comment for the vmpressure_level_critical_prio
         * variable above.
         */
        if (prio > vmpressure_level_critical_prio)
                return;

        /*
         * OK, the prio is below the threshold, so update the vmpressure
         * information before the reclaimer dives into a long, deep vmscan
         * pass. Passing scanned = vmpressure_win and reclaimed = 0 to
         * vmpressure() computes a pressure of 100%, which is above the
         * critical threshold, i.e. we signal the 'critical' level.
         */
        vmpressure(gfp, memcg, vmpressure_win, 0);
}

/**
 * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
 * @css: css that is interested in vmpressure notifications
 * @cft: cgroup control files handle
 * @eventfd: eventfd context to link notifications with
 * @args: event arguments (used to set up a pressure level threshold)
 *
 * This function associates eventfd context with the vmpressure
 * infrastructure, so that the notifications will be delivered to the
 * @eventfd. The @args parameter is a string that denotes pressure level
 * threshold (one of vmpressure_str_levels, i.e. "low", "medium", or
 * "critical").
 *
 * This function should not be used directly, just pass it to (struct
 * cftype).register_event, and then cgroup core will handle everything by
 * itself.
 */
int vmpressure_register_event(struct cgroup_subsys_state *css,
                              struct cftype *cft, struct eventfd_ctx *eventfd,
                              const char *args)
{
        struct vmpressure *vmpr = css_to_vmpressure(css);
        struct vmpressure_event *ev;
        int level;

        for (level = 0; level < VMPRESSURE_NUM_LEVELS; level++) {
                if (!strcmp(vmpressure_str_levels[level], args))
                        break;
        }

        if (level >= VMPRESSURE_NUM_LEVELS)
                return -EINVAL;

        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return -ENOMEM;

        ev->efd = eventfd;
        ev->level = level;

        mutex_lock(&vmpr->events_lock);
        list_add(&ev->node, &vmpr->events);
        mutex_unlock(&vmpr->events_lock);

        return 0;
}
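
/*
 * Userspace usage sketch (illustrative; assumes a cgroup-v1 memory
 * controller mounted at /sys/fs/cgroup/memory with a child group "foo" --
 * see the "Memory Pressure" section of Documentation/cgroups/memory.txt
 * for the authoritative recipe):
 *
 *      int efd = eventfd(0, 0);
 *      int lfd = open("/sys/fs/cgroup/memory/foo/memory.pressure_level",
 *                     O_RDONLY);
 *      int cfd = open("/sys/fs/cgroup/memory/foo/cgroup.event_control",
 *                     O_WRONLY);
 *      char buf[64];
 *      __u64 cnt;
 *
 *      snprintf(buf, sizeof(buf), "%d %d medium", efd, lfd);
 *      write(cfd, buf, strlen(buf));
 *
 * After this, read(efd, &cnt, sizeof(cnt)) blocks until a "medium" (or
 * higher) pressure notification is signalled on the eventfd.
 */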

/**
 * vmpressure_unregister_event() - Unbind eventfd from vmpressure
 * @css: css handle
 * @cft: cgroup control files handle
 * @eventfd: eventfd context that was used to link vmpressure with @css
 *
 * This function does internal manipulations to detach the @eventfd from
 * the vmpressure notifications, and then frees internal resources
 * associated with the @eventfd (but the @eventfd itself is not freed).
 *
 * This function should not be used directly, just pass it to (struct
 * cftype).unregister_event, and then cgroup core will handle everything
 * by itself.
 */
void vmpressure_unregister_event(struct cgroup_subsys_state *css,
                                 struct cftype *cft,
                                 struct eventfd_ctx *eventfd)
{
        struct vmpressure *vmpr = css_to_vmpressure(css);
        struct vmpressure_event *ev;

        mutex_lock(&vmpr->events_lock);
        list_for_each_entry(ev, &vmpr->events, node) {
                if (ev->efd != eventfd)
                        continue;
                list_del(&ev->node);
                kfree(ev);
                break;
        }
        mutex_unlock(&vmpr->events_lock);
}

/**
 * vmpressure_init() - Initialize vmpressure control structure
 * @vmpr: Structure to be initialized
 *
 * This function should be called on every allocated vmpressure structure
 * before any usage.
 */
void vmpressure_init(struct vmpressure *vmpr)
{
        spin_lock_init(&vmpr->sr_lock);
        mutex_init(&vmpr->events_lock);
        INIT_LIST_HEAD(&vmpr->events);
        INIT_WORK(&vmpr->work, vmpressure_work_fn);
}

/**
 * vmpressure_cleanup() - shuts down vmpressure control structure
 * @vmpr: Structure to be cleaned up
 *
 * This function should be called before the structure in which it is
 * embedded is cleaned up.
 */
void vmpressure_cleanup(struct vmpressure *vmpr)
{
        /*
         * Make sure there is no pending work before eventfd infrastructure
         * goes away.
         */
        flush_work(&vmpr->work);
}