/*
 * linux/mm/oom_kill.c
 *
 * Copyright (C) 1998,2000 Rik van Riel
 * Thanks go out to Claus Fischer for some serious inspiration and
 * for goading me into coding this file...
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory. This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers. It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks;
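/*
 * zone_scan_lock serializes zone OOM locking; see try_set_zone_oom()
 * and clear_zonelist_oom() below.
 */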
static DEFINE_SPINLOCK(zone_scan_lock);
/* #define DEBUG */

/*
 * Does the mems_allowed of any thread of the target process intersect
 * ours?
 */
static int has_intersects_mems_allowed(struct task_struct *tsk)
{
	struct task_struct *t;

	t = tsk;
	do {
		if (cpuset_mems_allowed_intersects(current, t))
			return 1;
		t = next_thread(t);
	} while (t != tsk);

	return 0;
}

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task whose badness we calculate
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill, this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */

unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time;
	struct mm_struct *mm;
	struct task_struct *child;
	int oom_adj = p->signal->oom_adj;
	struct task_cputime task_time;
	unsigned long utime;
	unsigned long stime;

	if (oom_adj == OOM_DISABLE)
		return 0;

	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = mm->total_vm;
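	/* Note that total_vm is measured in pages, not bytes. */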

	/*
	 * After this unlock we can no longer dereference local variable `mm'
	 */
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_OOM_ORIGIN)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless number of children. In case a
	 * single child is eating the vast majority of memory, adding
	 * only half to the parent's score will make the child our kill
	 * candidate of choice.
	 */
	list_for_each_entry(child, &p->children, sibling) {
		task_lock(child);
		if (child->mm != mm && child->mm)
			points += child->mm->total_vm/2 + 1;
		task_unlock(child);
	}

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	thread_group_cputime(p, &task_time);
	utime = cputime_to_jiffies(task_time.utime);
	stime = cputime_to_jiffies(task_time.stime);
	cpu_time = (utime + stime) >> (SHIFT_HZ + 3);
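	/*
	 * For example, assuming HZ == 1000 (so SHIFT_HZ == 10), the
	 * shift divides total jiffies by 8192: 80 seconds of combined
	 * user+system time (80000 jiffies) yields cpu_time == 9.
	 */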

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	if (cpu_time)
		points /= int_sqrt(cpu_time);
	if (run_time)
		points /= int_sqrt(int_sqrt(run_time));
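	/*
	 * Net effect: points is divided by the square root of the CPU
	 * time and the fourth root of the run time, so long-running,
	 * CPU-heavy tasks earn lower badness scores.
	 */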

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
	    has_capability_noaudit(p, CAP_SYS_RESOURCE))
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (has_capability_noaudit(p, CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!has_intersects_mems_allowed(p))
		points /= 8;

	/*
	 * Adjust the score by oom_adj.
	 */
	if (oom_adj) {
		if (oom_adj > 0) {
			if (!points)
				points = 1;
			points <<= oom_adj;
		} else
			points >>= -(oom_adj);
	}
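	/*
	 * Because oom_adj is applied as a bit shift, each step doubles
	 * or halves the score: oom_adj == 4 multiplies points by 16,
	 * and oom_adj == -4 divides them by 16.
	 */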

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
		p->pid, p->comm, points);
#endif
	return points;
}

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				    gfp_t gfp_mask, nodemask_t *nodemask)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);

	/*
	 * We reach here with __GFP_THISNODE only when __GFP_NOFAIL is
	 * also used, so we should avoid killing current; a random task
	 * kill is the fallback. Ideally this would be reported as
	 * CONSTRAINT_THISNODE, but there is no way to handle that yet.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * The nodemask here is the nodemask passed to alloc_pages().
	 * Cpuset doesn't use this nodemask for its hardwall/softwall/
	 * hierarchy features; mempolicy is the only user of the
	 * nodemask here. Check whether mempolicy's nodemask contains
	 * all of N_HIGH_MEMORY.
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
		return CONSTRAINT_MEMORY_POLICY;

	/* Check whether this allocation failure was caused by a cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			return CONSTRAINT_CPUSET;

	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	return CONSTRAINT_NONE;
}
#endif

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 * Returns the chosen task, NULL if no killable process was found, or
 * ERR_PTR(-1UL) if a victim is already on its way out and the caller
 * should simply wait.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints,
						struct mem_cgroup *mem)
{
	struct task_struct *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	for_each_process(p) {
		unsigned long points;

		/*
		 * skip kernel threads and tasks which have already released
		 * their mm.
		 */
		if (!p->mm)
			continue;
		/* skip the init task */
		if (is_global_init(p))
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if (p->flags & PF_EXITING) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		if (p->signal->oom_adj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	}

	return chosen;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: target memory controller
 *
 * Dumps the current memory state of all system tasks, excluding kernel threads.
 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
 * score, and name.
 *
 * If @mem is non-NULL, only tasks that are a member of that mem_cgroup are
 * shown.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem)
{
	struct task_struct *g, *p;

	printk(KERN_INFO "[ pid ]   uid  tgid total_vm      rss cpu oom_adj "
	       "name\n");
	do_each_thread(g, p) {
		struct mm_struct *mm;

		if (mem && !task_in_mem_cgroup(p, mem))
			continue;
		if (!thread_group_leader(p))
			continue;

		task_lock(p);
		mm = p->mm;
		if (!mm) {
			/*
			 * total_vm and rss sizes do not exist for tasks with no
			 * mm so there's no need to report them; they can't be
			 * oom killed anyway.
			 */
			task_unlock(p);
			continue;
		}
		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d     %3d %s\n",
		       p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
		       get_mm_rss(mm), (int)task_cpu(p), p->signal->oom_adj,
		       p->comm);
		task_unlock(p);
	} while_each_thread(g, p);
}

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
			struct mem_cgroup *mem)
{
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_adj=%d\n",
		current->comm, gfp_mask, order, current->signal->oom_adj);
	task_lock(current);
	cpuset_print_task_mems_allowed(current);
	task_unlock(current);
	dump_stack();
	mem_cgroup_print_oom_info(mem, p);
	show_mem();
	if (sysctl_oom_dump_tasks)
		dump_tasks(mem);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
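/*
 * K() converts a page count to kilobytes; with 4 KiB pages
 * (PAGE_SHIFT == 12) this is a multiplication by 4.
 */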

/*
 * Send SIGKILL to the selected process irrespective of CAP_SYS_RAWIO
 * flag though it's unlikely that we select a process with CAP_SYS_RAWIO
 * set.
 */
static void __oom_kill_task(struct task_struct *p, int verbose)
{
	if (is_global_init(p)) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	task_lock(p);
	if (!p->mm) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill an mm-less task %d (%s)!\n",
			task_pid_nr(p), p->comm);
		task_unlock(p);
		return;
	}

	if (verbose)
		printk(KERN_ERR "Killed process %d (%s) "
			"vsz:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
			task_pid_nr(p), p->comm,
			K(p->mm->total_vm),
			K(get_mm_counter(p->mm, MM_ANONPAGES)),
			K(get_mm_counter(p->mm, MM_FILEPAGES)));
	task_unlock(p);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}

static int oom_kill_task(struct task_struct *p)
{
	/*
	 * WARNING: p->mm may not be dereferenced here, since we did not
	 * obtain it via get_task_mm(p); all we need is a NULL check.
	 * Even if p->mm is non-NULL now, it may become NULL at any time
	 * since we do not hold task_lock(p). However, this is of no
	 * concern to us.
	 */
	if (!p->mm || p->signal->oom_adj == OOM_DISABLE)
		return 1;

	__oom_kill_task(p, 1);

	return 0;
}

static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned long points, struct mem_cgroup *mem,
			    const char *message)
{
	struct task_struct *c;

	if (printk_ratelimit())
		dump_header(p, gfp_mask, order, mem);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		__oom_kill_task(p, 0);
		return 0;
	}

	printk(KERN_ERR "%s: kill process %d (%s) score %lu or a child\n",
		message, task_pid_nr(p), p->comm, points);

	/* Try to kill a child first */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == p->mm)
			continue;
		if (mem && !task_in_mem_cgroup(c, mem))
			continue;
		if (!oom_kill_task(c))
			return 0;
	}
	return oom_kill_task(p);
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long points = 0;
	struct task_struct *p;

	if (sysctl_panic_on_oom == 2)
		panic("out of memory (memcg). panic_on_oom is selected.\n");
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, mem);
	if (!p || PTR_ERR(p) == -1UL)
		goto out;

	if (oom_kill_process(p, gfp_mask, 0, points, mem,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
}
#endif

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/*
 * Try to acquire the OOM killer lock for the zones in zonelist. Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zone_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}
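/*
 * A successful try_set_zone_oom() is expected to be paired with a call
 * to clear_zonelist_oom() below once the OOM kill attempt completes.
 */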

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now invoke the OOM
 * killer again, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}

/*
 * Must be called with tasklist_lock held for read.
 */
static void __out_of_memory(gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points;

	if (sysctl_oom_kill_allocating_task)
		if (!oom_kill_process(current, gfp_mask, order, 0, NULL,
				"Out of memory (oom_kill_allocating_task)"))
			return;
retry:
	/*
	 * Rambo mode: Shoot down a process and hope it solves whatever
	 * issues we may have.
	 */
	p = select_bad_process(&points, NULL);

	if (PTR_ERR(p) == -1UL)
		return;

	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		read_unlock(&tasklist_lock);
		dump_header(NULL, gfp_mask, order, NULL);
		panic("Out of memory and no killable processes...\n");
	}

	if (oom_kill_process(p, gfp_mask, order, points, NULL,
			     "Out of memory"))
		goto retry;
}

/*
 * pagefault handler calls into here because it is out of memory but
 * doesn't know exactly how or why.
 */
void pagefault_out_of_memory(void)
{
	unsigned long freed = 0;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (sysctl_panic_on_oom)
		panic("out of memory from page fault. panic_on_oom is selected.\n");

	read_lock(&tasklist_lock);
	__out_of_memory(0, 0); /* unknown gfp_mask and order */
	read_unlock(&tasklist_lock);

	/*
	 * Give the killed task a good chance of exiting before we
	 * retry the memory allocation.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to the page allocator
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask)
{
	unsigned long freed = 0;
	enum oom_constraint constraint;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (sysctl_panic_on_oom == 2) {
		dump_header(NULL, gfp_mask, order, NULL);
		panic("out of memory. Compulsory panic_on_oom is selected.\n");
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
	read_lock(&tasklist_lock);

	switch (constraint) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, gfp_mask, order, 0, NULL,
				"No available memory (MPOL_BIND)");
		break;

	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom) {
			dump_header(NULL, gfp_mask, order, NULL);
			panic("out of memory. panic_on_oom is selected\n");
		}
		/* Fall-through */
	case CONSTRAINT_CPUSET:
		__out_of_memory(gfp_mask, order);
		break;
	}

	read_unlock(&tasklist_lock);

	/*
	 * Give the killed task a good chance of exiting before we
	 * retry the memory allocation, unless that task is current
	 * itself.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}