#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>

static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
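
/*
 * Illustrative sketch (not part of the original file): under FLATMEM the
 * page_cgroup of a page is found by plain array arithmetic off the node's
 * base table. For a hypothetical page on node 0:
 *
 *	struct page_cgroup *pc;
 *	unsigned long pfn = page_to_pfn(page);
 *
 *	pc = NODE_DATA(0)->node_page_cgroup +
 *		(pfn - NODE_DATA(0)->node_start_pfn);
 *
 * which is exactly what lookup_page_cgroup() computes above.
 */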

static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = memblock_virt_alloc_try_nid_nopanic(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}

void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
			 " don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_cgroup)
		return NULL;
#endif
	return section->page_cgroup + pfn;
}

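/*
 * Try a physically contiguous, node-local allocation first; if that
 * fails, fall back to vmalloc space, which only requires virtually
 * contiguous memory and is more likely to succeed on fragmented
 * systems.
 */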
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}

static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_cgroup *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);
	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	/*
	 * The passed "pfn" may not be aligned to SECTION; mask it down
	 * to the section boundary so the stored offset is computed
	 * against the section's first pfn.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
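
/*
 * Worked example (illustrative, with assumed numbers): if PAGES_PER_SECTION
 * is 32768 and a section spans pfns [32768, 65536), the code above stores
 * base - 32768 in section->page_cgroup. For any pfn in the section,
 * lookup_page_cgroup() then computes section->page_cgroup + pfn ==
 * base + (pfn - 32768), i.e. it indexes the table by the pfn's offset
 * into the section with no extra subtraction at lookup time.
 */
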
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_cgroup(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size =
			sizeof(struct page_cgroup) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	free_page_cgroup(base);
	ms->page_cgroup = NULL;
}

static int __meminit online_page_cgroup(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case the node already exists and contains valid
		 * memory; "start_pfn" is the pfn that was passed to
		 * online_pages(), so it must belong to an online node.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

static int __meminit offline_page_cgroup(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

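/*
 * Memory hotplug notifier: allocate the page_cgroup tables before a
 * range of memory goes online, and free them once the range is offline
 * or its onlining is cancelled. Any error is reported back to the
 * hotplug core via notifier_from_errno().
 */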
239 | |
240 | static int __meminit page_cgroup_callback(struct notifier_block *self, |
241 | unsigned long action, void *arg) |
242 | { |
243 | struct memory_notify *mn = arg; |
244 | int ret = 0; |
245 | switch (action) { |
246 | case MEM_GOING_ONLINE: |
247 | ret = online_page_cgroup(mn->start_pfn, |
248 | mn->nr_pages, mn->status_change_nid); |
249 | break; |
250 | case MEM_OFFLINE: |
251 | offline_page_cgroup(mn->start_pfn, |
252 | mn->nr_pages, mn->status_change_nid); |
253 | break; |
254 | case MEM_CANCEL_ONLINE: |
255 | offline_page_cgroup(mn->start_pfn, |
256 | mn->nr_pages, mn->status_change_nid); |
257 | break; |
258 | case MEM_GOING_OFFLINE: |
259 | break; |
260 | case MEM_ONLINE: |
261 | case MEM_CANCEL_OFFLINE: |
262 | break; |
263 | } |
264 | |
265 | return notifier_from_errno(ret); |
266 | } |

#endif /* CONFIG_MEMORY_HOTPLUG */

void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION, and
		 * the page->flags of out-of-node pages are not initialized.
		 * So scan [start_pfn, end_pfn) one section at a time and
		 * skip pfns that do not belong to this node.
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfn ranges can overlap. Some architectures
			 * interleave nodes like this:
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ...
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_cgroup_callback, 0);
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
			 "don't want memory cgroups\n");
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif /* !CONFIG_SPARSEMEM */


#ifdef CONFIG_MEMCG_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);

struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
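
/*
 * Worked example (assuming 4KiB pages): struct swap_cgroup is 2 bytes,
 * so SC_PER_PAGE = 4096 / 2 = 2048 swap entries are tracked per backing
 * page. A 1GiB swap device (262144 slots) then needs 262144 / 2048 = 128
 * backing pages, i.e. 512KiB for the per-entry records.
 */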

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
 * operations against SwapCache. At swap_free(), it is accessed directly
 * from swap code.
 *
 * This means:
 * - there is no race on "exchange" when accessed via SwapCache, because
 *   the SwapCache (and its swp_entry) is under lock.
 * - when called via swap_free(), there is no other user of the entry,
 *   so again there is no race.
 * Hence no lock is needed around "exchange" itself.
 *
 * TODO: these buffers could be pushed out to HIGHMEM.
 */

/*
 * allocate buffers for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
					struct swap_cgroup_ctrl **ctrlp)
{
	pgoff_t offset = swp_offset(ent);
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;

	ctrl = &swap_cgroup_ctrl[swp_type(ent)];
	if (ctrlp)
		*ctrlp = ctrl;

	mappage = ctrl->map[offset / SC_PER_PAGE];
	sc = page_address(mappage);
	return sc + offset % SC_PER_PAGE;
}
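
/*
 * Illustrative sketch (hypothetical values): for a swap entry with
 * swp_offset(ent) == 5000 and SC_PER_PAGE == 2048, the lookup above
 * selects ctrl->map[5000 / 2048] == ctrl->map[2] and returns element
 * 5000 % 2048 == 904 within that page, i.e.
 *
 *	sc = (struct swap_cgroup *)page_address(ctrl->map[2]) + 904;
 */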

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns the old id on success, 0 on failure.
 * (No mem_cgroup uses 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}
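
/*
 * Usage sketch (hypothetical caller, not from this file): a charge-moving
 * path could reassign an entry's owner only when it still belongs to the
 * expected group; "from_id" and "to_id" are assumed, illustrative ids:
 *
 *	if (swap_cgroup_cmpxchg(ent, from_id, to_id) == from_id)
 *		moved = true;	// entry's owner switched to to_id
 *
 * (a return value of 0 means some other id was found and nothing changed)
 */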

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup id to be recorded
 *
 * Returns the previously recorded id.
 * (The old id can of course be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}
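
/*
 * Usage sketch (hypothetical caller): at swapout, memcg code can stamp
 * the entry with the owning group's id and read back whatever id was
 * there before (0 meaning "none"); "memcg_id_of(memcg)" is an assumed,
 * illustrative helper, not a real API:
 *
 *	unsigned short oldid = swap_cgroup_record(ent, memcg_id_of(memcg));
 */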

/**
 * lookup_swap_cgroup_id - lookup mem_cgroup id tied to a swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the ID of the mem_cgroup on success, 0 on failure.
 * (0 is an invalid ID.)
 */
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return lookup_swap_cgroup(ent, NULL)->id;
}

int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vzalloc(array_size);
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}

void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];

			if (page)
				__free_page(page);
		}
		vfree(map);
	}
}

#endif /* CONFIG_MEMCG_SWAP */