#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>

static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

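/*
 * Flat/discontig memory model: one page_cgroup array is allocated per
 * node and indexed by (pfn - node_start_pfn) of that node.
 */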
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}

static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}

void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
	       " don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else	/* CONFIG_SPARSEMEM */

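/*
 * Sparsemem: each mem_section carries its own page_cgroup table.  The
 * pointer is stored pre-biased by the section's start pfn (see
 * init_section_page_cgroup()), so it can be indexed by the raw pfn.
 */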
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_cgroup)
		return NULL;
#endif
	return section->page_cgroup + pfn;
}

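/*
 * Allocate a section's page_cgroup table: try physically contiguous
 * pages on the target node first, then fall back to vmalloc space.
 */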
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}

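/* Set up the page_cgroup table for the section containing @pfn. */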
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_cgroup *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION.  For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
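/* Undo alloc_page_cgroup(), whichever allocator actually backed it. */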
static void free_page_cgroup(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size =
			sizeof(struct page_cgroup) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}

void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	free_page_cgroup(base);
	ms->page_cgroup = NULL;
}

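/*
 * Allocate page_cgroup tables for every present section in the range
 * going online; on failure, free the tables of the whole range (rollback).
 */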
int __meminit online_page_cgroup(unsigned long start_pfn,
				 unsigned long nr_pages,
				 int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case the node already exists and contains valid
		 * memory; "start_pfn" is the pfn that was passed to
		 * online_pages() and is known to exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

int __meminit offline_page_cgroup(unsigned long start_pfn,
				  unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

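/*
 * Memory hotplug notifier: allocate tables while a block is going
 * online and free them once it has gone offline.
 */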
static int __meminit page_cgroup_callback(struct notifier_block *self,
					  unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;
	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

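/*
 * Boot-time setup for the sparsemem case: allocate a table for every
 * valid section of every node with memory, then register the memory
 * hotplug notifier.
 */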
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be section-aligned, and
		 * page->flags of out-of-node pages is not initialized, so
		 * scan one pfn per section within [start_pfn, end_pfn).
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfn ranges can overlap.
			 * Some architectures have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_cgroup_callback, 0);
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
			 "don't want memory cgroups\n");
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif

#ifdef CONFIG_MEMCG_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
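/*
 * Per-swap-device bookkeeping, indexed by swap type.  ->map is an array
 * of pages holding the swap_cgroup records for that device.
 */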
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup records are accessed via memcg's
 * charge/uncharge of SwapCache pages.  At swap_free(), they are accessed
 * directly from the swap code.
 *
 * This means:
 *  - there is no race in "exchange" when accessed via SwapCache, because
 *    the SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no other user of the entry and
 *    hence no race.
 * So no lock is needed around the "exchange" itself.
 *
 * TODO: these buffers could be pushed out to HIGHMEM.
 */

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

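/*
 * Map a swap entry to its swap_cgroup record: pick the device's ctrl by
 * swp_type() and index into the backing pages by swp_offset().
 */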
static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
					struct swap_cgroup_ctrl **ctrlp)
{
	pgoff_t offset = swp_offset(ent);
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;

	ctrl = &swap_cgroup_ctrl[swp_type(ent)];
	if (ctrlp)
		*ctrlp = ctrl;

	mappage = ctrl->map[offset / SC_PER_PAGE];
	sc = page_address(mappage);
	return sc + offset % SC_PER_PAGE;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns the old id on success, 0 on failure.
 * (No mem_cgroup uses 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
				   unsigned short old, unsigned short new)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup to be recorded
 *
 * Returns the old value on success, 0 on failure.
 * (Of course, the old value may itself be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}

/**
 * lookup_swap_cgroup_id - look up the mem_cgroup id tied to a swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the CSS ID of the mem_cgroup on success, 0 on failure
 * (0 is an invalid ID).
 */
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return lookup_swap_cgroup(ent, NULL)->id;
}

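/*
 * Called at swapon: allocate the map covering @max_pages swap slots of
 * device @type (SC_PER_PAGE records fit in each backing page).
 */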
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vzalloc(array_size);
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}

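/*
 * Called at swapoff: detach and free the record pages and the map array
 * of device @type.
 */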
void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];
			if (page)
				__free_page(page);
		}
		vfree(map);
	}
}

#endif