/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_cpu(cpu, cpumask) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret, cpu_online_mask);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);
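
/*
 * Editorial usage sketch (not part of the original file): a minimal
 * caller reading one event counter. The function name below is
 * hypothetical; PGMAJFAULT is a real vm_event_item index.
 *
 *	static void report_major_faults(void)
 *	{
 *		unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *		all_vm_events(events);
 *		printk(KERN_INFO "pgmajfault: %lu\n", events[PGMAJFAULT]);
 *	}
 *
 * The caller supplies the whole array; there is no per-item read, and
 * the snapshot may already be stale by the time it is used.
 */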

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
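
/*
 * Editorial note (not part of the original file): for example, when
 * CPU 3 goes offline, its pending event counts are added to whichever
 * CPU runs vm_events_fold_cpu(), so sums taken over the remaining
 * online CPUs by all_vm_events() still include everything CPU 3 had
 * counted.
 */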
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
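
/*
 * Editorial worked example (not part of the original file), assuming
 * 4 KB pages: a 2 GB zone spans 16 units of 128 MB, so fls(mem) = 5;
 * with 4 online CPUs, fls(4) = 3, giving
 *
 *	threshold = 2 * 3 * (1 + 5) = 36
 *
 * i.e. each CPU may let a counter drift by up to 36 pages before the
 * difference is folded into the zone and global totals.
 */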

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		threshold = calculate_threshold(zone);

		for_each_online_cpu(cpu)
			zone_pcp(zone, cpu)->stat_threshold = threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;
	long x;

	x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
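
/*
 * Editorial usage sketch (not part of the original file): "zone" and
 * "nr" are hypothetical. With the interrupt state unknown, the wrapper
 * above must be used; in a path that already runs with interrupts off,
 * the __ variant avoids a redundant save/restore:
 *
 *	mod_zone_page_state(zone, NR_FILE_PAGES, nr);	(any context)
 *	__mod_zone_page_state(zone, NR_FILE_PAGES, nr);	(irqs disabled)
 */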

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}
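
/*
 * Editorial worked example (not part of the original file): with
 * stat_threshold = 32, the increment that takes the per-cpu diff to 33
 * folds 33 + 16 = 49 pages into the zone counter and restarts the diff
 * at -16. A steadily increasing counter then needs 49 more increments
 * before the next fold, instead of 33 with a plain reset to zero, so
 * the shared cacheline is touched less often.
 */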

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;

	if (unlikely(*p < -pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				unsigned long flags;
				int v;

				local_irq_save(flags);
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
			}
		cond_resched();
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor
		 *
		 * Check if there are pages remaining in this pageset
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || !p->pcp.count)
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp.count)
			drain_zone_pages(zone, &p->pcp);
#endif
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (global_diff[i])
			atomic_long_add(global_diff[i], &vm_stat[i]);
}
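
/*
 * Editorial note (not part of the original file): the global counters
 * are deliberately updated once per item from global_diff[] after the
 * zone loop. If, say, three zones each carried a diff of +2 for
 * NR_FILE_PAGES, vm_stat[NR_FILE_PAGES] receives a single
 * atomic_long_add(6, ...) rather than three separate atomic updates to
 * the same shared cacheline.
 */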

#endif

#ifdef CONFIG_NUMA
/*
 * preferred_zone = the first choice zone of the zonelist passed
 *		    to the allocator
 * z		  = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
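
/*
 * Editorial worked example (not part of the original file): a task
 * running on node 0 prefers a node 0 zone but the page comes from a
 * zone on node 1. That counts NUMA_MISS on the node 1 zone and
 * NUMA_FOREIGN on the preferred node 0 zone; because node 1 is not the
 * node of the executing CPU, the node 1 zone also gets NUMA_OTHER. Had
 * node 0 satisfied the request, it would have been NUMA_HIT plus
 * NUMA_LOCAL instead.
 */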
#endif

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
	"Isolate",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);

	return 0;
}
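
/*
 * Editorial sketch (not part of the original file) of the resulting
 * /proc/pagetypeinfo shape; all counts are illustrative, the column
 * widths follow the format strings above:
 *
 *	Page block order: 9
 *	Pages per block: 512
 *
 *	Free pages count per migrate type at order       0      1      2 ...
 *	Node    0, zone      DMA, type    Unmovable      1      2      1 ...
 *	Node    0, zone      DMA, type  Reclaimable      0      1      0 ...
 *	...
 *	Number of blocks type     Unmovable  Reclaimable      Movable ...
 *	Node 0, zone      DMA            1            0            5 ...
 */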

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_mlock",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",
	"nr_writeback_temp",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
	"unevictable_pgs_mlockfreed",
#endif
};

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->pages_scanned,
		   zone->spanned_pages,
		   zone->present_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%lu",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = zone_pcp(zone, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  prev_priority:     %i"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
		   zone_is_all_unreclaimable(zone),
		   zone->prev_priority,
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}
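
/*
 * Editorial note (not part of the original file): the buffer allocated
 * above mirrors vmstat_text[] one to one. Slots 0..NR_VM_ZONE_STAT_ITEMS-1
 * hold the zoned counters; with CONFIG_VM_EVENT_COUNTERS the event
 * counters follow at e = v + NR_VM_ZONE_STAT_ITEMS. vmstat_show() can
 * therefore print entry *pos as "name value", e.g. a /proc/vmstat line
 * such as "nr_free_pages 81920" (value illustrative).
 */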

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		round_jiffies_relative(sysctl_stat_interval));
}

static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
	schedule_delayed_work_on(cpu, vmstat_work,
				 __round_jiffies_relative(HZ, cpu));
}
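
/*
 * Editorial note (not part of the original file): the work item is
 * deferrable, so an idle CPU is not woken merely to fold statistics,
 * and __round_jiffies_relative(HZ, cpu) skews each CPU's first firing
 * so the per-cpu timers do not all expire in the same tick.
 */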

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };
#endif

static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)