/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
        struct list_head freepages;     /* List of free pages to migrate to */
        struct list_head migratepages;  /* List of pages being migrated */
        unsigned long nr_freepages;     /* Number of isolated free pages */
        unsigned long nr_migratepages;  /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */
        bool sync;                      /* Synchronous migration */

        int order;                      /* order a direct compactor needs */
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
        struct zone *zone;
};
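
/*
 * A rough picture of the two scanners during a run:
 *
 *   zone start                                             zone end
 *   |  migrate_pfn -->                        <-- free_pfn      |
 *   |  (isolates movable pages)          (isolates free pages)  |
 *
 * Movable pages are migrated towards the end of the zone, and the run
 * completes when the scanners meet (free_pfn <= migrate_pfn).
 */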

static unsigned long release_freepages(struct list_head *freelist)
{
        struct page *page, *next;
        unsigned long count = 0;

        list_for_each_entry_safe(page, next, freelist, lru) {
                list_del(&page->lru);
                __free_page(page);
                count++;
        }

        return count;
}

/* Isolate free pages onto a private freelist. Must hold zone->lock */
static unsigned long isolate_freepages_block(struct zone *zone,
                                unsigned long blockpfn,
                                struct list_head *freelist)
{
        unsigned long zone_end_pfn, end_pfn;
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor;

        /* Get the last PFN we should scan for free pages at */
        zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);

        /* Find the first usable PFN in the block to initialise the page cursor */
        for (; blockpfn < end_pfn; blockpfn++) {
                if (pfn_valid_within(blockpfn))
                        break;
        }
        cursor = pfn_to_page(blockpfn);

        /* Isolate free pages. This assumes the block is valid */
        for (; blockpfn < end_pfn; blockpfn++, cursor++) {
                int isolated, i;
                struct page *page = cursor;

                if (!pfn_valid_within(blockpfn))
                        continue;
                nr_scanned++;

                if (!PageBuddy(page))
                        continue;

                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }

                /* If a page was split, advance to the end of it */
                if (isolated) {
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
                }
        }

        trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
        return total_isolated;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
        int migratetype = get_pageblock_migratetype(page);

        /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
        if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
                return false;

        /* If the page is a large free page, then allow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
                return true;

        /* If the block is MIGRATE_MOVABLE, allow migration */
        if (migratetype == MIGRATE_MOVABLE)
                return true;

        /* Otherwise skip the block */
        return false;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
                                struct compact_control *cc)
{
        struct page *page;
        unsigned long high_pfn, low_pfn, pfn;
        unsigned long flags;
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;

        /*
         * Initialise the free scanner. The starting point is where we last
         * scanned from (or the end of the zone if starting). The low point
         * is the end of the pageblock the migration scanner is using.
         */
        pfn = cc->free_pfn;
        low_pfn = cc->migrate_pfn + pageblock_nr_pages;

        /*
         * Take care that if the migration scanner is at the end of the zone
         * that the free scanner does not accidentally move to the next zone
         * in the next isolation cycle.
         */
        high_pfn = min(low_pfn, pfn);

        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
        for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;

                if (!pfn_valid(pfn))
                        continue;

                /*
                 * Check for overlapping nodes/zones. It's possible on some
                 * configurations to have a setup like
                 * node0 node1 node0
                 * i.e. it's possible that all pages within a zone's range of
                 * pages do not belong to a single zone.
                 */
                page = pfn_to_page(pfn);
                if (page_zone(page) != zone)
                        continue;

                /* Check the block is suitable for migration */
                if (!suitable_migration_target(page))
                        continue;

                /*
                 * Found a block suitable for isolating free pages from. Take
                 * the zone lock with IRQs disabled, double check the block is
                 * still suitable and isolate the pages. This minimises the
                 * time IRQs are disabled.
                 */
                isolated = 0;
                spin_lock_irqsave(&zone->lock, flags);
                if (suitable_migration_target(page)) {
                        isolated = isolate_freepages_block(zone, pfn, freelist);
                        nr_freepages += isolated;
                }
                spin_unlock_irqrestore(&zone->lock, flags);

                /*
                 * Record the highest PFN we isolated pages from. When next
                 * looking for free pages, the search will restart here as
                 * page migration may have returned some pages to the allocator
                 */
                if (isolated)
                        high_pfn = max(high_pfn, pfn);
        }

        /* split_free_page does not map the pages */
        list_for_each_entry(page, freelist, lru) {
                arch_alloc_page(page, 0);
                kernel_map_pages(page, 1, 1);
        }

        cc->free_pfn = high_pfn;
        cc->nr_freepages = nr_freepages;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
        struct page *page;
        unsigned int count[2] = { 0, };

        list_for_each_entry(page, &cc->migratepages, lru)
                count[!!page_is_file_cache(page)]++;

        __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
        __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
        unsigned long active, inactive, isolated;

        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                                zone_page_state(zone, NR_INACTIVE_ANON);
        active = zone_page_state(zone, NR_ACTIVE_FILE) +
                                zone_page_state(zone, NR_ACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                                zone_page_state(zone, NR_ISOLATED_ANON);

        return isolated > (inactive + active) / 2;
}
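
/*
 * For example, with 6000 pages on the inactive lists and 2000 on the active
 * lists, compaction backs off once more than 4000 pages are isolated from
 * the LRU (figures purely illustrative).
 */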

/* possible outcome of isolate_migratepages */
typedef enum {
        ISOLATE_ABORT,          /* Abort compaction now */
        ISOLATE_NONE,           /* No pages isolated, continue scanning */
        ISOLATE_SUCCESS,        /* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
                                        struct compact_control *cc)
{
        unsigned long low_pfn, end_pfn;
        unsigned long last_pageblock_nr = 0, pageblock_nr;
        unsigned long nr_scanned = 0, nr_isolated = 0;
        struct list_head *migratelist = &cc->migratepages;
        isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;

        /* Do not scan outside zone boundaries */
        low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

        /* Only scan within a pageblock boundary */
        end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

        /* Do not cross the free scanner or scan within a memory hole */
        if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
                cc->migrate_pfn = end_pfn;
                return ISOLATE_NONE;
        }

        /*
         * Ensure that there are not too many pages isolated from the LRU
         * list by either parallel reclaimers or compaction. If there are,
         * delay for some time until fewer pages are isolated
         */
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
                if (!cc->sync)
                        return ISOLATE_ABORT;

                congestion_wait(BLK_RW_ASYNC, HZ/10);

                if (fatal_signal_pending(current))
                        return ISOLATE_ABORT;
        }

        /* Time to isolate some pages for migration */
        cond_resched();
        spin_lock_irq(&zone->lru_lock);
        for (; low_pfn < end_pfn; low_pfn++) {
                struct page *page;
                bool locked = true;

                /* give a chance to irqs before checking need_resched() */
                if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
                        spin_unlock_irq(&zone->lru_lock);
                        locked = false;
                }
                if (need_resched() || spin_is_contended(&zone->lru_lock)) {
                        if (locked)
                                spin_unlock_irq(&zone->lru_lock);
                        cond_resched();
                        spin_lock_irq(&zone->lru_lock);
                        if (fatal_signal_pending(current))
                                break;
                } else if (!locked)
                        spin_lock_irq(&zone->lru_lock);

                /*
                 * migrate_pfn does not necessarily start aligned to a
                 * pageblock. Ensure that pfn_valid is called when moving
                 * into a new MAX_ORDER_NR_PAGES range in case of large
                 * memory holes within the zone
                 */
                if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
                        if (!pfn_valid(low_pfn)) {
                                low_pfn += MAX_ORDER_NR_PAGES - 1;
                                continue;
                        }
                }

                if (!pfn_valid_within(low_pfn))
                        continue;
                nr_scanned++;

                /*
                 * Get the page and ensure the page is within the same zone.
                 * See the comment in isolate_freepages about overlapping
                 * nodes. It is deliberate that the new zone lock is not taken
                 * as memory compaction should not move pages between nodes.
                 */
                page = pfn_to_page(low_pfn);
                if (page_zone(page) != zone)
                        continue;

                /* Skip if free */
                if (PageBuddy(page))
                        continue;

                /*
                 * For async migration, also only scan in MOVABLE blocks. Async
                 * migration is optimistic to see if the minimum amount of work
                 * satisfies the allocation
                 */
                pageblock_nr = low_pfn >> pageblock_order;
                if (!cc->sync && last_pageblock_nr != pageblock_nr &&
                                get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
                        low_pfn += pageblock_nr_pages;
                        low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
                        last_pageblock_nr = pageblock_nr;
                        continue;
                }

                if (!PageLRU(page))
                        continue;

                /*
                 * PageLRU is set, and lru_lock excludes isolation,
                 * splitting and collapsing (collapsing has already
                 * happened if PageLRU is set).
                 */
                if (PageTransHuge(page)) {
                        low_pfn += (1 << compound_order(page)) - 1;
                        continue;
                }

                if (!cc->sync)
                        mode |= ISOLATE_ASYNC_MIGRATE;

                /* Try to isolate the page */
                if (__isolate_lru_page(page, mode, 0) != 0)
                        continue;

                VM_BUG_ON(PageTransCompound(page));

                /* Successfully isolated */
                del_page_from_lru_list(zone, page, page_lru(page));
                list_add(&page->lru, migratelist);
                cc->nr_migratepages++;
                nr_isolated++;

                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
                        ++low_pfn;
                        break;
                }
        }

        acct_isolated(zone, cc);

        spin_unlock_irq(&zone->lru_lock);
        cc->migrate_pfn = low_pfn;

        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

        return ISOLATE_SUCCESS;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
                                        unsigned long data,
                                        int **result)
{
        struct compact_control *cc = (struct compact_control *)data;
        struct page *freepage;

        /* Isolate free pages if necessary */
        if (list_empty(&cc->freepages)) {
                isolate_freepages(cc->zone, cc);

                if (list_empty(&cc->freepages))
                        return NULL;
        }

        freepage = list_entry(cc->freepages.next, struct page, lru);
        list_del(&freepage->lru);
        cc->nr_freepages--;

        return freepage;
}
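
/*
 * compaction_alloc() is handed to migrate_pages() in compact_zone() below:
 * migrate_pages() calls it once per page being migrated to obtain a
 * destination page, drawing from the free pages isolated above.
 */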

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
        int nr_migratepages = 0;
        int nr_freepages = 0;
        struct page *page;

        list_for_each_entry(page, &cc->migratepages, lru)
                nr_migratepages++;
        list_for_each_entry(page, &cc->freepages, lru)
                nr_freepages++;

        cc->nr_migratepages = nr_migratepages;
        cc->nr_freepages = nr_freepages;
}

static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
{
        unsigned int order;
        unsigned long watermark;

        if (fatal_signal_pending(current))
                return COMPACT_PARTIAL;

        /* Compaction run completes if the migrate and free scanner meet */
        if (cc->free_pfn <= cc->migrate_pfn)
                return COMPACT_COMPLETE;

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (cc->order == -1)
                return COMPACT_CONTINUE;

        /* Compaction run is not finished if the watermark is not met */
        watermark = low_wmark_pages(zone);
        watermark += (1 << cc->order);

        if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
                return COMPACT_CONTINUE;

        /* Direct compactor: Is a suitable page free? */
        for (order = cc->order; order < MAX_ORDER; order++) {
                /* Job done if page is free of the right migratetype */
                if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
                        return COMPACT_PARTIAL;

                /* Job done if allocation would set block type */
                if (order >= pageblock_order && zone->free_area[order].nr_free)
                        return COMPACT_PARTIAL;
        }

        return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
        int fragindex;
        unsigned long watermark;

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (order == -1)
                return COMPACT_CONTINUE;

        /*
         * Watermarks for order-0 must be met for compaction. Note the 2UL.
         * This is because during migration, copies of pages need to be
         * allocated and for a short time, the footprint is higher
         */
        watermark = low_wmark_pages(zone) + (2UL << order);
        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                return COMPACT_SKIPPED;

        /*
         * fragmentation index determines if allocation failures are due to
         * low memory or external fragmentation
         *
         * index of -1000 implies allocations might succeed depending on
         * watermarks
         * index towards 0 implies failure is due to lack of memory
         * index towards 1000 implies failure is due to fragmentation
         *
         * Only compact if a failure would be due to fragmentation.
         */
        fragindex = fragmentation_index(zone, order);
        if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
                return COMPACT_SKIPPED;

        if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
            0, 0))
                return COMPACT_PARTIAL;

        return COMPACT_CONTINUE;
}
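
/*
 * With the default sysctl_extfrag_threshold of 500 (set below), a
 * fragmentation index of, say, 300 means failures look memory-related and
 * compaction is skipped, while an index of 700 points at external
 * fragmentation and compaction continues (values illustrative).
 */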

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
        int ret;

        ret = compaction_suitable(zone, cc->order);
        switch (ret) {
        case COMPACT_PARTIAL:
        case COMPACT_SKIPPED:
                /* Compaction is likely to fail */
                return ret;
        case COMPACT_CONTINUE:
                /* Fall through to compaction */
                ;
        }

        /* Setup to move all movable pages to the end of the zone */
        cc->migrate_pfn = zone->zone_start_pfn;
        cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
        cc->free_pfn &= ~(pageblock_nr_pages-1);

        migrate_prep_local();

        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
                unsigned long nr_migrate, nr_remaining;
                int err;

                switch (isolate_migratepages(zone, cc)) {
                case ISOLATE_ABORT:
                        ret = COMPACT_PARTIAL;
                        goto out;
                case ISOLATE_NONE:
                        continue;
                case ISOLATE_SUCCESS:
                        ;
                }

                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                (unsigned long)cc, false,
                                cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;

                count_vm_event(COMPACTBLOCKS);
                count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
                if (nr_remaining)
                        count_vm_events(COMPACTPAGEFAILED, nr_remaining);
                trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
                                                nr_remaining);

                /* Release LRU pages not migrated */
                if (err) {
                        putback_lru_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                }

        }

out:
        /* Release free pages and check accounting */
        cc->nr_freepages -= release_freepages(&cc->freepages);
        VM_BUG_ON(cc->nr_freepages != 0);

        return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
                                 int order, gfp_t gfp_mask,
                                 bool sync)
{
        struct compact_control cc = {
                .nr_freepages = 0,
                .nr_migratepages = 0,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
                .sync = sync,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);

        return compact_zone(zone, &cc);
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask,
                        bool sync)
{
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        int may_enter_fs = gfp_mask & __GFP_FS;
        int may_perform_io = gfp_mask & __GFP_IO;
        struct zoneref *z;
        struct zone *zone;
        int rc = COMPACT_SKIPPED;

        /*
         * Check whether it is worth even starting compaction. The order check is
         * made because an assumption is made that the page allocator can satisfy
         * the "cheaper" orders without taking special steps
         */
        if (!order || !may_enter_fs || !may_perform_io)
                return rc;

        count_vm_event(COMPACTSTALL);

        /* Compact each zone in the list */
        for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
                                                                nodemask) {
                int status;

                status = compact_zone_order(zone, order, gfp_mask, sync);
                rc = max(status, rc);

                /* If a normal allocation would succeed, stop compacting */
                if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                        break;
        }

        return rc;
}


/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
        int zoneid;
        struct zone *zone;

        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

                zone = &pgdat->node_zones[zoneid];
                if (!populated_zone(zone))
                        continue;

                cc->nr_freepages = 0;
                cc->nr_migratepages = 0;
                cc->zone = zone;
                INIT_LIST_HEAD(&cc->freepages);
                INIT_LIST_HEAD(&cc->migratepages);

                if (cc->order == -1 || !compaction_deferred(zone, cc->order))
                        compact_zone(zone, cc);

                if (cc->order > 0) {
                        int ok = zone_watermark_ok(zone, cc->order,
                                                low_wmark_pages(zone), 0, 0);
                        if (ok && cc->order > zone->compact_order_failed)
                                zone->compact_order_failed = cc->order + 1;
                        /* Currently async compaction is never deferred. */
                        else if (!ok && cc->sync)
                                defer_compaction(zone, cc->order);
                }

                VM_BUG_ON(!list_empty(&cc->freepages));
                VM_BUG_ON(!list_empty(&cc->migratepages));
        }

        return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
        struct compact_control cc = {
                .order = order,
                .sync = false,
        };

        return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
        struct compact_control cc = {
                .order = -1,
                .sync = true,
        };

        return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
        int nid;

        /* Flush pending updates to the LRU lists */
        lru_add_drain_all();

        for_each_online_node(nid)
                compact_node(nid);

        return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        if (write)
                return compact_nodes();

        return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_minmax(table, write, buffer, length, ppos);

        return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
                        struct device_attribute *attr,
                        const char *buf, size_t count)
{
        int nid = dev->id;

        if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
                /* Flush pending updates to the LRU lists */
                lru_add_drain_all();

                compact_node(nid);
        }

        return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
        return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
        return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */