#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <generated/bounds.h>
#include <asm/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  It marks the boundary between allocation orders
 * which should coalesce naturally under reasonable reclaim pressure and
 * those which will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

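/*
 * Rough size arithmetic for the two thresholds above (illustrative only;
 * assumes 4 KiB pages, i.e. PAGE_SHIFT == 12, and the default MAX_ORDER
 * of 11):
 *
 *      MAX_ORDER_NR_PAGES      = 1 << (11 - 1) = 1024 pages = 4 MiB
 *      PAGE_ALLOC_COSTLY_ORDER = 3  ->  1 << 3 =    8 pages = 32 KiB
 *
 * so the largest block the buddy allocator manages is 4 MiB, and anything
 * bigger than a 32 KiB request is treated as "costly" by reclaim.
 */
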
#define MIGRATE_UNMOVABLE     0
#define MIGRATE_RECLAIMABLE   1
#define MIGRATE_MOVABLE       2
#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
#define MIGRATE_RESERVE       3
#define MIGRATE_ISOLATE       4 /* can't allocate from here */
#define MIGRATE_TYPES         5

#define for_each_migratetype_order(order, type) \
        for (order = 0; order < MAX_ORDER; order++) \
                for (type = 0; type < MIGRATE_TYPES; type++)

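/*
 * A typical use of for_each_migratetype_order() is to touch every buddy
 * free list exactly once, e.g. when (re)initialising a zone.  Minimal
 * sketch, loosely modelled on the free-list setup in mm/page_alloc.c
 * (struct free_area is defined just below):
 *
 *      unsigned int order, t;
 *
 *      for_each_migratetype_order(order, t) {
 *              INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
 *              zone->free_area[order].nr_free = 0;
 *      }
 */
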
extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
        return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

struct free_area {
        struct list_head free_list[MIGRATE_TYPES];
        unsigned long nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name) struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
        /* First 128 byte cacheline (assuming 64 bit words) */
        NR_FREE_PAGES,
        NR_LRU_BASE,
        NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
        NR_ACTIVE_ANON,         /*  "     "     "   "       "         */
        NR_INACTIVE_FILE,       /*  "     "     "   "       "         */
        NR_ACTIVE_FILE,         /*  "     "     "   "       "         */
        NR_UNEVICTABLE,         /*  "     "     "   "       "         */
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
        NR_ANON_PAGES,          /* Mapped anonymous pages */
        NR_FILE_MAPPED,         /* pagecache pages mapped into pagetables.
                                   only modified from process context */
        NR_FILE_PAGES,
        NR_FILE_DIRTY,
        NR_WRITEBACK,
        NR_SLAB_RECLAIMABLE,
        NR_SLAB_UNRECLAIMABLE,
        NR_PAGETABLE,           /* used for pagetables */
        NR_KERNEL_STACK,
        /* Second 128 byte cacheline */
        NR_UNSTABLE_NFS,        /* NFS unstable pages */
        NR_BOUNCE,
        NR_VMSCAN_WRITE,
        NR_WRITEBACK_TEMP,      /* Writeback using temporary buffers */
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
        NR_SHMEM,               /* shmem pages (includes tmpfs/GEM pages) */
        NR_DIRTIED,             /* page dirtyings since bootup */
        NR_WRITTEN,             /* page writings since bootup */
#ifdef CONFIG_NUMA
        NUMA_HIT,               /* allocated in intended node */
        NUMA_MISS,              /* allocated in non intended node */
        NUMA_FOREIGN,           /* was intended here, hit elsewhere */
        NUMA_INTERLEAVE_HIT,    /* interleaver preferred this zone */
        NUMA_LOCAL,             /* allocation from local node */
        NUMA_OTHER,             /* allocation from other node */
#endif
        NR_ANON_TRANSPARENT_HUGEPAGES,
        NR_VM_ZONE_STAT_ITEMS };

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
        LRU_INACTIVE_ANON = LRU_BASE,
        LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
        LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
        LRU_UNEVICTABLE,
        NR_LRU_LISTS
};

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)

static inline int is_file_lru(enum lru_list l)
{
        return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list l)
{
        return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list l)
{
        return (l == LRU_UNEVICTABLE);
}

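/*
 * The lru_list indices line up with the NR_LRU_BASE block of
 * zone_stat_item above, so code can convert between the two by simple
 * addition.  Illustrative sketch (zone_page_state() is declared in
 * <linux/vmstat.h>, not in this header):
 *
 *      enum lru_list l;
 *      unsigned long evictable = 0;
 *
 *      for_each_evictable_lru(l)
 *              evictable += zone_page_state(zone, NR_LRU_BASE + l);
 */
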
enum zone_watermarks {
        WMARK_MIN,
        WMARK_LOW,
        WMARK_HIGH,
        NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])

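/*
 * The watermarks are consumed by the page allocator roughly as follows
 * (heavily simplified sketch; the real check, __zone_watermark_ok() in
 * mm/page_alloc.c, also adjusts the mark for the allocation flags and
 * discounts free blocks below the requested order):
 *
 *      long free = zone_page_state(z, NR_FREE_PAGES);
 *      long min  = low_wmark_pages(z) + z->lowmem_reserve[classzone_idx];
 *
 *      if (free - (1 << order) <= min)
 *              ... wake kswapd and/or fall back to the next zone ...
 */
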
struct per_cpu_pages {
        int count;              /* number of pages in the list */
        int high;               /* high watermark, emptying needed */
        int batch;              /* chunk size for buddy add/remove */

        /* Lists of pages, one per migrate type stored on the pcp-lists */
        struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
        struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
        s8 expire;
#endif
#ifdef CONFIG_SMP
        s8 stat_threshold;
        s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};
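
/*
 * Order-0 frees go to these per-cpu lists first; only once ->count goes
 * above ->high are ->batch pages handed back to the buddy free lists in
 * one go.  Rough sketch of the free path (see free_hot_cold_page() in
 * mm/page_alloc.c for the real thing):
 *
 *      pcp = &this_cpu_ptr(zone->pageset)->pcp;
 *      list_add(&page->lru, &pcp->lists[migratetype]);
 *      pcp->count++;
 *      if (pcp->count >= pcp->high) {
 *              free_pcppages_bulk(zone, pcp->batch, pcp);
 *              pcp->count -= pcp->batch;
 *      }
 */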

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
        /*
         * ZONE_DMA is used when there are devices that are not able
         * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
         * carve out the portion of memory that is needed for these devices.
         * The range is arch specific.
         *
         * Some examples
         *
         * Architecture         Limit
         * ---------------------------
         * parisc, ia64, sparc  <4G
         * s390                 <2G
         * arm                  Various
         * alpha                Unlimited or 0-16MB.
         *
         * i386, x86_64 and multiple other arches
         *                      <16M.
         */
        ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
        /*
         * x86_64 needs two ZONE_DMAs because it supports devices that are
         * only able to do DMA to the lower 16M but also 32 bit devices that
         * can only do DMA to areas below 4G.
         */
        ZONE_DMA32,
#endif
        /*
         * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
         * performed on pages in ZONE_NORMAL if the DMA devices support
         * transfers to all addressable memory.
         */
        ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
        /*
         * A memory area that is only addressable by the kernel through
         * mapping portions into its own address space. This is for example
         * used by i386 to allow the kernel to address the memory beyond
         * 900MB. The kernel will set up special mappings (page
         * table entries on i386) for each page that the kernel needs to
         * access.
         */
        ZONE_HIGHMEM,
#endif
        ZONE_MOVABLE,
        __MAX_NR_ZONES
};

#ifndef __GENERATING_BOUNDS_H

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */

#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif

struct zone_reclaim_stat {
        /*
         * The pageout code in vmscan.c keeps track of how many of the
         * mem/swap backed and file backed pages are referenced.
         * The higher the rotated/scanned ratio, the more valuable
         * that cache is.
         *
         * The anon LRU stats live in [0], file LRU stats in [1]
         */
        unsigned long recent_rotated[2];
        unsigned long recent_scanned[2];
};
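
/*
 * vmscan turns these counters into a ratio: the more recently-referenced
 * ("rotated") pages it finds per page scanned, the more valuable that LRU
 * is.  Illustrative reading of the stats ([0] = anon, [1] = file), roughly
 * how get_scan_count() in mm/vmscan.c weighs anon against file reclaim:
 *
 *      ap = anon_prio * (rstat->recent_scanned[0] + 1) /
 *                       (rstat->recent_rotated[0] + 1);
 *      fp = file_prio * (rstat->recent_scanned[1] + 1) /
 *                       (rstat->recent_rotated[1] + 1);
 */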

struct zone {
        /* Fields commonly accessed by the page allocator */

        /* zone watermarks, access with *_wmark_pages(zone) macros */
        unsigned long watermark[NR_WMARK];

        /*
         * When free pages are below this point, additional steps are taken
         * when reading the number of free pages to avoid per-cpu counter
         * drift allowing watermarks to be breached
         */
        unsigned long percpu_drift_mark;

        /*
         * We don't know if the memory that we're going to allocate will be
         * freeable or whether it will be released eventually, so to avoid
         * totally wasting several GB of ram we must reserve some of the lower
         * zone memory (otherwise we risk running OOM on the lower zones even
         * though there is plenty of freeable ram on the higher zones).  This
         * array is recalculated at runtime if the sysctl_lowmem_reserve_ratio
         * sysctl changes.
         */
        unsigned long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
        int node;
        /*
         * zone reclaim becomes active if more unmapped pages exist.
         */
        unsigned long min_unmapped_pages;
        unsigned long min_slab_pages;
#endif
        struct per_cpu_pageset __percpu *pageset;
        /*
         * free areas of different sizes
         */
        spinlock_t lock;
        int all_unreclaimable;          /* All pages pinned */
#ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t span_seqlock;
#endif
        struct free_area free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
        /*
         * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
         * In SPARSEMEM, this map is stored in struct mem_section
         */
        unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_COMPACTION
        /*
         * On compaction failure, 1<<compact_defer_shift compactions
         * are skipped before trying again. The number attempted since
         * last failure is tracked with compact_considered.
         */
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
#endif

        ZONE_PADDING(_pad1_)

        /* Fields commonly accessed by the page reclaim scanner */
        spinlock_t lru_lock;
        struct zone_lru {
                struct list_head list;
        } lru[NR_LRU_LISTS];

        struct zone_reclaim_stat reclaim_stat;

        unsigned long pages_scanned;    /* since last reclaim */
        unsigned long flags;            /* zone flags, see below */

        /* Zone statistics */
        atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

        /*
         * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
         * this zone's LRU.  Maintained by the pageout code.
         */
        unsigned int inactive_ratio;


        ZONE_PADDING(_pad2_)
        /* Rarely used or read-mostly fields */

        /*
         * wait_table           -- the array holding the hash table
         * wait_table_hash_nr_entries  -- the size of the hash table array
         * wait_table_bits      -- wait_table_size == (1 << wait_table_bits)
         *
         * The purpose of all these is to keep track of the people
         * waiting for a page to become available and make them
         * runnable again when possible. The trouble is that this
         * consumes a lot of space, especially when so few things
         * wait on pages at a given time. So instead of using
         * per-page waitqueues, we use a waitqueue hash table.
         *
         * The bucket discipline is to sleep on the same queue when
         * colliding and wake all in that wait queue when removing.
         * When something wakes, it must check to be sure its page is
         * truly available, a la thundering herd. The cost of a
         * collision is great, but given the expected load of the
         * table, they should be so rare as to be outweighed by the
         * benefits from the saved space.
         *
         * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
         * primary users of these fields, and in mm/page_alloc.c
         * free_area_init_core() performs the initialization of them.
         */
        wait_queue_head_t *wait_table;
        unsigned long wait_table_hash_nr_entries;
        unsigned long wait_table_bits;

        /*
         * Discontig memory support fields.
         */
        struct pglist_data *zone_pgdat;
        /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
        unsigned long zone_start_pfn;

        /*
         * zone_start_pfn, spanned_pages and present_pages are all
         * protected by span_seqlock.  It is a seqlock because it has
         * to be read outside of zone->lock, and it is done in the main
         * allocator path.  But, it is written quite infrequently.
         *
         * The lock is declared along with zone->lock because it is
         * frequently read in proximity to zone->lock.  It's good to
         * give them a chance of being in the same cacheline.
         */
        unsigned long spanned_pages;    /* total size, including holes */
        unsigned long present_pages;    /* amount of memory (excluding holes) */

        /*
         * rarely used fields:
         */
        const char *name;
} ____cacheline_internodealigned_in_smp;
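
/*
 * How the span fields above relate to each other (illustrative):
 *
 *      zone_end_pfn   = zone->zone_start_pfn + zone->spanned_pages;
 *      pages_in_holes = zone->spanned_pages  - zone->present_pages;
 *
 * i.e. spanned_pages covers the whole pfn range claimed by the zone,
 * holes included, while present_pages counts only the memory that is
 * actually there.
 */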

typedef enum {
        ZONE_RECLAIM_LOCKED,            /* prevents concurrent reclaim */
        ZONE_OOM_LOCKED,                /* zone is in OOM killer zonelist */
        ZONE_CONGESTED,                 /* zone has many dirty pages backed by
                                         * a congested BDI
                                         */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
        set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
        return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
        clear_bit(flag, &zone->flags);
}

static inline int zone_is_reclaim_congested(const struct zone *zone)
{
        return test_bit(ZONE_CONGESTED, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
        return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
        return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12
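
/*
 * Example: with DEF_PRIORITY == 12, an LRU list of 4096 pages contributes
 * 4096 >> 12 = 1 page to the first reclaim pass.  Each unsuccessful pass
 * lowers the priority by one, doubling the share, until priority 0 scans
 * the whole list.
 */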

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for GFP_THISNODE.
 *
 * [0] : Zonelist with fallback
 * [1] : No fallback (GFP_THISNODE)
 */
#define MAX_ZONELISTS 2


/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_fullzone_zap)
 *    we zero'd fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current task's mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zone's offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans.  During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set.  During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * re-examining zones for free memory when they just came up low on
 * memory only moments ago.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist.  However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter).  A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here.  We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct.  This pointer
 * both enables us to find the zonelist cache, and in the case of
 * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
 * to know that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 * 1) The full, fixed length version, shown below, and
 * 2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it.  This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */


struct zonelist_cache {
        unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];          /* zone->nid */
        DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);      /* zone full? */
        unsigned long last_full_zap;            /* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
        struct zone *zone;      /* Pointer to actual zone */
        int zone_idx;           /* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()      - Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()  - Return the index of the zone for an entry
 * zonelist_node_idx()  - Return the index of the node for an entry
 */
struct zonelist {
        struct zonelist_cache *zlcache_ptr;                  // NULL or &zlcache
        struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
        struct zonelist_cache zlcache;                       // optional ...
#endif
};

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
        unsigned long start_pfn;
        unsigned long end_pfn;
        int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level grouping of memory
 * than a single zone.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];
        struct zonelist node_zonelists[MAX_ZONELISTS];
        int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
        struct page *node_mem_map;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
        struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Must be held any time you expect node_start_pfn, node_present_pages
         * or node_spanned_pages to stay constant.  Holding this will also
         * guarantee that any pfn_valid() stays that way.
         *
         * Nests above zone->lock and zone->span_seqlock.
         */
        spinlock_t node_size_lock;
#endif
        unsigned long node_start_pfn;
        unsigned long node_present_pages; /* total number of physical pages */
        unsigned long node_spanned_pages; /* total size of physical page
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
        struct task_struct *kswapd;
        int kswapd_max_order;
        enum zone_type classzone_idx;
} pg_data_t;

#define node_present_pages(nid)  (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)  (NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)  ((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)  pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)  pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)  (NODE_DATA(nid)->node_start_pfn)

#define node_end_pfn(nid) ({\
        pg_data_t *__pgdat = NODE_DATA(nid);\
        __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
})
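
/*
 * A node's page frames can be walked with the two helpers above.
 * Illustrative sketch (pfn_valid() still needs checking on architectures
 * whose node spans contain holes):
 *
 *      unsigned long pfn;
 *
 *      for (pfn = node_start_pfn(nid); pfn < node_end_pfn(nid); pfn++) {
 *              if (!pfn_valid(pfn))
 *                      continue;
 *              ... work on pfn_to_page(pfn) ...
 *      }
 */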

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(void *data);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
                int classzone_idx, int alloc_flags);
enum memmap_context {
        MEMMAP_EARLY,
        MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
                                     unsigned long size,
                                     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)  ((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
        return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
        return movable_zone == ZONE_HIGHMEM;
#else
        return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
        return (idx == ZONE_HIGHMEM ||
                (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
        return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
        return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
        int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
        return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
               (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
                zone_movable_is_highmem());
#else
        return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
        return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
        return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
        return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
        return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
        return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16      /* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)          (&contig_page_data)
#define NODE_MEM_MAP(nid)       mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)                    \
        for (pgdat = first_online_pgdat();              \
             pgdat;                                     \
             pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)                             \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))

#define for_each_populated_zone(zone)                   \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))                    \
                if (!populated_zone(zone))              \
                        ; /* do nothing */              \
                else

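/*
 * Example: summing the free pages of every zone that actually has memory
 * (illustrative sketch; zone_page_state() comes from <linux/vmstat.h>):
 *
 *      struct zone *zone;
 *      unsigned long free = 0;
 *
 *      for_each_populated_zone(zone)
 *              free += zone_page_state(zone, NR_FREE_PAGES);
 */
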
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
        return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
        return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
        /* zone_to_nid not available in this context */
        return zoneref->zone->node;
#else
        return 0;
#endif /* CONFIG_NUMA */
}

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
                                    enum zone_type highest_zoneidx,
                                    nodemask_t *nodes,
                                    struct zone **zone);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes,
                                        struct zone **zone)
{
        return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
                                                                zone);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
        for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
                zone;                                                   \
                z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
        for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
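
/*
 * Typical allocator-style walk over a zonelist, simplified from the loop
 * in get_page_from_freelist() (mm/page_alloc.c); zonelist, high_zoneidx,
 * nodemask, order, preferred_zone and alloc_flags stand in for the
 * caller's allocation context:
 *
 *      struct zoneref *z;
 *      struct zone *zone;
 *
 *      for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
 *                                      nodemask) {
 *              if (!zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *                                     zone_idx(preferred_zone), alloc_flags))
 *                      continue;
 *              ... try to allocate from this zone ...
 *      }
 */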

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
        !defined(CONFIG_ARCH_POPULATES_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
        return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)         (0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT       #bits space required to store a section #
 *
 * PA_SECTION_SHIFT     physical address to/from section number
 * PFN_SECTION_SHIFT    pfn to/from section number
 */
#define SECTIONS_SHIFT          (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT        (SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT       (SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS         (1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
        ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)   (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
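
/*
 * Worked example of the section arithmetic (illustrative; assumes the
 * common x86_64 values SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12):
 *
 *      PFN_SECTION_SHIFT = 27 - 12 = 15
 *      PAGES_PER_SECTION = 1 << 15 = 32768 pages = 128 MiB per section
 *      pfn_to_section_nr(0x48000)  = 0x48000 >> 15 = 9
 */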

struct page;
struct page_cgroup;
struct mem_section {
        /*
         * This is, logically, a pointer to an array of struct
         * pages.  However, it is stored with some other magic.
         * (see sparse.c::sparse_init_one_section())
         *
         * Additionally during early boot we encode node id of
         * the location of the section here to guide allocation.
         * (see sparse.c::memory_present())
         *
         * Making it a UL at least makes someone do a cast
         * before using it wrong.
         */
        unsigned long section_mem_map;

        /* See declaration of similar field in struct zone */
        unsigned long *pageblock_flags;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        /*
         * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
         * section. (see memcontrol.h/page_cgroup.h about this.)
         */
        struct page_cgroup *page_cgroup;
        unsigned long pad;
#endif
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT       1
#endif

#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS        DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK       (SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
        if (!mem_section[SECTION_NR_TO_ROOT(nr)])
                return NULL;
        return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT  (1UL<<0)
#define SECTION_HAS_MEM_MAP     (1UL<<1)
#define SECTION_MAP_LAST_BIT    (1UL<<2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT       2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
        unsigned long map = section->section_mem_map;
        map &= SECTION_MAP_MASK;
        return (struct page *)map;
}
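
/*
 * The low bits of section_mem_map carry the PRESENT/HAS_MEM_MAP flags
 * above, and during early boot the node id is parked in the same word,
 * shifted by SECTION_NID_SHIFT.  Roughly (see mm/sparse.c for the real
 * encode/decode helpers):
 *
 *      encode: ms->section_mem_map = (nid << SECTION_NID_SHIFT)
 *                                      | SECTION_MARKED_PRESENT;
 *      decode: nid = ms->section_mem_map >> SECTION_NID_SHIFT;
 */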

static inline int present_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
        return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
        return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
        return __nr_to_section(pfn_to_section_nr(pfn));
}

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)                                         \
({                                                              \
        unsigned long __pfn_to_nid_pfn = (pfn);                 \
        page_to_nid(pfn_to_page(__pfn_to_nid_pfn));             \
})
#else
#define pfn_to_nid(pfn)         (0)
#endif

#define early_pfn_valid(pfn)    pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()   do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)      (1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)    (1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
int memmap_valid_within(unsigned long pfn,
                                        struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
                                        struct page *page, struct zone *zone)
{
        return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */