#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

typedef struct page *new_page_t(struct page *, unsigned long private, int **);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 *
 * Balloon page migration introduces a special case where a 'distinct'
 * return code is used to flag a successful page migration to unmap_and_move().
 * This approach is necessary because page migration can race against the
 * balloon deflation procedure, and in such a case we could introduce a nasty
 * page leak if a successfully migrated balloon page gets released concurrently
 * with migration's unmap_and_move() wrap-up steps.
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
					   * successful migration case.
					   */
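
/*
 * Illustrative sketch (example_migratepage is a hypothetical name, not part
 * of this header): an address_space that carries no private state can
 * implement .migratepage by delegating to migrate_page() below, propagating
 * MIGRATEPAGE_SUCCESS or a negative errno:
 *
 *	static int example_migratepage(struct address_space *mapping,
 *				       struct page *newpage, struct page *page,
 *				       enum migrate_mode mode)
 *	{
 *		return migrate_page(mapping, newpage, page, mode);
 *	}
 */
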
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};

#ifdef CONFIG_MIGRATION

extern void putback_lru_pages(struct list_head *l);
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason);
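
/*
 * Sketch of a typical migrate_pages() call (alloc_target_page is a
 * hypothetical new_page_t callback): the caller isolates candidate pages
 * onto a private list, and anything still on the list after a failed run
 * must be put back:
 *
 *	LIST_HEAD(pagelist);
 *	... isolate candidate pages onto &pagelist ...
 *	err = migrate_pages(&pagelist, alloc_target_page, 0,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */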

extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode);
#else

static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason)
	{ return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL
#define fail_migrate_page NULL

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page, int node);
extern bool migrate_ratelimited(int node);
#else
static inline int migrate_misplaced_page(struct page *page, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */
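
/*
 * Sketch of a plausible caller pattern around a NUMA hinting fault
 * (target_nid is a hypothetical local holding the preferred node): skip
 * the attempt while migration traffic is rate-limited, otherwise try to
 * move the page toward the faulting task's node:
 *
 *	if (!migrate_ratelimited(target_nid))
 *		migrated = migrate_misplaced_page(page, target_nid);
 */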

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
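
/*
 * Sketch of the transparent-huge-page counterpart, as a huge-PMD NUMA
 * hinting fault handler might invoke it (haddr and target_nid are
 * illustrative locals):
 *
 *	migrated = migrate_misplaced_transhuge_page(mm, vma, pmd, entry,
 *						    haddr, page, target_nid);
 */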

#endif /* _LINUX_MIGRATE_H */