/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};
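
/*
 * With these swap_aops, a swap cache page that reaches pageout() is
 * written via swap_writepage() straight to the swap device, rather
 * than through any filesystem's ->writepage.
 */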

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};
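
/*
 * Unlike a file page, a swap cache page is keyed in
 * swapper_space.page_tree by the raw swp_entry_t value rather than by
 * a file offset, and the same value is kept in page_private(page).
 * A minimal lookup sketch (hypothetical caller, assuming a pte already
 * known to hold a swap entry):
 *
 *	swp_entry_t entry = pte_to_swp_entry(pte);
 *	struct page *page = find_get_page(&swapper_space, entry.val);
 */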

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

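/*
 * Note on units: the shift by (PAGE_SHIFT - 10) below converts a page
 * count to kilobytes.  With the common 4kB page size PAGE_SHIFT is 12,
 * so the count is shifted left by 2: e.g. 1000 free swap pages are
 * printed as 4000kB.
 */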
void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages);
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	spin_lock_irq(&swapper_space.tree_lock);
	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
	if (likely(!error)) {
		total_swapcache_pages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&swapper_space.tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}
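
/*
 * Note the ordering above: the SwapCache flag and page->private are
 * set under the page lock before the radix-tree insert, so that once
 * the page becomes visible in the tree any lookup already sees a fully
 * formed swap cache page; on failure the same steps are undone in
 * reverse order.
 */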


int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}
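
/*
 * The preload/insert split above follows the usual radix-tree pattern:
 * radix_tree_preload() may sleep to stock the per-cpu preload pool
 * (and returns with preemption disabled on success), the insert itself
 * then runs under the tree_lock without allocating, and
 * radix_tree_preload_end() re-enables preemption.  In isolation
 * (hypothetical tree, lock, index and item):
 *
 *	if (!radix_tree_preload(GFP_KERNEL)) {
 *		spin_lock_irq(&lock);
 *		err = radix_tree_insert(&tree, index, item);
 *		spin_unlock_irq(&lock);
 *		radix_tree_preload_end();
 *	}
 */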

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache, with
 * the caller holding swapper_space.tree_lock.
 */
void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}
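
/*
 * A sketch of the typical caller of add_to_swap() (hypothetical; in
 * the real kernel this is done by shrink_page_list() in mm/vmscan.c),
 * with the page locked as required:
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page))
 *			goto activate_locked;	(no swap space left)
 *	}
 *
 * On success the page is dirty and in the swap cache, so normal
 * pageout via swap_writepage() can proceed.
 */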

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page onto the free list:
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck it inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}
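
/*
 * The PAGEVEC_SIZE batching above bounds how much work each
 * release_pages() call does with the zone's LRU lock held: the array
 * is drained in pagevec-sized chunks rather than in one pass over all
 * nr pages.
 */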

/*
 * Lookup a swap entry in the swap cache.  A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}
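
/*
 * find_success/find_total is the swap cache hit rate reported by
 * show_swap_cache_info(): e.g. "find 300/1000" means 30% of swap-in
 * lookups found the page already in the swap cache.
 */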

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {	/* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) {		/* swp entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
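
/*
 * Race handling above: -EEXIST from swapcache_prepare() means another
 * context has already claimed SWAP_HAS_CACHE for this entry and is
 * about to add its own page, so we retry the loop and expect to find
 * that page in the cache; any other swapcache_prepare() failure means
 * the entry was freed under us.  The page is returned locked with the
 * read in flight, so callers typically lock_page() and then test
 * PageUptodate() before using it.
 */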

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code.  We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area.  This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
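
/*
 * Readahead sizing: valid_swaphandles() returns at most
 * 1 << page_cluster entries, aligned on that boundary and including
 * the faulting entry.  With the usual default of page_cluster == 3,
 * one fault reads an aligned block of up to 8 swap pages (32kB with
 * 4kB pages) in a single pass over the swap area.
 */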