/*
 * Frontswap frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of frontswap.  See
 * Documentation/vm/frontswap.txt for more information.
 *
 * Copyright (C) 2009-2012 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>

/*
 * frontswap_ops is set by frontswap_register_ops to contain the pointers
 * to the frontswap "backend" implementation functions.
 */
static struct frontswap_ops frontswap_ops __read_mostly;

/*
 * This global enablement flag reduces overhead on systems where frontswap_ops
 * has not been registered, and is preferred to the slower alternative: a
 * function call that checks a non-global flag.
 */
bool frontswap_enabled __read_mostly;
EXPORT_SYMBOL(frontswap_enabled);
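
/*
 * For illustration: the static-inline wrapper in linux/frontswap.h tests
 * this flag before making any call into this file, so an unregistered
 * frontswap costs only one global test per swap operation.  Roughly:
 *
 *        static inline int frontswap_store(struct page *page)
 *        {
 *                int ret = -1;
 *
 *                if (frontswap_enabled)
 *                        ret = __frontswap_store(page);
 *                return ret;
 *        }
 */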

/*
 * If enabled, frontswap_store will return failure even on success.  As
 * a result, the swap subsystem will always write the page to swap, in
 * effect converting frontswap into a writethrough cache.  In this mode,
 * there is no direct reduction in swap writes, but a frontswap backend
 * can unilaterally "reclaim" any pages in use with no data loss, thus
 * providing increased control over maximum memory usage due to frontswap.
 */
static bool frontswap_writethrough_enabled __read_mostly;

/*
 * If enabled, the underlying tmem implementation is capable of doing
 * exclusive gets, so frontswap_load, on a successful tmem_get, must
 * mark the page as no longer in frontswap AND mark it dirty.
 */
static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;

#ifdef CONFIG_DEBUG_FS
/*
 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
 * properly configured).  These are for information only so are not protected
 * against increment races.
 */
static u64 frontswap_loads;
static u64 frontswap_succ_stores;
static u64 frontswap_failed_stores;
static u64 frontswap_invalidates;

static inline void inc_frontswap_loads(void)
{
        frontswap_loads++;
}
static inline void inc_frontswap_succ_stores(void)
{
        frontswap_succ_stores++;
}
static inline void inc_frontswap_failed_stores(void)
{
        frontswap_failed_stores++;
}
static inline void inc_frontswap_invalidates(void)
{
        frontswap_invalidates++;
}
#else
static inline void inc_frontswap_loads(void) { }
static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif

/*
 * Register operations for frontswap, returning the previous ops, thus
 * allowing detection of multiple backends and possible nesting.
 */
struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
{
        struct frontswap_ops old = frontswap_ops;

        frontswap_ops = *ops;
        frontswap_enabled = true;
        return old;
}
EXPORT_SYMBOL(frontswap_register_ops);
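
/*
 * A backend registers itself at init time, roughly as in the following
 * sketch.  The "mybackend_*" names are hypothetical; the ops fields match
 * struct frontswap_ops as used by this file.
 *
 *        static struct frontswap_ops mybackend_ops = {
 *                .init = mybackend_init,
 *                .store = mybackend_store,
 *                .load = mybackend_load,
 *                .invalidate_page = mybackend_invalidate_page,
 *                .invalidate_area = mybackend_invalidate_area,
 *        };
 *
 *        old_ops = frontswap_register_ops(&mybackend_ops);
 */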

/*
 * Enable/disable frontswap writethrough (see above).
 */
void frontswap_writethrough(bool enable)
{
        frontswap_writethrough_enabled = enable;
}
EXPORT_SYMBOL(frontswap_writethrough);

/*
 * Enable/disable frontswap exclusive gets (see above).
 */
void frontswap_tmem_exclusive_gets(bool enable)
{
        frontswap_tmem_exclusive_gets_enabled = enable;
}
EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
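
/*
 * For example, a tmem-style backend that supports exclusive gets and
 * wants writethrough behaviour would flip both knobs from its own init
 * code (illustrative sketch only):
 *
 *        frontswap_writethrough(true);
 *        frontswap_tmem_exclusive_gets(true);
 */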

/*
 * Called when a swap device is swapon'd.
 */
void __frontswap_init(unsigned type)
{
        struct swap_info_struct *sis = swap_info[type];

        BUG_ON(sis == NULL);
        if (sis->frontswap_map == NULL)
                return;
        frontswap_ops.init(type);
}
EXPORT_SYMBOL(__frontswap_init);

static inline void __frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
{
        frontswap_clear(sis, offset);
        atomic_dec(&sis->frontswap_pages);
}

/*
 * "Store" data from a page to frontswap and associate it with the page's
 * swaptype and offset.  Page must be locked and in the swap cache.
 * If frontswap already contains a page with matching swaptype and
 * offset, the frontswap implementation may either overwrite the data and
 * return success or invalidate the page from frontswap and return failure.
 */
int __frontswap_store(struct page *page)
{
        int ret = -1, dup = 0;
        swp_entry_t entry = { .val = page_private(page), };
        int type = swp_type(entry);
        struct swap_info_struct *sis = swap_info[type];
        pgoff_t offset = swp_offset(entry);

        BUG_ON(!PageLocked(page));
        BUG_ON(sis == NULL);
        if (frontswap_test(sis, offset))
                dup = 1;
        ret = frontswap_ops.store(type, offset, page);
        if (ret == 0) {
                frontswap_set(sis, offset);
                inc_frontswap_succ_stores();
                if (!dup)
                        atomic_inc(&sis->frontswap_pages);
        } else {
                /*
                 * failed dup always results in automatic invalidate of
                 * the (older) page from frontswap
                 */
                inc_frontswap_failed_stores();
                if (dup)
                        __frontswap_clear(sis, offset);
        }
        if (frontswap_writethrough_enabled)
                /* report failure so swap also writes to swap device */
                ret = -1;
        return ret;
}
EXPORT_SYMBOL(__frontswap_store);
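
/*
 * For context, the caller in the swap writeout path treats a non-zero
 * return as "not stored" and falls through to the normal swap device
 * write.  A sketch of that call site (cf. swap_writepage() in
 * mm/page_io.c):
 *
 *        if (frontswap_store(page) == 0) {
 *                set_page_writeback(page);
 *                unlock_page(page);
 *                end_page_writeback(page);
 *                goto out;
 *        }
 */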

/*
 * "Get" data from frontswap associated with swaptype and offset that were
 * specified when the data was put to frontswap and use it to fill the
 * specified page with data.  Page must be locked and in the swap cache.
 */
int __frontswap_load(struct page *page)
{
        int ret = -1;
        swp_entry_t entry = { .val = page_private(page), };
        int type = swp_type(entry);
        struct swap_info_struct *sis = swap_info[type];
        pgoff_t offset = swp_offset(entry);

        BUG_ON(!PageLocked(page));
        BUG_ON(sis == NULL);
        if (frontswap_test(sis, offset))
                ret = frontswap_ops.load(type, offset, page);
        if (ret == 0) {
                inc_frontswap_loads();
                if (frontswap_tmem_exclusive_gets_enabled) {
                        SetPageDirty(page);
                        frontswap_clear(sis, offset);
                }
        }
        return ret;
}
EXPORT_SYMBOL(__frontswap_load);
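
/*
 * The swap-in path tries frontswap before issuing a real disk read;
 * a sketch of that call site (cf. swap_readpage() in mm/page_io.c):
 *
 *        if (frontswap_load(page) == 0) {
 *                SetPageUptodate(page);
 *                unlock_page(page);
 *                goto out;
 *        }
 */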

/*
 * Invalidate any data from frontswap associated with the specified swaptype
 * and offset so that a subsequent "get" will fail.
 */
void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
        struct swap_info_struct *sis = swap_info[type];

        BUG_ON(sis == NULL);
        if (frontswap_test(sis, offset)) {
                frontswap_ops.invalidate_page(type, offset);
                __frontswap_clear(sis, offset);
                inc_frontswap_invalidates();
        }
}
EXPORT_SYMBOL(__frontswap_invalidate_page);

/*
 * Invalidate all data from frontswap associated with all offsets for the
 * specified swaptype.
 */
void __frontswap_invalidate_area(unsigned type)
{
        struct swap_info_struct *sis = swap_info[type];

        BUG_ON(sis == NULL);
        if (sis->frontswap_map == NULL)
                return;
        frontswap_ops.invalidate_area(type);
        atomic_set(&sis->frontswap_pages, 0);
        /* frontswap_map is a bitmap of sis->max bits; clear it as one */
        bitmap_zero(sis->frontswap_map, sis->max);
}
EXPORT_SYMBOL(__frontswap_invalidate_area);

static unsigned long __frontswap_curr_pages(void)
{
        int type;
        unsigned long totalpages = 0;
        struct swap_info_struct *si = NULL;

        assert_spin_locked(&swap_lock);
        for (type = swap_list.head; type >= 0; type = si->next) {
                si = swap_info[type];
                totalpages += atomic_read(&si->frontswap_pages);
        }
        return totalpages;
}

static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
                                        int *swapid)
{
        int ret = -EINVAL;
        struct swap_info_struct *si = NULL;
        int si_frontswap_pages;
        unsigned long total_pages_to_unuse = total;
        unsigned long pages = 0, pages_to_unuse = 0;
        int type;

        assert_spin_locked(&swap_lock);
        for (type = swap_list.head; type >= 0; type = si->next) {
                si = swap_info[type];
                si_frontswap_pages = atomic_read(&si->frontswap_pages);
                if (total_pages_to_unuse < si_frontswap_pages) {
                        pages = pages_to_unuse = total_pages_to_unuse;
                } else {
                        pages = si_frontswap_pages;
                        pages_to_unuse = 0; /* unuse all */
                }
                /* ensure there is enough RAM to fetch pages from frontswap */
                if (security_vm_enough_memory_mm(current->mm, pages)) {
                        ret = -ENOMEM;
                        continue;
                }
                vm_unacct_memory(pages);
                *unused = pages_to_unuse;
                *swapid = type;
                ret = 0;
                break;
        }

        return ret;
}

/*
 * Used to check whether it is necessary and feasible to unuse pages.
 * Returns 1 when there is nothing to do, 0 when pages need to be
 * shrunk, or an error code when there is an error.
 */
static int __frontswap_shrink(unsigned long target_pages,
                                unsigned long *pages_to_unuse,
                                int *type)
{
        unsigned long total_pages = 0, total_pages_to_unuse;

        assert_spin_locked(&swap_lock);

        total_pages = __frontswap_curr_pages();
        if (total_pages <= target_pages) {
                /* Nothing to do */
                *pages_to_unuse = 0;
                return 1;
        }
        total_pages_to_unuse = total_pages - target_pages;
        return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
}

/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrinking" frontswap is essentially a
 * "partial swapoff".  It works by calling try_to_unuse() to attempt,
 * subject to memory constraints, to reduce the number of pages in
 * frontswap to the number given in the target_pages parameter.
 */
void frontswap_shrink(unsigned long target_pages)
{
        unsigned long pages_to_unuse = 0;
        int uninitialized_var(type), ret;

        /*
         * we don't want to hold swap_lock while doing a very
         * lengthy try_to_unuse, but swap_list may change
         * so restart scan from swap_list.head each time
         */
        spin_lock(&swap_lock);
        ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
        spin_unlock(&swap_lock);
        if (ret == 0)
                try_to_unuse(type, true, pages_to_unuse);
        return;
}
EXPORT_SYMBOL(frontswap_shrink);
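
/*
 * A backend under memory pressure can use this together with
 * frontswap_curr_pages() to hand pages back to the real swap device;
 * for example (illustrative sketch only), to halve frontswap's footprint:
 *
 *        unsigned long target = frontswap_curr_pages() / 2;
 *
 *        frontswap_shrink(target);
 */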

/*
 * Count and return the number of frontswap pages across all
 * swap devices.  This is exported so that backend drivers can
 * determine current usage without reading debugfs.
 */
unsigned long frontswap_curr_pages(void)
{
        unsigned long totalpages = 0;

        spin_lock(&swap_lock);
        totalpages = __frontswap_curr_pages();
        spin_unlock(&swap_lock);

        return totalpages;
}
EXPORT_SYMBOL(frontswap_curr_pages);

static int __init init_frontswap(void)
{
#ifdef CONFIG_DEBUG_FS
        struct dentry *root = debugfs_create_dir("frontswap", NULL);

        if (root == NULL)
                return -ENXIO;
        debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
        debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
        debugfs_create_u64("failed_stores", S_IRUGO, root,
                                &frontswap_failed_stores);
        debugfs_create_u64("invalidates", S_IRUGO,
                                root, &frontswap_invalidates);
#endif
        return 0;
}

module_init(init_frontswap);