#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
        AS_EIO          = __GFP_BITS_SHIFT + 0,  /* IO error on async write */
        AS_ENOSPC       = __GFP_BITS_SHIFT + 1,  /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2,  /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3,  /* e.g., ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (unlikely(error)) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}
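
/*
 * Illustrative sketch (not part of this header): an asynchronous
 * writeback completion path records errors with mapping_set_error(),
 * and a later fsync()-style wait picks them up again, roughly as
 * filemap_fdatawait() does:
 *
 *      mapping_set_error(mapping, error);
 *      ...
 *      if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 *              ret = -ENOSPC;
 *      if (test_and_clear_bit(AS_EIO, &mapping->flags))
 *              ret = -EIO;
 */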

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (likely(mapping))
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
                                (__force unsigned long)mask;
}
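
/*
 * Illustrative sketch: filesystems call this while setting up an inode,
 * before any pages are inserted, e.g. to keep page-cache allocations
 * for that mapping from recursing back into the filesystem:
 *
 *      mapping_set_gfp_mask(inode->i_mapping,
 *                           mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 *
 * The GFP_NOFS-style restriction shown here is only an example; the
 * right mask depends on the filesystem's locking rules.
 */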

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT        PAGE_SHIFT
#define PAGE_CACHE_SIZE         PAGE_SIZE
#define PAGE_CACHE_MASK         PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)  (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
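
/*
 * For example, with 4096-byte pages PAGE_CACHE_ALIGN(5000) rounds up to
 * the next page boundary, 8192, while an already page-aligned value such
 * as 8192 is returned unchanged.
 */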

#define page_cache_get(page)            get_page(page)
#define page_cache_release(page)        put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
        VM_BUG_ON(!in_atomic());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON(page_count(page) == 0);
        atomic_inc(&page->_count);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON(PageTail(page));

        return 1;
}
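
/*
 * Illustrative sketch (simplified, with a hypothetical function name):
 * the lookup side of the protocol above, roughly what find_get_page()
 * does in mm/filemap.c.  The real code rechecks the radix-tree slot it
 * found rather than doing a second lookup, but the retry structure is
 * the same:
 *
 *      struct page *example_lockless_lookup(struct address_space *mapping,
 *                                           pgoff_t index)
 *      {
 *              struct page *page;
 *
 *              rcu_read_lock();
 *      repeat:
 *              page = radix_tree_lookup(&mapping->page_tree, index);
 *              if (page) {
 *                      if (!page_cache_get_speculative(page))
 *                              goto repeat;
 *                      if (page != radix_tree_lookup(&mapping->page_tree,
 *                                                    index)) {
 *                              page_cache_release(page);
 *                              goto repeat;
 *                      }
 *              }
 *              rcu_read_unlock();
 *              return page;
 *      }
 */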

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
        VM_BUG_ON(!in_atomic());
# endif
        VM_BUG_ON(page_count(page) == 0);
        atomic_add(count, &page->_count);

#else
        if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
                return 0;
#endif
        VM_BUG_ON(PageCompound(page) && page != compound_head(page));

        return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
        return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
        VM_BUG_ON(page_count(page) != 0);
        VM_BUG_ON(count == 0);

        atomic_set(&page->_count, count);
}
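
/*
 * Illustrative sketch (simplified): the remove side of the protocol
 * described above page_cache_get_speculative(), roughly what reclaim
 * does in __remove_mapping().  The expected count of 2 is one reference
 * for the pagecache plus one held by the caller; dirty pages and pages
 * with buffers need extra handling that is omitted here:
 *
 *      spin_lock_irq(&mapping->tree_lock);
 *      if (!page_freeze_refs(page, 2)) {
 *              spin_unlock_irq(&mapping->tree_lock);
 *              goto cannot_free;
 *      }
 *      __remove_from_page_cache(page);
 *      spin_unlock_irq(&mapping->tree_lock);
 *      ... the caller now owns all references and frees the page ...
 */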

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
                                pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
                                pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
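
/*
 * Illustrative sketch: a typical caller, e.g. a simple ->write_begin()
 * implementation, gets the page back locked and with an elevated
 * refcount, and must drop both when it is done:
 *
 *      page = grab_cache_page(mapping, index);
 *      if (!page)
 *              return -ENOMEM;
 *      ... fill or modify the page ...
 *      unlock_page(page);
 *      page_cache_release(page);
 */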

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
                                pgoff_t index, filler_t *filler,
                                void *data);
extern struct page *read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler,
                                void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
                                struct address_space *mapping,
                                pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
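
/*
 * Illustrative sketch: read_mapping_page() returns either an ERR_PTR()
 * or an unlocked, up-to-date page with an elevated refcount.  Callers
 * that have no struct file to pass as filler data commonly pass NULL:
 *
 *      page = read_mapping_page(mapping, index, NULL);
 *      if (IS_ERR(page))
 *              return PTR_ERR(page);
 *      kaddr = kmap(page);
 *      ... read the contents ...
 *      kunmap(page);
 *      page_cache_release(page);
 */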

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
        __set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
        __clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}
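
/*
 * Illustrative sketch: a common pattern is to look a page up, lock it,
 * and then re-check that it is still attached to the expected mapping,
 * because truncation may have detached it while we slept on the lock
 * (this is essentially what find_lock_page() does):
 *
 *      page = find_get_page(mapping, index);
 *      if (page) {
 *              lock_page(page);
 *              if (page->mapping != mapping) {
 *                      unlock_page(page);
 *                      page_cache_release(page);
 *                      goto repeat;
 *              }
 *              ... operate on the locked page ...
 *      }
 */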

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}
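
/*
 * Illustrative sketch: callers such as the generic file read path
 * propagate the error so that a fatally signalled task does not keep
 * sleeping on the page lock:
 *
 *      error = lock_page_killable(page);
 *      if (unlikely(error)) {
 *              page_cache_release(page);
 *              return error;
 *      }
 */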

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page_nosync(page);
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased page reference count so that the
 * page won't go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(page, PG_locked);
}
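
/*
 * Illustrative sketch: the classic use is waiting for I/O started by
 * ->readpage(), which unlocks the page when the read completes.  The
 * reference taken at lookup time is what keeps the page from going
 * away while we wait:
 *
 *      error = mapping->a_ops->readpage(file, page);
 *      if (!error) {
 *              wait_on_page_locked(page);
 *              if (!PageUptodate(page))
 *                      error = -EIO;
 *      }
 *      page_cache_release(page);
 */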

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        int ret;

        if (unlikely(size == 0))
                return 0;

        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        ret = __put_user(0, uaddr);
        if (ret == 0) {
                char __user *end = uaddr + size - 1;

                /*
                 * If the page was already mapped, this will get a cache miss
                 * for sure, so try to avoid doing it.
                 */
                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __put_user(0, end);
        }
        return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        int ret;

        if (unlikely(size == 0))
                return 0;

        ret = __get_user(c, uaddr);
        if (ret == 0) {
                const char __user *end = uaddr + size - 1;

                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK)) {
                        ret = __get_user(c, end);
                        (void)c;
                }
        }
        return ret;
}
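
/*
 * Illustrative sketch: generic_perform_write()-style callers fault the
 * source buffer in before taking the destination page lock, so that a
 * page fault during the actual copy (which is done with page faults
 * disabled) cannot deadlock on that lock:
 *
 *      if (unlikely(fault_in_pages_readable(buf, bytes)))
 *              return -EFAULT;
 *      ... lock the pagecache page, then copy the data with the
 *          atomic/no-fault variants of copy_from_user() ...
 */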

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __set_page_locked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __clear_page_locked(page);
        return error;
}

#endif /* _LINUX_PAGEMAP_H */