#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
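
/*
 * Illustrative sketch only (not part of this header): how a writeback
 * wait path can report and then clear the error bits set above. This is
 * modeled on the pattern used by filemap_fdatawait(); the helper name
 * below is hypothetical.
 */
static inline int mapping_check_and_clear_error(struct address_space *mapping)
{
	int ret = 0;

	/* a later -EIO report overrides an earlier -ENOSPC one */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}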

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (likely(mapping))
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
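
/*
 * Illustrative use (sketch): because the gfp bits share mapping->flags
 * with the AS_* bits above, they may only be changed non-atomically,
 * before any pages are inserted. For example, a filesystem that can be
 * entered from reclaim might mask out __GFP_FS at inode-setup time:
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 */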

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed, then the exact
 * same page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
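
/*
 * Illustrative sketch only (simplified from find_get_page() in
 * mm/filemap.c, which this header merely declares below): the 1/2/3
 * lookup loop from the protocol comment above. Assumes
 * <linux/radix-tree.h> and ignores RADIX_TREE_RETRY handling.
 */
static inline struct page *find_get_page_sketch(struct address_space *mapping,
						pgoff_t offset)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);	/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))		/* step 2 */
			goto repeat;
		/* step 3: did the page move while we took the reference? */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
						       offset))) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}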

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
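
/*
 * Illustrative sketch only (heavily simplified from __remove_mapping()
 * in mm/vmscan.c; the function name is hypothetical, and dirty-page,
 * swapcache and cgroup handling are all omitted): the A/B/C remove side
 * of the protocol above, run by reclaim on a page it has isolated.
 */
static inline int remove_mapping_sketch(struct address_space *mapping,
					struct page *page)
{
	spin_lock_irq(&mapping->tree_lock);
	/* A: expect exactly 2 refs - the pagecache's and our isolation ref */
	if (!page_freeze_refs(page, 2)) {
		/* a concurrent lookup's step 2 beat us; bail out */
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	/* B: remove page from pagecache (declared later in this header) */
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	/* C: the caller now frees the page */
	return 1;
}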

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}
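
/*
 * Worked example (assuming 4K pages, i.e. PAGE_CACHE_SHIFT == 12): the
 * page caching bytes 12288..16383 of a file has page->index == 3, and
 * page_offset() returns 3 << 12 == 12288.
 */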

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals. It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
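
/*
 * Illustrative use (sketch, modeled on the generic read path): callers
 * that may block for a long time should prefer the killable variant and
 * propagate the error:
 *
 *	error = lock_page_killable(page);
 *	if (unlikely(error)) {
 *		page_cache_release(page);
 *		return error;
 *	}
 */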

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables. Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient. That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}
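
/*
 * Illustrative use (sketch, modeled on the generic buffered-write path):
 * fault the source buffer in *before* locking the pagecache page, so a
 * fault taken while copying with pagefaults disabled cannot deadlock on
 * that same page lock:
 *
 *	if (fault_in_pages_readable(buf, bytes))
 *		return -EFAULT;
 *	...
 *	lock_page(page);
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(kaddr, buf, bytes);
 *	pagefault_enable();
 */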

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
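
/*
 * Illustrative use (sketch, modeled on the readahead path): allocate a
 * fresh page with the mapping's gfp mask and insert it locked; on
 * failure the new page still holds our reference and must be released:
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (page && add_to_page_cache_lru(page, mapping, index,
 *					  mapping_gfp_mask(mapping))) {
 *		page_cache_release(page);
 *		page = NULL;
 *	}
 */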

#endif /* _LINUX_PAGEMAP_H */