/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>

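/*
 * Per-CPU depth index for nested kmap_atomic() slots. Note that 32-bit
 * x86 needs this even without CONFIG_HIGHMEM, for its fixmap-based
 * atomic mappings.
 */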
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif

/*
 * pkmap_count[] is not a pure usage count.
 * 0 means that the entry is not mapped, and has not been mapped
 *   since a TLB flush - it is usable.
 * 1 means that there are no users, but it has been mapped
 *   since the last TLB flush - so we can't use it.
 * n means that there are (n-1) current users of it.
 */
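/*
 * Lifecycle sketch, derived from the code below: the first kmap_high()
 * of a page takes its entry from 0 to 2 (map_new_virtual() sets it to 1,
 * kmap_high() then increments it); kunmap_high() drops it back to 1; the
 * entry only returns to 0 in flush_all_zero_pkmaps(), together with the
 * TLB flush.
 */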
#ifdef CONFIG_HIGHMEM

unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);


EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);

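/*
 * nr_free_highpages - sum the free pages of every node's ZONE_HIGHMEM,
 * plus ZONE_MOVABLE when that zone is backed by highmem.
 */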
unsigned int nr_free_highpages(void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_online_pgdat(pgdat) {
		pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
			NR_FREE_PAGES);
		if (zone_movable_is_highmem())
			pages += zone_page_state(
					&pgdat->node_zones[ZONE_MOVABLE],
					NR_FREE_PAGES);
	}

	return pages;
}

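/*
 * pkmap_count[] holds the per-slot state described above; last_pkmap_nr
 * is a rotating scan hint into it. Both, along with pkmap_page_table,
 * are protected by kmap_lock.
 */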
static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQ out of the locking in that case to save on a
 * potential useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()             spin_lock_irq(&kmap_lock)
#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()             spin_lock(&kmap_lock)
#define unlock_kmap()           spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)    \
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)  \
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

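/*
 * kmap_to_page - translate a kernel virtual address back to its
 * struct page, handling both pkmap addresses and ordinary lowmem
 * addresses.
 */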
struct page *kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);
		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}

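/*
 * Unmap every pkmap entry whose count has fallen to 1 (mapped but
 * unused) and flush the TLB over the whole pkmap range once at the end.
 * Must be called with kmap_lock held.
 */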
static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, (unsigned long)page_address(page),
			  &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

/**
 * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
 */
void kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

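/*
 * Find an unused pkmap slot for @page and map the page there. Called
 * with kmap_lock held; may drop the lock and sleep until another thread
 * unmaps an entry, and may return an address mapped meanwhile by
 * somebody else.
 */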
static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;

start:
	count = LAST_PKMAP;
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
		if (!last_pkmap_nr) {
			flush_all_zero_pkmaps();
			count = LAST_PKMAP;
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(&pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *)vaddr;
}

EXPORT_SYMBOL(kmap_high);
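/*
 * Typical caller pattern, as a sketch - callers normally go through the
 * arch kmap()/kunmap() wrappers, which fall back to kmap_high() and
 * kunmap_high() only for highmem pages:
 *
 *	void *vaddr = kmap(page);
 *	memcpy(vaddr, buf, PAGE_SIZE);
 *	kunmap(page);
 */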

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
#endif

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */
		need_wakeup = waitqueue_active(&pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(&pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
#endif

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7
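/* 1 << PA_HASH_ORDER == 128 buckets in page_address_htable below */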

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

/*
 * page_address_map freelist, allocated from page_address_maps.
 */
static struct list_head page_address_pool;	/* freelist */
static spinlock_t pool_lock;			/* protects page_address_pool */
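/*
 * Note, from reading set_page_address() below: pool_lock and a bucket's
 * pas->lock are never held at the same time, so no lock ordering needs
 * to be maintained between them.
 */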

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}

EXPORT_SYMBOL(page_address);

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		BUG_ON(list_empty(&page_address_pool));

		spin_lock_irqsave(&pool_lock, flags);
		pam = list_entry(page_address_pool.next,
				struct page_address_map, list);
		list_del(&pam->list);
		spin_unlock_irqrestore(&pool_lock, flags);

		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				spin_lock_irqsave(&pool_lock, flags);
				list_add_tail(&pam->list, &page_address_pool);
				spin_unlock_irqrestore(&pool_lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

static struct page_address_map page_address_maps[LAST_PKMAP];

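/*
 * page_address_init - seed the freelist with one static entry per pkmap
 * slot and initialise the hash buckets. Runs once at early boot.
 */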
void __init page_address_init(void)
{
	int i;

	INIT_LIST_HEAD(&page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_maps); i++)
		list_add(&page_address_maps[i].list, &page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
	spin_lock_init(&pool_lock);
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */