1 | /* |
2 | * linux/mm/percpu.c - percpu memory allocator |
3 | * |
4 | * Copyright (C) 2009 SUSE Linux Products GmbH |
5 | * Copyright (C) 2009 Tejun Heo <tj@kernel.org> |
6 | * |
7 | * This file is released under the GPLv2. |
8 | * |
9 | * This is the percpu allocator which can handle both static and dynamic |
10 | * areas. Percpu areas are allocated in chunks in the vmalloc area. Each |
11 | * chunk consists of a boot-time determined number of units and the |
12 | * first chunk is used for static percpu variables in the kernel image |
13 | * (special boot time alloc/init handling necessary as these areas |
14 | * need to be brought up before allocation services are running). |
15 | * Units grow as necessary and all units grow or shrink in unison. |
16 | * When a chunk is filled up, another chunk is allocated in the |
17 | * vmalloc area. |
18 | * |
19 | * c0 c1 c2 |
20 | * ------------------- ------------------- ------------ |
21 | * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u |
22 | * ------------------- ...... ------------------- .... ------------ |
23 | * |
24 | * Allocation is done in offset-size areas of a single unit's space, i.e. |
25 | * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0, |
26 | * c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly to |
27 | * cpus. On NUMA, the mapping can be non-linear and even sparse. |
28 | * Percpu access can be done by configuring percpu base registers |
29 | * according to cpu to unit mapping and pcpu_unit_size. |
30 | * |
31 | * There are usually many small percpu allocations, many of them as |
32 | * small as 4 bytes. The allocator organizes chunks into lists |
33 | * according to free size and tries to allocate from the fullest one. |
34 | * Each chunk keeps the maximum contiguous area size hint which is |
35 | * guaranteed to be equal to or larger than the maximum contiguous |
36 | * area in the chunk. This helps the allocator not to iterate the |
37 | * chunk maps unnecessarily. |
38 | * |
39 | * Allocation state in each chunk is kept using an array of integers |
40 | * on chunk->map. A positive value in the map represents a free |
41 | * region and negative allocated. Allocation inside a chunk is done |
42 | * by scanning this map sequentially and serving the first matching |
43 | * entry. This is mostly copied from the percpu_modalloc() allocator. |
44 | * Chunks can be determined from the address using the index field |
45 | * in the page struct. The index field contains a pointer to the chunk. |
46 | * |
47 | * To use this allocator, arch code should do the following. |
48 | * |
49 | * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate |
50 | * regular address to percpu pointer and back if they need to be |
51 | * different from the default |
52 | * |
53 | * - use pcpu_setup_first_chunk() during percpu area initialization to |
54 | * setup the first chunk containing the kernel static percpu area |
55 | */ |
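/*
 * Worked illustration of the allocation map described above (an
 * editorial example, not part of the original comment): a chunk whose
 * map reads { 128, -256, 64 } has a 128 byte free area at offset 0, a
 * 256 byte allocated area at offset 128 and a 64 byte free area at
 * offset 384; its free_size is 192 and its contig_hint is at least
 * 128, the largest contiguous free area.
 */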
56 | |
57 | #include <linux/bitmap.h> |
58 | #include <linux/bootmem.h> |
59 | #include <linux/err.h> |
60 | #include <linux/list.h> |
61 | #include <linux/log2.h> |
62 | #include <linux/mm.h> |
63 | #include <linux/module.h> |
64 | #include <linux/mutex.h> |
65 | #include <linux/percpu.h> |
66 | #include <linux/pfn.h> |
67 | #include <linux/slab.h> |
68 | #include <linux/spinlock.h> |
69 | #include <linux/vmalloc.h> |
70 | #include <linux/workqueue.h> |
71 | |
72 | #include <asm/cacheflush.h> |
73 | #include <asm/sections.h> |
74 | #include <asm/tlbflush.h> |
75 | #include <asm/io.h> |
76 | |
77 | #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ |
78 | #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ |
79 | |
80 | /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ |
81 | #ifndef __addr_to_pcpu_ptr |
82 | #define __addr_to_pcpu_ptr(addr) \ |
83 | (void __percpu *)((unsigned long)(addr) - \ |
84 | (unsigned long)pcpu_base_addr + \ |
85 | (unsigned long)__per_cpu_start) |
86 | #endif |
87 | #ifndef __pcpu_ptr_to_addr |
88 | #define __pcpu_ptr_to_addr(ptr) \ |
89 | (void __force *)((unsigned long)(ptr) + \ |
90 | (unsigned long)pcpu_base_addr - \ |
91 | (unsigned long)__per_cpu_start) |
92 | #endif |
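/*
 * The two translations above are exact inverses. A minimal sketch of
 * how they pair up (variable names are illustrative; see pcpu_alloc()
 * and free_percpu() below for the real uses):
 *
 *	void *addr = chunk->base_addr + off;
 *	void __percpu *ptr = __addr_to_pcpu_ptr(addr);
 *
 * and later __pcpu_ptr_to_addr(ptr) recovers addr for freeing.
 */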
93 | |
94 | struct pcpu_chunk { |
95 | struct list_head list; /* linked to pcpu_slot lists */ |
96 | int free_size; /* free bytes in the chunk */ |
97 | int contig_hint; /* max contiguous size hint */ |
98 | void *base_addr; /* base address of this chunk */ |
99 | int map_used; /* # of map entries used */ |
100 | int map_alloc; /* # of map entries allocated */ |
101 | int *map; /* allocation map */ |
102 | struct vm_struct **vms; /* mapped vmalloc regions */ |
103 | bool immutable; /* no [de]population allowed */ |
104 | unsigned long populated[]; /* populated bitmap */ |
105 | }; |
106 | |
107 | static int pcpu_unit_pages __read_mostly; |
108 | static int pcpu_unit_size __read_mostly; |
109 | static int pcpu_nr_units __read_mostly; |
110 | static int pcpu_atom_size __read_mostly; |
111 | static int pcpu_nr_slots __read_mostly; |
112 | static size_t pcpu_chunk_struct_size __read_mostly; |
113 | |
114 | /* cpus with the lowest and highest unit numbers */ |
115 | static unsigned int pcpu_first_unit_cpu __read_mostly; |
116 | static unsigned int pcpu_last_unit_cpu __read_mostly; |
117 | |
118 | /* the address of the first chunk which starts with the kernel static area */ |
119 | void *pcpu_base_addr __read_mostly; |
120 | EXPORT_SYMBOL_GPL(pcpu_base_addr); |
121 | |
122 | static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */ |
123 | const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */ |
124 | |
125 | /* group information, used for vm allocation */ |
126 | static int pcpu_nr_groups __read_mostly; |
127 | static const unsigned long *pcpu_group_offsets __read_mostly; |
128 | static const size_t *pcpu_group_sizes __read_mostly; |
129 | |
130 | /* |
131 | * The first chunk which always exists. Note that unlike other |
132 | * chunks, this one can be allocated and mapped in several different |
133 | * ways and thus often doesn't live in the vmalloc area. |
134 | */ |
135 | static struct pcpu_chunk *pcpu_first_chunk; |
136 | |
137 | /* |
138 | * Optional reserved chunk. This chunk reserves part of the first |
139 | * chunk and serves it for reserved allocations. The size of the |
140 | * reserved region is kept in pcpu_reserved_chunk_limit. When the |
141 | * reserved area doesn't exist, the following variables contain NULL |
142 | * and 0 respectively. |
143 | */ |
144 | static struct pcpu_chunk *pcpu_reserved_chunk; |
145 | static int pcpu_reserved_chunk_limit; |
146 | |
147 | /* |
148 | * Synchronization rules. |
149 | * |
150 | * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former |
151 | * protects allocation/reclaim paths, chunks, populated bitmap and |
152 | * vmalloc mapping. The latter is a spinlock and protects the index |
153 | * data structures - chunk slots, chunks and area maps in chunks. |
154 | * |
155 | * During allocation, pcpu_alloc_mutex is kept locked all the time and |
156 | * pcpu_lock is grabbed and released as necessary. All actual memory |
157 | * allocations are done using GFP_KERNEL with pcpu_lock released. In |
158 | * general, percpu memory can't be allocated with irq off but |
159 | * irqsave/restore are still used in alloc path so that it can be used |
160 | * from early init path - sched_init() specifically. |
161 | * |
162 | * Free path accesses and alters only the index data structures, so it |
163 | * can be safely called from atomic context. When memory needs to be |
164 | * returned to the system, free path schedules reclaim_work which |
165 | * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be |
166 | * reclaimed, releases both locks and frees the chunks. Note that it's |
167 | * necessary to grab both locks to remove a chunk from circulation as |
168 | * allocation path might be referencing the chunk with only |
169 | * pcpu_alloc_mutex locked. |
170 | */ |
171 | static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */ |
172 | static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */ |
173 | |
174 | static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ |
175 | |
176 | /* reclaim work to release fully free chunks, scheduled from free path */ |
177 | static void pcpu_reclaim(struct work_struct *work); |
178 | static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim); |
179 | |
180 | static int __pcpu_size_to_slot(int size) |
181 | { |
182 | int highbit = fls(size); /* size is in bytes */ |
183 | return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); |
184 | } |
185 | |
186 | static int pcpu_size_to_slot(int size) |
187 | { |
188 | if (size == pcpu_unit_size) |
189 | return pcpu_nr_slots - 1; |
190 | return __pcpu_size_to_slot(size); |
191 | } |
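/*
 * Example (editorial illustration): with PCPU_SLOT_BASE_SHIFT == 5,
 * __pcpu_size_to_slot(64) is max(fls(64) - 5 + 2, 1) == 4, while a
 * chunk whose free_size equals pcpu_unit_size is always placed in the
 * last slot, pcpu_nr_slots - 1.
 */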
192 | |
193 | static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) |
194 | { |
195 | if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) |
196 | return 0; |
197 | |
198 | return pcpu_size_to_slot(chunk->free_size); |
199 | } |
200 | |
201 | static int pcpu_page_idx(unsigned int cpu, int page_idx) |
202 | { |
203 | return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; |
204 | } |
205 | |
206 | static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, |
207 | unsigned int cpu, int page_idx) |
208 | { |
209 | return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] + |
210 | (page_idx << PAGE_SHIFT); |
211 | } |
212 | |
213 | static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, |
214 | unsigned int cpu, int page_idx) |
215 | { |
216 | /* must not be used on pre-mapped chunk */ |
217 | WARN_ON(chunk->immutable); |
218 | |
219 | return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); |
220 | } |
221 | |
222 | /* set the pointer to a chunk in a page struct */ |
223 | static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) |
224 | { |
225 | page->index = (unsigned long)pcpu; |
226 | } |
227 | |
228 | /* obtain pointer to a chunk from a page struct */ |
229 | static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) |
230 | { |
231 | return (struct pcpu_chunk *)page->index; |
232 | } |
233 | |
234 | static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end) |
235 | { |
236 | *rs = find_next_zero_bit(chunk->populated, end, *rs); |
237 | *re = find_next_bit(chunk->populated, end, *rs + 1); |
238 | } |
239 | |
240 | static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end) |
241 | { |
242 | *rs = find_next_bit(chunk->populated, end, *rs); |
243 | *re = find_next_zero_bit(chunk->populated, end, *rs + 1); |
244 | } |
245 | |
246 | /* |
247 | * (Un)populated page region iterators. Iterate over (un)populated |
248 | * page regions between @start and @end in @chunk. @rs and @re should |
249 | * be integer variables and will be set to start and end page index of |
250 | * the current region. |
251 | */ |
252 | #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \ |
253 | for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \ |
254 | (rs) < (re); \ |
255 | (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end))) |
256 | |
257 | #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \ |
258 | for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \ |
259 | (rs) < (re); \ |
260 | (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end))) |
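/*
 * Minimal usage sketch of the iterators above (illustrative only,
 * mirroring the populate path below):
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end)
 *		pr_debug("pages [%d, %d) still need populating\n", rs, re);
 */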
261 | |
262 | /** |
263 | * pcpu_mem_alloc - allocate memory |
264 | * @size: bytes to allocate |
265 | * |
266 | * Allocate @size bytes. If @size is smaller than PAGE_SIZE, |
267 | * kzalloc() is used; otherwise, vmalloc() is used. The returned |
268 | * memory is always zeroed. |
269 | * |
270 | * CONTEXT: |
271 | * Does GFP_KERNEL allocation. |
272 | * |
273 | * RETURNS: |
274 | * Pointer to the allocated area on success, NULL on failure. |
275 | */ |
276 | static void *pcpu_mem_alloc(size_t size) |
277 | { |
278 | if (size <= PAGE_SIZE) |
279 | return kzalloc(size, GFP_KERNEL); |
280 | else { |
281 | void *ptr = vmalloc(size); |
282 | if (ptr) |
283 | memset(ptr, 0, size); |
284 | return ptr; |
285 | } |
286 | } |
287 | |
288 | /** |
289 | * pcpu_mem_free - free memory |
290 | * @ptr: memory to free |
291 | * @size: size of the area |
292 | * |
293 | * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc(). |
294 | */ |
295 | static void pcpu_mem_free(void *ptr, size_t size) |
296 | { |
297 | if (size <= PAGE_SIZE) |
298 | kfree(ptr); |
299 | else |
300 | vfree(ptr); |
301 | } |
302 | |
303 | /** |
304 | * pcpu_chunk_relocate - put chunk in the appropriate chunk slot |
305 | * @chunk: chunk of interest |
306 | * @oslot: the previous slot it was on |
307 | * |
308 | * This function is called after an allocation or free changed @chunk. |
309 | * New slot according to the changed state is determined and @chunk is |
310 | * moved to the slot. Note that the reserved chunk is never put on |
311 | * chunk slots. |
312 | * |
313 | * CONTEXT: |
314 | * pcpu_lock. |
315 | */ |
316 | static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) |
317 | { |
318 | int nslot = pcpu_chunk_slot(chunk); |
319 | |
320 | if (chunk != pcpu_reserved_chunk && oslot != nslot) { |
321 | if (oslot < nslot) |
322 | list_move(&chunk->list, &pcpu_slot[nslot]); |
323 | else |
324 | list_move_tail(&chunk->list, &pcpu_slot[nslot]); |
325 | } |
326 | } |
327 | |
328 | /** |
329 | * pcpu_chunk_addr_search - determine chunk containing specified address |
330 | * @addr: address for which the chunk needs to be determined. |
331 | * |
332 | * RETURNS: |
333 | * The address of the found chunk. |
334 | */ |
335 | static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) |
336 | { |
337 | void *first_start = pcpu_first_chunk->base_addr; |
338 | |
339 | /* is it in the first chunk? */ |
340 | if (addr >= first_start && addr < first_start + pcpu_unit_size) { |
341 | /* is it in the reserved area? */ |
342 | if (addr < first_start + pcpu_reserved_chunk_limit) |
343 | return pcpu_reserved_chunk; |
344 | return pcpu_first_chunk; |
345 | } |
346 | |
347 | /* |
348 | * The address is relative to unit0 which might be unused and |
349 | * thus unmapped. Offset the address to the unit space of the |
350 | * current processor before looking it up in the vmalloc |
351 | * space. Note that any possible cpu id can be used here, so |
352 | * there's no need to worry about preemption or cpu hotplug. |
353 | */ |
354 | addr += pcpu_unit_offsets[raw_smp_processor_id()]; |
355 | return pcpu_get_page_chunk(vmalloc_to_page(addr)); |
356 | } |
357 | |
358 | /** |
359 | * pcpu_need_to_extend - determine whether chunk area map needs to be extended |
360 | * @chunk: chunk of interest |
361 | * |
362 | * Determine whether area map of @chunk needs to be extended to |
363 | * accommodate a new allocation. |
364 | * |
365 | * CONTEXT: |
366 | * pcpu_lock. |
367 | * |
368 | * RETURNS: |
369 | * New target map allocation length if extension is necessary, 0 |
370 | * otherwise. |
371 | */ |
372 | static int pcpu_need_to_extend(struct pcpu_chunk *chunk) |
373 | { |
374 | int new_alloc; |
375 | |
376 | if (chunk->map_alloc >= chunk->map_used + 2) |
377 | return 0; |
378 | |
379 | new_alloc = PCPU_DFL_MAP_ALLOC; |
380 | while (new_alloc < chunk->map_used + 2) |
381 | new_alloc *= 2; |
382 | |
383 | return new_alloc; |
384 | } |
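/*
 * Example (editorial): a chunk with map_used == 15 and map_alloc == 16
 * can't guarantee room for the two extra entries a split may need, so
 * this returns 32; with map_used == 10 and map_alloc == 16 it returns 0.
 */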
385 | |
386 | /** |
387 | * pcpu_extend_area_map - extend area map of a chunk |
388 | * @chunk: chunk of interest |
389 | * @new_alloc: new target allocation length of the area map |
390 | * |
391 | * Extend area map of @chunk to have @new_alloc entries. |
392 | * |
393 | * CONTEXT: |
394 | * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock. |
395 | * |
396 | * RETURNS: |
397 | * 0 on success, -errno on failure. |
398 | */ |
399 | static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc) |
400 | { |
401 | int *old = NULL, *new = NULL; |
402 | size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); |
403 | unsigned long flags; |
404 | |
405 | new = pcpu_mem_alloc(new_size); |
406 | if (!new) |
407 | return -ENOMEM; |
408 | |
409 | /* acquire pcpu_lock and switch to new area map */ |
410 | spin_lock_irqsave(&pcpu_lock, flags); |
411 | |
412 | if (new_alloc <= chunk->map_alloc) |
413 | goto out_unlock; |
414 | |
415 | old_size = chunk->map_alloc * sizeof(chunk->map[0]); |
416 | memcpy(new, chunk->map, old_size); |
417 | |
418 | /* |
419 | * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is |
420 | * one of the first chunks and still using static map. |
421 | */ |
422 | if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC) |
423 | old = chunk->map; |
424 | |
425 | chunk->map_alloc = new_alloc; |
426 | chunk->map = new; |
427 | new = NULL; |
428 | |
429 | out_unlock: |
430 | spin_unlock_irqrestore(&pcpu_lock, flags); |
431 | |
432 | /* |
433 | * pcpu_mem_free() might end up calling vfree() which uses |
434 | * IRQ-unsafe lock and thus can't be called under pcpu_lock. |
435 | */ |
436 | pcpu_mem_free(old, old_size); |
437 | pcpu_mem_free(new, new_size); |
438 | |
439 | return 0; |
440 | } |
441 | |
442 | /** |
443 | * pcpu_split_block - split a map block |
444 | * @chunk: chunk of interest |
445 | * @i: index of map block to split |
446 | * @head: head size in bytes (can be 0) |
447 | * @tail: tail size in bytes (can be 0) |
448 | * |
449 | * Split the @i'th map block into two or three blocks. If @head is |
450 | * non-zero, @head bytes block is inserted before block @i moving it |
451 | * to @i+1 and reducing its size by @head bytes. |
452 | * |
453 | * If @tail is non-zero, the target block, which can be @i or @i+1 |
454 | * depending on @head, is reduced by @tail bytes and @tail byte block |
455 | * is inserted after the target block. |
456 | * |
457 | * @chunk->map must have enough free slots to accommodate the split. |
458 | * |
459 | * CONTEXT: |
460 | * pcpu_lock. |
461 | */ |
462 | static void pcpu_split_block(struct pcpu_chunk *chunk, int i, |
463 | int head, int tail) |
464 | { |
465 | int nr_extra = !!head + !!tail; |
466 | |
467 | BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra); |
468 | |
469 | /* insert new subblocks */ |
470 | memmove(&chunk->map[i + nr_extra], &chunk->map[i], |
471 | sizeof(chunk->map[0]) * (chunk->map_used - i)); |
472 | chunk->map_used += nr_extra; |
473 | |
474 | if (head) { |
475 | chunk->map[i + 1] = chunk->map[i] - head; |
476 | chunk->map[i++] = head; |
477 | } |
478 | if (tail) { |
479 | chunk->map[i++] -= tail; |
480 | chunk->map[i] = tail; |
481 | } |
482 | } |
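/*
 * Worked example (editorial): splitting a single 1024 byte free entry
 * with @head == 64 and @tail == 448 turns { 1024 } into { 64, 512, 448 };
 * pcpu_alloc_area() below then negates the middle entry when it marks
 * the 512 byte area allocated, giving { 64, -512, 448 }.
 */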
483 | |
484 | /** |
485 | * pcpu_alloc_area - allocate area from a pcpu_chunk |
486 | * @chunk: chunk of interest |
487 | * @size: wanted size in bytes |
488 | * @align: wanted align |
489 | * |
490 | * Try to allocate @size bytes area aligned at @align from @chunk. |
491 | * Note that this function only allocates the offset. It doesn't |
492 | * populate or map the area. |
493 | * |
494 | * @chunk->map must have at least two free slots. |
495 | * |
496 | * CONTEXT: |
497 | * pcpu_lock. |
498 | * |
499 | * RETURNS: |
500 | * Allocated offset in @chunk on success, -1 if no matching area is |
501 | * found. |
502 | */ |
503 | static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align) |
504 | { |
505 | int oslot = pcpu_chunk_slot(chunk); |
506 | int max_contig = 0; |
507 | int i, off; |
508 | |
509 | for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) { |
510 | bool is_last = i + 1 == chunk->map_used; |
511 | int head, tail; |
512 | |
513 | /* extra for alignment requirement */ |
514 | head = ALIGN(off, align) - off; |
515 | BUG_ON(i == 0 && head != 0); |
516 | |
517 | if (chunk->map[i] < 0) |
518 | continue; |
519 | if (chunk->map[i] < head + size) { |
520 | max_contig = max(chunk->map[i], max_contig); |
521 | continue; |
522 | } |
523 | |
524 | /* |
525 | * If head is small or the previous block is free, |
526 | * merge'em. Note that 'small' is defined as smaller |
527 | * than sizeof(int), which is very small but isn't too |
528 | * uncommon for percpu allocations. |
529 | */ |
530 | if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) { |
531 | if (chunk->map[i - 1] > 0) |
532 | chunk->map[i - 1] += head; |
533 | else { |
534 | chunk->map[i - 1] -= head; |
535 | chunk->free_size -= head; |
536 | } |
537 | chunk->map[i] -= head; |
538 | off += head; |
539 | head = 0; |
540 | } |
541 | |
542 | /* if tail is small, just keep it around */ |
543 | tail = chunk->map[i] - head - size; |
544 | if (tail < sizeof(int)) |
545 | tail = 0; |
546 | |
547 | /* split if warranted */ |
548 | if (head || tail) { |
549 | pcpu_split_block(chunk, i, head, tail); |
550 | if (head) { |
551 | i++; |
552 | off += head; |
553 | max_contig = max(chunk->map[i - 1], max_contig); |
554 | } |
555 | if (tail) |
556 | max_contig = max(chunk->map[i + 1], max_contig); |
557 | } |
558 | |
559 | /* update hint and mark allocated */ |
560 | if (is_last) |
561 | chunk->contig_hint = max_contig; /* fully scanned */ |
562 | else |
563 | chunk->contig_hint = max(chunk->contig_hint, |
564 | max_contig); |
565 | |
566 | chunk->free_size -= chunk->map[i]; |
567 | chunk->map[i] = -chunk->map[i]; |
568 | |
569 | pcpu_chunk_relocate(chunk, oslot); |
570 | return off; |
571 | } |
572 | |
573 | chunk->contig_hint = max_contig; /* fully scanned */ |
574 | pcpu_chunk_relocate(chunk, oslot); |
575 | |
576 | /* tell the upper layer that this chunk has no matching area */ |
577 | return -1; |
578 | } |
579 | |
580 | /** |
581 | * pcpu_free_area - free area to a pcpu_chunk |
582 | * @chunk: chunk of interest |
583 | * @freeme: offset of area to free |
584 | * |
585 | * Free the area starting at @freeme in @chunk. Note that this function |
586 | * only modifies the allocation map. It doesn't depopulate or unmap |
587 | * the area. |
588 | * |
589 | * CONTEXT: |
590 | * pcpu_lock. |
591 | */ |
592 | static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) |
593 | { |
594 | int oslot = pcpu_chunk_slot(chunk); |
595 | int i, off; |
596 | |
597 | for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) |
598 | if (off == freeme) |
599 | break; |
600 | BUG_ON(off != freeme); |
601 | BUG_ON(chunk->map[i] > 0); |
602 | |
603 | chunk->map[i] = -chunk->map[i]; |
604 | chunk->free_size += chunk->map[i]; |
605 | |
606 | /* merge with previous? */ |
607 | if (i > 0 && chunk->map[i - 1] >= 0) { |
608 | chunk->map[i - 1] += chunk->map[i]; |
609 | chunk->map_used--; |
610 | memmove(&chunk->map[i], &chunk->map[i + 1], |
611 | (chunk->map_used - i) * sizeof(chunk->map[0])); |
612 | i--; |
613 | } |
614 | /* merge with next? */ |
615 | if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) { |
616 | chunk->map[i] += chunk->map[i + 1]; |
617 | chunk->map_used--; |
618 | memmove(&chunk->map[i + 1], &chunk->map[i + 2], |
619 | (chunk->map_used - (i + 1)) * sizeof(chunk->map[0])); |
620 | } |
621 | |
622 | chunk->contig_hint = max(chunk->map[i], chunk->contig_hint); |
623 | pcpu_chunk_relocate(chunk, oslot); |
624 | } |
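/*
 * Continuing the editorial example from pcpu_split_block(): freeing
 * the 512 byte area in { 64, -512, 448 } first flips the entry back to
 * 512, then merges it with the free neighbours on both sides, leaving
 * a single { 1024 } entry.
 */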
625 | |
626 | /** |
627 | * pcpu_get_pages_and_bitmap - get temp pages array and bitmap |
628 | * @chunk: chunk of interest |
629 | * @bitmapp: output parameter for bitmap |
630 | * @may_alloc: may allocate the array |
631 | * |
632 | * Returns pointer to array of pointers to struct page and bitmap, |
633 | * both of which can be indexed with pcpu_page_idx(). The returned |
634 | * array is cleared to zero and *@bitmapp is copied from |
635 | * @chunk->populated. Note that there is only one array and bitmap |
636 | * and access exclusion is the caller's responsibility. |
637 | * |
638 | * CONTEXT: |
639 | * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc. |
640 | * Otherwise, don't care. |
641 | * |
642 | * RETURNS: |
643 | * Pointer to temp pages array on success, NULL on failure. |
644 | */ |
645 | static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk, |
646 | unsigned long **bitmapp, |
647 | bool may_alloc) |
648 | { |
649 | static struct page **pages; |
650 | static unsigned long *bitmap; |
651 | size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); |
652 | size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) * |
653 | sizeof(unsigned long); |
654 | |
655 | if (!pages || !bitmap) { |
656 | if (may_alloc && !pages) |
657 | pages = pcpu_mem_alloc(pages_size); |
658 | if (may_alloc && !bitmap) |
659 | bitmap = pcpu_mem_alloc(bitmap_size); |
660 | if (!pages || !bitmap) |
661 | return NULL; |
662 | } |
663 | |
664 | memset(pages, 0, pages_size); |
665 | bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages); |
666 | |
667 | *bitmapp = bitmap; |
668 | return pages; |
669 | } |
670 | |
671 | /** |
672 | * pcpu_free_pages - free pages which were allocated for @chunk |
673 | * @chunk: chunk pages were allocated for |
674 | * @pages: array of pages to be freed, indexed by pcpu_page_idx() |
675 | * @populated: populated bitmap |
676 | * @page_start: page index of the first page to be freed |
677 | * @page_end: page index of the last page to be freed + 1 |
678 | * |
679 | * Free pages [@page_start, @page_end) in @pages for all units. |
680 | * The pages were allocated for @chunk. |
681 | */ |
682 | static void pcpu_free_pages(struct pcpu_chunk *chunk, |
683 | struct page **pages, unsigned long *populated, |
684 | int page_start, int page_end) |
685 | { |
686 | unsigned int cpu; |
687 | int i; |
688 | |
689 | for_each_possible_cpu(cpu) { |
690 | for (i = page_start; i < page_end; i++) { |
691 | struct page *page = pages[pcpu_page_idx(cpu, i)]; |
692 | |
693 | if (page) |
694 | __free_page(page); |
695 | } |
696 | } |
697 | } |
698 | |
699 | /** |
700 | * pcpu_alloc_pages - allocates pages for @chunk |
701 | * @chunk: target chunk |
702 | * @pages: array to put the allocated pages into, indexed by pcpu_page_idx() |
703 | * @populated: populated bitmap |
704 | * @page_start: page index of the first page to be allocated |
705 | * @page_end: page index of the last page to be allocated + 1 |
706 | * |
707 | * Allocate pages [@page_start,@page_end) into @pages for all units. |
708 | * The allocation is for @chunk. Percpu core doesn't care about the |
709 | * content of @pages and will pass it verbatim to pcpu_map_pages(). |
710 | */ |
711 | static int pcpu_alloc_pages(struct pcpu_chunk *chunk, |
712 | struct page **pages, unsigned long *populated, |
713 | int page_start, int page_end) |
714 | { |
715 | const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; |
716 | unsigned int cpu; |
717 | int i; |
718 | |
719 | for_each_possible_cpu(cpu) { |
720 | for (i = page_start; i < page_end; i++) { |
721 | struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; |
722 | |
723 | *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); |
724 | if (!*pagep) { |
725 | pcpu_free_pages(chunk, pages, populated, |
726 | page_start, page_end); |
727 | return -ENOMEM; |
728 | } |
729 | } |
730 | } |
731 | return 0; |
732 | } |
733 | |
734 | /** |
735 | * pcpu_pre_unmap_flush - flush cache prior to unmapping |
736 | * @chunk: chunk the regions to be flushed belongs to |
737 | * @page_start: page index of the first page to be flushed |
738 | * @page_end: page index of the last page to be flushed + 1 |
739 | * |
740 | * Pages in [@page_start,@page_end) of @chunk are about to be |
741 | * unmapped. Flush cache. As each flushing trial can be very |
742 | * expensive, issue flush on the whole region at once rather than |
743 | * doing it for each cpu. This could be overkill but is more |
744 | * scalable. |
745 | */ |
746 | static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, |
747 | int page_start, int page_end) |
748 | { |
749 | flush_cache_vunmap( |
750 | pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), |
751 | pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); |
752 | } |
753 | |
754 | static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) |
755 | { |
756 | unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT); |
757 | } |
758 | |
759 | /** |
760 | * pcpu_unmap_pages - unmap pages out of a pcpu_chunk |
761 | * @chunk: chunk of interest |
762 | * @pages: pages array which can be used to pass information to free |
763 | * @populated: populated bitmap |
764 | * @page_start: page index of the first page to unmap |
765 | * @page_end: page index of the last page to unmap + 1 |
766 | * |
767 | * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. |
768 | * Corresponding elements in @pages were cleared by the caller and can |
769 | * be used to carry information to pcpu_free_pages() which will be |
770 | * called after all unmaps are finished. The caller should call |
771 | * proper pre/post flush functions. |
772 | */ |
773 | static void pcpu_unmap_pages(struct pcpu_chunk *chunk, |
774 | struct page **pages, unsigned long *populated, |
775 | int page_start, int page_end) |
776 | { |
777 | unsigned int cpu; |
778 | int i; |
779 | |
780 | for_each_possible_cpu(cpu) { |
781 | for (i = page_start; i < page_end; i++) { |
782 | struct page *page; |
783 | |
784 | page = pcpu_chunk_page(chunk, cpu, i); |
785 | WARN_ON(!page); |
786 | pages[pcpu_page_idx(cpu, i)] = page; |
787 | } |
788 | __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start), |
789 | page_end - page_start); |
790 | } |
791 | |
792 | for (i = page_start; i < page_end; i++) |
793 | __clear_bit(i, populated); |
794 | } |
795 | |
796 | /** |
797 | * pcpu_post_unmap_tlb_flush - flush TLB after unmapping |
798 | * @chunk: pcpu_chunk the regions to be flushed belong to |
799 | * @page_start: page index of the first page to be flushed |
800 | * @page_end: page index of the last page to be flushed + 1 |
801 | * |
802 | * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush |
803 | * TLB for the regions. This can be skipped if the area is to be |
804 | * returned to vmalloc as vmalloc will handle TLB flushing lazily. |
805 | * |
806 | * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once |
807 | * for the whole region. |
808 | */ |
809 | static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, |
810 | int page_start, int page_end) |
811 | { |
812 | flush_tlb_kernel_range( |
813 | pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), |
814 | pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); |
815 | } |
816 | |
817 | static int __pcpu_map_pages(unsigned long addr, struct page **pages, |
818 | int nr_pages) |
819 | { |
820 | return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT, |
821 | PAGE_KERNEL, pages); |
822 | } |
823 | |
824 | /** |
825 | * pcpu_map_pages - map pages into a pcpu_chunk |
826 | * @chunk: chunk of interest |
827 | * @pages: pages array containing pages to be mapped |
828 | * @populated: populated bitmap |
829 | * @page_start: page index of the first page to map |
830 | * @page_end: page index of the last page to map + 1 |
831 | * |
832 | * For each cpu, map pages [@page_start,@page_end) into @chunk. The |
833 | * caller is responsible for calling pcpu_post_map_flush() after all |
834 | * mappings are complete. |
835 | * |
836 | * This function is responsible for setting corresponding bits in |
837 | * @chunk->populated bitmap and whatever is necessary for reverse |
838 | * lookup (addr -> chunk). |
839 | */ |
840 | static int pcpu_map_pages(struct pcpu_chunk *chunk, |
841 | struct page **pages, unsigned long *populated, |
842 | int page_start, int page_end) |
843 | { |
844 | unsigned int cpu, tcpu; |
845 | int i, err; |
846 | |
847 | for_each_possible_cpu(cpu) { |
848 | err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start), |
849 | &pages[pcpu_page_idx(cpu, page_start)], |
850 | page_end - page_start); |
851 | if (err < 0) |
852 | goto err; |
853 | } |
854 | |
855 | /* mapping successful, link chunk and mark populated */ |
856 | for (i = page_start; i < page_end; i++) { |
857 | for_each_possible_cpu(cpu) |
858 | pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)], |
859 | chunk); |
860 | __set_bit(i, populated); |
861 | } |
862 | |
863 | return 0; |
864 | |
865 | err: |
866 | for_each_possible_cpu(tcpu) { |
867 | if (tcpu == cpu) |
868 | break; |
869 | __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), |
870 | page_end - page_start); |
871 | } |
872 | return err; |
873 | } |
874 | |
875 | /** |
876 | * pcpu_post_map_flush - flush cache after mapping |
877 | * @chunk: pcpu_chunk the regions to be flushed belong to |
878 | * @page_start: page index of the first page to be flushed |
879 | * @page_end: page index of the last page to be flushed + 1 |
880 | * |
881 | * Pages [@page_start,@page_end) of @chunk have been mapped. Flush |
882 | * cache. |
883 | * |
884 | * As with pcpu_pre_unmap_flush(), cache flushing is also done at once |
885 | * for the whole region. |
886 | */ |
887 | static void pcpu_post_map_flush(struct pcpu_chunk *chunk, |
888 | int page_start, int page_end) |
889 | { |
890 | flush_cache_vmap( |
891 | pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), |
892 | pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); |
893 | } |
894 | |
895 | /** |
896 | * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk |
897 | * @chunk: chunk to depopulate |
898 | * @off: offset to the area to depopulate |
899 | * @size: size of the area to depopulate in bytes |
900 | * |
901 | * For each cpu, depopulate and unmap pages [@page_start,@page_end) |
902 | * from @chunk. |
905 | * |
906 | * CONTEXT: |
907 | * pcpu_alloc_mutex. |
908 | */ |
909 | static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size) |
910 | { |
911 | int page_start = PFN_DOWN(off); |
912 | int page_end = PFN_UP(off + size); |
913 | struct page **pages; |
914 | unsigned long *populated; |
915 | int rs, re; |
916 | |
917 | /* quick path, check whether it's empty already */ |
918 | rs = page_start; |
919 | pcpu_next_unpop(chunk, &rs, &re, page_end); |
920 | if (rs == page_start && re == page_end) |
921 | return; |
922 | |
923 | /* immutable chunks can't be depopulated */ |
924 | WARN_ON(chunk->immutable); |
925 | |
926 | /* |
927 | * If control reaches here, there must have been at least one |
928 | * successful population attempt so the temp pages array must |
929 | * be available now. |
930 | */ |
931 | pages = pcpu_get_pages_and_bitmap(chunk, &populated, false); |
932 | BUG_ON(!pages); |
933 | |
934 | /* unmap and free */ |
935 | pcpu_pre_unmap_flush(chunk, page_start, page_end); |
936 | |
937 | pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) |
938 | pcpu_unmap_pages(chunk, pages, populated, rs, re); |
939 | |
940 | /* no need to flush tlb, vmalloc will handle it lazily */ |
941 | |
942 | pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) |
943 | pcpu_free_pages(chunk, pages, populated, rs, re); |
944 | |
945 | /* commit new bitmap */ |
946 | bitmap_copy(chunk->populated, populated, pcpu_unit_pages); |
947 | } |
948 | |
949 | /** |
950 | * pcpu_populate_chunk - populate and map an area of a pcpu_chunk |
951 | * @chunk: chunk of interest |
952 | * @off: offset to the area to populate |
953 | * @size: size of the area to populate in bytes |
954 | * |
955 | * For each cpu, populate and map pages [@page_start,@page_end) into |
956 | * @chunk. The area is cleared on return. |
957 | * |
958 | * CONTEXT: |
959 | * pcpu_alloc_mutex, does GFP_KERNEL allocation. |
960 | */ |
961 | static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) |
962 | { |
963 | int page_start = PFN_DOWN(off); |
964 | int page_end = PFN_UP(off + size); |
965 | int free_end = page_start, unmap_end = page_start; |
966 | struct page **pages; |
967 | unsigned long *populated; |
968 | unsigned int cpu; |
969 | int rs, re, rc; |
970 | |
971 | /* quick path, check whether all pages are already there */ |
972 | rs = page_start; |
973 | pcpu_next_pop(chunk, &rs, &re, page_end); |
974 | if (rs == page_start && re == page_end) |
975 | goto clear; |
976 | |
977 | /* need to allocate and map pages, this chunk can't be immutable */ |
978 | WARN_ON(chunk->immutable); |
979 | |
980 | pages = pcpu_get_pages_and_bitmap(chunk, &populated, true); |
981 | if (!pages) |
982 | return -ENOMEM; |
983 | |
984 | /* alloc and map */ |
985 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { |
986 | rc = pcpu_alloc_pages(chunk, pages, populated, rs, re); |
987 | if (rc) |
988 | goto err_free; |
989 | free_end = re; |
990 | } |
991 | |
992 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { |
993 | rc = pcpu_map_pages(chunk, pages, populated, rs, re); |
994 | if (rc) |
995 | goto err_unmap; |
996 | unmap_end = re; |
997 | } |
998 | pcpu_post_map_flush(chunk, page_start, page_end); |
999 | |
1000 | /* commit new bitmap */ |
1001 | bitmap_copy(chunk->populated, populated, pcpu_unit_pages); |
1002 | clear: |
1003 | for_each_possible_cpu(cpu) |
1004 | memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); |
1005 | return 0; |
1006 | |
1007 | err_unmap: |
1008 | pcpu_pre_unmap_flush(chunk, page_start, unmap_end); |
1009 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end) |
1010 | pcpu_unmap_pages(chunk, pages, populated, rs, re); |
1011 | pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end); |
1012 | err_free: |
1013 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end) |
1014 | pcpu_free_pages(chunk, pages, populated, rs, re); |
1015 | return rc; |
1016 | } |
1017 | |
1018 | static void free_pcpu_chunk(struct pcpu_chunk *chunk) |
1019 | { |
1020 | if (!chunk) |
1021 | return; |
1022 | if (chunk->vms) |
1023 | pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups); |
1024 | pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0])); |
1025 | kfree(chunk); |
1026 | } |
1027 | |
1028 | static struct pcpu_chunk *alloc_pcpu_chunk(void) |
1029 | { |
1030 | struct pcpu_chunk *chunk; |
1031 | |
1032 | chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL); |
1033 | if (!chunk) |
1034 | return NULL; |
1035 | |
1036 | chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); |
1037 | if (!chunk->map) { |
1038 | kfree(chunk); |
1039 | return NULL; |
1040 | } |
1041 | chunk->map_alloc = PCPU_DFL_MAP_ALLOC; |
1042 | chunk->map[chunk->map_used++] = pcpu_unit_size; |
1039 | |
1040 | chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes, |
1041 | pcpu_nr_groups, pcpu_atom_size, |
1042 | GFP_KERNEL); |
1043 | if (!chunk->vms) { |
1044 | free_pcpu_chunk(chunk); |
1045 | return NULL; |
1046 | } |
1047 | |
1048 | INIT_LIST_HEAD(&chunk->list); |
1049 | chunk->free_size = pcpu_unit_size; |
1050 | chunk->contig_hint = pcpu_unit_size; |
1051 | chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0]; |
1052 | |
1053 | return chunk; |
1054 | } |
1055 | |
1056 | /** |
1057 | * pcpu_alloc - the percpu allocator |
1058 | * @size: size of area to allocate in bytes |
1059 | * @align: alignment of area (max PAGE_SIZE) |
1060 | * @reserved: allocate from the reserved chunk if available |
1061 | * |
1062 | * Allocate percpu area of @size bytes aligned at @align. |
1063 | * |
1064 | * CONTEXT: |
1065 | * Does GFP_KERNEL allocation. |
1066 | * |
1067 | * RETURNS: |
1068 | * Percpu pointer to the allocated area on success, NULL on failure. |
1069 | */ |
1070 | static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved) |
1071 | { |
1072 | static int warn_limit = 10; |
1073 | struct pcpu_chunk *chunk; |
1074 | const char *err; |
1075 | int slot, off, new_alloc; |
1076 | unsigned long flags; |
1077 | |
1078 | if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { |
1079 | WARN(true, "illegal size (%zu) or align (%zu) for " |
1080 | "percpu allocation\n", size, align); |
1081 | return NULL; |
1082 | } |
1083 | |
1084 | mutex_lock(&pcpu_alloc_mutex); |
1085 | spin_lock_irqsave(&pcpu_lock, flags); |
1086 | |
1087 | /* serve reserved allocations from the reserved chunk if available */ |
1088 | if (reserved && pcpu_reserved_chunk) { |
1089 | chunk = pcpu_reserved_chunk; |
1090 | |
1091 | if (size > chunk->contig_hint) { |
1092 | err = "alloc from reserved chunk failed"; |
1093 | goto fail_unlock; |
1094 | } |
1095 | |
1096 | while ((new_alloc = pcpu_need_to_extend(chunk))) { |
1097 | spin_unlock_irqrestore(&pcpu_lock, flags); |
1098 | if (pcpu_extend_area_map(chunk, new_alloc) < 0) { |
1099 | err = "failed to extend area map of reserved chunk"; |
1100 | goto fail_unlock_mutex; |
1101 | } |
1102 | spin_lock_irqsave(&pcpu_lock, flags); |
1103 | } |
1104 | |
1105 | off = pcpu_alloc_area(chunk, size, align); |
1106 | if (off >= 0) |
1107 | goto area_found; |
1108 | |
1109 | err = "alloc from reserved chunk failed"; |
1110 | goto fail_unlock; |
1111 | } |
1112 | |
1113 | restart: |
1114 | /* search through normal chunks */ |
1115 | for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { |
1116 | list_for_each_entry(chunk, &pcpu_slot[slot], list) { |
1117 | if (size > chunk->contig_hint) |
1118 | continue; |
1119 | |
1120 | new_alloc = pcpu_need_to_extend(chunk); |
1121 | if (new_alloc) { |
1122 | spin_unlock_irqrestore(&pcpu_lock, flags); |
1123 | if (pcpu_extend_area_map(chunk, |
1124 | new_alloc) < 0) { |
1125 | err = "failed to extend area map"; |
1126 | goto fail_unlock_mutex; |
1127 | } |
1128 | spin_lock_irqsave(&pcpu_lock, flags); |
1129 | /* |
1130 | * pcpu_lock has been dropped, need to |
1131 | * restart cpu_slot list walking. |
1132 | */ |
1133 | goto restart; |
1134 | } |
1135 | |
1136 | off = pcpu_alloc_area(chunk, size, align); |
1137 | if (off >= 0) |
1138 | goto area_found; |
1139 | } |
1140 | } |
1141 | |
1142 | /* hmmm... no space left, create a new chunk */ |
1143 | spin_unlock_irqrestore(&pcpu_lock, flags); |
1144 | |
1145 | chunk = alloc_pcpu_chunk(); |
1146 | if (!chunk) { |
1147 | err = "failed to allocate new chunk"; |
1148 | goto fail_unlock_mutex; |
1149 | } |
1150 | |
1151 | spin_lock_irqsave(&pcpu_lock, flags); |
1152 | pcpu_chunk_relocate(chunk, -1); |
1153 | goto restart; |
1154 | |
1155 | area_found: |
1156 | spin_unlock_irqrestore(&pcpu_lock, flags); |
1157 | |
1158 | /* populate, map and clear the area */ |
1159 | if (pcpu_populate_chunk(chunk, off, size)) { |
1160 | spin_lock_irqsave(&pcpu_lock, flags); |
1161 | pcpu_free_area(chunk, off); |
1162 | err = "failed to populate"; |
1163 | goto fail_unlock; |
1164 | } |
1165 | |
1166 | mutex_unlock(&pcpu_alloc_mutex); |
1167 | |
1168 | /* return address relative to base address */ |
1169 | return __addr_to_pcpu_ptr(chunk->base_addr + off); |
1170 | |
1171 | fail_unlock: |
1172 | spin_unlock_irqrestore(&pcpu_lock, flags); |
1173 | fail_unlock_mutex: |
1174 | mutex_unlock(&pcpu_alloc_mutex); |
1175 | if (warn_limit) { |
1176 | pr_warning("PERCPU: allocation failed, size=%zu align=%zu, " |
1177 | "%s\n", size, align, err); |
1178 | dump_stack(); |
1179 | if (!--warn_limit) |
1180 | pr_info("PERCPU: limit reached, disable warning\n"); |
1181 | } |
1182 | return NULL; |
1183 | } |
1184 | |
1185 | /** |
1186 | * __alloc_percpu - allocate dynamic percpu area |
1187 | * @size: size of area to allocate in bytes |
1188 | * @align: alignment of area (max PAGE_SIZE) |
1189 | * |
1190 | * Allocate percpu area of @size bytes aligned at @align. Might |
1191 | * sleep. Might trigger writeouts. |
1192 | * |
1193 | * CONTEXT: |
1194 | * Does GFP_KERNEL allocation. |
1195 | * |
1196 | * RETURNS: |
1197 | * Percpu pointer to the allocated area on success, NULL on failure. |
1198 | */ |
1199 | void __percpu *__alloc_percpu(size_t size, size_t align) |
1200 | { |
1201 | return pcpu_alloc(size, align, false); |
1202 | } |
1203 | EXPORT_SYMBOL_GPL(__alloc_percpu); |
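/*
 * Illustrative usage sketch (not part of this file; the counter
 * structure and variable names are made up):
 *
 *	struct my_counter { unsigned long hits; };
 *	struct my_counter __percpu *ctr;
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	ctr = __alloc_percpu(sizeof(*ctr), __alignof__(*ctr));
 *	if (!ctr)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		sum += per_cpu_ptr(ctr, cpu)->hits;
 *	free_percpu(ctr);
 */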
1204 | |
1205 | /** |
1206 | * __alloc_reserved_percpu - allocate reserved percpu area |
1207 | * @size: size of area to allocate in bytes |
1208 | * @align: alignment of area (max PAGE_SIZE) |
1209 | * |
1210 | * Allocate percpu area of @size bytes aligned at @align from reserved |
1211 | * percpu area if arch has set it up; otherwise, allocation is served |
1212 | * from the same dynamic area. Might sleep. Might trigger writeouts. |
1213 | * |
1214 | * CONTEXT: |
1215 | * Does GFP_KERNEL allocation. |
1216 | * |
1217 | * RETURNS: |
1218 | * Percpu pointer to the allocated area on success, NULL on failure. |
1219 | */ |
1220 | void __percpu *__alloc_reserved_percpu(size_t size, size_t align) |
1221 | { |
1222 | return pcpu_alloc(size, align, true); |
1223 | } |
1224 | |
1225 | /** |
1226 | * pcpu_reclaim - reclaim fully free chunks, workqueue function |
1227 | * @work: unused |
1228 | * |
1229 | * Reclaim all fully free chunks except for the first one. |
1230 | * |
1231 | * CONTEXT: |
1232 | * workqueue context. |
1233 | */ |
1234 | static void pcpu_reclaim(struct work_struct *work) |
1235 | { |
1236 | LIST_HEAD(todo); |
1237 | struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1]; |
1238 | struct pcpu_chunk *chunk, *next; |
1239 | |
1240 | mutex_lock(&pcpu_alloc_mutex); |
1241 | spin_lock_irq(&pcpu_lock); |
1242 | |
1243 | list_for_each_entry_safe(chunk, next, head, list) { |
1244 | WARN_ON(chunk->immutable); |
1245 | |
1246 | /* spare the first one */ |
1247 | if (chunk == list_first_entry(head, struct pcpu_chunk, list)) |
1248 | continue; |
1249 | |
1250 | list_move(&chunk->list, &todo); |
1251 | } |
1252 | |
1253 | spin_unlock_irq(&pcpu_lock); |
1254 | |
1255 | list_for_each_entry_safe(chunk, next, &todo, list) { |
1256 | pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size); |
1257 | free_pcpu_chunk(chunk); |
1258 | } |
1259 | |
1260 | mutex_unlock(&pcpu_alloc_mutex); |
1261 | } |
1262 | |
1263 | /** |
1264 | * free_percpu - free percpu area |
1265 | * @ptr: pointer to area to free |
1266 | * |
1267 | * Free percpu area @ptr. |
1268 | * |
1269 | * CONTEXT: |
1270 | * Can be called from atomic context. |
1271 | */ |
1272 | void free_percpu(void __percpu *ptr) |
1273 | { |
1274 | void *addr; |
1275 | struct pcpu_chunk *chunk; |
1276 | unsigned long flags; |
1277 | int off; |
1278 | |
1279 | if (!ptr) |
1280 | return; |
1281 | |
1282 | addr = __pcpu_ptr_to_addr(ptr); |
1283 | |
1284 | spin_lock_irqsave(&pcpu_lock, flags); |
1285 | |
1286 | chunk = pcpu_chunk_addr_search(addr); |
1287 | off = addr - chunk->base_addr; |
1288 | |
1289 | pcpu_free_area(chunk, off); |
1290 | |
1291 | /* if there is more than one fully free chunk, wake up the grim reaper */ |
1292 | if (chunk->free_size == pcpu_unit_size) { |
1293 | struct pcpu_chunk *pos; |
1294 | |
1295 | list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) |
1296 | if (pos != chunk) { |
1297 | schedule_work(&pcpu_reclaim_work); |
1298 | break; |
1299 | } |
1300 | } |
1301 | |
1302 | spin_unlock_irqrestore(&pcpu_lock, flags); |
1303 | } |
1304 | EXPORT_SYMBOL_GPL(free_percpu); |
1305 | |
1306 | /** |
1307 | * is_kernel_percpu_address - test whether address is from static percpu area |
1308 | * @addr: address to test |
1309 | * |
1310 | * Test whether @addr belongs to in-kernel static percpu area. Module |
1311 | * static percpu areas are not considered. For those, use |
1312 | * is_module_percpu_address(). |
1313 | * |
1314 | * RETURNS: |
1315 | * %true if @addr is from in-kernel static percpu area, %false otherwise. |
1316 | */ |
1317 | bool is_kernel_percpu_address(unsigned long addr) |
1318 | { |
1319 | const size_t static_size = __per_cpu_end - __per_cpu_start; |
1320 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); |
1321 | unsigned int cpu; |
1322 | |
1323 | for_each_possible_cpu(cpu) { |
1324 | void *start = per_cpu_ptr(base, cpu); |
1325 | |
1326 | if ((void *)addr >= start && (void *)addr < start + static_size) |
1327 | return true; |
1328 | } |
1329 | return false; |
1330 | } |
1331 | |
1332 | /** |
1333 | * per_cpu_ptr_to_phys - convert translated percpu address to physical address |
1334 | * @addr: the address to be converted to physical address |
1335 | * |
1336 | * Given @addr which is dereferenceable address obtained via one of |
1337 | * percpu access macros, this function translates it into its physical |
1338 | * address. The caller is responsible for ensuring @addr stays valid |
1339 | * until this function finishes. |
1340 | * |
1341 | * RETURNS: |
1342 | * The physical address for @addr. |
1343 | */ |
1344 | phys_addr_t per_cpu_ptr_to_phys(void *addr) |
1345 | { |
1346 | if ((unsigned long)addr < VMALLOC_START || |
1347 | (unsigned long)addr >= VMALLOC_END) |
1348 | return __pa(addr); |
1349 | else |
1350 | return page_to_phys(vmalloc_to_page(addr)); |
1351 | } |
1352 | |
1353 | static inline size_t pcpu_calc_fc_sizes(size_t static_size, |
1354 | size_t reserved_size, |
1355 | ssize_t *dyn_sizep) |
1356 | { |
1357 | size_t size_sum; |
1358 | |
1359 | size_sum = PFN_ALIGN(static_size + reserved_size + |
1360 | (*dyn_sizep >= 0 ? *dyn_sizep : PERCPU_DYNAMIC_RESERVE)); |
1361 | if (*dyn_sizep != 0) |
1362 | *dyn_sizep = size_sum - static_size - reserved_size; |
1363 | |
1364 | return size_sum; |
1365 | } |
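/*
 * Numeric example (editorial, assuming 4k pages): static_size == 41k,
 * reserved_size == 8k and *dyn_sizep == 20k give
 * size_sum = PFN_ALIGN(69k) = 72k, after which *dyn_sizep is updated
 * to 72k - 41k - 8k = 23k.
 */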
1366 | |
1367 | /** |
1368 | * pcpu_alloc_alloc_info - allocate percpu allocation info |
1369 | * @nr_groups: the number of groups |
1370 | * @nr_units: the number of units |
1371 | * |
1372 | * Allocate ai which is large enough for @nr_groups groups containing |
1373 | * @nr_units units. The returned ai's groups[0].cpu_map points to the |
1374 | * cpu_map array which is long enough for @nr_units and filled with |
1375 | * NR_CPUS. It's the caller's responsibility to initialize cpu_map |
1376 | * pointer of other groups. |
1377 | * |
1378 | * RETURNS: |
1379 | * Pointer to the allocated pcpu_alloc_info on success, NULL on |
1380 | * failure. |
1381 | */ |
1382 | struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, |
1383 | int nr_units) |
1384 | { |
1385 | struct pcpu_alloc_info *ai; |
1386 | size_t base_size, ai_size; |
1387 | void *ptr; |
1388 | int unit; |
1389 | |
1390 | base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]), |
1391 | __alignof__(ai->groups[0].cpu_map[0])); |
1392 | ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); |
1393 | |
1394 | ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size)); |
1395 | if (!ptr) |
1396 | return NULL; |
1397 | ai = ptr; |
1398 | ptr += base_size; |
1399 | |
1400 | ai->groups[0].cpu_map = ptr; |
1401 | |
1402 | for (unit = 0; unit < nr_units; unit++) |
1403 | ai->groups[0].cpu_map[unit] = NR_CPUS; |
1404 | |
1405 | ai->nr_groups = nr_groups; |
1406 | ai->__ai_size = PFN_ALIGN(ai_size); |
1407 | |
1408 | return ai; |
1409 | } |
1410 | |
1411 | /** |
1412 | * pcpu_free_alloc_info - free percpu allocation info |
1413 | * @ai: pcpu_alloc_info to free |
1414 | * |
1415 | * Free @ai which was allocated by pcpu_alloc_alloc_info(). |
1416 | */ |
1417 | void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) |
1418 | { |
1419 | free_bootmem(__pa(ai), ai->__ai_size); |
1420 | } |
1421 | |
1422 | /** |
1423 | * pcpu_build_alloc_info - build alloc_info considering distances between CPUs |
1424 | * @reserved_size: the size of reserved percpu area in bytes |
1425 | * @dyn_size: free size for dynamic allocation in bytes, -1 for auto |
1426 | * @atom_size: allocation atom size |
1427 | * @cpu_distance_fn: callback to determine distance between cpus, optional |
1428 | * |
1429 | * This function determines grouping of units, their mappings to cpus |
1430 | * and other parameters considering needed percpu size, allocation |
1431 | * atom size and distances between CPUs. |
1432 | * |
1433 | * Groups are always multiples of atom size and CPUs which are of |
1434 | * LOCAL_DISTANCE both ways are grouped together and share space for |
1435 | * units in the same group. The returned configuration is guaranteed |
1436 | * to have CPUs on different nodes on different groups and >=75% usage |
1437 | * of allocated virtual address space. |
1438 | * |
1439 | * RETURNS: |
1440 | * On success, pointer to the new allocation_info is returned. On |
1441 | * failure, ERR_PTR value is returned. |
1442 | */ |
1443 | struct pcpu_alloc_info * __init pcpu_build_alloc_info( |
1444 | size_t reserved_size, ssize_t dyn_size, |
1445 | size_t atom_size, |
1446 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn) |
1447 | { |
1448 | static int group_map[NR_CPUS] __initdata; |
1449 | static int group_cnt[NR_CPUS] __initdata; |
1450 | const size_t static_size = __per_cpu_end - __per_cpu_start; |
1451 | int group_cnt_max = 0, nr_groups = 1, nr_units = 0; |
1452 | size_t size_sum, min_unit_size, alloc_size; |
1453 | int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ |
1454 | int last_allocs, group, unit; |
1455 | unsigned int cpu, tcpu; |
1456 | struct pcpu_alloc_info *ai; |
1457 | unsigned int *cpu_map; |
1458 | |
1459 | /* this function may be called multiple times */ |
1460 | memset(group_map, 0, sizeof(group_map)); |
1461 | memset(group_cnt, 0, sizeof(group_cnt)); |
1462 | |
1463 | /* |
1464 | * Determine min_unit_size, alloc_size and max_upa such that |
1465 | * alloc_size is a multiple of atom_size and is the smallest size |
1466 | * which can accommodate 4k aligned segments which are equal to |
1467 | * or larger than min_unit_size. |
1468 | */ |
1469 | size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size); |
1470 | min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); |
1471 | |
1472 | alloc_size = roundup(min_unit_size, atom_size); |
1473 | upa = alloc_size / min_unit_size; |
1474 | while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) |
1475 | upa--; |
1476 | max_upa = upa; |
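/*
 * Worked example (editorial, hypothetical numbers): with a
 * min_unit_size of 44k and a 2MB atom_size, alloc_size rounds up to
 * 2MB and upa starts at 2MB / 44k == 46; the loop above then lowers it
 * to 32, the largest value that both divides 2MB evenly and leaves a
 * page aligned 64k unit, so max_upa becomes 32.
 */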
1477 | |
1478 | /* group cpus according to their proximity */ |
1479 | for_each_possible_cpu(cpu) { |
1480 | group = 0; |
1481 | next_group: |
1482 | for_each_possible_cpu(tcpu) { |
1483 | if (cpu == tcpu) |
1484 | break; |
1485 | if (group_map[tcpu] == group && cpu_distance_fn && |
1486 | (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || |
1487 | cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { |
1488 | group++; |
1489 | nr_groups = max(nr_groups, group + 1); |
1490 | goto next_group; |
1491 | } |
1492 | } |
1493 | group_map[cpu] = group; |
1494 | group_cnt[group]++; |
1495 | group_cnt_max = max(group_cnt_max, group_cnt[group]); |
1496 | } |
1497 | |
1498 | /* |
1499 | * Expand unit size until address space usage goes over 75% |
1500 | * and then as much as possible without using more address |
1501 | * space. |
1502 | */ |
1503 | last_allocs = INT_MAX; |
1504 | for (upa = max_upa; upa; upa--) { |
1505 | int allocs = 0, wasted = 0; |
1506 | |
1507 | if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) |
1508 | continue; |
1509 | |
1510 | for (group = 0; group < nr_groups; group++) { |
1511 | int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); |
1512 | allocs += this_allocs; |
1513 | wasted += this_allocs * upa - group_cnt[group]; |
1514 | } |
1515 | |
1516 | /* |
1517 | * Don't accept if wastage is over 1/3. The |
1518 | * greater-than comparison ensures upa==1 always |
1519 | * passes the following check. |
1520 | */ |
1521 | if (wasted > num_possible_cpus() / 3) |
1522 | continue; |
1523 | |
1524 | /* and then don't consume more memory */ |
1525 | if (allocs > last_allocs) |
1526 | break; |
1527 | last_allocs = allocs; |
1528 | best_upa = upa; |
1529 | } |
1530 | upa = best_upa; |
1531 | |
1532 | /* allocate and fill alloc_info */ |
1533 | for (group = 0; group < nr_groups; group++) |
1534 | nr_units += roundup(group_cnt[group], upa); |
1535 | |
1536 | ai = pcpu_alloc_alloc_info(nr_groups, nr_units); |
1537 | if (!ai) |
1538 | return ERR_PTR(-ENOMEM); |
1539 | cpu_map = ai->groups[0].cpu_map; |
1540 | |
1541 | for (group = 0; group < nr_groups; group++) { |
1542 | ai->groups[group].cpu_map = cpu_map; |
1543 | cpu_map += roundup(group_cnt[group], upa); |
1544 | } |
1545 | |
1546 | ai->static_size = static_size; |
1547 | ai->reserved_size = reserved_size; |
1548 | ai->dyn_size = dyn_size; |
1549 | ai->unit_size = alloc_size / upa; |
1550 | ai->atom_size = atom_size; |
1551 | ai->alloc_size = alloc_size; |
1552 | |
1553 | for (group = 0, unit = 0; group_cnt[group]; group++) { |
1554 | struct pcpu_group_info *gi = &ai->groups[group]; |
1555 | |
1556 | /* |
1557 | * Initialize base_offset as if all groups are located |
1558 | * back-to-back. The caller should update this to |
1559 | * reflect actual allocation. |
1560 | */ |
1561 | gi->base_offset = unit * ai->unit_size; |
1562 | |
1563 | for_each_possible_cpu(cpu) |
1564 | if (group_map[cpu] == group) |
1565 | gi->cpu_map[gi->nr_units++] = cpu; |
1566 | gi->nr_units = roundup(gi->nr_units, upa); |
1567 | unit += gi->nr_units; |
1568 | } |
1569 | BUG_ON(unit != nr_units); |
1570 | |
1571 | return ai; |
1572 | } |
1573 | |
1574 | /** |
1575 | * pcpu_dump_alloc_info - print out information about pcpu_alloc_info |
1576 | * @lvl: loglevel |
1577 | * @ai: allocation info to dump |
1578 | * |
1579 | * Print out information about @ai using loglevel @lvl. |
1580 | */ |
1581 | static void pcpu_dump_alloc_info(const char *lvl, |
1582 | const struct pcpu_alloc_info *ai) |
1583 | { |
1584 | int group_width = 1, cpu_width = 1, width; |
1585 | char empty_str[] = "--------"; |
1586 | int alloc = 0, alloc_end = 0; |
1587 | int group, v; |
1588 | int upa, apl; /* units per alloc, allocs per line */ |
1589 | |
1590 | v = ai->nr_groups; |
1591 | while (v /= 10) |
1592 | group_width++; |
1593 | |
1594 | v = num_possible_cpus(); |
1595 | while (v /= 10) |
1596 | cpu_width++; |
1597 | empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; |
1598 | |
1599 | upa = ai->alloc_size / ai->unit_size; |
1600 | width = upa * (cpu_width + 1) + group_width + 3; |
1601 | apl = rounddown_pow_of_two(max(60 / width, 1)); |
1602 | |
1603 | printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", |
1604 | lvl, ai->static_size, ai->reserved_size, ai->dyn_size, |
1605 | ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); |
1606 | |
1607 | for (group = 0; group < ai->nr_groups; group++) { |
1608 | const struct pcpu_group_info *gi = &ai->groups[group]; |
1609 | int unit = 0, unit_end = 0; |
1610 | |
1611 | BUG_ON(gi->nr_units % upa); |
1612 | for (alloc_end += gi->nr_units / upa; |
1613 | alloc < alloc_end; alloc++) { |
1614 | if (!(alloc % apl)) { |
1615 | printk("\n"); |
1616 | printk("%spcpu-alloc: ", lvl); |
1617 | } |
1618 | printk("[%0*d] ", group_width, group); |
1619 | |
1620 | for (unit_end += upa; unit < unit_end; unit++) |
1621 | if (gi->cpu_map[unit] != NR_CPUS) |
1622 | printk("%0*d ", cpu_width, |
1623 | gi->cpu_map[unit]); |
1624 | else |
1625 | printk("%s ", empty_str); |
1626 | } |
1627 | } |
1628 | printk("\n"); |
1629 | } |
1630 | |
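/*
 * With the hypothetical layout used in the examples above (6 cpus in
 * two groups of 3, upa == 4), the dump would look roughly like:
 *
 *	pcpu-alloc: s16384 r8192 d4096 u32768 alloc=32*4096
 *	pcpu-alloc: [0] 0 1 2 - [1] 3 4 5 -
 *
 * where "-" marks unit slots which have no cpu assigned.
 */
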
1631 | /** |
1632 | * pcpu_setup_first_chunk - initialize the first percpu chunk |
1633 | * @ai: pcpu_alloc_info describing how the percpu area is shaped |
1634 | * @base_addr: mapped address |
1635 | * |
1636 | * Initialize the first percpu chunk which contains the kernel static |
1637 | * percpu area. This function is to be called from arch percpu area |
1638 | * setup path. |
1639 | * |
1640 | * @ai contains all information necessary to initialize the first |
1641 | * chunk and prime the dynamic percpu allocator. |
1642 | * |
1643 | * @ai->static_size is the size of static percpu area. |
1644 | * |
1645 | * @ai->reserved_size, if non-zero, specifies the amount of bytes to |
1646 | * reserve after the static area in the first chunk. This reserves |
1647 | * the first chunk such that it's available only through reserved |
1648 | * percpu allocation. This is primarily used to serve module percpu |
1649 | * static areas on architectures where the addressing model has |
1650 | * limited offset range for symbol relocations to guarantee module |
1651 | * percpu symbols fall inside the relocatable range. |
1652 | * |
1653 | * @ai->dyn_size determines the number of bytes available for dynamic |
1654 | * allocation in the first chunk. The area between @ai->static_size + |
1655 | * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. |
1656 | * |
1657 | * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE |
1658 | * and equal to or larger than @ai->static_size + @ai->reserved_size + |
1659 | * @ai->dyn_size. |
1660 | * |
1661 | * @ai->atom_size is the allocation atom size and used as alignment |
1662 | * for vm areas. |
1663 | * |
1664 | * @ai->alloc_size is the allocation size and always multiple of |
1665 | * @ai->atom_size. This is larger than @ai->atom_size if |
1666 | * @ai->unit_size is larger than @ai->atom_size. |
1667 | * |
1668 | * @ai->nr_groups and @ai->groups describe virtual memory layout of |
1669 | * percpu areas. Units which should be colocated are put into the |
1670 | * same group. Dynamic VM areas will be allocated according to these |
1671 | * groupings. If @ai->nr_groups is zero, a single group containing |
1672 | * all units is assumed. |
1673 | * |
1674 | * The caller should have mapped the first chunk at @base_addr and |
1675 | * copied static data to each unit. |
1676 | * |
1677 | * If the first chunk ends up with both reserved and dynamic areas, it |
1678 | * is served by two chunks - one to serve the core static and reserved |
1679 | * areas and the other for the dynamic area. They share the same vm |
1680 | * and page map but uses different area allocation map to stay away |
1681 | * from each other. The latter chunk is circulated in the chunk slots |
1682 | * and available for dynamic allocation like any other chunks. |
1683 | * |
1684 | * RETURNS: |
1685 | * 0 on success, -errno on failure. |
1686 | */ |
1687 | int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, |
1688 | void *base_addr) |
1689 | { |
1690 | static char cpus_buf[4096] __initdata; |
1691 | static int smap[2], dmap[2]; |
1692 | size_t dyn_size = ai->dyn_size; |
1693 | size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; |
1694 | struct pcpu_chunk *schunk, *dchunk = NULL; |
1695 | unsigned long *group_offsets; |
1696 | size_t *group_sizes; |
1697 | unsigned long *unit_off; |
1698 | unsigned int cpu; |
1699 | int *unit_map; |
1700 | int group, unit, i; |
1701 | |
1702 | cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask); |
1703 | |
1704 | #define PCPU_SETUP_BUG_ON(cond) do { \ |
1705 | if (unlikely(cond)) { \ |
1706 | pr_emerg("PERCPU: failed to initialize, %s", #cond); \ |
1707 | pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \ |
1708 | pcpu_dump_alloc_info(KERN_EMERG, ai); \ |
1709 | BUG(); \ |
1710 | } \ |
1711 | } while (0) |
1712 | |
1713 | /* sanity checks */ |
1714 | BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || |
1715 | ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); |
1716 | PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); |
1717 | PCPU_SETUP_BUG_ON(!ai->static_size); |
1718 | PCPU_SETUP_BUG_ON(!base_addr); |
1719 | PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); |
1720 | PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); |
1721 | PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); |
1722 | |
1723 | /* process group information and build config tables accordingly */ |
1724 | group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0])); |
1725 | group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0])); |
1726 | unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0])); |
1727 | unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0])); |
1728 | |
1729 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
1730 | unit_map[cpu] = UINT_MAX; |
1731 | pcpu_first_unit_cpu = NR_CPUS; |
1732 | |
1733 | for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { |
1734 | const struct pcpu_group_info *gi = &ai->groups[group]; |
1735 | |
1736 | group_offsets[group] = gi->base_offset; |
1737 | group_sizes[group] = gi->nr_units * ai->unit_size; |
1738 | |
1739 | for (i = 0; i < gi->nr_units; i++) { |
1740 | cpu = gi->cpu_map[i]; |
1741 | if (cpu == NR_CPUS) |
1742 | continue; |
1743 | |
1744 | PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids); |
1745 | PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); |
1746 | PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); |
1747 | |
1748 | unit_map[cpu] = unit + i; |
1749 | unit_off[cpu] = gi->base_offset + i * ai->unit_size; |
1750 | |
1751 | if (pcpu_first_unit_cpu == NR_CPUS) |
1752 | pcpu_first_unit_cpu = cpu; |
1753 | } |
1754 | } |
1755 | pcpu_last_unit_cpu = cpu; |
1756 | pcpu_nr_units = unit; |
1757 | |
1758 | for_each_possible_cpu(cpu) |
1759 | PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); |
1760 | |
1761 | /* we're done parsing the input, undefine BUG macro and dump config */ |
1762 | #undef PCPU_SETUP_BUG_ON |
1763 | pcpu_dump_alloc_info(KERN_INFO, ai); |
1764 | |
1765 | pcpu_nr_groups = ai->nr_groups; |
1766 | pcpu_group_offsets = group_offsets; |
1767 | pcpu_group_sizes = group_sizes; |
1768 | pcpu_unit_map = unit_map; |
1769 | pcpu_unit_offsets = unit_off; |
1770 | |
1771 | /* determine basic parameters */ |
1772 | pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; |
1773 | pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; |
1774 | pcpu_atom_size = ai->atom_size; |
1775 | pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + |
1776 | BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); |
1777 | |
1778 | /* |
1779 | * Allocate chunk slots. The additional last slot is for |
1780 | * empty chunks. |
1781 | */ |
1782 | pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; |
1783 | pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0])); |
1784 | for (i = 0; i < pcpu_nr_slots; i++) |
1785 | INIT_LIST_HEAD(&pcpu_slot[i]); |
1786 | |
1787 | /* |
1788 | * Initialize static chunk. If reserved_size is zero, the |
1789 | * static chunk covers static area + dynamic allocation area |
1790 | * in the first chunk. If reserved_size is not zero, it |
1791 | * covers static area + reserved area (mostly used for module |
1792 | * static percpu allocation). |
1793 | */ |
1794 | schunk = alloc_bootmem(pcpu_chunk_struct_size); |
1795 | INIT_LIST_HEAD(&schunk->list); |
1796 | schunk->base_addr = base_addr; |
1797 | schunk->map = smap; |
1798 | schunk->map_alloc = ARRAY_SIZE(smap); |
1799 | schunk->immutable = true; |
1800 | bitmap_fill(schunk->populated, pcpu_unit_pages); |
1801 | |
1802 | if (ai->reserved_size) { |
1803 | schunk->free_size = ai->reserved_size; |
1804 | pcpu_reserved_chunk = schunk; |
1805 | pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size; |
1806 | } else { |
1807 | schunk->free_size = dyn_size; |
1808 | dyn_size = 0; /* dynamic area covered */ |
1809 | } |
1810 | schunk->contig_hint = schunk->free_size; |
1811 | |
1812 | schunk->map[schunk->map_used++] = -ai->static_size; |
1813 | if (schunk->free_size) |
1814 | schunk->map[schunk->map_used++] = schunk->free_size; |
1815 | |
1816 | /* init dynamic chunk if necessary */ |
1817 | if (dyn_size) { |
1818 | dchunk = alloc_bootmem(pcpu_chunk_struct_size); |
1819 | INIT_LIST_HEAD(&dchunk->list); |
1820 | dchunk->base_addr = base_addr; |
1821 | dchunk->map = dmap; |
1822 | dchunk->map_alloc = ARRAY_SIZE(dmap); |
1823 | dchunk->immutable = true; |
1824 | bitmap_fill(dchunk->populated, pcpu_unit_pages); |
1825 | |
1826 | dchunk->contig_hint = dchunk->free_size = dyn_size; |
1827 | dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit; |
1828 | dchunk->map[dchunk->map_used++] = dchunk->free_size; |
1829 | } |
1830 | |
1831 | /* link the first chunk in */ |
1832 | pcpu_first_chunk = dchunk ?: schunk; |
1833 | pcpu_chunk_relocate(pcpu_first_chunk, -1); |
1834 | |
1835 | /* we're done */ |
1836 | pcpu_base_addr = base_addr; |
1837 | return 0; |
1838 | } |
1839 | |
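/*
 * Example of a consistent @ai for a 4-cpu UMA machine (all sizes
 * hypothetical): static_size 45056, reserved_size 8192, dyn_size 20480,
 * unit_size 81920 (page aligned and >= 45056 + 8192 + 20480),
 * atom_size 4096, alloc_size 81920, and a single group whose cpu_map
 * is {0, 1, 2, 3}.  The caller maps 4 * 81920 bytes at @base_addr,
 * copies the static data into each unit and then calls
 * pcpu_setup_first_chunk().
 */
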
1840 | const char *pcpu_fc_names[PCPU_FC_NR] __initdata = { |
1841 | [PCPU_FC_AUTO] = "auto", |
1842 | [PCPU_FC_EMBED] = "embed", |
1843 | [PCPU_FC_PAGE] = "page", |
1844 | }; |
1845 | |
1846 | enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; |
1847 | |
1848 | static int __init percpu_alloc_setup(char *str) |
1849 | { |
1850 | if (0) |
1851 | /* nada */; |
1852 | #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK |
1853 | else if (!strcmp(str, "embed")) |
1854 | pcpu_chosen_fc = PCPU_FC_EMBED; |
1855 | #endif |
1856 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK |
1857 | else if (!strcmp(str, "page")) |
1858 | pcpu_chosen_fc = PCPU_FC_PAGE; |
1859 | #endif |
1860 | else |
1861 | pr_warning("PERCPU: unknown allocator %s specified\n", str); |
1862 | |
1863 | return 0; |
1864 | } |
1865 | early_param("percpu_alloc", percpu_alloc_setup); |
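
/*
 * Example: booting with "percpu_alloc=page" on a kernel built with
 * CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK sets pcpu_chosen_fc to
 * PCPU_FC_PAGE; the arch first chunk setup code is expected to check
 * pcpu_chosen_fc and call pcpu_page_first_chunk() accordingly.
 */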
1866 | |
1867 | #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ |
1868 | !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) |
1869 | /** |
1870 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem |
1871 | * @reserved_size: the size of reserved percpu area in bytes |
1872 | * @dyn_size: free size for dynamic allocation in bytes, -1 for auto |
1873 | * @atom_size: allocation atom size |
1874 | * @cpu_distance_fn: callback to determine distance between cpus, optional |
1875 | * @alloc_fn: function to allocate percpu page |
1876 | * @free_fn: function to free percpu page |
1877 | * |
1878 | * This is a helper to ease setting up embedded first percpu chunk and |
1879 | * can be called where pcpu_setup_first_chunk() is expected. |
1880 | * |
1881 | * If this function is used to setup the first chunk, it is allocated |
1882 | * by calling @alloc_fn and used as-is without being mapped into |
1883 | * vmalloc area. Allocations are always whole multiples of @atom_size |
1884 | * aligned to @atom_size. |
1885 | * |
1886 | * This enables the first chunk to piggy back on the linear physical |
1887 | * mapping which often uses larger page size. Please note that this |
1888 | * can result in very sparse cpu->unit mapping on NUMA machines thus |
1889 | * requiring large vmalloc address space. Don't use this allocator if |
1890 | * vmalloc space is not orders of magnitude larger than distances |
1891 | * between node memory addresses (ie. 32bit NUMA machines). |
1892 | * |
1893 | * When @dyn_size is positive, dynamic area might be larger than |
1894 | * specified to fill page alignment. When @dyn_size is auto, |
1895 | * @dyn_size is just big enough to fill page alignment after static |
1896 | * and reserved areas. |
1897 | * |
1898 | * If the needed size is smaller than the minimum or specified unit |
1899 | * size, the leftover is returned using @free_fn. |
1900 | * |
1901 | * RETURNS: |
1902 | * 0 on success, -errno on failure. |
1903 | */ |
1904 | int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size, |
1905 | size_t atom_size, |
1906 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn, |
1907 | pcpu_fc_alloc_fn_t alloc_fn, |
1908 | pcpu_fc_free_fn_t free_fn) |
1909 | { |
1910 | void *base = (void *)ULONG_MAX; |
1911 | void **areas = NULL; |
1912 | struct pcpu_alloc_info *ai; |
1913 | size_t size_sum, areas_size, max_distance; |
1914 | int group, i, rc; |
1915 | |
1916 | ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, |
1917 | cpu_distance_fn); |
1918 | if (IS_ERR(ai)) |
1919 | return PTR_ERR(ai); |
1920 | |
1921 | size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; |
1922 | areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); |
1923 | |
1924 | areas = alloc_bootmem_nopanic(areas_size); |
1925 | if (!areas) { |
1926 | rc = -ENOMEM; |
1927 | goto out_free; |
1928 | } |
1929 | |
1930 | /* allocate, copy and determine base address */ |
1931 | for (group = 0; group < ai->nr_groups; group++) { |
1932 | struct pcpu_group_info *gi = &ai->groups[group]; |
1933 | unsigned int cpu = NR_CPUS; |
1934 | void *ptr; |
1935 | |
1936 | for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) |
1937 | cpu = gi->cpu_map[i]; |
1938 | BUG_ON(cpu == NR_CPUS); |
1939 | |
1940 | /* allocate space for the whole group */ |
1941 | ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); |
1942 | if (!ptr) { |
1943 | rc = -ENOMEM; |
1944 | goto out_free_areas; |
1945 | } |
1946 | areas[group] = ptr; |
1947 | |
1948 | base = min(ptr, base); |
1949 | |
1950 | for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { |
1951 | if (gi->cpu_map[i] == NR_CPUS) { |
1952 | /* unused unit, free whole */ |
1953 | free_fn(ptr, ai->unit_size); |
1954 | continue; |
1955 | } |
1956 | /* copy and return the unused part */ |
1957 | memcpy(ptr, __per_cpu_load, ai->static_size); |
1958 | free_fn(ptr + size_sum, ai->unit_size - size_sum); |
1959 | } |
1960 | } |
1961 | |
1962 | /* base address is now known, determine group base offsets */ |
1963 | max_distance = 0; |
1964 | for (group = 0; group < ai->nr_groups; group++) { |
1965 | ai->groups[group].base_offset = areas[group] - base; |
1966 | max_distance = max_t(size_t, max_distance, |
1967 | ai->groups[group].base_offset); |
1968 | } |
1969 | max_distance += ai->unit_size; |
1970 | |
1971 | /* warn if maximum distance is further than 75% of vmalloc space */ |
1972 | if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) { |
1973 | pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc " |
1974 | "space 0x%lx\n", |
1975 | max_distance, VMALLOC_END - VMALLOC_START); |
1976 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK |
1977 | /* and fail if we have fallback */ |
1978 | rc = -EINVAL; |
1979 | goto out_free; |
1980 | #endif |
1981 | } |
1982 | |
1983 | pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", |
1984 | PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, |
1985 | ai->dyn_size, ai->unit_size); |
1986 | |
1987 | rc = pcpu_setup_first_chunk(ai, base); |
1988 | goto out_free; |
1989 | |
1990 | out_free_areas: |
1991 | for (group = 0; group < ai->nr_groups; group++) |
1992 | free_fn(areas[group], |
1993 | ai->groups[group].nr_units * ai->unit_size); |
1994 | out_free: |
1995 | pcpu_free_alloc_info(ai); |
1996 | if (areas) |
1997 | free_bootmem(__pa(areas), areas_size); |
1998 | return rc; |
1999 | } |
2000 | #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK || |
2001 | !CONFIG_HAVE_SETUP_PER_CPU_AREA */ |
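
/*
 * A minimal sketch of the kind of cpu_distance_fn an arch might pass
 * to pcpu_embed_first_chunk().  pcpu_build_alloc_info() only compares
 * the result against LOCAL_DISTANCE, so returning LOCAL_DISTANCE for
 * same-node cpus and REMOTE_DISTANCE otherwise is enough.  This
 * assumes cpu_to_node() already works this early on the arch in
 * question (the function name is illustrative):
 *
 *	static int __init my_pcpu_cpu_distance(unsigned int from,
 *					       unsigned int to)
 *	{
 *		return cpu_to_node(from) == cpu_to_node(to) ?
 *			LOCAL_DISTANCE : REMOTE_DISTANCE;
 *	}
 */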
2002 | |
2003 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK |
2004 | /** |
2005 | * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages |
2006 | * @reserved_size: the size of reserved percpu area in bytes |
2007 | * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE |
2008 | * @free_fn: function to free percpu page, always called with PAGE_SIZE |
2009 | * @populate_pte_fn: function to populate pte |
2010 | * |
2011 | * This is a helper to ease setting up page-remapped first percpu |
2012 | * chunk and can be called where pcpu_setup_first_chunk() is expected. |
2013 | * |
2014 | * This is the basic allocator. Static percpu area is allocated |
2015 | * page-by-page into vmalloc area. |
2016 | * |
2017 | * RETURNS: |
2018 | * 0 on success, -errno on failure. |
2019 | */ |
2020 | int __init pcpu_page_first_chunk(size_t reserved_size, |
2021 | pcpu_fc_alloc_fn_t alloc_fn, |
2022 | pcpu_fc_free_fn_t free_fn, |
2023 | pcpu_fc_populate_pte_fn_t populate_pte_fn) |
2024 | { |
2025 | static struct vm_struct vm; |
2026 | struct pcpu_alloc_info *ai; |
2027 | char psize_str[16]; |
2028 | int unit_pages; |
2029 | size_t pages_size; |
2030 | struct page **pages; |
2031 | int unit, i, j, rc; |
2032 | |
2033 | snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); |
2034 | |
2035 | ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL); |
2036 | if (IS_ERR(ai)) |
2037 | return PTR_ERR(ai); |
2038 | BUG_ON(ai->nr_groups != 1); |
2039 | BUG_ON(ai->groups[0].nr_units != num_possible_cpus()); |
2040 | |
2041 | unit_pages = ai->unit_size >> PAGE_SHIFT; |
2042 | |
2043 | /* unaligned allocations can't be freed, round up to page size */ |
2044 | pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * |
2045 | sizeof(pages[0])); |
2046 | pages = alloc_bootmem(pages_size); |
2047 | |
2048 | /* allocate pages */ |
2049 | j = 0; |
2050 | for (unit = 0; unit < num_possible_cpus(); unit++) |
2051 | for (i = 0; i < unit_pages; i++) { |
2052 | unsigned int cpu = ai->groups[0].cpu_map[unit]; |
2053 | void *ptr; |
2054 | |
2055 | ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); |
2056 | if (!ptr) { |
2057 | pr_warning("PERCPU: failed to allocate %s page " |
2058 | "for cpu%u\n", psize_str, cpu); |
2059 | goto enomem; |
2060 | } |
2061 | pages[j++] = virt_to_page(ptr); |
2062 | } |
2063 | |
2064 | /* allocate vm area, map the pages and copy static data */ |
2065 | vm.flags = VM_ALLOC; |
2066 | vm.size = num_possible_cpus() * ai->unit_size; |
2067 | vm_area_register_early(&vm, PAGE_SIZE); |
2068 | |
2069 | for (unit = 0; unit < num_possible_cpus(); unit++) { |
2070 | unsigned long unit_addr = |
2071 | (unsigned long)vm.addr + unit * ai->unit_size; |
2072 | |
2073 | for (i = 0; i < unit_pages; i++) |
2074 | populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); |
2075 | |
2076 | /* pte already populated, the following shouldn't fail */ |
2077 | rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], |
2078 | unit_pages); |
2079 | if (rc < 0) |
2080 | panic("failed to map percpu area, err=%d\n", rc); |
2081 | |
2082 | /* |
2083 | * FIXME: Archs with virtual cache should flush local |
2084 | * cache for the linear mapping here - something |
2085 | * equivalent to flush_cache_vmap() on the local cpu. |
2086 | * flush_cache_vmap() can't be used as most supporting |
2087 | * data structures are not set up yet. |
2088 | */ |
2089 | |
2090 | /* copy static data */ |
2091 | memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); |
2092 | } |
2093 | |
2094 | /* we're ready, commit */ |
2095 | pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n", |
2096 | unit_pages, psize_str, vm.addr, ai->static_size, |
2097 | ai->reserved_size, ai->dyn_size); |
2098 | |
2099 | rc = pcpu_setup_first_chunk(ai, vm.addr); |
2100 | goto out_free_ar; |
2101 | |
2102 | enomem: |
2103 | while (--j >= 0) |
2104 | free_fn(page_address(pages[j]), PAGE_SIZE); |
2105 | rc = -ENOMEM; |
2106 | out_free_ar: |
2107 | free_bootmem(__pa(pages), pages_size); |
2108 | pcpu_free_alloc_info(ai); |
2109 | return rc; |
2110 | } |
2111 | #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */ |
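
/*
 * Sketch of how an arch might use pcpu_page_first_chunk() (all "my_*"
 * names are illustrative).  The alloc/free callbacks can look just
 * like pcpu_dfl_fc_alloc()/pcpu_dfl_fc_free() below, while the
 * populate_pte callback must make sure the kernel page tables cover
 * the given address (x86, for example, uses populate_extra_pte()):
 *
 *	static void __init my_pcpu_populate_pte(unsigned long addr)
 *	{
 *		populate_extra_pte(addr);
 *	}
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, my_pcpu_alloc,
 *				   my_pcpu_free, my_pcpu_populate_pte);
 */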
2112 | |
2113 | /* |
2114 | * Generic percpu area setup. |
2115 | * |
2116 | * The embedding helper is used because its behavior closely resembles |
2117 | * the original non-dynamic generic percpu area setup. This is |
2118 | * important because many archs have addressing restrictions and might |
2119 | * fail if the percpu area is located far away from the previous |
2120 | * location. As an added bonus, in non-NUMA cases, embedding is |
2121 | * generally a good idea TLB-wise because percpu area can piggy back |
2122 | * on the physical linear memory mapping which uses large page |
2123 | * mappings on applicable archs. |
2124 | */ |
2125 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA |
2126 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; |
2127 | EXPORT_SYMBOL(__per_cpu_offset); |
2128 | |
2129 | static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, |
2130 | size_t align) |
2131 | { |
2132 | return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS)); |
2133 | } |
2134 | |
2135 | static void __init pcpu_dfl_fc_free(void *ptr, size_t size) |
2136 | { |
2137 | free_bootmem(__pa(ptr), size); |
2138 | } |
2139 | |
2140 | void __init setup_per_cpu_areas(void) |
2141 | { |
2142 | unsigned long delta; |
2143 | unsigned int cpu; |
2144 | int rc; |
2145 | |
2146 | /* |
2147 | * Always reserve area for module percpu variables. That's |
2148 | * what the legacy allocator did. |
2149 | */ |
2150 | rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, |
2151 | PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, |
2152 | pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); |
2153 | if (rc < 0) |
2154 | panic("Failed to initialized percpu areas."); |
2155 | |
2156 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; |
2157 | for_each_possible_cpu(cpu) |
2158 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; |
2159 | } |
2160 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ |
2161 |