
1/*
2 * mm/percpu.c - percpu memory allocator
3 *
4 * Copyright (C) 2009 SUSE Linux Products GmbH
5 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
6 *
7 * This file is released under the GPLv2.
8 *
9 * This is the percpu allocator which can handle both static and dynamic
10 * areas. Percpu areas are allocated in chunks. Each chunk consists
11 * of a boot-time determined number of units, and the first chunk is
12 * used for static percpu variables in the kernel image (special boot
13 * time alloc/init handling is necessary as these areas need to be
14 * brought up before allocation services are running). Units grow as
15 * necessary and all units grow or shrink in unison.
16 * When a chunk is filled up, another chunk is allocated.
17 *
18 *  c0                           c1                         c2
19 *  -------------------          -------------------        ------------
20 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
21 *  -------------------  ......  -------------------  ....  ------------
22 *
23 * Allocation is done in offset-size areas of single unit space. Ie,
24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
25 * c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly to
26 * cpus. On NUMA, the mapping can be non-linear and even sparse.
27 * Percpu access can be done by configuring percpu base registers
28 * according to cpu to unit mapping and pcpu_unit_size.
29 *
30 * There are usually many small percpu allocations, many of them being
31 * as small as 4 bytes. The allocator organizes chunks into lists
32 * according to free size and tries to allocate from the fullest one.
33 * Each chunk keeps the maximum contiguous area size hint which is
34 * guaranteed to be equal to or larger than the maximum contiguous
35 * area in the chunk. This helps the allocator not to iterate the
36 * chunk maps unnecessarily.
37 *
38 * Allocation state in each chunk is kept using an array of integers
39 * on chunk->map. A positive value in the map represents a free
40 * region and a negative value an allocated one. Allocation inside a chunk is done
41 * by scanning this map sequentially and serving the first matching
42 * entry. This is mostly copied from the percpu_modalloc() allocator.
43 * Chunks can be determined from the address using the index field
44 * in the page struct. The index field contains a pointer to the chunk.
45 *
46 * To use this allocator, arch code should do the following.
47 *
48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49 * regular address to percpu pointer and back if they need to be
50 * different from the default
51 *
52 * - use pcpu_setup_first_chunk() during percpu area initialization to
53 * setup the first chunk containing the kernel static percpu area
54 */
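
/*
 * Example usage of the dynamic half of this allocator through the generic
 * percpu API (illustrative sketch only; the variable and function names
 * below are hypothetical, while alloc_percpu(), per_cpu_ptr(),
 * this_cpu_inc() and free_percpu() are the real interfaces).
 */
#if 0	/* example only, never compiled */
static unsigned long __percpu *example_hits;

static int __init example_percpu_usage(void)
{
    unsigned int cpu;
    unsigned long total = 0;

    example_hits = alloc_percpu(unsigned long);	/* ends up in pcpu_alloc() */
    if (!example_hits)
        return -ENOMEM;

    this_cpu_inc(*example_hits);	/* fast path via the percpu base register */

    for_each_possible_cpu(cpu)		/* slow path walks every unit */
        total += *per_cpu_ptr(example_hits, cpu);

    free_percpu(example_hits);
    return 0;
}
#endif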
55
56#include <linux/bitmap.h>
57#include <linux/bootmem.h>
58#include <linux/err.h>
59#include <linux/list.h>
60#include <linux/log2.h>
61#include <linux/mm.h>
62#include <linux/module.h>
63#include <linux/mutex.h>
64#include <linux/percpu.h>
65#include <linux/pfn.h>
66#include <linux/slab.h>
67#include <linux/spinlock.h>
68#include <linux/vmalloc.h>
69#include <linux/workqueue.h>
70
71#include <asm/cacheflush.h>
72#include <asm/sections.h>
73#include <asm/tlbflush.h>
74#include <asm/io.h>
75
76#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 share the same slot */
77#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
78
79/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
80#ifndef __addr_to_pcpu_ptr
81#define __addr_to_pcpu_ptr(addr) \
82    (void __percpu *)((unsigned long)(addr) - \
83              (unsigned long)pcpu_base_addr + \
84              (unsigned long)__per_cpu_start)
85#endif
86#ifndef __pcpu_ptr_to_addr
87#define __pcpu_ptr_to_addr(ptr) \
88    (void __force *)((unsigned long)(ptr) + \
89             (unsigned long)pcpu_base_addr - \
90             (unsigned long)__per_cpu_start)
91#endif
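
/*
 * Worked example for the default translation above (added for
 * illustration): percpu pointers are just chunk offsets rebased onto the
 * static area's link address. An address 0x2000 bytes past pcpu_base_addr
 * becomes the percpu pointer __per_cpu_start + 0x2000, and
 * __pcpu_ptr_to_addr() undoes exactly that shift.
 */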
92
93struct pcpu_chunk {
94    struct list_head list; /* linked to pcpu_slot lists */
95    int free_size; /* free bytes in the chunk */
96    int contig_hint; /* max contiguous size hint */
97    void *base_addr; /* base address of this chunk */
98    int map_used; /* # of map entries used */
99    int map_alloc; /* # of map entries allocated */
100    int *map; /* allocation map */
101    void *data; /* chunk data */
102    bool immutable; /* no [de]population allowed */
103    unsigned long populated[]; /* populated bitmap */
104};
105
106static int pcpu_unit_pages __read_mostly;
107static int pcpu_unit_size __read_mostly;
108static int pcpu_nr_units __read_mostly;
109static int pcpu_atom_size __read_mostly;
110static int pcpu_nr_slots __read_mostly;
111static size_t pcpu_chunk_struct_size __read_mostly;
112
113/* cpus with the lowest and highest unit numbers */
114static unsigned int pcpu_first_unit_cpu __read_mostly;
115static unsigned int pcpu_last_unit_cpu __read_mostly;
116
117/* the address of the first chunk which starts with the kernel static area */
118void *pcpu_base_addr __read_mostly;
119EXPORT_SYMBOL_GPL(pcpu_base_addr);
120
121static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
122const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
123
124/* group information, used for vm allocation */
125static int pcpu_nr_groups __read_mostly;
126static const unsigned long *pcpu_group_offsets __read_mostly;
127static const size_t *pcpu_group_sizes __read_mostly;
128
129/*
130 * The first chunk which always exists. Note that unlike other
131 * chunks, this one can be allocated and mapped in several different
132 * ways and thus often doesn't live in the vmalloc area.
133 */
134static struct pcpu_chunk *pcpu_first_chunk;
135
136/*
137 * Optional reserved chunk. This chunk reserves part of the first
138 * chunk and serves it for reserved allocations. The offset up to which
139 * the area is reserved is kept in pcpu_reserved_chunk_limit. When the
140 * reserved area doesn't exist, the following variables contain NULL and 0
141 * respectively.
142 */
143static struct pcpu_chunk *pcpu_reserved_chunk;
144static int pcpu_reserved_chunk_limit;
145
146/*
147 * Synchronization rules.
148 *
149 * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
150 * protects allocation/reclaim paths, chunks, populated bitmap and
151 * vmalloc mapping. The latter is a spinlock and protects the index
152 * data structures - chunk slots, chunks and area maps in chunks.
153 *
154 * During allocation, pcpu_alloc_mutex is kept locked all the time and
155 * pcpu_lock is grabbed and released as necessary. All actual memory
156 * allocations are done using GFP_KERNEL with pcpu_lock released. In
157 * general, percpu memory can't be allocated with irq off but
158 * irqsave/restore are still used in alloc path so that it can be used
159 * from early init path - sched_init() specifically.
160 *
161 * Free path accesses and alters only the index data structures, so it
162 * can be safely called from atomic context. When memory needs to be
163 * returned to the system, free path schedules reclaim_work which
164 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
165 * reclaimed, releases both locks and frees the chunks. Note that it's
166 * necessary to grab both locks to remove a chunk from circulation as
167 * allocation path might be referencing the chunk with only
168 * pcpu_alloc_mutex locked.
169 */
170static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
171static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */
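
/*
 * Sketch of the resulting lock nesting on the allocation path (for
 * illustration; see pcpu_alloc() below for the real thing):
 *
 *	mutex_lock(&pcpu_alloc_mutex);
 *	spin_lock_irqsave(&pcpu_lock, flags);
 *	... area map and slot manipulation ...
 *	spin_unlock_irqrestore(&pcpu_lock, flags);
 *	... GFP_KERNEL allocations, vmalloc mapping ...
 *	mutex_unlock(&pcpu_alloc_mutex);
 */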
172
173static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
174
175/* reclaim work to release fully free chunks, scheduled from free path */
176static void pcpu_reclaim(struct work_struct *work);
177static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
178
179static bool pcpu_addr_in_first_chunk(void *addr)
180{
181    void *first_start = pcpu_first_chunk->base_addr;
182
183    return addr >= first_start && addr < first_start + pcpu_unit_size;
184}
185
186static bool pcpu_addr_in_reserved_chunk(void *addr)
187{
188    void *first_start = pcpu_first_chunk->base_addr;
189
190    return addr >= first_start &&
191        addr < first_start + pcpu_reserved_chunk_limit;
192}
193
194static int __pcpu_size_to_slot(int size)
195{
196    int highbit = fls(size); /* size is in bytes */
197    return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
198}
199
200static int pcpu_size_to_slot(int size)
201{
202    if (size == pcpu_unit_size)
203        return pcpu_nr_slots - 1;
204    return __pcpu_size_to_slot(size);
205}
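
/*
 * Worked example of the slot mapping above (illustrative): with
 * PCPU_SLOT_BASE_SHIFT == 5, a 12-byte area has fls(12) == 4 and lands in
 * slot max(4 - 5 + 2, 1) == 1, a 100-byte area (fls == 7) in slot 4, and
 * a 2048-byte area (fls == 12) in slot 9. A completely free chunk is
 * special-cased into the last slot, pcpu_nr_slots - 1.
 */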
206
207static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
208{
209    if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
210        return 0;
211
212    return pcpu_size_to_slot(chunk->free_size);
213}
214
215/* set the pointer to a chunk in a page struct */
216static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
217{
218    page->index = (unsigned long)pcpu;
219}
220
221/* obtain pointer to a chunk from a page struct */
222static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
223{
224    return (struct pcpu_chunk *)page->index;
225}
226
227static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
228{
229    return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
230}
231
232static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
233                     unsigned int cpu, int page_idx)
234{
235    return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
236        (page_idx << PAGE_SHIFT);
237}
238
239static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
240                       int *rs, int *re, int end)
241{
242    *rs = find_next_zero_bit(chunk->populated, end, *rs);
243    *re = find_next_bit(chunk->populated, end, *rs + 1);
244}
245
246static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
247                     int *rs, int *re, int end)
248{
249    *rs = find_next_bit(chunk->populated, end, *rs);
250    *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
251}
252
253/*
254 * (Un)populated page region iterators. Iterate over (un)populated
255 * page regions between @start and @end in @chunk. @rs and @re should
256 * be integer variables and will be set to start and end page index of
257 * the current region.
258 */
259#define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
260    for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
261         (rs) < (re); \
262         (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
263
264#define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
265    for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
266         (rs) < (re); \
267         (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
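
/*
 * Usage sketch for the iterators above (illustrative; the helper name
 * do_something() is hypothetical):
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		do_something(chunk, rs, re);	- pages [rs, re) are unpopulated
 *
 * The real consumers live in percpu-vm.c, which is included further down.
 */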
268
269/**
270 * pcpu_mem_alloc - allocate memory
271 * @size: bytes to allocate
272 *
273 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
274 * kzalloc() is used; otherwise, vmalloc() is used. The returned
275 * memory is always zeroed.
276 *
277 * CONTEXT:
278 * Does GFP_KERNEL allocation.
279 *
280 * RETURNS:
281 * Pointer to the allocated area on success, NULL on failure.
282 */
283static void *pcpu_mem_alloc(size_t size)
284{
285    if (WARN_ON_ONCE(!slab_is_available()))
286        return NULL;
287
288    if (size <= PAGE_SIZE)
289        return kzalloc(size, GFP_KERNEL);
290    else {
291        void *ptr = vmalloc(size);
292        if (ptr)
293            memset(ptr, 0, size);
294        return ptr;
295    }
296}
297
298/**
299 * pcpu_mem_free - free memory
300 * @ptr: memory to free
301 * @size: size of the area
302 *
303 * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
304 */
305static void pcpu_mem_free(void *ptr, size_t size)
306{
307    if (size <= PAGE_SIZE)
308        kfree(ptr);
309    else
310        vfree(ptr);
311}
312
313/**
314 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
315 * @chunk: chunk of interest
316 * @oslot: the previous slot it was on
317 *
318 * This function is called after an allocation or free changed @chunk.
319 * New slot according to the changed state is determined and @chunk is
320 * moved to the slot. Note that the reserved chunk is never put on
321 * chunk slots.
322 *
323 * CONTEXT:
324 * pcpu_lock.
325 */
326static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
327{
328    int nslot = pcpu_chunk_slot(chunk);
329
330    if (chunk != pcpu_reserved_chunk && oslot != nslot) {
331        if (oslot < nslot)
332            list_move(&chunk->list, &pcpu_slot[nslot]);
333        else
334            list_move_tail(&chunk->list, &pcpu_slot[nslot]);
335    }
336}
337
338/**
339 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
340 * @chunk: chunk of interest
341 *
342 * Determine whether area map of @chunk needs to be extended to
343 * accommodate a new allocation.
344 *
345 * CONTEXT:
346 * pcpu_lock.
347 *
348 * RETURNS:
349 * New target map allocation length if extension is necessary, 0
350 * otherwise.
351 */
352static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
353{
354    int new_alloc;
355
356    if (chunk->map_alloc >= chunk->map_used + 2)
357        return 0;
358
359    new_alloc = PCPU_DFL_MAP_ALLOC;
360    while (new_alloc < chunk->map_used + 2)
361        new_alloc *= 2;
362
363    return new_alloc;
364}
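
/*
 * For example (illustrative numbers): a chunk with map_used == 31 and
 * map_alloc == 32 fails the "map_used + 2" headroom test above, so the
 * target length is grown by doubling from PCPU_DFL_MAP_ALLOC:
 * 16 -> 32 -> 64, and 64 is returned.
 */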
365
366/**
367 * pcpu_extend_area_map - extend area map of a chunk
368 * @chunk: chunk of interest
369 * @new_alloc: new target allocation length of the area map
370 *
371 * Extend area map of @chunk to have @new_alloc entries.
372 *
373 * CONTEXT:
374 * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
375 *
376 * RETURNS:
377 * 0 on success, -errno on failure.
378 */
379static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
380{
381    int *old = NULL, *new = NULL;
382    size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
383    unsigned long flags;
384
385    new = pcpu_mem_alloc(new_size);
386    if (!new)
387        return -ENOMEM;
388
389    /* acquire pcpu_lock and switch to new area map */
390    spin_lock_irqsave(&pcpu_lock, flags);
391
392    if (new_alloc <= chunk->map_alloc)
393        goto out_unlock;
394
395    old_size = chunk->map_alloc * sizeof(chunk->map[0]);
396    old = chunk->map;
397
398    memcpy(new, old, old_size);
399
400    chunk->map_alloc = new_alloc;
401    chunk->map = new;
402    new = NULL;
403
404out_unlock:
405    spin_unlock_irqrestore(&pcpu_lock, flags);
406
407    /*
408     * pcpu_mem_free() might end up calling vfree() which uses
409     * IRQ-unsafe lock and thus can't be called under pcpu_lock.
410     */
411    pcpu_mem_free(old, old_size);
412    pcpu_mem_free(new, new_size);
413
414    return 0;
415}
416
417/**
418 * pcpu_split_block - split a map block
419 * @chunk: chunk of interest
420 * @i: index of map block to split
421 * @head: head size in bytes (can be 0)
422 * @tail: tail size in bytes (can be 0)
423 *
424 * Split the @i'th map block into two or three blocks. If @head is
425 * non-zero, a block of @head bytes is inserted before block @i, moving
426 * it to @i+1 and reducing its size by @head bytes.
427 *
428 * If @tail is non-zero, the target block, which can be @i or @i+1
429 * depending on @head, is reduced by @tail bytes and a @tail byte block
430 * is inserted after the target block.
431 *
432 * @chunk->map must have enough free slots to accommodate the split.
433 *
434 * CONTEXT:
435 * pcpu_lock.
436 */
437static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
438                 int head, int tail)
439{
440    int nr_extra = !!head + !!tail;
441
442    BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
443
444    /* insert new subblocks */
445    memmove(&chunk->map[i + nr_extra], &chunk->map[i],
446        sizeof(chunk->map[0]) * (chunk->map_used - i));
447    chunk->map_used += nr_extra;
448
449    if (head) {
450        chunk->map[i + 1] = chunk->map[i] - head;
451        chunk->map[i++] = head;
452    }
453    if (tail) {
454        chunk->map[i++] -= tail;
455        chunk->map[i] = tail;
456    }
457}
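
/*
 * Worked example (illustrative numbers): splitting block i == 2 of the
 * map { 256, -128, 512 } with head == 64 and tail == 64 first makes room
 * for two extra entries, then produces { 256, -128, 64, 384, 64 } - a
 * 64-byte free head, the 384-byte target area (still marked free here;
 * the caller negates it) and a 64-byte free tail.
 */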
458
459/**
460 * pcpu_alloc_area - allocate area from a pcpu_chunk
461 * @chunk: chunk of interest
462 * @size: wanted size in bytes
463 * @align: wanted align
464 *
465 * Try to allocate @size bytes area aligned at @align from @chunk.
466 * Note that this function only allocates the offset. It doesn't
467 * populate or map the area.
468 *
469 * @chunk->map must have at least two free slots.
470 *
471 * CONTEXT:
472 * pcpu_lock.
473 *
474 * RETURNS:
475 * Allocated offset in @chunk on success, -1 if no matching area is
476 * found.
477 */
478static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
479{
480    int oslot = pcpu_chunk_slot(chunk);
481    int max_contig = 0;
482    int i, off;
483
484    for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
485        bool is_last = i + 1 == chunk->map_used;
486        int head, tail;
487
488        /* extra for alignment requirement */
489        head = ALIGN(off, align) - off;
490        BUG_ON(i == 0 && head != 0);
491
492        if (chunk->map[i] < 0)
493            continue;
494        if (chunk->map[i] < head + size) {
495            max_contig = max(chunk->map[i], max_contig);
496            continue;
497        }
498
499        /*
500         * If head is small or the previous block is free,
501         * merge'em. Note that 'small' is defined as smaller
502         * than sizeof(int), which is very small but isn't too
503         * uncommon for percpu allocations.
504         */
505        if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
506            if (chunk->map[i - 1] > 0)
507                chunk->map[i - 1] += head;
508            else {
509                chunk->map[i - 1] -= head;
510                chunk->free_size -= head;
511            }
512            chunk->map[i] -= head;
513            off += head;
514            head = 0;
515        }
516
517        /* if tail is small, just keep it around */
518        tail = chunk->map[i] - head - size;
519        if (tail < sizeof(int))
520            tail = 0;
521
522        /* split if warranted */
523        if (head || tail) {
524            pcpu_split_block(chunk, i, head, tail);
525            if (head) {
526                i++;
527                off += head;
528                max_contig = max(chunk->map[i - 1], max_contig);
529            }
530            if (tail)
531                max_contig = max(chunk->map[i + 1], max_contig);
532        }
533
534        /* update hint and mark allocated */
535        if (is_last)
536            chunk->contig_hint = max_contig; /* fully scanned */
537        else
538            chunk->contig_hint = max(chunk->contig_hint,
539                         max_contig);
540
541        chunk->free_size -= chunk->map[i];
542        chunk->map[i] = -chunk->map[i];
543
544        pcpu_chunk_relocate(chunk, oslot);
545        return off;
546    }
547
548    chunk->contig_hint = max_contig; /* fully scanned */
549    pcpu_chunk_relocate(chunk, oslot);
550
551    /* tell the upper layer that this chunk has no matching area */
552    return -1;
553}
554
555/**
556 * pcpu_free_area - free area to a pcpu_chunk
557 * @chunk: chunk of interest
558 * @freeme: offset of area to free
559 *
560 * Free the area starting at @freeme back into @chunk. Note that this function
561 * only modifies the allocation map. It doesn't depopulate or unmap
562 * the area.
563 *
564 * CONTEXT:
565 * pcpu_lock.
566 */
567static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
568{
569    int oslot = pcpu_chunk_slot(chunk);
570    int i, off;
571
572    for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
573        if (off == freeme)
574            break;
575    BUG_ON(off != freeme);
576    BUG_ON(chunk->map[i] > 0);
577
578    chunk->map[i] = -chunk->map[i];
579    chunk->free_size += chunk->map[i];
580
581    /* merge with previous? */
582    if (i > 0 && chunk->map[i - 1] >= 0) {
583        chunk->map[i - 1] += chunk->map[i];
584        chunk->map_used--;
585        memmove(&chunk->map[i], &chunk->map[i + 1],
586            (chunk->map_used - i) * sizeof(chunk->map[0]));
587        i--;
588    }
589    /* merge with next? */
590    if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
591        chunk->map[i] += chunk->map[i + 1];
592        chunk->map_used--;
593        memmove(&chunk->map[i + 1], &chunk->map[i + 2],
594            (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
595    }
596
597    chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
598    pcpu_chunk_relocate(chunk, oslot);
599}
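
/*
 * Worked example (illustrative numbers): freeing offset 192 in a chunk
 * whose map is { 128, -64, -256, 64 } flips the -256 entry back to 256,
 * finds the previous entry still allocated, merges with the free 64-byte
 * successor and ends up with { 128, -64, 320 }, updating free_size and
 * contig_hint accordingly.
 */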
600
601static struct pcpu_chunk *pcpu_alloc_chunk(void)
602{
603    struct pcpu_chunk *chunk;
604
605    chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
606    if (!chunk)
607        return NULL;
608
609    chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
610    if (!chunk->map) {
611        kfree(chunk);
612        return NULL;
613    }
614
615    chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
616    chunk->map[chunk->map_used++] = pcpu_unit_size;
617
618    INIT_LIST_HEAD(&chunk->list);
619    chunk->free_size = pcpu_unit_size;
620    chunk->contig_hint = pcpu_unit_size;
621
622    return chunk;
623}
624
625static void pcpu_free_chunk(struct pcpu_chunk *chunk)
626{
627    if (!chunk)
628        return;
629    pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
630    kfree(chunk);
631}
632
633/*
634 * Chunk management implementation.
635 *
636 * To allow different implementations, chunk alloc/free and
637 * [de]population are implemented in a separate file which is pulled
638 * into this file and compiled together. The following functions
639 * should be implemented.
640 *
641 * pcpu_populate_chunk - populate the specified range of a chunk
642 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
643 * pcpu_create_chunk - create a new chunk
644 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
645 * pcpu_addr_to_page - translate address to the backing page
646 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
647 */
648static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
649static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
650static struct pcpu_chunk *pcpu_create_chunk(void);
651static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
652static struct page *pcpu_addr_to_page(void *addr);
653static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
654
655#ifdef CONFIG_NEED_PER_CPU_KM
656#include "percpu-km.c"
657#else
658#include "percpu-vm.c"
659#endif
660
661/**
662 * pcpu_chunk_addr_search - determine chunk containing specified address
663 * @addr: address for which the chunk needs to be determined.
664 *
665 * RETURNS:
666 * The address of the found chunk.
667 */
668static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
669{
670    /* is it in the first chunk? */
671    if (pcpu_addr_in_first_chunk(addr)) {
672        /* is it in the reserved area? */
673        if (pcpu_addr_in_reserved_chunk(addr))
674            return pcpu_reserved_chunk;
675        return pcpu_first_chunk;
676    }
677
678    /*
679     * The address is relative to unit0 which might be unused and
680     * thus unmapped. Offset the address to the unit space of the
681     * current processor before looking it up in the vmalloc
682     * space. Note that any possible cpu id can be used here, so
683     * there's no need to worry about preemption or cpu hotplug.
684     */
685    addr += pcpu_unit_offsets[raw_smp_processor_id()];
686    return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
687}
688
689/**
690 * pcpu_alloc - the percpu allocator
691 * @size: size of area to allocate in bytes
692 * @align: alignment of area (max PAGE_SIZE)
693 * @reserved: allocate from the reserved chunk if available
694 *
695 * Allocate percpu area of @size bytes aligned at @align.
696 *
697 * CONTEXT:
698 * Does GFP_KERNEL allocation.
699 *
700 * RETURNS:
701 * Percpu pointer to the allocated area on success, NULL on failure.
702 */
703static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
704{
705    static int warn_limit = 10;
706    struct pcpu_chunk *chunk;
707    const char *err;
708    int slot, off, new_alloc;
709    unsigned long flags;
710
711    if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
712        WARN(true, "illegal size (%zu) or align (%zu) for "
713             "percpu allocation\n", size, align);
714        return NULL;
715    }
716
717    mutex_lock(&pcpu_alloc_mutex);
718    spin_lock_irqsave(&pcpu_lock, flags);
719
720    /* serve reserved allocations from the reserved chunk if available */
721    if (reserved && pcpu_reserved_chunk) {
722        chunk = pcpu_reserved_chunk;
723
724        if (size > chunk->contig_hint) {
725            err = "alloc from reserved chunk failed";
726            goto fail_unlock;
727        }
728
729        while ((new_alloc = pcpu_need_to_extend(chunk))) {
730            spin_unlock_irqrestore(&pcpu_lock, flags);
731            if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
732                err = "failed to extend area map of reserved chunk";
733                goto fail_unlock_mutex;
734            }
735            spin_lock_irqsave(&pcpu_lock, flags);
736        }
737
738        off = pcpu_alloc_area(chunk, size, align);
739        if (off >= 0)
740            goto area_found;
741
742        err = "alloc from reserved chunk failed";
743        goto fail_unlock;
744    }
745
746restart:
747    /* search through normal chunks */
748    for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
749        list_for_each_entry(chunk, &pcpu_slot[slot], list) {
750            if (size > chunk->contig_hint)
751                continue;
752
753            new_alloc = pcpu_need_to_extend(chunk);
754            if (new_alloc) {
755                spin_unlock_irqrestore(&pcpu_lock, flags);
756                if (pcpu_extend_area_map(chunk,
757                             new_alloc) < 0) {
758                    err = "failed to extend area map";
759                    goto fail_unlock_mutex;
760                }
761                spin_lock_irqsave(&pcpu_lock, flags);
762                /*
763                 * pcpu_lock has been dropped, need to
764                 * restart cpu_slot list walking.
765                 */
766                goto restart;
767            }
768
769            off = pcpu_alloc_area(chunk, size, align);
770            if (off >= 0)
771                goto area_found;
772        }
773    }
774
775    /* hmmm... no space left, create a new chunk */
776    spin_unlock_irqrestore(&pcpu_lock, flags);
777
778    chunk = pcpu_create_chunk();
779    if (!chunk) {
780        err = "failed to allocate new chunk";
781        goto fail_unlock_mutex;
782    }
783
784    spin_lock_irqsave(&pcpu_lock, flags);
785    pcpu_chunk_relocate(chunk, -1);
786    goto restart;
787
788area_found:
789    spin_unlock_irqrestore(&pcpu_lock, flags);
790
791    /* populate, map and clear the area */
792    if (pcpu_populate_chunk(chunk, off, size)) {
793        spin_lock_irqsave(&pcpu_lock, flags);
794        pcpu_free_area(chunk, off);
795        err = "failed to populate";
796        goto fail_unlock;
797    }
798
799    mutex_unlock(&pcpu_alloc_mutex);
800
801    /* return address relative to base address */
802    return __addr_to_pcpu_ptr(chunk->base_addr + off);
803
804fail_unlock:
805    spin_unlock_irqrestore(&pcpu_lock, flags);
806fail_unlock_mutex:
807    mutex_unlock(&pcpu_alloc_mutex);
808    if (warn_limit) {
809        pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
810               "%s\n", size, align, err);
811        dump_stack();
812        if (!--warn_limit)
813            pr_info("PERCPU: limit reached, disable warning\n");
814    }
815    return NULL;
816}
817
818/**
819 * __alloc_percpu - allocate dynamic percpu area
820 * @size: size of area to allocate in bytes
821 * @align: alignment of area (max PAGE_SIZE)
822 *
823 * Allocate percpu area of @size bytes aligned at @align. Might
824 * sleep. Might trigger writeouts.
825 *
826 * CONTEXT:
827 * Does GFP_KERNEL allocation.
828 *
829 * RETURNS:
830 * Percpu pointer to the allocated area on success, NULL on failure.
831 */
832void __percpu *__alloc_percpu(size_t size, size_t align)
833{
834    return pcpu_alloc(size, align, false);
835}
836EXPORT_SYMBOL_GPL(__alloc_percpu);
837
838/**
839 * __alloc_reserved_percpu - allocate reserved percpu area
840 * @size: size of area to allocate in bytes
841 * @align: alignment of area (max PAGE_SIZE)
842 *
843 * Allocate percpu area of @size bytes aligned at @align from reserved
844 * percpu area if arch has set it up; otherwise, allocation is served
845 * from the same dynamic area. Might sleep. Might trigger writeouts.
846 *
847 * CONTEXT:
848 * Does GFP_KERNEL allocation.
849 *
850 * RETURNS:
851 * Percpu pointer to the allocated area on success, NULL on failure.
852 */
853void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
854{
855    return pcpu_alloc(size, align, true);
856}
857
858/**
859 * pcpu_reclaim - reclaim fully free chunks, workqueue function
860 * @work: unused
861 *
862 * Reclaim all fully free chunks except for the first one.
863 *
864 * CONTEXT:
865 * workqueue context.
866 */
867static void pcpu_reclaim(struct work_struct *work)
868{
869    LIST_HEAD(todo);
870    struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
871    struct pcpu_chunk *chunk, *next;
872
873    mutex_lock(&pcpu_alloc_mutex);
874    spin_lock_irq(&pcpu_lock);
875
876    list_for_each_entry_safe(chunk, next, head, list) {
877        WARN_ON(chunk->immutable);
878
879        /* spare the first one */
880        if (chunk == list_first_entry(head, struct pcpu_chunk, list))
881            continue;
882
883        list_move(&chunk->list, &todo);
884    }
885
886    spin_unlock_irq(&pcpu_lock);
887
888    list_for_each_entry_safe(chunk, next, &todo, list) {
889        pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
890        pcpu_destroy_chunk(chunk);
891    }
892
893    mutex_unlock(&pcpu_alloc_mutex);
894}
895
896/**
897 * free_percpu - free percpu area
898 * @ptr: pointer to area to free
899 *
900 * Free percpu area @ptr.
901 *
902 * CONTEXT:
903 * Can be called from atomic context.
904 */
905void free_percpu(void __percpu *ptr)
906{
907    void *addr;
908    struct pcpu_chunk *chunk;
909    unsigned long flags;
910    int off;
911
912    if (!ptr)
913        return;
914
915    addr = __pcpu_ptr_to_addr(ptr);
916
917    spin_lock_irqsave(&pcpu_lock, flags);
918
919    chunk = pcpu_chunk_addr_search(addr);
920    off = addr - chunk->base_addr;
921
922    pcpu_free_area(chunk, off);
923
924    /* if there is more than one fully free chunk, wake up the grim reaper */
925    if (chunk->free_size == pcpu_unit_size) {
926        struct pcpu_chunk *pos;
927
928        list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
929            if (pos != chunk) {
930                schedule_work(&pcpu_reclaim_work);
931                break;
932            }
933    }
934
935    spin_unlock_irqrestore(&pcpu_lock, flags);
936}
937EXPORT_SYMBOL_GPL(free_percpu);
938
939/**
940 * is_kernel_percpu_address - test whether address is from static percpu area
941 * @addr: address to test
942 *
943 * Test whether @addr belongs to in-kernel static percpu area. Module
944 * static percpu areas are not considered. For those, use
945 * is_module_percpu_address().
946 *
947 * RETURNS:
948 * %true if @addr is from in-kernel static percpu area, %false otherwise.
949 */
950bool is_kernel_percpu_address(unsigned long addr)
951{
952    const size_t static_size = __per_cpu_end - __per_cpu_start;
953    void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
954    unsigned int cpu;
955
956    for_each_possible_cpu(cpu) {
957        void *start = per_cpu_ptr(base, cpu);
958
959        if ((void *)addr >= start && (void *)addr < start + static_size)
960            return true;
961    }
962    return false;
963}
964
965/**
966 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
967 * @addr: the address to be converted to physical address
968 *
969 * Given @addr, which is a dereferenceable address obtained via one of
970 * the percpu access macros, this function translates it into its physical
971 * address. The caller is responsible for ensuring @addr stays valid
972 * until this function finishes.
973 *
974 * RETURNS:
975 * The physical address for @addr.
976 */
977phys_addr_t per_cpu_ptr_to_phys(void *addr)
978{
979    void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
980    bool in_first_chunk = false;
981    unsigned long first_start, first_end;
982    unsigned int cpu;
983
984    /*
985     * The following test on first_start/end isn't strictly
986     * necessary but will speed up lookups of addresses which
987     * aren't in the first chunk.
988     */
989    first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
990    first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
991                    pcpu_unit_pages);
992    if ((unsigned long)addr >= first_start &&
993        (unsigned long)addr < first_end) {
994        for_each_possible_cpu(cpu) {
995            void *start = per_cpu_ptr(base, cpu);
996
997            if (addr >= start && addr < start + pcpu_unit_size) {
998                in_first_chunk = true;
999                break;
1000            }
1001        }
1002    }
1003
1004    if (in_first_chunk) {
1005        if ((unsigned long)addr < VMALLOC_START ||
1006            (unsigned long)addr >= VMALLOC_END)
1007            return __pa(addr);
1008        else
1009            return page_to_phys(vmalloc_to_page(addr));
1010    } else
1011        return page_to_phys(pcpu_addr_to_page(addr));
1012}
1013
1014/**
1015 * pcpu_alloc_alloc_info - allocate percpu allocation info
1016 * @nr_groups: the number of groups
1017 * @nr_units: the number of units
1018 *
1019 * Allocate ai which is large enough for @nr_groups groups containing
1020 * @nr_units units. The returned ai's groups[0].cpu_map points to the
1021 * cpu_map array which is long enough for @nr_units and filled with
1022 * NR_CPUS. It's the caller's responsibility to initialize cpu_map
1023 * pointer of other groups.
1024 *
1025 * RETURNS:
1026 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1027 * failure.
1028 */
1029struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1030                              int nr_units)
1031{
1032    struct pcpu_alloc_info *ai;
1033    size_t base_size, ai_size;
1034    void *ptr;
1035    int unit;
1036
1037    base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1038              __alignof__(ai->groups[0].cpu_map[0]));
1039    ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1040
1041    ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1042    if (!ptr)
1043        return NULL;
1044    ai = ptr;
1045    ptr += base_size;
1046
1047    ai->groups[0].cpu_map = ptr;
1048
1049    for (unit = 0; unit < nr_units; unit++)
1050        ai->groups[0].cpu_map[unit] = NR_CPUS;
1051
1052    ai->nr_groups = nr_groups;
1053    ai->__ai_size = PFN_ALIGN(ai_size);
1054
1055    return ai;
1056}
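
/*
 * Layout sketch of the block returned above (illustrative, for
 * nr_groups == 2 and nr_units == 8):
 *
 *	| struct pcpu_alloc_info + groups[0..1] | pad | cpu_map[8] |
 *
 * groups[0].cpu_map points at the trailing array; it's up to the caller
 * to point the other groups' cpu_map into suitable slices of it.
 */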
1057
1058/**
1059 * pcpu_free_alloc_info - free percpu allocation info
1060 * @ai: pcpu_alloc_info to free
1061 *
1062 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1063 */
1064void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1065{
1066    free_bootmem(__pa(ai), ai->__ai_size);
1067}
1068
1069/**
1070 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1071 * @reserved_size: the size of reserved percpu area in bytes
1072 * @dyn_size: minimum free size for dynamic allocation in bytes
1073 * @atom_size: allocation atom size
1074 * @cpu_distance_fn: callback to determine distance between cpus, optional
1075 *
1076 * This function determines grouping of units, their mappings to cpus
1077 * and other parameters considering needed percpu size, allocation
1078 * atom size and distances between CPUs.
1079 *
1080 * Groups are always multiples of atom size and CPUs which are of
1081 * LOCAL_DISTANCE both ways are grouped together and share space for
1082 * units in the same group. The returned configuration is guaranteed
1083 * to have CPUs on different nodes in different groups and >=75% usage
1084 * of allocated virtual address space.
1085 *
1086 * RETURNS:
1087 * On success, pointer to the new allocation_info is returned. On
1088 * failure, ERR_PTR value is returned.
1089 */
1090static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1091                size_t reserved_size, size_t dyn_size,
1092                size_t atom_size,
1093                pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1094{
1095    static int group_map[NR_CPUS] __initdata;
1096    static int group_cnt[NR_CPUS] __initdata;
1097    const size_t static_size = __per_cpu_end - __per_cpu_start;
1098    int nr_groups = 1, nr_units = 0;
1099    size_t size_sum, min_unit_size, alloc_size;
1100    int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
1101    int last_allocs, group, unit;
1102    unsigned int cpu, tcpu;
1103    struct pcpu_alloc_info *ai;
1104    unsigned int *cpu_map;
1105
1106    /* this function may be called multiple times */
1107    memset(group_map, 0, sizeof(group_map));
1108    memset(group_cnt, 0, sizeof(group_cnt));
1109
1110    /* calculate size_sum and ensure dyn_size is enough for early alloc */
1111    size_sum = PFN_ALIGN(static_size + reserved_size +
1112                max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1113    dyn_size = size_sum - static_size - reserved_size;
1114
1115    /*
1116     * Determine min_unit_size, alloc_size and max_upa such that
1117     * alloc_size is multiple of atom_size and is the smallest
1118 * which can accommodate 4k aligned segments which are equal to
1119     * or larger than min_unit_size.
1120     */
1121    min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1122
1123    alloc_size = roundup(min_unit_size, atom_size);
1124    upa = alloc_size / min_unit_size;
1125    while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1126        upa--;
1127    max_upa = upa;
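    /*
     * Worked example (illustrative): with 4k pages, size_sum == 44k and
     * a 2M atom, min_unit_size is 44k, alloc_size rounds up to 2M and
     * upa starts at 2M / 44k == 46; walking it down to a value that
     * divides 2M into page-aligned units yields max_upa == 32, i.e. up
     * to 32 units of 64k can share one 2M allocation.
     */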
1128
1129    /* group cpus according to their proximity */
1130    for_each_possible_cpu(cpu) {
1131        group = 0;
1132    next_group:
1133        for_each_possible_cpu(tcpu) {
1134            if (cpu == tcpu)
1135                break;
1136            if (group_map[tcpu] == group && cpu_distance_fn &&
1137                (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1138                 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1139                group++;
1140                nr_groups = max(nr_groups, group + 1);
1141                goto next_group;
1142            }
1143        }
1144        group_map[cpu] = group;
1145        group_cnt[group]++;
1146    }
1147
1148    /*
1149     * Expand unit size until address space usage goes over 75%
1150     * and then as much as possible without using more address
1151     * space.
1152     */
1153    last_allocs = INT_MAX;
1154    for (upa = max_upa; upa; upa--) {
1155        int allocs = 0, wasted = 0;
1156
1157        if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1158            continue;
1159
1160        for (group = 0; group < nr_groups; group++) {
1161            int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1162            allocs += this_allocs;
1163            wasted += this_allocs * upa - group_cnt[group];
1164        }
1165
1166        /*
1167         * Don't accept if wastage is over 1/3. The
1168         * greater-than comparison ensures upa==1 always
1169         * passes the following check.
1170         */
1171        if (wasted > num_possible_cpus() / 3)
1172            continue;
1173
1174        /* and then don't consume more memory */
1175        if (allocs > last_allocs)
1176            break;
1177        last_allocs = allocs;
1178        best_upa = upa;
1179    }
1180    upa = best_upa;
1181
1182    /* allocate and fill alloc_info */
1183    for (group = 0; group < nr_groups; group++)
1184        nr_units += roundup(group_cnt[group], upa);
1185
1186    ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1187    if (!ai)
1188        return ERR_PTR(-ENOMEM);
1189    cpu_map = ai->groups[0].cpu_map;
1190
1191    for (group = 0; group < nr_groups; group++) {
1192        ai->groups[group].cpu_map = cpu_map;
1193        cpu_map += roundup(group_cnt[group], upa);
1194    }
1195
1196    ai->static_size = static_size;
1197    ai->reserved_size = reserved_size;
1198    ai->dyn_size = dyn_size;
1199    ai->unit_size = alloc_size / upa;
1200    ai->atom_size = atom_size;
1201    ai->alloc_size = alloc_size;
1202
1203    for (group = 0, unit = 0; group_cnt[group]; group++) {
1204        struct pcpu_group_info *gi = &ai->groups[group];
1205
1206        /*
1207         * Initialize base_offset as if all groups are located
1208         * back-to-back. The caller should update this to
1209         * reflect actual allocation.
1210         */
1211        gi->base_offset = unit * ai->unit_size;
1212
1213        for_each_possible_cpu(cpu)
1214            if (group_map[cpu] == group)
1215                gi->cpu_map[gi->nr_units++] = cpu;
1216        gi->nr_units = roundup(gi->nr_units, upa);
1217        unit += gi->nr_units;
1218    }
1219    BUG_ON(unit != nr_units);
1220
1221    return ai;
1222}
1223
1224/**
1225 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1226 * @lvl: loglevel
1227 * @ai: allocation info to dump
1228 *
1229 * Print out information about @ai using loglevel @lvl.
1230 */
1231static void pcpu_dump_alloc_info(const char *lvl,
1232                 const struct pcpu_alloc_info *ai)
1233{
1234    int group_width = 1, cpu_width = 1, width;
1235    char empty_str[] = "--------";
1236    int alloc = 0, alloc_end = 0;
1237    int group, v;
1238    int upa, apl; /* units per alloc, allocs per line */
1239
1240    v = ai->nr_groups;
1241    while (v /= 10)
1242        group_width++;
1243
1244    v = num_possible_cpus();
1245    while (v /= 10)
1246        cpu_width++;
1247    empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1248
1249    upa = ai->alloc_size / ai->unit_size;
1250    width = upa * (cpu_width + 1) + group_width + 3;
1251    apl = rounddown_pow_of_two(max(60 / width, 1));
1252
1253    printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1254           lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1255           ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1256
1257    for (group = 0; group < ai->nr_groups; group++) {
1258        const struct pcpu_group_info *gi = &ai->groups[group];
1259        int unit = 0, unit_end = 0;
1260
1261        BUG_ON(gi->nr_units % upa);
1262        for (alloc_end += gi->nr_units / upa;
1263             alloc < alloc_end; alloc++) {
1264            if (!(alloc % apl)) {
1265                printk("\n");
1266                printk("%spcpu-alloc: ", lvl);
1267            }
1268            printk("[%0*d] ", group_width, group);
1269
1270            for (unit_end += upa; unit < unit_end; unit++)
1271                if (gi->cpu_map[unit] != NR_CPUS)
1272                    printk("%0*d ", cpu_width,
1273                           gi->cpu_map[unit]);
1274                else
1275                    printk("%s ", empty_str);
1276        }
1277    }
1278    printk("\n");
1279}
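
/*
 * For illustration, on a small UMA box the dump above produces output
 * along the lines of (values made up):
 *
 *	pcpu-alloc: s45056 r8192 d20480 u524288 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3
 *
 * i.e. static/reserved/dynamic/unit sizes followed by a bracketed group
 * index and the cpu backing each unit, with dashes for unused units.
 */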
1280
1281/**
1282 * pcpu_setup_first_chunk - initialize the first percpu chunk
1283 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1284 * @base_addr: mapped address
1285 *
1286 * Initialize the first percpu chunk which contains the kernel static
1287 * percpu area. This function is to be called from arch percpu area
1288 * setup path.
1289 *
1290 * @ai contains all information necessary to initialize the first
1291 * chunk and prime the dynamic percpu allocator.
1292 *
1293 * @ai->static_size is the size of static percpu area.
1294 *
1295 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1296 * reserve after the static area in the first chunk. This reserves
1297 * the first chunk such that it's available only through reserved
1298 * percpu allocation. This is primarily used to serve module percpu
1299 * static areas on architectures where the addressing model has
1300 * limited offset range for symbol relocations to guarantee module
1301 * percpu symbols fall inside the relocatable range.
1302 *
1303 * @ai->dyn_size determines the number of bytes available for dynamic
1304 * allocation in the first chunk. The area between @ai->static_size +
1305 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1306 *
1307 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1308 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1309 * @ai->dyn_size.
1310 *
1311 * @ai->atom_size is the allocation atom size and used as alignment
1312 * for vm areas.
1313 *
1314 * @ai->alloc_size is the allocation size and always multiple of
1315 * @ai->atom_size. This is larger than @ai->atom_size if
1316 * @ai->unit_size is larger than @ai->atom_size.
1317 *
1318 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1319 * percpu areas. Units which should be colocated are put into the
1320 * same group. Dynamic VM areas will be allocated according to these
1321 * groupings. If @ai->nr_groups is zero, a single group containing
1322 * all units is assumed.
1323 *
1324 * The caller should have mapped the first chunk at @base_addr and
1325 * copied static data to each unit.
1326 *
1327 * If the first chunk ends up with both reserved and dynamic areas, it
1328 * is served by two chunks - one to serve the core static and reserved
1329 * areas and the other for the dynamic area. They share the same vm
1330 * and page map but use different area allocation maps to stay away
1331 * from each other. The latter chunk is circulated in the chunk slots
1332 * and is available for dynamic allocation like any other chunk.
1333 *
1334 * RETURNS:
1335 * 0 on success, -errno on failure.
1336 */
1337int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1338                  void *base_addr)
1339{
1340    static char cpus_buf[4096] __initdata;
1341    static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1342    static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1343    size_t dyn_size = ai->dyn_size;
1344    size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1345    struct pcpu_chunk *schunk, *dchunk = NULL;
1346    unsigned long *group_offsets;
1347    size_t *group_sizes;
1348    unsigned long *unit_off;
1349    unsigned int cpu;
1350    int *unit_map;
1351    int group, unit, i;
1352
1353    cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1354
1355#define PCPU_SETUP_BUG_ON(cond) do { \
1356    if (unlikely(cond)) { \
1357        pr_emerg("PERCPU: failed to initialize, %s", #cond); \
1358        pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
1359        pcpu_dump_alloc_info(KERN_EMERG, ai); \
1360        BUG(); \
1361    } \
1362} while (0)
1363
1364    /* sanity checks */
1365    PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1366    PCPU_SETUP_BUG_ON(!ai->static_size);
1367    PCPU_SETUP_BUG_ON(!base_addr);
1368    PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1369    PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1370    PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1371    PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1372    PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1373
1374    /* process group information and build config tables accordingly */
1375    group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1376    group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1377    unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1378    unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1379
1380    for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1381        unit_map[cpu] = UINT_MAX;
1382    pcpu_first_unit_cpu = NR_CPUS;
1383
1384    for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1385        const struct pcpu_group_info *gi = &ai->groups[group];
1386
1387        group_offsets[group] = gi->base_offset;
1388        group_sizes[group] = gi->nr_units * ai->unit_size;
1389
1390        for (i = 0; i < gi->nr_units; i++) {
1391            cpu = gi->cpu_map[i];
1392            if (cpu == NR_CPUS)
1393                continue;
1394
1395            PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1396            PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1397            PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1398
1399            unit_map[cpu] = unit + i;
1400            unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1401
1402            if (pcpu_first_unit_cpu == NR_CPUS)
1403                pcpu_first_unit_cpu = cpu;
1404            pcpu_last_unit_cpu = cpu;
1405        }
1406    }
1407    pcpu_nr_units = unit;
1408
1409    for_each_possible_cpu(cpu)
1410        PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1411
1412    /* we're done parsing the input, undefine BUG macro and dump config */
1413#undef PCPU_SETUP_BUG_ON
1414    pcpu_dump_alloc_info(KERN_INFO, ai);
1415
1416    pcpu_nr_groups = ai->nr_groups;
1417    pcpu_group_offsets = group_offsets;
1418    pcpu_group_sizes = group_sizes;
1419    pcpu_unit_map = unit_map;
1420    pcpu_unit_offsets = unit_off;
1421
1422    /* determine basic parameters */
1423    pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1424    pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1425    pcpu_atom_size = ai->atom_size;
1426    pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1427        BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1428
1429    /*
1430     * Allocate chunk slots. The additional last slot is for
1431     * empty chunks.
1432     */
1433    pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1434    pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1435    for (i = 0; i < pcpu_nr_slots; i++)
1436        INIT_LIST_HEAD(&pcpu_slot[i]);
1437
1438    /*
1439     * Initialize static chunk. If reserved_size is zero, the
1440     * static chunk covers static area + dynamic allocation area
1441     * in the first chunk. If reserved_size is not zero, it
1442     * covers static area + reserved area (mostly used for module
1443     * static percpu allocation).
1444     */
1445    schunk = alloc_bootmem(pcpu_chunk_struct_size);
1446    INIT_LIST_HEAD(&schunk->list);
1447    schunk->base_addr = base_addr;
1448    schunk->map = smap;
1449    schunk->map_alloc = ARRAY_SIZE(smap);
1450    schunk->immutable = true;
1451    bitmap_fill(schunk->populated, pcpu_unit_pages);
1452
1453    if (ai->reserved_size) {
1454        schunk->free_size = ai->reserved_size;
1455        pcpu_reserved_chunk = schunk;
1456        pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1457    } else {
1458        schunk->free_size = dyn_size;
1459        dyn_size = 0; /* dynamic area covered */
1460    }
1461    schunk->contig_hint = schunk->free_size;
1462
1463    schunk->map[schunk->map_used++] = -ai->static_size;
1464    if (schunk->free_size)
1465        schunk->map[schunk->map_used++] = schunk->free_size;
1466
1467    /* init dynamic chunk if necessary */
1468    if (dyn_size) {
1469        dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1470        INIT_LIST_HEAD(&dchunk->list);
1471        dchunk->base_addr = base_addr;
1472        dchunk->map = dmap;
1473        dchunk->map_alloc = ARRAY_SIZE(dmap);
1474        dchunk->immutable = true;
1475        bitmap_fill(dchunk->populated, pcpu_unit_pages);
1476
1477        dchunk->contig_hint = dchunk->free_size = dyn_size;
1478        dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1479        dchunk->map[dchunk->map_used++] = dchunk->free_size;
1480    }
1481
1482    /* link the first chunk in */
1483    pcpu_first_chunk = dchunk ?: schunk;
1484    pcpu_chunk_relocate(pcpu_first_chunk, -1);
1485
1486    /* we're done */
1487    pcpu_base_addr = base_addr;
1488    return 0;
1489}
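
/*
 * Worked example of the resulting first chunk (illustrative sizes): with
 * 44k static, 8k reserved and 20k dynamic area in a 512k unit, schunk
 * becomes the reserved chunk with map { -44k, 8k } and
 * pcpu_reserved_chunk_limit == 52k, while dchunk serves dynamic
 * allocations with map { -52k, 20k } and is the chunk linked into the
 * slots above as pcpu_first_chunk.
 */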
1490
1491const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1492    [PCPU_FC_AUTO] = "auto",
1493    [PCPU_FC_EMBED] = "embed",
1494    [PCPU_FC_PAGE] = "page",
1495};
1496
1497enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1498
1499static int __init percpu_alloc_setup(char *str)
1500{
1501    if (0)
1502        /* nada */;
1503#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1504    else if (!strcmp(str, "embed"))
1505        pcpu_chosen_fc = PCPU_FC_EMBED;
1506#endif
1507#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1508    else if (!strcmp(str, "page"))
1509        pcpu_chosen_fc = PCPU_FC_PAGE;
1510#endif
1511    else
1512        pr_warning("PERCPU: unknown allocator %s specified\n", str);
1513
1514    return 0;
1515}
1516early_param("percpu_alloc", percpu_alloc_setup);
1517
1518#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1519    !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1520/**
1521 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1522 * @reserved_size: the size of reserved percpu area in bytes
1523 * @dyn_size: minimum free size for dynamic allocation in bytes
1524 * @atom_size: allocation atom size
1525 * @cpu_distance_fn: callback to determine distance between cpus, optional
1526 * @alloc_fn: function to allocate percpu page
1527 * @free_fn: function to free percpu page
1528 *
1529 * This is a helper to ease setting up embedded first percpu chunk and
1530 * can be called where pcpu_setup_first_chunk() is expected.
1531 *
1532 * If this function is used to setup the first chunk, it is allocated
1533 * by calling @alloc_fn and used as-is without being mapped into
1534 * vmalloc area. Allocations are always whole multiples of @atom_size
1535 * aligned to @atom_size.
1536 *
1537 * This enables the first chunk to piggy back on the linear physical
1538 * mapping which often uses larger page size. Please note that this
1539 * can result in very sparse cpu->unit mapping on NUMA machines thus
1540 * requiring large vmalloc address space. Don't use this allocator if
1541 * vmalloc space is not orders of magnitude larger than distances
1542 * between node memory addresses (ie. 32bit NUMA machines).
1543 *
1544 * @dyn_size specifies the minimum dynamic area size.
1545 *
1546 * If the needed size is smaller than the minimum or specified unit
1547 * size, the leftover is returned using @free_fn.
1548 *
1549 * RETURNS:
1550 * 0 on success, -errno on failure.
1551 */
1552int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1553                  size_t atom_size,
1554                  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1555                  pcpu_fc_alloc_fn_t alloc_fn,
1556                  pcpu_fc_free_fn_t free_fn)
1557{
1558    void *base = (void *)ULONG_MAX;
1559    void **areas = NULL;
1560    struct pcpu_alloc_info *ai;
1561    size_t size_sum, areas_size, max_distance;
1562    int group, i, rc;
1563
1564    ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1565                   cpu_distance_fn);
1566    if (IS_ERR(ai))
1567        return PTR_ERR(ai);
1568
1569    size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1570    areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1571
1572    areas = alloc_bootmem_nopanic(areas_size);
1573    if (!areas) {
1574        rc = -ENOMEM;
1575        goto out_free;
1576    }
1577
1578    /* allocate, copy and determine base address */
1579    for (group = 0; group < ai->nr_groups; group++) {
1580        struct pcpu_group_info *gi = &ai->groups[group];
1581        unsigned int cpu = NR_CPUS;
1582        void *ptr;
1583
1584        for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1585            cpu = gi->cpu_map[i];
1586        BUG_ON(cpu == NR_CPUS);
1587
1588        /* allocate space for the whole group */
1589        ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1590        if (!ptr) {
1591            rc = -ENOMEM;
1592            goto out_free_areas;
1593        }
1594        areas[group] = ptr;
1595
1596        base = min(ptr, base);
1597
1598        for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1599            if (gi->cpu_map[i] == NR_CPUS) {
1600                /* unused unit, free whole */
1601                free_fn(ptr, ai->unit_size);
1602                continue;
1603            }
1604            /* copy and return the unused part */
1605            memcpy(ptr, __per_cpu_load, ai->static_size);
1606            free_fn(ptr + size_sum, ai->unit_size - size_sum);
1607        }
1608    }
1609
1610    /* base address is now known, determine group base offsets */
1611    max_distance = 0;
1612    for (group = 0; group < ai->nr_groups; group++) {
1613        ai->groups[group].base_offset = areas[group] - base;
1614        max_distance = max_t(size_t, max_distance,
1615                     ai->groups[group].base_offset);
1616    }
1617    max_distance += ai->unit_size;
1618
1619    /* warn if maximum distance is further than 75% of vmalloc space */
1620    if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1621        pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1622               "space 0x%lx\n",
1623               max_distance, VMALLOC_END - VMALLOC_START);
1624#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1625        /* and fail if we have fallback */
1626        rc = -EINVAL;
1627        goto out_free;
1628#endif
1629    }
1630
1631    pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1632        PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1633        ai->dyn_size, ai->unit_size);
1634
1635    rc = pcpu_setup_first_chunk(ai, base);
1636    goto out_free;
1637
1638out_free_areas:
1639    for (group = 0; group < ai->nr_groups; group++)
1640        free_fn(areas[group],
1641            ai->groups[group].nr_units * ai->unit_size);
1642out_free:
1643    pcpu_free_alloc_info(ai);
1644    if (areas)
1645        free_bootmem(__pa(areas), areas_size);
1646    return rc;
1647}
1648#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
1649      !CONFIG_HAVE_SETUP_PER_CPU_AREA */
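
/*
 * Illustrative sketch only, not part of the original file: a NUMA-capable
 * arch would typically call pcpu_embed_first_chunk() from its own
 * setup_per_cpu_areas() with a cpu distance callback plus node-aware
 * bootmem callbacks.  example_early_cpu_to_node(), example_node_alloc()
 * and example_node_free() are hypothetical arch-provided helpers assumed
 * to exist; LOCAL_DISTANCE and REMOTE_DISTANCE are from <linux/topology.h>.
 */
static int __init example_pcpu_cpu_distance(unsigned int from, unsigned int to)
{
    /* cpus sharing a node are close, everything else is far */
    if (example_early_cpu_to_node(from) == example_early_cpu_to_node(to))
        return LOCAL_DISTANCE;
    return REMOTE_DISTANCE;
}

static void __init example_numa_setup_per_cpu_areas(void)
{
    int rc;

    /* PMD_SIZE atoms let each group ride on large-page linear mappings */
    rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
                    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
                    example_pcpu_cpu_distance,
                    example_node_alloc, example_node_free);
    if (rc < 0)
        panic("cannot initialize percpu area (err=%d)\n", rc);
}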
1650
1651#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1652/**
1653 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1654 * @reserved_size: the size of reserved percpu area in bytes
1655 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1656 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1657 * @populate_pte_fn: function to populate pte
1658 *
1659 * This is a helper to ease setting up page-remapped first percpu
1660 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1661 *
1662 * This is the basic allocator. Static percpu area is allocated
1663 * page-by-page into vmalloc area.
1664 *
1665 * RETURNS:
1666 * 0 on success, -errno on failure.
1667 */
1668int __init pcpu_page_first_chunk(size_t reserved_size,
1669                 pcpu_fc_alloc_fn_t alloc_fn,
1670                 pcpu_fc_free_fn_t free_fn,
1671                 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1672{
1673    static struct vm_struct vm;
1674    struct pcpu_alloc_info *ai;
1675    char psize_str[16];
1676    int unit_pages;
1677    size_t pages_size;
1678    struct page **pages;
1679    int unit, i, j, rc;
1680
1681    snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1682
1683    ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
1684    if (IS_ERR(ai))
1685        return PTR_ERR(ai);
1686    BUG_ON(ai->nr_groups != 1);
1687    BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1688
1689    unit_pages = ai->unit_size >> PAGE_SHIFT;
1690
1691    /* unaligned allocations can't be freed, round up to page size */
1692    pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1693                   sizeof(pages[0]));
1694    pages = alloc_bootmem(pages_size);
1695
1696    /* allocate pages */
1697    j = 0;
1698    for (unit = 0; unit < num_possible_cpus(); unit++)
1699        for (i = 0; i < unit_pages; i++) {
1700            unsigned int cpu = ai->groups[0].cpu_map[unit];
1701            void *ptr;
1702
1703            ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1704            if (!ptr) {
1705                pr_warning("PERCPU: failed to allocate %s page "
1706                       "for cpu%u\n", psize_str, cpu);
1707                goto enomem;
1708            }
1709            pages[j++] = virt_to_page(ptr);
1710        }
1711
1712    /* allocate vm area, map the pages and copy static data */
1713    vm.flags = VM_ALLOC;
1714    vm.size = num_possible_cpus() * ai->unit_size;
1715    vm_area_register_early(&vm, PAGE_SIZE);
1716
1717    for (unit = 0; unit < num_possible_cpus(); unit++) {
1718        unsigned long unit_addr =
1719            (unsigned long)vm.addr + unit * ai->unit_size;
1720
1721        for (i = 0; i < unit_pages; i++)
1722            populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1723
1724        /* pte already populated, the following shouldn't fail */
1725        rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1726                      unit_pages);
1727        if (rc < 0)
1728            panic("failed to map percpu area, err=%d\n", rc);
1729
1730        /*
1731         * FIXME: Archs with virtual cache should flush local
1732         * cache for the linear mapping here - something
1733         * equivalent to flush_cache_vmap() on the local cpu.
1734         * flush_cache_vmap() can't be used as most supporting
1735         * data structures are not set up yet.
1736         */
1737
1738        /* copy static data */
1739        memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1740    }
1741
1742    /* we're ready, commit */
1743    pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1744        unit_pages, psize_str, vm.addr, ai->static_size,
1745        ai->reserved_size, ai->dyn_size);
1746
1747    rc = pcpu_setup_first_chunk(ai, vm.addr);
1748    goto out_free_ar;
1749
1750enomem:
1751    while (--j >= 0)
1752        free_fn(page_address(pages[j]), PAGE_SIZE);
1753    rc = -ENOMEM;
1754out_free_ar:
1755    free_bootmem(__pa(pages), pages_size);
1756    pcpu_free_alloc_info(ai);
1757    return rc;
1758}
1759#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
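
/*
 * Illustrative sketch only, not part of the original file: an arch that
 * cannot embed the first chunk might wire this helper up as below.
 * example_page_alloc(), example_page_free() and example_populate_pte()
 * are hypothetical arch callbacks; as documented above, the alloc and
 * free callbacks are always invoked with PAGE_SIZE.
 */
static void __init example_page_setup_per_cpu_areas(void)
{
    int rc;

    rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
                   example_page_alloc, example_page_free,
                   example_populate_pte);
    if (rc < 0)
        panic("cannot initialize percpu area (err=%d)\n", rc);
}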
1760
1761/*
1762 * Generic percpu area setup.
1763 *
1764 * The embedding helper is used because its behavior closely resembles
1765 * the original non-dynamic generic percpu area setup. This is
1766 * important because many archs have addressing restrictions and might
1767 * fail if the percpu area is located far away from the previous
1768 * location. As an added bonus, in non-NUMA cases, embedding is
1769 * generally a good idea TLB-wise because percpu area can piggy back
1770 * on the physical linear memory mapping which uses large page
1771 * mappings on applicable archs.
1772 */
1773#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1774unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1775EXPORT_SYMBOL(__per_cpu_offset);
1776
1777static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1778                       size_t align)
1779{
1780    return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
1781}
1782
1783static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1784{
1785    free_bootmem(__pa(ptr), size);
1786}
1787
1788void __init setup_per_cpu_areas(void)
1789{
1790    unsigned long delta;
1791    unsigned int cpu;
1792    int rc;
1793
1794    /*
1795     * Always reserve area for module percpu variables. That's
1796     * what the legacy allocator did.
1797     */
1798    rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1799                    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1800                    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1801    if (rc < 0)
1802        panic("Failed to initialize percpu areas.");
1803
1804    delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1805    for_each_possible_cpu(cpu)
1806        __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1807}
1808#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
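
/*
 * For reference (simplified sketch, not part of the original file): once
 * __per_cpu_offset[] is populated as above, the generic accessors in
 * include/asm-generic/percpu.h resolve a percpu address by adding the
 * cpu's offset, roughly:
 *
 *    #define per_cpu_offset(x)    (__per_cpu_offset[x])
 *    #define per_cpu(var, cpu) \
 *        (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
 */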
1809
1810/*
1811 * First and reserved chunks are initialized with temporary allocation
1812 * map in initdata so that they can be used before slab is online.
1813 * This function is called after slab is brought up and replaces those
1814 * with properly allocated maps.
1815 */
1816void __init percpu_init_late(void)
1817{
1818    struct pcpu_chunk *target_chunks[] =
1819        { pcpu_first_chunk, pcpu_reserved_chunk, NULL };
1820    struct pcpu_chunk *chunk;
1821    unsigned long flags;
1822    int i;
1823
1824    for (i = 0; (chunk = target_chunks[i]); i++) {
1825        int *map;
1826        const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
1827
1828        BUILD_BUG_ON(size > PAGE_SIZE);
1829
1830        map = pcpu_mem_alloc(size);
1831        BUG_ON(!map);
1832
1833        spin_lock_irqsave(&pcpu_lock, flags);
1834        memcpy(map, chunk->map, size);
1835        chunk->map = map;
1836        spin_unlock_irqrestore(&pcpu_lock, flags);
1837    }
1838}
1839
