/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009 SUSE Linux Products GmbH
 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas. Percpu areas are allocated in chunks. Each chunk
 * consists of a boot-time determined number of units and the first
 * chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space. Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly to
 * cpus. On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes. The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps a maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk. This helps the allocator avoid iterating over
 * the chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * in chunk->map. A positive value in the map represents a free
 * region and a negative value an allocated one. Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry. This is mostly copied from the
 * percpu_modalloc() allocator. The chunk containing a given address
 * can be determined from the index field in the page struct, which
 * holds a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following.
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
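
/*
 * Example (illustrative only, not part of the allocator): typical use
 * of the dynamic percpu API implemented below. alloc_percpu(),
 * per_cpu_ptr() and free_percpu() are the public entry points defined
 * in this file and linux/percpu.h; the stats structure and variable
 * names are made up for the sketch.
 *
 *	struct my_stats { unsigned long events; };
 *	struct my_stats __percpu *stats;
 *	unsigned long total = 0;
 *	unsigned int cpu;
 *
 *	stats = alloc_percpu(struct my_stats);	(zero-filled, may sleep)
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->events);		(cheap local-cpu access)
 *	for_each_possible_cpu(cpu)		(slow path: walk every unit)
 *		total += per_cpu_ptr(stats, cpu)->events;
 *	free_percpu(stats);			(ok from atomic context)
 */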

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 bytes share the same slot */
#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr) \
    (void __percpu *)((unsigned long)(addr) - \
              (unsigned long)pcpu_base_addr + \
              (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr) \
    (void __force *)((unsigned long)(ptr) + \
             (unsigned long)pcpu_base_addr - \
             (unsigned long)__per_cpu_start)
#endif
#else /* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
#endif /* CONFIG_SMP */

struct pcpu_chunk {
    struct list_head list; /* linked to pcpu_slot lists */
    int free_size; /* free bytes in the chunk */
    int contig_hint; /* max contiguous size hint */
    void *base_addr; /* base address of this chunk */
    int map_used; /* # of map entries used */
    int map_alloc; /* # of map entries allocated */
    int *map; /* allocation map */
    void *data; /* chunk data */
    bool immutable; /* no [de]population allowed */
    unsigned long populated[]; /* populated bitmap */
};
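
/*
 * Illustrative map encoding (example values, not from the source): in
 * a 64k unit, a single 512 byte allocation at offset 4096 would leave
 *
 *	map[] = { 4096, -512, 60928 };	map_used = 3;
 *
 * ie. 4096 bytes free, 512 bytes allocated, 60928 bytes free; the
 * absolute values of the entries always sum up to pcpu_unit_size.
 */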

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists. Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk. This chunk reserves part of the first
 * chunk and serves it for reserved allocations. The amount of
 * reserved offset is in pcpu_reserved_chunk_limit. When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping. The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary. All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released. In
 * general, percpu memory can't be allocated with irqs off, but
 * irqsave/restore are still used in the alloc path so that it can be
 * used from the early init path - sched_init() specifically.
 *
 * The free path accesses and alters only the index data structures,
 * so it can be safely called from atomic context. When memory needs
 * to be returned to the system, the free path schedules reclaim_work
 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
 * be reclaimed, releases both locks and frees the chunks. Note that
 * it's necessary to grab both locks to remove a chunk from
 * circulation as the allocation path might be referencing the chunk
 * with only pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static bool pcpu_addr_in_first_chunk(void *addr)
{
    void *first_start = pcpu_first_chunk->base_addr;

    return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
    void *first_start = pcpu_first_chunk->base_addr;

    return addr >= first_start &&
        addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
    int highbit = fls(size); /* size is in bytes */
    return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
    if (size == pcpu_unit_size)
        return pcpu_nr_slots - 1;
    return __pcpu_size_to_slot(size);
}
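
/*
 * Worked example (illustrative): with PCPU_SLOT_BASE_SHIFT == 5,
 * __pcpu_size_to_slot(64) == max(fls(64) - 5 + 2, 1) == 4 and
 * __pcpu_size_to_slot(1024) == max(11 - 5 + 2, 1) == 8, ie. roughly
 * one slot per power of two of free size. pcpu_size_to_slot()
 * special-cases a completely free chunk into the last slot, which is
 * what lets fully free chunks be found (and reclaimed) cheaply.
 */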

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
    if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
        return 0;

    return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
    page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
    return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
    return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
                     unsigned int cpu, int page_idx)
{
    return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
        (page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
                       int *rs, int *re, int end)
{
    *rs = find_next_zero_bit(chunk->populated, end, *rs);
    *re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
                     int *rs, int *re, int end)
{
    *rs = find_next_bit(chunk->populated, end, *rs);
    *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators. Iterate over (un)populated
 * page regions between @start and @end in @chunk. @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
    for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
         (rs) < (re); \
         (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
    for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
         (rs) < (re); \
         (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

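/*
 * A minimal usage sketch (illustrative) of the iterators above, in the
 * style of the chunk [de]population code in percpu-vm.c; @rs and @re
 * receive the page span of each unpopulated region in turn:
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
 *		... operate on unpopulated pages [rs, re) of chunk ...
 *	}
 */
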
/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used. The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
    if (WARN_ON_ONCE(!slab_is_available()))
        return NULL;

    if (size <= PAGE_SIZE)
        return kzalloc(size, GFP_KERNEL);
    else
        return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
    if (size <= PAGE_SIZE)
        kfree(ptr);
    else
        vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot. Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
    int nslot = pcpu_chunk_slot(chunk);

    if (chunk != pcpu_reserved_chunk && oslot != nslot) {
        if (oslot < nslot)
            list_move(&chunk->list, &pcpu_slot[nslot]);
        else
            list_move_tail(&chunk->list, &pcpu_slot[nslot]);
    }
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
    int new_alloc;

    if (chunk->map_alloc >= chunk->map_used + 2)
        return 0;

    new_alloc = PCPU_DFL_MAP_ALLOC;
    while (new_alloc < chunk->map_used + 2)
        new_alloc *= 2;

    return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
    int *old = NULL, *new = NULL;
    size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
    unsigned long flags;

    new = pcpu_mem_alloc(new_size);
    if (!new)
        return -ENOMEM;

    /* acquire pcpu_lock and switch to new area map */
    spin_lock_irqsave(&pcpu_lock, flags);

    if (new_alloc <= chunk->map_alloc)
        goto out_unlock;

    old_size = chunk->map_alloc * sizeof(chunk->map[0]);
    old = chunk->map;

    memcpy(new, old, old_size);

    chunk->map_alloc = new_alloc;
    chunk->map = new;
    new = NULL;

out_unlock:
    spin_unlock_irqrestore(&pcpu_lock, flags);

    /*
     * pcpu_mem_free() might end up calling vfree() which uses
     * IRQ-unsafe lock and thus can't be called under pcpu_lock.
     */
    pcpu_mem_free(old, old_size);
    pcpu_mem_free(new, new_size);

    return 0;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks. If @head is
 * non-zero, a @head bytes block is inserted before block @i, moving
 * it to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and a @tail bytes
 * block is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
                 int head, int tail)
{
    int nr_extra = !!head + !!tail;

    BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

    /* insert new subblocks */
    memmove(&chunk->map[i + nr_extra], &chunk->map[i],
        sizeof(chunk->map[0]) * (chunk->map_used - i));
    chunk->map_used += nr_extra;

    if (head) {
        chunk->map[i + 1] = chunk->map[i] - head;
        chunk->map[i++] = head;
    }
    if (tail) {
        chunk->map[i++] -= tail;
        chunk->map[i] = tail;
    }
}
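
/*
 * Example (illustrative): pcpu_split_block(chunk, i, 256, 256) on a
 * 1024 byte free block at index @i turns
 *
 *	..., 1024, ...	into	..., 256, 512, 256, ...
 *
 * ie. a 256 byte head block, the 512 byte target block and a 256 byte
 * tail block, with map_used grown by two.
 */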

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset. It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
    int oslot = pcpu_chunk_slot(chunk);
    int max_contig = 0;
    int i, off;

    for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
        bool is_last = i + 1 == chunk->map_used;
        int head, tail;

        /* extra for alignment requirement */
        head = ALIGN(off, align) - off;
        BUG_ON(i == 0 && head != 0);

        if (chunk->map[i] < 0)
            continue;
        if (chunk->map[i] < head + size) {
            max_contig = max(chunk->map[i], max_contig);
            continue;
        }

        /*
         * If head is small or the previous block is free,
         * merge 'em. Note that 'small' is defined as smaller
         * than sizeof(int), which is very small but isn't too
         * uncommon for percpu allocations.
         */
        if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
            if (chunk->map[i - 1] > 0)
                chunk->map[i - 1] += head;
            else {
                chunk->map[i - 1] -= head;
                chunk->free_size -= head;
            }
            chunk->map[i] -= head;
            off += head;
            head = 0;
        }

        /* if tail is small, just keep it around */
        tail = chunk->map[i] - head - size;
        if (tail < sizeof(int))
            tail = 0;

        /* split if warranted */
        if (head || tail) {
            pcpu_split_block(chunk, i, head, tail);
            if (head) {
                i++;
                off += head;
                max_contig = max(chunk->map[i - 1], max_contig);
            }
            if (tail)
                max_contig = max(chunk->map[i + 1], max_contig);
        }

        /* update hint and mark allocated */
        if (is_last)
            chunk->contig_hint = max_contig; /* fully scanned */
        else
            chunk->contig_hint = max(chunk->contig_hint,
                         max_contig);

        chunk->free_size -= chunk->map[i];
        chunk->map[i] = -chunk->map[i];

        pcpu_chunk_relocate(chunk, oslot);
        return off;
    }

    chunk->contig_hint = max_contig; /* fully scanned */
    pcpu_chunk_relocate(chunk, oslot);

    /* tell the upper layer that this chunk has no matching area */
    return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free the area starting at @freeme in @chunk. Note that this
 * function only modifies the allocation map. It doesn't depopulate
 * or unmap the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
    int oslot = pcpu_chunk_slot(chunk);
    int i, off;

    for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
        if (off == freeme)
            break;
    BUG_ON(off != freeme);
    BUG_ON(chunk->map[i] > 0);

    chunk->map[i] = -chunk->map[i];
    chunk->free_size += chunk->map[i];

    /* merge with previous? */
    if (i > 0 && chunk->map[i - 1] >= 0) {
        chunk->map[i - 1] += chunk->map[i];
        chunk->map_used--;
        memmove(&chunk->map[i], &chunk->map[i + 1],
            (chunk->map_used - i) * sizeof(chunk->map[0]));
        i--;
    }
    /* merge with next? */
    if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
        chunk->map[i] += chunk->map[i + 1];
        chunk->map_used--;
        memmove(&chunk->map[i + 1], &chunk->map[i + 2],
            (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
    }

    chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
    pcpu_chunk_relocate(chunk, oslot);
}
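
/*
 * Example (illustrative): freeing the allocated area in a map
 * fragment { 256, -512, 128 } first flips -512 back to 512 and then
 * merges it with both free neighbors, leaving a single 896 byte free
 * entry with map_used reduced by two.
 */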

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
    struct pcpu_chunk *chunk;

    chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
    if (!chunk)
        return NULL;

    chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
    if (!chunk->map) {
        kfree(chunk);
        return NULL;
    }

    chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
    chunk->map[chunk->map_used++] = pcpu_unit_size;

    INIT_LIST_HEAD(&chunk->list);
    chunk->free_size = pcpu_unit_size;
    chunk->contig_hint = pcpu_unit_size;

    return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
    if (!chunk)
        return;
    pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
    kfree(chunk);
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together. The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk - populate the specified range of a chunk
 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
 * pcpu_create_chunk - create a new chunk
 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page - translate address to the corresponding struct page
 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
    /* is it in the first chunk? */
    if (pcpu_addr_in_first_chunk(addr)) {
        /* is it in the reserved area? */
        if (pcpu_addr_in_reserved_chunk(addr))
            return pcpu_reserved_chunk;
        return pcpu_first_chunk;
    }

    /*
     * The address is relative to unit0 which might be unused and
     * thus unmapped. Offset the address to the unit space of the
     * current processor before looking it up in the vmalloc
     * space. Note that any possible cpu id can be used here, so
     * there's no need to worry about preemption or cpu hotplug.
     */
    addr += pcpu_unit_offsets[raw_smp_processor_id()];
    return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
    static int warn_limit = 10;
    struct pcpu_chunk *chunk;
    const char *err;
    int slot, off, new_alloc;
    unsigned long flags;

    if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
        WARN(true, "illegal size (%zu) or align (%zu) for "
             "percpu allocation\n", size, align);
        return NULL;
    }

    mutex_lock(&pcpu_alloc_mutex);
    spin_lock_irqsave(&pcpu_lock, flags);

    /* serve reserved allocations from the reserved chunk if available */
    if (reserved && pcpu_reserved_chunk) {
        chunk = pcpu_reserved_chunk;

        if (size > chunk->contig_hint) {
            err = "alloc from reserved chunk failed";
            goto fail_unlock;
        }

        while ((new_alloc = pcpu_need_to_extend(chunk))) {
            spin_unlock_irqrestore(&pcpu_lock, flags);
            if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
                err = "failed to extend area map of reserved chunk";
                goto fail_unlock_mutex;
            }
            spin_lock_irqsave(&pcpu_lock, flags);
        }

        off = pcpu_alloc_area(chunk, size, align);
        if (off >= 0)
            goto area_found;

        err = "alloc from reserved chunk failed";
        goto fail_unlock;
    }

restart:
    /* search through normal chunks */
    for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
        list_for_each_entry(chunk, &pcpu_slot[slot], list) {
            if (size > chunk->contig_hint)
                continue;

            new_alloc = pcpu_need_to_extend(chunk);
            if (new_alloc) {
                spin_unlock_irqrestore(&pcpu_lock, flags);
                if (pcpu_extend_area_map(chunk,
                             new_alloc) < 0) {
                    err = "failed to extend area map";
                    goto fail_unlock_mutex;
                }
                spin_lock_irqsave(&pcpu_lock, flags);
                /*
                 * pcpu_lock has been dropped, need to
                 * restart pcpu_slot list walking.
                 */
                goto restart;
            }

            off = pcpu_alloc_area(chunk, size, align);
            if (off >= 0)
                goto area_found;
        }
    }

    /* hmmm... no space left, create a new chunk */
    spin_unlock_irqrestore(&pcpu_lock, flags);

    chunk = pcpu_create_chunk();
    if (!chunk) {
        err = "failed to allocate new chunk";
        goto fail_unlock_mutex;
    }

    spin_lock_irqsave(&pcpu_lock, flags);
    pcpu_chunk_relocate(chunk, -1);
    goto restart;

area_found:
    spin_unlock_irqrestore(&pcpu_lock, flags);

    /* populate, map and clear the area */
    if (pcpu_populate_chunk(chunk, off, size)) {
        spin_lock_irqsave(&pcpu_lock, flags);
        pcpu_free_area(chunk, off);
        err = "failed to populate";
        goto fail_unlock;
    }

    mutex_unlock(&pcpu_alloc_mutex);

    /* return address relative to base address */
    return __addr_to_pcpu_ptr(chunk->base_addr + off);

fail_unlock:
    spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
    mutex_unlock(&pcpu_alloc_mutex);
    if (warn_limit) {
        pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
               "%s\n", size, align, err);
        dump_stack();
        if (!--warn_limit)
            pr_info("PERCPU: limit reached, disable warning\n");
    }
    return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.
 * Might sleep. Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
    return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from the reserved percpu area if the arch has set it up; otherwise,
 * the allocation is served from the same dynamic area. Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
    return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
    LIST_HEAD(todo);
    struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
    struct pcpu_chunk *chunk, *next;

    mutex_lock(&pcpu_alloc_mutex);
    spin_lock_irq(&pcpu_lock);

    list_for_each_entry_safe(chunk, next, head, list) {
        WARN_ON(chunk->immutable);

        /* spare the first one */
        if (chunk == list_first_entry(head, struct pcpu_chunk, list))
            continue;

        list_move(&chunk->list, &todo);
    }

    spin_unlock_irq(&pcpu_lock);

    list_for_each_entry_safe(chunk, next, &todo, list) {
        pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
        pcpu_destroy_chunk(chunk);
    }

    mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
    void *addr;
    struct pcpu_chunk *chunk;
    unsigned long flags;
    int off;

    if (!ptr)
        return;

    addr = __pcpu_ptr_to_addr(ptr);

    spin_lock_irqsave(&pcpu_lock, flags);

    chunk = pcpu_chunk_addr_search(addr);
    off = addr - chunk->base_addr;

    pcpu_free_area(chunk, off);

    /* if there is more than one fully free chunk, wake up grim reaper */
    if (chunk->free_size == pcpu_unit_size) {
        struct pcpu_chunk *pos;

        list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
            if (pos != chunk) {
                schedule_work(&pcpu_reclaim_work);
                break;
            }
    }

    spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to the in-kernel static percpu area.
 * Module static percpu areas are not considered. For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
#ifdef CONFIG_SMP
    const size_t static_size = __per_cpu_end - __per_cpu_start;
    void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
    unsigned int cpu;

    for_each_possible_cpu(cpu) {
        void *start = per_cpu_ptr(base, cpu);

        if ((void *)addr >= start && (void *)addr < start + static_size)
            return true;
    }
#endif
    /* on UP, can't distinguish from other static vars, always false */
    return false;
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is a dereferenceable address obtained via one of
 * the percpu access macros, this function translates it into its
 * physical address. The caller is responsible for ensuring @addr
 * stays valid until this function finishes.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
    void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
    bool in_first_chunk = false;
    unsigned long first_start, first_end;
    unsigned int cpu;

    /*
     * The following test on first_start/end isn't strictly
     * necessary but will speed up lookups of addresses which
     * aren't in the first chunk.
     */
    first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
    first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
                    pcpu_unit_pages);
    if ((unsigned long)addr >= first_start &&
        (unsigned long)addr < first_end) {
        for_each_possible_cpu(cpu) {
            void *start = per_cpu_ptr(base, cpu);

            if (addr >= start && addr < start + pcpu_unit_size) {
                in_first_chunk = true;
                break;
            }
        }
    }

    if (in_first_chunk) {
        if (!is_vmalloc_addr(addr))
            return __pa(addr);
        else
            return page_to_phys(vmalloc_to_page(addr));
    } else
        return page_to_phys(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units. The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS. It's the caller's responsibility to initialize the
 * cpu_map pointers of the other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
                              int nr_units)
{
    struct pcpu_alloc_info *ai;
    size_t base_size, ai_size;
    void *ptr;
    int unit;

    base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
              __alignof__(ai->groups[0].cpu_map[0]));
    ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

    ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
    if (!ptr)
        return NULL;
    ai = ptr;
    ptr += base_size;

    ai->groups[0].cpu_map = ptr;

    for (unit = 0; unit < nr_units; unit++)
        ai->groups[0].cpu_map[unit] = NR_CPUS;

    ai->nr_groups = nr_groups;
    ai->__ai_size = PFN_ALIGN(ai_size);

    return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
    free_bootmem(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
                 const struct pcpu_alloc_info *ai)
{
    int group_width = 1, cpu_width = 1, width;
    char empty_str[] = "--------";
    int alloc = 0, alloc_end = 0;
    int group, v;
    int upa, apl; /* units per alloc, allocs per line */

    v = ai->nr_groups;
    while (v /= 10)
        group_width++;

    v = num_possible_cpus();
    while (v /= 10)
        cpu_width++;
    empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

    upa = ai->alloc_size / ai->unit_size;
    width = upa * (cpu_width + 1) + group_width + 3;
    apl = rounddown_pow_of_two(max(60 / width, 1));

    printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
           lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
           ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

    for (group = 0; group < ai->nr_groups; group++) {
        const struct pcpu_group_info *gi = &ai->groups[group];
        int unit = 0, unit_end = 0;

        BUG_ON(gi->nr_units % upa);
        for (alloc_end += gi->nr_units / upa;
             alloc < alloc_end; alloc++) {
            if (!(alloc % apl)) {
                printk("\n");
                printk("%spcpu-alloc: ", lvl);
            }
            printk("[%0*d] ", group_width, group);

            for (unit_end += upa; unit < unit_end; unit++)
                if (gi->cpu_map[unit] != NR_CPUS)
                    printk("%0*d ", cpu_width,
                           gi->cpu_map[unit]);
                else
                    printk("%s ", empty_str);
        }
    }
    printk("\n");
}

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area. This function is to be called from the arch percpu
 * area setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk. This reserves
 * that part of the first chunk so that it's available only through
 * reserved percpu allocation. This is primarily used to serve module
 * percpu static areas on architectures where the addressing model
 * has limited offset range for symbol relocations to guarantee
 * module percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk. The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always a multiple of
 * @ai->atom_size. This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe the virtual memory layout
 * of percpu areas. Units which should be colocated are put into the
 * same group. Dynamic VM areas will be allocated according to these
 * groupings. If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area. They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other. The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                  void *base_addr)
{
    static char cpus_buf[4096] __initdata;
    static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
    static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
    size_t dyn_size = ai->dyn_size;
    size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
    struct pcpu_chunk *schunk, *dchunk = NULL;
    unsigned long *group_offsets;
    size_t *group_sizes;
    unsigned long *unit_off;
    unsigned int cpu;
    int *unit_map;
    int group, unit, i;

    cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);

#define PCPU_SETUP_BUG_ON(cond) do { \
    if (unlikely(cond)) { \
        pr_emerg("PERCPU: failed to initialize, %s", #cond); \
        pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
        pcpu_dump_alloc_info(KERN_EMERG, ai); \
        BUG(); \
    } \
} while (0)

    /* sanity checks */
    PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
    PCPU_SETUP_BUG_ON(!ai->static_size);
    PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
#endif
    PCPU_SETUP_BUG_ON(!base_addr);
    PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
    PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
    PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
    PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
    PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
    PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

    /* process group information and build config tables accordingly */
    group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
    group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
    unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
    unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));

    for (cpu = 0; cpu < nr_cpu_ids; cpu++)
        unit_map[cpu] = UINT_MAX;
    pcpu_first_unit_cpu = NR_CPUS;

    for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
        const struct pcpu_group_info *gi = &ai->groups[group];

        group_offsets[group] = gi->base_offset;
        group_sizes[group] = gi->nr_units * ai->unit_size;

        for (i = 0; i < gi->nr_units; i++) {
            cpu = gi->cpu_map[i];
            if (cpu == NR_CPUS)
                continue;

            PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
            PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
            PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

            unit_map[cpu] = unit + i;
            unit_off[cpu] = gi->base_offset + i * ai->unit_size;

            if (pcpu_first_unit_cpu == NR_CPUS)
                pcpu_first_unit_cpu = cpu;
            pcpu_last_unit_cpu = cpu;
        }
    }
    pcpu_nr_units = unit;

    for_each_possible_cpu(cpu)
        PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

    /* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
    pcpu_dump_alloc_info(KERN_DEBUG, ai);

    pcpu_nr_groups = ai->nr_groups;
    pcpu_group_offsets = group_offsets;
    pcpu_group_sizes = group_sizes;
    pcpu_unit_map = unit_map;
    pcpu_unit_offsets = unit_off;

    /* determine basic parameters */
    pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
    pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
    pcpu_atom_size = ai->atom_size;
    pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
        BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

    /*
     * Allocate chunk slots. The additional last slot is for
     * empty chunks.
     */
    pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
    pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
    for (i = 0; i < pcpu_nr_slots; i++)
        INIT_LIST_HEAD(&pcpu_slot[i]);

    /*
     * Initialize static chunk. If reserved_size is zero, the
     * static chunk covers static area + dynamic allocation area
     * in the first chunk. If reserved_size is not zero, it
     * covers static area + reserved area (mostly used for module
     * static percpu allocation).
     */
    schunk = alloc_bootmem(pcpu_chunk_struct_size);
    INIT_LIST_HEAD(&schunk->list);
    schunk->base_addr = base_addr;
    schunk->map = smap;
    schunk->map_alloc = ARRAY_SIZE(smap);
    schunk->immutable = true;
    bitmap_fill(schunk->populated, pcpu_unit_pages);

    if (ai->reserved_size) {
        schunk->free_size = ai->reserved_size;
        pcpu_reserved_chunk = schunk;
        pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
    } else {
        schunk->free_size = dyn_size;
        dyn_size = 0; /* dynamic area covered */
    }
    schunk->contig_hint = schunk->free_size;

    schunk->map[schunk->map_used++] = -ai->static_size;
    if (schunk->free_size)
        schunk->map[schunk->map_used++] = schunk->free_size;

    /* init dynamic chunk if necessary */
    if (dyn_size) {
        dchunk = alloc_bootmem(pcpu_chunk_struct_size);
        INIT_LIST_HEAD(&dchunk->list);
        dchunk->base_addr = base_addr;
        dchunk->map = dmap;
        dchunk->map_alloc = ARRAY_SIZE(dmap);
        dchunk->immutable = true;
        bitmap_fill(dchunk->populated, pcpu_unit_pages);

        dchunk->contig_hint = dchunk->free_size = dyn_size;
        dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
        dchunk->map[dchunk->map_used++] = dchunk->free_size;
    }

    /* link the first chunk in */
    pcpu_first_chunk = dchunk ?: schunk;
    pcpu_chunk_relocate(pcpu_first_chunk, -1);

    /* we're done */
    pcpu_base_addr = base_addr;
    return 0;
}
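
/*
 * Illustrative first chunk area maps resulting from the setup above
 * (sizes are placeholders). Without a reserved area a single static
 * chunk covers everything:
 *
 *	schunk->map[] = { -static_size, dyn_size };
 *
 * With a reserved area, schunk serves static + reserved allocations
 * and a second chunk overlays the same unit with its leading part
 * marked allocated, serving the dynamic area:
 *
 *	schunk->map[] = { -static_size, reserved_size };
 *	dchunk->map[] = { -(static_size + reserved_size), dyn_size };
 */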

#ifdef CONFIG_SMP

const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
    [PCPU_FC_AUTO] = "auto",
    [PCPU_FC_EMBED] = "embed",
    [PCPU_FC_PAGE] = "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
    if (0)
        /* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
    else if (!strcmp(str, "embed"))
        pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
    else if (!strcmp(str, "page"))
        pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
    else
        pr_warning("PERCPU: unknown allocator %s specified\n", str);

    return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);

/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if the arch config needs it or if the generic setup is
 * going to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
    !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group. The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned. On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
                size_t reserved_size, size_t dyn_size,
                size_t atom_size,
                pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
    static int group_map[NR_CPUS] __initdata;
    static int group_cnt[NR_CPUS] __initdata;
    const size_t static_size = __per_cpu_end - __per_cpu_start;
    int nr_groups = 1, nr_units = 0;
    size_t size_sum, min_unit_size, alloc_size;
    int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
    int last_allocs, group, unit;
    unsigned int cpu, tcpu;
    struct pcpu_alloc_info *ai;
    unsigned int *cpu_map;

    /* this function may be called multiple times */
    memset(group_map, 0, sizeof(group_map));
    memset(group_cnt, 0, sizeof(group_cnt));

    /* calculate size_sum and ensure dyn_size is enough for early alloc */
    size_sum = PFN_ALIGN(static_size + reserved_size +
                max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
    dyn_size = size_sum - static_size - reserved_size;

    /*
     * Determine min_unit_size, alloc_size and max_upa such that
     * alloc_size is a multiple of atom_size and is the smallest
     * which can accommodate 4k aligned segments which are equal to
     * or larger than min_unit_size.
     */
    min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

    alloc_size = roundup(min_unit_size, atom_size);
    upa = alloc_size / min_unit_size;
    while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
        upa--;
    max_upa = upa;
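
    /*
     * Worked example (illustrative numbers): if size_sum rounds up to
     * a 64k min_unit_size and atom_size is 2M, then alloc_size =
     * roundup(64k, 2M) = 2M and upa starts at 2M / 64k = 32; 2M / 32
     * = 64k is page aligned and divides evenly, so max_upa == 32.
     */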

    /* group cpus according to their proximity */
    for_each_possible_cpu(cpu) {
        group = 0;
    next_group:
        for_each_possible_cpu(tcpu) {
            if (cpu == tcpu)
                break;
            if (group_map[tcpu] == group && cpu_distance_fn &&
                (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
                 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
                group++;
                nr_groups = max(nr_groups, group + 1);
                goto next_group;
            }
        }
        group_map[cpu] = group;
        group_cnt[group]++;
    }

    /*
     * Expand unit size until address space usage goes over 75%
     * and then as much as possible without using more address
     * space.
     */
    last_allocs = INT_MAX;
    for (upa = max_upa; upa; upa--) {
        int allocs = 0, wasted = 0;

        if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
            continue;

        for (group = 0; group < nr_groups; group++) {
            int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
            allocs += this_allocs;
            wasted += this_allocs * upa - group_cnt[group];
        }

        /*
         * Don't accept if wastage is over 1/3. The
         * greater-than comparison ensures upa==1 always
         * passes the following check.
         */
        if (wasted > num_possible_cpus() / 3)
            continue;

        /* and then don't consume more memory */
        if (allocs > last_allocs)
            break;
        last_allocs = allocs;
        best_upa = upa;
    }
    upa = best_upa;

    /* allocate and fill alloc_info */
    for (group = 0; group < nr_groups; group++)
        nr_units += roundup(group_cnt[group], upa);

    ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
    if (!ai)
        return ERR_PTR(-ENOMEM);
    cpu_map = ai->groups[0].cpu_map;

    for (group = 0; group < nr_groups; group++) {
        ai->groups[group].cpu_map = cpu_map;
        cpu_map += roundup(group_cnt[group], upa);
    }

    ai->static_size = static_size;
    ai->reserved_size = reserved_size;
    ai->dyn_size = dyn_size;
    ai->unit_size = alloc_size / upa;
    ai->atom_size = atom_size;
    ai->alloc_size = alloc_size;

    for (group = 0, unit = 0; group_cnt[group]; group++) {
        struct pcpu_group_info *gi = &ai->groups[group];

        /*
         * Initialize base_offset as if all groups are located
         * back-to-back. The caller should update this to
         * reflect actual allocation.
         */
        gi->base_offset = unit * ai->unit_size;

        for_each_possible_cpu(cpu)
            if (group_map[cpu] == group)
                gi->cpu_map[gi->nr_units++] = cpu;
        gi->nr_units = roundup(gi->nr_units, upa);
        unit += gi->nr_units;
    }
    BUG_ON(unit != nr_units);

    return ai;
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1546
1547#if defined(BUILD_EMBED_FIRST_CHUNK)
1548/**
1549 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1550 * @reserved_size: the size of reserved percpu area in bytes
1551 * @dyn_size: minimum free size for dynamic allocation in bytes
1552 * @atom_size: allocation atom size
1553 * @cpu_distance_fn: callback to determine distance between cpus, optional
1554 * @alloc_fn: function to allocate percpu page
1555 * @free_fn: function to free percpu page
1556 *
1557 * This is a helper to ease setting up embedded first percpu chunk and
1558 * can be called where pcpu_setup_first_chunk() is expected.
1559 *
1560 * If this function is used to setup the first chunk, it is allocated
1561 * by calling @alloc_fn and used as-is without being mapped into
1562 * vmalloc area. Allocations are always whole multiples of @atom_size
1563 * aligned to @atom_size.
1564 *
1565 * This enables the first chunk to piggy back on the linear physical
1566 * mapping which often uses larger page size. Please note that this
1567 * can result in very sparse cpu->unit mapping on NUMA machines thus
1568 * requiring large vmalloc address space. Don't use this allocator if
1569 * vmalloc space is not orders of magnitude larger than distances
1570 * between node memory addresses (ie. 32bit NUMA machines).
1571 *
1572 * @dyn_size specifies the minimum dynamic area size.
1573 *
1574 * If the needed size is smaller than the minimum or specified unit
1575 * size, the leftover is returned using @free_fn.
1576 *
1577 * RETURNS:
1578 * 0 on success, -errno on failure.
1579 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                  size_t atom_size,
                  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
                  pcpu_fc_alloc_fn_t alloc_fn,
                  pcpu_fc_free_fn_t free_fn)
{
    void *base = (void *)ULONG_MAX;
    void **areas = NULL;
    struct pcpu_alloc_info *ai;
    size_t size_sum, areas_size, max_distance;
    int group, i, rc;

    ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
                   cpu_distance_fn);
    if (IS_ERR(ai))
        return PTR_ERR(ai);

    size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
    areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

    areas = alloc_bootmem_nopanic(areas_size);
    if (!areas) {
        rc = -ENOMEM;
        goto out_free;
    }

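    /*
     * Rough sketch of what the loop below produces for one group
     * (illustrative only, sizes not to scale; one line per unit):
     *
     *    areas[group]: [ static | reserved | dyn | freed via free_fn ]
     *                  [ static | reserved | dyn | freed via free_fn ]
     *                  [ whole unit freed - cpu_map slot is NR_CPUS  ]
     *
     * The kernel static image is copied from __per_cpu_load into each
     * in-use unit and the tail beyond size_sum is returned via
     * @free_fn; units with no cpu mapped to them are freed whole.
     */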
    /* allocate, copy and determine base address */
    for (group = 0; group < ai->nr_groups; group++) {
        struct pcpu_group_info *gi = &ai->groups[group];
        unsigned int cpu = NR_CPUS;
        void *ptr;

        for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
            cpu = gi->cpu_map[i];
        BUG_ON(cpu == NR_CPUS);

        /* allocate space for the whole group */
        ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
        if (!ptr) {
            rc = -ENOMEM;
            goto out_free_areas;
        }
        areas[group] = ptr;

        base = min(ptr, base);

        for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
            if (gi->cpu_map[i] == NR_CPUS) {
                /* unused unit, free whole */
                free_fn(ptr, ai->unit_size);
                continue;
            }
            /* copy and return the unused part */
            memcpy(ptr, __per_cpu_load, ai->static_size);
            free_fn(ptr + size_sum, ai->unit_size - size_sum);
        }
    }

    /* base address is now known, determine group base offsets */
    max_distance = 0;
    for (group = 0; group < ai->nr_groups; group++) {
        ai->groups[group].base_offset = areas[group] - base;
        max_distance = max_t(size_t, max_distance,
                     ai->groups[group].base_offset);
    }
    max_distance += ai->unit_size;
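    /*
     * Illustration with made-up numbers: if the bootmem allocator
     * placed two groups 1GiB apart, the farther group's base_offset
     * is 1GiB and, with a 1MiB unit_size, max_distance becomes
     * 1GiB + 1MiB - the span of vmalloc space that later dynamic
     * chunks must also be able to cover with a single allocation.
     */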

    /* warn if maximum distance is further than 75% of vmalloc space */
    if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
        pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
               "space 0x%lx\n", max_distance,
               (unsigned long)(VMALLOC_END - VMALLOC_START));
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
        /* and fail if we have fallback */
        rc = -EINVAL;
        goto out_free;
#endif
    }

    pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
        PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
        ai->dyn_size, ai->unit_size);

    rc = pcpu_setup_first_chunk(ai, base);
    goto out_free;

out_free_areas:
    for (group = 0; group < ai->nr_groups; group++)
        free_fn(areas[group],
            ai->groups[group].nr_units * ai->unit_size);
out_free:
    pcpu_free_alloc_info(ai);
    if (areas)
        free_bootmem(__pa(areas), areas_size);
    return rc;
}
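
/*
 * A minimal sketch of an @alloc_fn/@free_fn pair a caller might pass
 * in. The example_* names are hypothetical; the generic pair used by
 * the default setup_per_cpu_areas() further below is
 * pcpu_dfl_fc_alloc()/pcpu_dfl_fc_free(). This sketch ignores @cpu,
 * i.e. it has no NUMA affinity; a NUMA-aware caller would allocate
 * from the node that @cpu belongs to instead.
 *
 *    static void * __init example_fc_alloc(unsigned int cpu,
 *                                          size_t size, size_t align)
 *    {
 *        return __alloc_bootmem_nopanic(size, align,
 *                                       __pa(MAX_DMA_ADDRESS));
 *    }
 *
 *    static void __init example_fc_free(void *ptr, size_t size)
 *    {
 *        free_bootmem(__pa(ptr), size);
 *    }
 */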
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up a page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator. The static percpu area is allocated
 * page-by-page into the vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
                 pcpu_fc_alloc_fn_t alloc_fn,
                 pcpu_fc_free_fn_t free_fn,
                 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
    static struct vm_struct vm;
    struct pcpu_alloc_info *ai;
    char psize_str[16];
    int unit_pages;
    size_t pages_size;
    struct page **pages;
    int unit, i, j, rc;

    snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

    ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
    if (IS_ERR(ai))
        return PTR_ERR(ai);
    BUG_ON(ai->nr_groups != 1);
    BUG_ON(ai->groups[0].nr_units != num_possible_cpus());

    unit_pages = ai->unit_size >> PAGE_SHIFT;

    /* unaligned allocations can't be freed, round up to page size */
    pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
                   sizeof(pages[0]));
    pages = alloc_bootmem(pages_size);

    /* allocate pages */
    j = 0;
    for (unit = 0; unit < num_possible_cpus(); unit++)
        for (i = 0; i < unit_pages; i++) {
            unsigned int cpu = ai->groups[0].cpu_map[unit];
            void *ptr;

            ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
            if (!ptr) {
                pr_warning("PERCPU: failed to allocate %s page "
                       "for cpu%u\n", psize_str, cpu);
                goto enomem;
            }
            pages[j++] = virt_to_page(ptr);
        }

    /* allocate vm area, map the pages and copy static data */
    vm.flags = VM_ALLOC;
    vm.size = num_possible_cpus() * ai->unit_size;
    vm_area_register_early(&vm, PAGE_SIZE);

    for (unit = 0; unit < num_possible_cpus(); unit++) {
        unsigned long unit_addr =
            (unsigned long)vm.addr + unit * ai->unit_size;

        for (i = 0; i < unit_pages; i++)
            populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

        /* pte already populated, the following shouldn't fail */
        rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
                      unit_pages);
        if (rc < 0)
            panic("failed to map percpu area, err=%d\n", rc);

        /*
         * FIXME: Archs with virtual cache should flush local
         * cache for the linear mapping here - something
         * equivalent to flush_cache_vmap() on the local cpu.
         * flush_cache_vmap() can't be used as most supporting
         * data structures are not set up yet.
         */

        /* copy static data */
        memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
    }

    /* we're ready, commit */
    pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
        unit_pages, psize_str, vm.addr, ai->static_size,
        ai->reserved_size, ai->dyn_size);

    rc = pcpu_setup_first_chunk(ai, vm.addr);
    goto out_free_ar;

enomem:
    while (--j >= 0)
        free_fn(page_address(pages[j]), PAGE_SIZE);
    rc = -ENOMEM;
out_free_ar:
    free_bootmem(__pa(pages), pages_size);
    pcpu_free_alloc_info(ai);
    return rc;
}
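
/*
 * Illustrative usage (the example_* callbacks are hypothetical): an
 * arch selecting the page-based first chunk would call this from its
 * own setup_per_cpu_areas() roughly as
 *
 *    rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *                               example_fc_alloc, example_fc_free,
 *                               example_populate_pte);
 *    if (rc < 0)
 *        panic("Failed to initialize percpu areas.");
 *
 * where example_populate_pte() pre-allocates all page table levels
 * covering the passed address so that the __pcpu_map_pages() call
 * above can't fail for lack of memory.
 */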
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup. This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location. As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because the percpu area can
 * piggyback on the physical linear memory mapping which uses large
 * page mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
                       size_t align)
{
    return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
    free_bootmem(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
    unsigned long delta;
    unsigned int cpu;
    int rc;

    /*
     * Always reserve area for module percpu variables. That's
     * what the legacy allocator did.
     */
    rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
                    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
                    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
    if (rc < 0)
        panic("Failed to initialize percpu areas.");

    delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
    for_each_possible_cpu(cpu)
        __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
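
/*
 * With __per_cpu_offset[] filled in, access to a static percpu
 * variable for a given cpu conceptually reduces to (simplified sketch
 * of what the per_cpu() accessor boils down to):
 *
 *    ptr = (void *)&var + __per_cpu_offset[cpu];
 */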
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else /* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses the km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
    const size_t unit_size =
        roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
                     PERCPU_DYNAMIC_RESERVE));
    struct pcpu_alloc_info *ai;
    void *fc;

    ai = pcpu_alloc_alloc_info(1, 1);
    fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
    if (!ai || !fc)
        panic("Failed to allocate memory for percpu areas.");

    ai->dyn_size = unit_size;
    ai->unit_size = unit_size;
    ai->atom_size = unit_size;
    ai->alloc_size = unit_size;
    ai->groups[0].nr_units = 1;
    ai->groups[0].cpu_map[0] = 0;

    if (pcpu_setup_first_chunk(ai, fc) < 0)
        panic("Failed to initialize percpu areas.");
}
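
/*
 * Example with one possible configuration (the actual constants are
 * arch/config dependent): given a PCPU_MIN_UNIT_SIZE of 32k and a
 * PERCPU_DYNAMIC_RESERVE of 20k, the max is 32k, already a power of
 * two, so the entire UP percpu area ends up being a single 32k unit.
 */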

#endif /* CONFIG_SMP */

/*
 * First and reserved chunks are initialized with temporary allocation
 * maps in initdata so that they can be used before slab is online.
 * This function is called after slab is brought up and replaces those
 * with properly allocated maps.
 */
void __init percpu_init_late(void)
{
    struct pcpu_chunk *target_chunks[] =
        { pcpu_first_chunk, pcpu_reserved_chunk, NULL };
    struct pcpu_chunk *chunk;
    unsigned long flags;
    int i;

    for (i = 0; (chunk = target_chunks[i]); i++) {
        int *map;
        const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);

        BUILD_BUG_ON(size > PAGE_SIZE);

        map = pcpu_mem_alloc(size);
        BUG_ON(!map);

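        /*
         * Swap the map under pcpu_lock so that a concurrent
         * allocation can't be walking or extending chunk->map
         * while it is replaced.
         */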
        spin_lock_irqsave(&pcpu_lock, flags);
        memcpy(map, chunk->map, size);
        chunk->map = map;
        spin_unlock_irqrestore(&pcpu_lock, flags);
    }
}