/*
 * kernel/early_res.c
 *
 * early_res, could be used to replace bootmem
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/early_res.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>

/*
 * Early reserved memory areas.
 */
/*
 * Need to make sure this one is big enough before
 * find_fw_memmap_area() can be used.
 */
#define MAX_EARLY_RES_X 32

/* One early reservation: the half-open physical range [start, end). */
struct early_res {
    u64 start, end;
    char name[15];      /* label; writers strncpy() at most 14 bytes */
    char overlap_ok;    /* non-zero: later reservations may overlap this one */
};
/* Static bootstrap table, used until the array is doubled dynamically. */
static struct early_res early_res_x[MAX_EARLY_RES_X] __initdata;

/* Current capacity of the table (doubled on demand). */
static int max_early_res __initdata = MAX_EARLY_RES_X;
/* Points at early_res_x until __check_and_double_early_res() reallocates. */
static struct early_res *early_res __initdata = &early_res_x[0];
/* Number of live entries. */
static int early_res_count __initdata;

33static int __init find_overlapped_early(u64 start, u64 end)
34{
35    int i;
36    struct early_res *r;
37
38    for (i = 0; i < max_early_res && early_res[i].end; i++) {
39        r = &early_res[i];
40        if (end > r->start && start < r->end)
41            break;
42    }
43
44    return i;
45}
46
47/*
48 * Drop the i-th range from the early reservation map,
49 * by copying any higher ranges down one over it, and
50 * clearing what had been the last slot.
51 */
52static void __init drop_range(int i)
53{
54    int j;
55
56    for (j = i + 1; j < max_early_res && early_res[j].end; j++)
57        ;
58
59    memmove(&early_res[i], &early_res[i + 1],
60           (j - 1 - i) * sizeof(struct early_res));
61
62    early_res[j - 1].end = 0;
63    early_res_count--;
64}
65
66static void __init drop_range_partial(int i, u64 start, u64 end)
67{
68    u64 common_start, common_end;
69    u64 old_start, old_end;
70
71    old_start = early_res[i].start;
72    old_end = early_res[i].end;
73    common_start = max(old_start, start);
74    common_end = min(old_end, end);
75
76    /* no overlap ? */
77    if (common_start >= common_end)
78        return;
79
80    if (old_start < common_start) {
81        /* make head segment */
82        early_res[i].end = common_start;
83        if (old_end > common_end) {
84            char name[15];
85
86            /*
87             * Save a local copy of the name, since the
88             * early_res array could get resized inside
89             * reserve_early_without_check() ->
90             * __check_and_double_early_res(), which would
91             * make the current name pointer invalid.
92             */
93            strncpy(name, early_res[i].name,
94                     sizeof(early_res[i].name) - 1);
95            /* add another for left over on tail */
96            reserve_early_without_check(common_end, old_end, name);
97        }
98        return;
99    } else {
100        if (old_end > common_end) {
101            /* reuse the entry for tail left */
102            early_res[i].start = common_end;
103            return;
104        }
105        /* all covered */
106        drop_range(i);
107    }
108}
109
110/*
111 * Split any existing ranges that:
112 * 1) are marked 'overlap_ok', and
113 * 2) overlap with the stated range [start, end)
114 * into whatever portion (if any) of the existing range is entirely
115 * below or entirely above the stated range. Drop the portion
116 * of the existing range that overlaps with the stated range,
117 * which will allow the caller of this routine to then add that
118 * stated range without conflicting with any existing range.
119 */
120static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
121{
122    int i;
123    struct early_res *r;
124    u64 lower_start, lower_end;
125    u64 upper_start, upper_end;
126    char name[15];
127
128    for (i = 0; i < max_early_res && early_res[i].end; i++) {
129        r = &early_res[i];
130
131        /* Continue past non-overlapping ranges */
132        if (end <= r->start || start >= r->end)
133            continue;
134
135        /*
136         * Leave non-ok overlaps as is; let caller
137         * panic "Overlapping early reservations"
138         * when it hits this overlap.
139         */
140        if (!r->overlap_ok)
141            return;
142
143        /*
144         * We have an ok overlap. We will drop it from the early
145         * reservation map, and add back in any non-overlapping
146         * portions (lower or upper) as separate, overlap_ok,
147         * non-overlapping ranges.
148         */
149
150        /* 1. Note any non-overlapping (lower or upper) ranges. */
151        strncpy(name, r->name, sizeof(name) - 1);
152
153        lower_start = lower_end = 0;
154        upper_start = upper_end = 0;
155        if (r->start < start) {
156            lower_start = r->start;
157            lower_end = start;
158        }
159        if (r->end > end) {
160            upper_start = end;
161            upper_end = r->end;
162        }
163
164        /* 2. Drop the original ok overlapping range */
165        drop_range(i);
166
167        i--; /* resume for-loop on copied down entry */
168
169        /* 3. Add back in any non-overlapping ranges. */
170        if (lower_end)
171            reserve_early_overlap_ok(lower_start, lower_end, name);
172        if (upper_end)
173            reserve_early_overlap_ok(upper_start, upper_end, name);
174    }
175}
176
177static void __init __reserve_early(u64 start, u64 end, char *name,
178                        int overlap_ok)
179{
180    int i;
181    struct early_res *r;
182
183    i = find_overlapped_early(start, end);
184    if (i >= max_early_res)
185        panic("Too many early reservations");
186    r = &early_res[i];
187    if (r->end)
188        panic("Overlapping early reservations "
189              "%llx-%llx %s to %llx-%llx %s\n",
190              start, end - 1, name ? name : "", r->start,
191              r->end - 1, r->name);
192    r->start = start;
193    r->end = end;
194    r->overlap_ok = overlap_ok;
195    if (name)
196        strncpy(r->name, name, sizeof(r->name) - 1);
197    early_res_count++;
198}
199
/*
 * A few early reservations come here.
 *
 * The 'overlap_ok' in the name of this routine does -not- mean it
 * is ok for these reservations to overlap an earlier reservation.
 * Rather it means that it is ok for subsequent reservations to
 * overlap this one.
 *
 * Use this entry point to reserve early ranges when you are doing
 * so out of "Paranoia", reserving perhaps more memory than you need,
 * just in case, and don't mind a subsequent overlapping reservation
 * that is known to be needed.
 *
 * The drop_overlaps_that_are_ok() call here isn't really needed.
 * It would be needed if we had two colliding 'overlap_ok'
 * reservations, so that the second such would not panic on the
 * overlap with the first. We don't have any such as of this
 * writing, but might as well tolerate such if it happens in
 * the future.
 */
void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
{
    drop_overlaps_that_are_ok(start, end);
    __reserve_early(start, end, name, 1);
}

/*
 * Grow the early_res array when it is nearly full (fewer than
 * max(max_early_res/8, 2) free slots left): allocate a doubled copy
 * from firmware-reported free memory, avoiding [ex_start, ex_end),
 * and switch 'early_res' over to it.  The new array's own memory is
 * recorded in slot 0 so it is never handed out again.
 */
static void __init __check_and_double_early_res(u64 ex_start, u64 ex_end)
{
    u64 start, end, size, mem;
    struct early_res *new;

    /* do we have enough slots left ? */
    if ((max_early_res - early_res_count) > max(max_early_res/8, 2))
        return;

    /* double it */
    mem = -1ULL;
    size = sizeof(struct early_res) * max_early_res * 2;
    /*
     * First try below the excluded range, searching after the current
     * dynamically allocated array (slot 0's end) when there is one.
     */
    if (early_res == early_res_x)
        start = 0;
    else
        start = early_res[0].end;
    end = ex_start;
    if (start + size < end)
        mem = find_fw_memmap_area(start, end, size,
                     sizeof(struct early_res));
    /* Otherwise try above the excluded range. */
    if (mem == -1ULL) {
        start = ex_end;
        end = get_max_mapped();
        if (start + size < end)
            mem = find_fw_memmap_area(start, end, size,
                         sizeof(struct early_res));
    }
    if (mem == -1ULL)
        panic("can not find more space for early_res array");

    new = __va(mem);
    /* save the first one for own */
    new[0].start = mem;
    new[0].end = mem + size;
    new[0].overlap_ok = 0;
    /* copy old to new */
    if (early_res == early_res_x) {
        /* old entries shift up by one to make room for slot 0 */
        memcpy(&new[1], &early_res[0],
             sizeof(struct early_res) * max_early_res);
        memset(&new[max_early_res+1], 0,
             sizeof(struct early_res) * (max_early_res - 1));
        /* account for the new self-reservation in slot 0 */
        early_res_count++;
    } else {
        /* old slot 0 (the old array's self-reservation) is dropped */
        memcpy(&new[1], &early_res[1],
             sizeof(struct early_res) * (max_early_res - 1));
        memset(&new[max_early_res], 0,
             sizeof(struct early_res) * max_early_res);
    }
    memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
    early_res = new;
    max_early_res *= 2;
    printk(KERN_DEBUG "early_res array is doubled to %d at [%llx - %llx]\n",
        max_early_res, mem, mem + size - 1);
}

281/*
282 * Most early reservations come here.
283 *
284 * We first have drop_overlaps_that_are_ok() drop any pre-existing
285 * 'overlap_ok' ranges, so that we can then reserve this memory
286 * range without risk of panic'ing on an overlapping overlap_ok
287 * early reservation.
288 */
289void __init reserve_early(u64 start, u64 end, char *name)
290{
291    if (start >= end)
292        return;
293
294    __check_and_double_early_res(start, end);
295
296    drop_overlaps_that_are_ok(start, end);
297    __reserve_early(start, end, name, 0);
298}
299
300void __init reserve_early_without_check(u64 start, u64 end, char *name)
301{
302    struct early_res *r;
303
304    if (start >= end)
305        return;
306
307    __check_and_double_early_res(start, end);
308
309    r = &early_res[early_res_count];
310
311    r->start = start;
312    r->end = end;
313    r->overlap_ok = 0;
314    if (name)
315        strncpy(r->name, name, sizeof(r->name) - 1);
316    early_res_count++;
317}
318
319void __init free_early(u64 start, u64 end)
320{
321    struct early_res *r;
322    int i;
323
324    kmemleak_free_part(__va(start), end - start);
325
326    i = find_overlapped_early(start, end);
327    r = &early_res[i];
328    if (i >= max_early_res || r->end != end || r->start != start)
329        panic("free_early on not reserved area: %llx-%llx!",
330             start, end - 1);
331
332    drop_range(i);
333}
334
335void __init free_early_partial(u64 start, u64 end)
336{
337    struct early_res *r;
338    int i;
339
340    kmemleak_free_part(__va(start), end - start);
341
342    if (start == end)
343        return;
344
345    if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end))
346        return;
347
348try_next:
349    i = find_overlapped_early(start, end);
350    if (i >= max_early_res)
351        return;
352
353    r = &early_res[i];
354    /* hole ? */
355    if (r->end >= end && r->start <= start) {
356        drop_range_partial(i, start, end);
357        return;
358    }
359
360    drop_range_partial(i, start, end);
361    goto try_next;
362}
363
#ifdef CONFIG_NO_BOOTMEM
365static void __init subtract_early_res(struct range *range, int az)
366{
367    int i, count;
368    u64 final_start, final_end;
369    int idx = 0;
370
371    count = 0;
372    for (i = 0; i < max_early_res && early_res[i].end; i++)
373        count++;
374
375    /* need to skip first one ?*/
376    if (early_res != early_res_x)
377        idx = 1;
378
379#define DEBUG_PRINT_EARLY_RES 1
380
381#if DEBUG_PRINT_EARLY_RES
382    printk(KERN_INFO "Subtract (%d early reservations)\n", count);
383#endif
384    for (i = idx; i < count; i++) {
385        struct early_res *r = &early_res[i];
386#if DEBUG_PRINT_EARLY_RES
387        printk(KERN_INFO " #%d [%010llx - %010llx] %15s\n", i,
388            r->start, r->end, r->name);
389#endif
390        final_start = PFN_DOWN(r->start);
391        final_end = PFN_UP(r->end);
392        if (final_start >= final_end)
393            continue;
394        subtract_range(range, az, final_start, final_end);
395    }
396
397}
398
/*
 * Build an array of free memory ranges for node 'nodeid': active
 * ranges from early_node_map minus all early reservations.  Stores
 * the array (allocated out of firmware-reported free memory) in
 * *rangep and returns the number of ranges.  When called with
 * nodeid == MAX_NUMNODES the early_res table is also retired.
 */
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
    int i, count;
    u64 start = 0, end;
    u64 size;
    u64 mem;
    struct range *range;
    int nr_range;

    /* count the in-use reservations */
    count = 0;
    for (i = 0; i < max_early_res && early_res[i].end; i++)
        count++;

    /* each subtracted reservation can split a range in two */
    count *= 2;

    size = sizeof(struct range) * count;
    end = get_max_mapped();
#ifdef MAX_DMA32_PFN
    /* prefer memory above 4G for the array, when mapped */
    if (end > (MAX_DMA32_PFN << PAGE_SHIFT))
        start = MAX_DMA32_PFN << PAGE_SHIFT;
#endif
    mem = find_fw_memmap_area(start, end, size, sizeof(struct range));
    if (mem == -1ULL)
        panic("can not find more space for range free");

    range = __va(mem);
    /* use early_node_map[] and early_res to get range array at first */
    memset(range, 0, size);
    nr_range = 0;

    /* need to go over early_node_map to find out good range for node */
    nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
#ifdef CONFIG_X86_32
    subtract_range(range, count, max_low_pfn, -1ULL);
#endif
    subtract_early_res(range, count);
    nr_range = clean_sort_range(range, count);

    /* need to clear it ? */
    if (nodeid == MAX_NUMNODES) {
        memset(&early_res[0], 0,
             sizeof(struct early_res) * max_early_res);
        early_res = NULL;
        max_early_res = 0;
    }

    *rangep = range;
    return nr_range;
}
#else
/*
 * Hand all remaining early reservations over to bootmem, clipped to
 * [start, end), then empty and retire the early_res table.
 */
void __init early_res_to_bootmem(u64 start, u64 end)
{
    int i, count;
    u64 final_start, final_end;
    int idx = 0;

    /* count the in-use reservations */
    count = 0;
    for (i = 0; i < max_early_res && early_res[i].end; i++)
        count++;

    /* need to skip first one ?*/
    /* (slot 0 of a dynamically grown array is the array itself) */
    if (early_res != early_res_x)
        idx = 1;

    printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n",
             count - idx, max_early_res, start, end);
    for (i = idx; i < count; i++) {
        struct early_res *r = &early_res[i];
        printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
            r->start, r->end, r->name);
        /* clip the reservation to [start, end) */
        final_start = max(start, r->start);
        final_end = min(end, r->end);
        if (final_start >= final_end) {
            printk(KERN_CONT "\n");
            continue;
        }
        printk(KERN_CONT " ==> [%010llx - %010llx]\n",
            final_start, final_end);
        reserve_bootmem_generic(final_start, final_end - final_start,
                BOOTMEM_DEFAULT);
    }
    /* clear them */
    memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
    early_res = NULL;
    max_early_res = 0;
    early_res_count = 0;
}
#endif

488/* Check for already reserved areas */
489static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
490{
491    int i;
492    u64 addr = *addrp;
493    int changed = 0;
494    struct early_res *r;
495again:
496    i = find_overlapped_early(addr, addr + size);
497    r = &early_res[i];
498    if (i < max_early_res && r->end) {
499        *addrp = addr = round_up(r->end, align);
500        changed = 1;
501        goto again;
502    }
503    return changed;
504}
505
/* Check for already reserved areas */
/*
 * Shrink or move [*addrp, *addrp + *sizep) so that it avoids all
 * early reservations, restarting the scan whenever either end
 * changes.  Returns non-zero when the range was adjusted, 0 when it
 * was already clear (or lies entirely inside one reservation).
 */
static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
{
    int i;
    u64 addr = *addrp, last;
    u64 size = *sizep;
    int changed = 0;
again:
    last = addr + size;
    for (i = 0; i < max_early_res && early_res[i].end; i++) {
        struct early_res *r = &early_res[i];
        /* a reservation begins inside the range: clip the tail */
        if (last > r->start && addr < r->start) {
            size = r->start - addr;
            changed = 1;
            goto again;
        }
        /* a reservation ends inside the range: move the head past it */
        if (last > r->end && addr < r->end) {
            addr = round_up(r->end, align);
            size = last - addr;
            changed = 1;
            goto again;
        }
        /* range fully inside a reservation: nothing usable */
        if (last <= r->end && addr >= r->start) {
            /*
             * NOTE(review): incrementing *sizep for the
             * fully-covered case looks odd — presumably a
             * convention with the caller's retry loop in
             * find_early_area_size(); confirm before relying
             * on the value left in *sizep here.
             */
            (*sizep)++;
            return 0;
        }
    }
    /* publish the adjusted range only if something changed */
    if (changed) {
        *addrp = addr;
        *sizep = size;
    }
    return changed;
}

540/*
541 * Find a free area with specified alignment in a specific range.
542 * only with the area.between start to end is active range from early_node_map
543 * so they are good as RAM
544 */
545u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
546             u64 size, u64 align)
547{
548    u64 addr, last;
549
550    addr = round_up(ei_start, align);
551    if (addr < start)
552        addr = round_up(start, align);
553    if (addr >= ei_last)
554        goto out;
555    while (bad_addr(&addr, size, align) && addr+size <= ei_last)
556        ;
557    last = addr + size;
558    if (last > ei_last)
559        goto out;
560    if (last > end)
561        goto out;
562
563    return addr;
564
565out:
566    return -1ULL;
567}
568
569u64 __init find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
570             u64 *sizep, u64 align)
571{
572    u64 addr, last;
573
574    addr = round_up(ei_start, align);
575    if (addr < start)
576        addr = round_up(start, align);
577    if (addr >= ei_last)
578        goto out;
579    *sizep = ei_last - addr;
580    while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
581        ;
582    last = addr + *sizep;
583    if (last > ei_last)
584        goto out;
585
586    return addr;
587
588out:
589    return -1ULL;
590}
591
