/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <asm/io.h>


struct resource ioport_resource = {
    .name = "PCI IO",
    .start = 0,
    .end = IO_SPACE_LIMIT,
    .flags = IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
    .name = "PCI mem",
    .start = 0,
    .end = -1,
    .flags = IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
    resource_size_t min, max, align;
    resource_size_t (*alignf)(void *, const struct resource *,
            resource_size_t, resource_size_t);
    void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);
/*
 * For memory hotplug, there is no way to free resource entries allocated
 * by boot memory after the system is up, so to reuse such an entry we
 * need to remember it.
 */
static struct resource *bootmem_resource_free;
static DEFINE_SPINLOCK(bootmem_resource_lock);

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
    struct resource *p = v;
    (*pos)++;
    if (p->child)
        return p->child;
    while (!p->sibling && p->parent)
        p = p->parent;
    return p->sibling;
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
    __acquires(resource_lock)
{
    struct resource *p = m->private;
    loff_t l = 0;
    read_lock(&resource_lock);
    for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
        ;
    return p;
}

static void r_stop(struct seq_file *m, void *v)
    __releases(resource_lock)
{
    read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
    struct resource *root = m->private;
    struct resource *r = v, *p;
    int width = root->end < 0x10000 ? 4 : 8;
    int depth;

    for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
        if (p->parent == root)
            break;
    seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
            depth * 2, "",
            width, (unsigned long long) r->start,
            width, (unsigned long long) r->end,
            r->name ? r->name : "<BAD>");
    return 0;
}

static const struct seq_operations resource_op = {
    .start = r_start,
    .next = r_next,
    .stop = r_stop,
    .show = r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
    int res = seq_open(file, &resource_op);
    if (!res) {
        struct seq_file *m = file->private_data;
        m->private = &ioport_resource;
    }
    return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
    int res = seq_open(file, &resource_op);
    if (!res) {
        struct seq_file *m = file->private_data;
        m->private = &iomem_resource;
    }
    return res;
}

static const struct file_operations proc_ioports_operations = {
    .open = ioports_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = seq_release,
};

static const struct file_operations proc_iomem_operations = {
    .open = iomem_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = seq_release,
};

static int __init ioresources_init(void)
{
    proc_create("ioports", 0, NULL, &proc_ioports_operations);
    proc_create("iomem", 0, NULL, &proc_iomem_operations);
    return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
    if (!res)
        return;

    if (!PageSlab(virt_to_head_page(res))) {
        spin_lock(&bootmem_resource_lock);
        res->sibling = bootmem_resource_free;
        bootmem_resource_free = res;
        spin_unlock(&bootmem_resource_lock);
    } else {
        kfree(res);
    }
}

static struct resource *alloc_resource(gfp_t flags)
{
    struct resource *res = NULL;

    spin_lock(&bootmem_resource_lock);
    if (bootmem_resource_free) {
        res = bootmem_resource_free;
        bootmem_resource_free = res->sibling;
    }
    spin_unlock(&bootmem_resource_lock);

    if (res)
        memset(res, 0, sizeof(struct resource));
    else
        res = kzalloc(sizeof(struct resource), flags);

    return res;
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
    resource_size_t start = new->start;
    resource_size_t end = new->end;
    struct resource *tmp, **p;

    if (end < start)
        return root;
    if (start < root->start)
        return root;
    if (end > root->end)
        return root;
    p = &root->child;
    for (;;) {
        tmp = *p;
        if (!tmp || tmp->start > end) {
            new->sibling = tmp;
            *p = new;
            new->parent = root;
            return NULL;
        }
        p = &tmp->sibling;
        if (tmp->end < start)
            continue;
        return tmp;
    }
}

static int __release_resource(struct resource *old)
{
    struct resource *tmp, **p;

    p = &old->parent->child;
    for (;;) {
        tmp = *p;
        if (!tmp)
            break;
        if (tmp == old) {
            *p = tmp->sibling;
            old->parent = NULL;
            return 0;
        }
        p = &tmp->sibling;
    }
    return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
    struct resource *tmp, *p;
    resource_size_t size;

    p = r->child;
    r->child = NULL;
    while (p) {
        tmp = p;
        p = p->sibling;

        tmp->parent = NULL;
        tmp->sibling = NULL;
        __release_child_resources(tmp);

        printk(KERN_DEBUG "release child resource %pR\n", tmp);
        /* need to restore size, and keep flags */
        size = resource_size(tmp);
        tmp->start = 0;
        tmp->end = size - 1;
    }
}

void release_child_resources(struct resource *r)
{
    write_lock(&resource_lock);
    __release_child_resources(r);
    write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL on success, or a pointer to the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
    struct resource *conflict;

    write_lock(&resource_lock);
    conflict = __request_resource(root, new);
    write_unlock(&resource_lock);
    return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
    struct resource *conflict;

    conflict = request_resource_conflict(root, new);
    return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
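
/*
 * Example: a driver claiming a fixed MMIO window directly under
 * iomem_resource. The address range and the "mydev" name are made up
 * for illustration.
 *
 *    static struct resource mydev_mem = {
 *        .name  = "mydev",
 *        .start = 0xfebf0000,
 *        .end   = 0xfebf0fff,
 *        .flags = IORESOURCE_MEM,
 *    };
 *
 *    if (request_resource(&iomem_resource, &mydev_mem))
 *        return -EBUSY;    (range already claimed by someone else)
 *
 * The region stays claimed until a matching release_resource(&mydev_mem).
 */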

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
    int retval;

    write_lock(&resource_lock);
    retval = __release_resource(old);
    write_unlock(&resource_lock);
    return retval;
}

EXPORT_SYMBOL(release_resource);

#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Find the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end, res->flags, and "name".
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res, char *name)
{
    resource_size_t start, end;
    struct resource *p;

    BUG_ON(!res);

    start = res->start;
    end = res->end;
    BUG_ON(start >= end);

    read_lock(&resource_lock);
    for (p = iomem_resource.child; p ; p = p->sibling) {
        /* system ram is just marked as IORESOURCE_MEM */
        if (p->flags != res->flags)
            continue;
        if (name && strcmp(p->name, name))
            continue;
        if (p->start > end) {
            p = NULL;
            break;
        }
        if ((p->end >= start) && (p->start < end))
            break;
    }
    read_unlock(&resource_lock);
    if (!p)
        return -1;
    /* copy data */
    if (res->start < p->start)
        res->start = p->start;
    if (res->end > p->end)
        res->end = p->end;
    return 0;
}

/*
 * This function calls the callback against all memory ranges of "System RAM",
 * which are marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 * For now, this function only handles "System RAM".
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
        void *arg, int (*func)(unsigned long, unsigned long, void *))
{
    struct resource res;
    unsigned long pfn, end_pfn;
    u64 orig_end;
    int ret = -1;

    res.start = (u64) start_pfn << PAGE_SHIFT;
    res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
    res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
    orig_end = res.end;
    while ((res.start < res.end) &&
        (find_next_system_ram(&res, "System RAM") >= 0)) {
        pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
        end_pfn = (res.end + 1) >> PAGE_SHIFT;
        if (end_pfn > pfn)
            ret = (*func)(pfn, end_pfn - pfn, arg);
        if (ret)
            break;
        res.start = res.end + 1;
        res.end = orig_end;
    }
    return ret;
}

#endif
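
/*
 * Example: walk_system_ram_range() with a callback that totals the pages of
 * "System RAM" in a pfn range. The callback and variable names are made up
 * for illustration; returning non-zero from the callback stops the walk.
 *
 *    static int count_ram_pages(unsigned long pfn, unsigned long nr_pages,
 *                               void *arg)
 *    {
 *        *(unsigned long *)arg += nr_pages;
 *        return 0;
 *    }
 *
 *    unsigned long total = 0;
 *    walk_system_ram_range(start_pfn, nr_pages, &total, count_ram_pages);
 */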

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
    return 1;
}
/*
 * This generic page_is_ram() returns true if the specified address is
 * registered as "System RAM" in the iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
    return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
                         const struct resource *avail,
                         resource_size_t size,
                         resource_size_t align)
{
    return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
              resource_size_t max)
{
    if (res->start < min)
        res->start = min;
    if (res->end > max)
        res->end = max;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
             struct resource *new,
             resource_size_t size,
             struct resource_constraint *constraint)
{
    struct resource *this = root->child;
    struct resource tmp = *new, avail, alloc;

    tmp.start = root->start;
    /*
     * Skip past an allocated resource that starts at 0, since the assignment
     * of this->start - 1 to tmp->end below would cause an underflow.
     */
    if (this && this->start == root->start) {
        tmp.start = (this == old) ? old->start : this->end + 1;
        this = this->sibling;
    }
    for (;;) {
        if (this)
            tmp.end = (this == old) ? this->end : this->start - 1;
        else
            tmp.end = root->end;

        if (tmp.end < tmp.start)
            goto next;

        resource_clip(&tmp, constraint->min, constraint->max);
        arch_remove_reservations(&tmp);

        /* Check for overflow after ALIGN() */
        avail.start = ALIGN(tmp.start, constraint->align);
        avail.end = tmp.end;
        avail.flags = new->flags & ~IORESOURCE_UNSET;
        if (avail.start >= tmp.start) {
            alloc.flags = avail.flags;
            alloc.start = constraint->alignf(constraint->alignf_data, &avail,
                    size, constraint->align);
            alloc.end = alloc.start + size - 1;
            if (resource_contains(&avail, &alloc)) {
                new->start = alloc.start;
                new->end = alloc.end;
                return 0;
            }
        }

next: if (!this || this->end == root->end)
            break;

        if (this != old)
            tmp.start = this->end + 1;
        this = this->sibling;
    }
    return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
            resource_size_t size,
            struct resource_constraint *constraint)
{
    return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 * The resource will be relocated if the new size cannot be accommodated at
 * the current location.
 *
 * @root: root resource descriptor
 * @old: resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
            resource_size_t newsize,
            struct resource_constraint *constraint)
{
    int err = 0;
    struct resource new = *old;
    struct resource *conflict;

    write_lock(&resource_lock);

    if ((err = __find_resource(root, old, &new, newsize, constraint)))
        goto out;

    if (resource_contains(&new, old)) {
        old->start = new.start;
        old->end = new.end;
        goto out;
    }

    if (old->child) {
        err = -EBUSY;
        goto out;
    }

    if (resource_contains(old, &new)) {
        old->start = new.start;
        old->end = new.end;
    } else {
        __release_resource(old);
        *old = new;
        conflict = __request_resource(root, old);
        BUG_ON(conflict);
    }
out:
    write_unlock(&resource_lock);
    return err;
}


/**
 * allocate_resource - allocate an empty slot in the resource tree given range & alignment.
 * The resource will be reallocated with a new size if it was already allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
              resource_size_t size, resource_size_t min,
              resource_size_t max, resource_size_t align,
              resource_size_t (*alignf)(void *,
                        const struct resource *,
                        resource_size_t,
                        resource_size_t),
              void *alignf_data)
{
    int err;
    struct resource_constraint constraint;

    if (!alignf)
        alignf = simple_align_resource;

    constraint.min = min;
    constraint.max = max;
    constraint.align = align;
    constraint.alignf = alignf;
    constraint.alignf_data = alignf_data;

    if (new->parent) {
        /* resource is already allocated, try reallocating with
           the new constraints */
        return reallocate_resource(root, new, size, &constraint);
    }

    write_lock(&resource_lock);
    err = find_resource(root, new, size, &constraint);
    if (err >= 0 && __request_resource(root, new))
        err = -EBUSY;
    write_unlock(&resource_lock);
    return err;
}

EXPORT_SYMBOL(allocate_resource);
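
/*
 * Example: letting the allocator find a free, aligned slot instead of
 * naming a fixed address. The window bounds and the resource name are
 * made up for illustration; passing a NULL @alignf falls back to
 * simple_align_resource().
 *
 *    static struct resource mydev_win = {
 *        .name  = "mydev window",
 *        .flags = IORESOURCE_MEM,
 *    };
 *
 *    if (allocate_resource(&iomem_resource, &mydev_win, 0x1000,
 *                          0x80000000, 0xffffffff, 0x1000, NULL, NULL))
 *        return -EBUSY;
 */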

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
    struct resource *res;

    read_lock(&resource_lock);
    for (res = root->child; res; res = res->sibling) {
        if (res->start == start)
            break;
    }
    read_unlock(&resource_lock);

    return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
    struct resource *first, *next;

    for (;; parent = first) {
        first = __request_resource(parent, new);
        if (!first)
            return first;

        if (first == parent)
            return first;
        if (WARN_ON(first == new)) /* duplicated insertion */
            return first;

        if ((first->start > new->start) || (first->end < new->end))
            break;
        if ((first->start == new->start) && (first->end == new->end))
            break;
    }

    for (next = first; ; next = next->sibling) {
        /* Partial overlap? Bad, and unfixable */
        if (next->start < new->start || next->end > new->end)
            return next;
        if (!next->sibling)
            break;
        if (next->sibling->start > new->end)
            break;
    }

    new->parent = parent;
    new->sibling = next->sibling;
    new->child = first;

    next->sibling = NULL;
    for (next = first; next; next = next->sibling)
        next->parent = new;

    if (parent->child == first) {
        parent->child = new;
    } else {
        next = parent->child;
        while (next->sibling != first)
            next = next->sibling;
        next->sibling = new;
    }
    return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or the conflicting resource if the resource
 * can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
    struct resource *conflict;

    write_lock(&resource_lock);
    conflict = __insert_resource(parent, new);
    write_unlock(&resource_lock);
    return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
    struct resource *conflict;

    conflict = insert_resource_conflict(parent, new);
    return conflict ? -EBUSY : 0;
}
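
/*
 * Example: inserting a firmware-described region around entries that may
 * already exist. Unlike request_resource(), existing entries that fit
 * entirely inside the new range become its children instead of causing a
 * failure. The range and name below are made up for illustration.
 *
 *    static struct resource fw_region = {
 *        .name  = "fw region",
 *        .start = 0xd0000000,
 *        .end   = 0xd7ffffff,
 *        .flags = IORESOURCE_MEM,
 *    };
 *
 *    if (insert_resource(&iomem_resource, &fw_region))
 *        return -EBUSY;    (a partial overlap is unfixable)
 */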

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
    if (new->parent)
        return;

    write_lock(&resource_lock);
    for (;;) {
        struct resource *conflict;

        conflict = __insert_resource(root, new);
        if (!conflict)
            break;
        if (conflict == root)
            break;

        /* Ok, expand resource to cover the conflict, then try again .. */
        if (conflict->start < new->start)
            new->start = conflict->start;
        if (conflict->end > new->end)
            new->end = conflict->end;

        printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
    }
    write_unlock(&resource_lock);
}

static int __adjust_resource(struct resource *res, resource_size_t start,
                resource_size_t size)
{
    struct resource *tmp, *parent = res->parent;
    resource_size_t end = start + size - 1;
    int result = -EBUSY;

    if (!parent)
        goto skip;

    if ((start < parent->start) || (end > parent->end))
        goto out;

    if (res->sibling && (res->sibling->start <= end))
        goto out;

    tmp = parent->child;
    if (tmp != res) {
        while (tmp->sibling != res)
            tmp = tmp->sibling;
        if (start <= tmp->end)
            goto out;
    }

skip:
    for (tmp = res->child; tmp; tmp = tmp->sibling)
        if ((tmp->start < start) || (tmp->end > end))
            goto out;

    res->start = start;
    res->end = end;
    result = 0;

 out:
    return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
            resource_size_t size)
{
    int result;

    write_lock(&resource_lock);
    result = __adjust_resource(res, start, size);
    write_unlock(&resource_lock);
    return result;
}
EXPORT_SYMBOL(adjust_resource);
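
/*
 * Example: growing an already-requested region in place, here from 4K to
 * 8K at the same base (mydev_mem as in the earlier sketch). The call fails
 * with -EBUSY if the larger range would collide with a sibling or fall
 * outside the parent, or if an existing child would no longer fit.
 *
 *    err = adjust_resource(&mydev_mem, mydev_mem.start, 0x2000);
 */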

static void __init __reserve_region_with_split(struct resource *root,
        resource_size_t start, resource_size_t end,
        const char *name)
{
    struct resource *parent = root;
    struct resource *conflict;
    struct resource *res = alloc_resource(GFP_ATOMIC);
    struct resource *next_res = NULL;

    if (!res)
        return;

    res->name = name;
    res->start = start;
    res->end = end;
    res->flags = IORESOURCE_BUSY;

    while (1) {

        conflict = __request_resource(parent, res);
        if (!conflict) {
            if (!next_res)
                break;
            res = next_res;
            next_res = NULL;
            continue;
        }

        /* conflict covered whole area */
        if (conflict->start <= res->start &&
                conflict->end >= res->end) {
            free_resource(res);
            WARN_ON(next_res);
            break;
        }

        /* failed, split and try again */
        if (conflict->start > res->start) {
            end = res->end;
            res->end = conflict->start - 1;
            if (conflict->end < end) {
                next_res = alloc_resource(GFP_ATOMIC);
                if (!next_res) {
                    free_resource(res);
                    break;
                }
                next_res->name = name;
                next_res->start = conflict->end + 1;
                next_res->end = end;
                next_res->flags = IORESOURCE_BUSY;
            }
        } else {
            res->start = conflict->end + 1;
        }
    }

}

void __init reserve_region_with_split(struct resource *root,
        resource_size_t start, resource_size_t end,
        const char *name)
{
    int abort = 0;

    write_lock(&resource_lock);
    if (root->start > start || root->end < end) {
        pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
               (unsigned long long)start, (unsigned long long)end,
               root);
        if (start > root->end || end < root->start)
            abort = 1;
        else {
            if (end > root->end)
                end = root->end;
            if (start < root->start)
                start = root->start;
            pr_err("fixing request to [0x%llx-0x%llx]\n",
                   (unsigned long long)start,
                   (unsigned long long)end);
        }
        dump_stack();
    }
    if (!abort)
        __reserve_region_with_split(root, start, end, name);
    write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
    switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
    case IORESOURCE_SIZEALIGN:
        return resource_size(res);
    case IORESOURCE_STARTALIGN:
        return res->start;
    default:
        return 0;
    }
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
                   resource_size_t start, resource_size_t n,
                   const char *name, int flags)
{
    DECLARE_WAITQUEUE(wait, current);
    struct resource *res = alloc_resource(GFP_KERNEL);

    if (!res)
        return NULL;

    res->name = name;
    res->start = start;
    res->end = start + n - 1;
    res->flags = resource_type(parent);
    res->flags |= IORESOURCE_BUSY | flags;

    write_lock(&resource_lock);

    for (;;) {
        struct resource *conflict;

        conflict = __request_resource(parent, res);
        if (!conflict)
            break;
        if (conflict != parent) {
            parent = conflict;
            if (!(conflict->flags & IORESOURCE_BUSY))
                continue;
        }
        if (conflict->flags & flags & IORESOURCE_MUXED) {
            add_wait_queue(&muxed_resource_wait, &wait);
            write_unlock(&resource_lock);
            set_current_state(TASK_UNINTERRUPTIBLE);
            schedule();
            remove_wait_queue(&muxed_resource_wait, &wait);
            write_lock(&resource_lock);
            continue;
        }
        /* Uhhuh, that didn't work out.. */
        free_resource(res);
        res = NULL;
        break;
    }
    write_unlock(&resource_lock);
    return res;
}
EXPORT_SYMBOL(__request_region);
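
/*
 * Example: drivers normally reach this through the request_region() and
 * request_mem_region() helpers from <linux/ioport.h>, which supply the
 * parent resource (ioport_resource or iomem_resource). The port base and
 * the "mydev" name are made up for illustration.
 *
 *    if (!request_region(0x378, 8, "mydev"))
 *        return -EBUSY;
 *    ...
 *    release_region(0x378, 8);
 */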

/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it. It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
            resource_size_t n)
{
    struct resource *res;

    res = __request_region(parent, start, n, "check-region", 0);
    if (!res)
        return -EBUSY;

    release_resource(res);
    free_resource(res);
    return 0;
}
EXPORT_SYMBOL(__check_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
            resource_size_t n)
{
    struct resource **p;
    resource_size_t end;

    p = &parent->child;
    end = start + n - 1;

    write_lock(&resource_lock);

    for (;;) {
        struct resource *res = *p;

        if (!res)
            break;
        if (res->start <= start && res->end >= end) {
            if (!(res->flags & IORESOURCE_BUSY)) {
                p = &res->child;
                continue;
            }
            if (res->start != start || res->end != end)
                break;
            *p = res->sibling;
            write_unlock(&resource_lock);
            if (res->flags & IORESOURCE_MUXED)
                wake_up(&muxed_resource_wait);
            free_resource(res);
            return;
        }
        p = &res->sibling;
    }

    write_unlock(&resource_lock);

    printk(KERN_WARNING "Trying to free nonexistent resource "
        "<%016llx-%016llx>\n", (unsigned long long)start,
        (unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 * supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 * assumes that all children remain in the lower address entry for
 * simplicity. Enhance this logic when necessary.
 */
int release_mem_region_adjustable(struct resource *parent,
            resource_size_t start, resource_size_t size)
{
    struct resource **p;
    struct resource *res;
    struct resource *new_res;
    resource_size_t end;
    int ret = -EINVAL;

    end = start + size - 1;
    if ((start < parent->start) || (end > parent->end))
        return ret;

    /* The alloc_resource() result gets checked later */
    new_res = alloc_resource(GFP_KERNEL);

    p = &parent->child;
    write_lock(&resource_lock);

    while ((res = *p)) {
        if (res->start >= end)
            break;

        /* look for the next resource if the region does not fit into this one */
        if (res->start > start || res->end < end) {
            p = &res->sibling;
            continue;
        }

        if (!(res->flags & IORESOURCE_MEM))
            break;

        if (!(res->flags & IORESOURCE_BUSY)) {
            p = &res->child;
            continue;
        }

        /* found the target resource; let's adjust accordingly */
        if (res->start == start && res->end == end) {
            /* free the whole entry */
            *p = res->sibling;
            free_resource(res);
            ret = 0;
        } else if (res->start == start && res->end != end) {
            /* adjust the start */
            ret = __adjust_resource(res, end + 1,
                        res->end - end);
        } else if (res->start != start && res->end == end) {
            /* adjust the end */
            ret = __adjust_resource(res, res->start,
                        start - res->start);
        } else {
            /* split into two entries */
            if (!new_res) {
                ret = -ENOMEM;
                break;
            }
            new_res->name = res->name;
            new_res->start = end + 1;
            new_res->end = res->end;
            new_res->flags = res->flags;
            new_res->parent = res->parent;
            new_res->sibling = res->sibling;
            new_res->child = NULL;

            ret = __adjust_resource(res, res->start,
                        start - res->start);
            if (ret)
                break;
            res->sibling = new_res;
            new_res = NULL;
        }

        break;
    }

    write_unlock(&resource_lock);
    free_resource(new_res);
    return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/*
 * Managed region resource
 */
struct region_devres {
    struct resource *parent;
    resource_size_t start;
    resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
    struct region_devres *this = res;

    __release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
    struct region_devres *this = res, *match = match_data;

    return this->parent == match->parent &&
        this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
                struct resource *parent, resource_size_t start,
                resource_size_t n, const char *name)
{
    struct region_devres *dr = NULL;
    struct resource *res;

    dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
              GFP_KERNEL);
    if (!dr)
        return NULL;

    dr->parent = parent;
    dr->start = start;
    dr->n = n;

    res = __request_region(parent, start, n, name, 0);
    if (res)
        devres_add(dev, dr);
    else
        devres_free(dr);

    return res;
}
EXPORT_SYMBOL(__devm_request_region);
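
/*
 * Example: the managed variant is normally reached through the
 * devm_request_mem_region() helper from <linux/ioport.h>; the region is
 * released automatically when the device is unbound, so no explicit
 * release is needed on error or remove paths. The names below are made up
 * for illustration.
 *
 *    if (!devm_request_mem_region(&pdev->dev, res->start,
 *                                 resource_size(res), "mydev"))
 *        return -EBUSY;
 */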

void __devm_release_region(struct device *dev, struct resource *parent,
               resource_size_t start, resource_size_t n)
{
    struct region_devres match_data = { parent, start, n };

    __release_region(parent, start, n);
    WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
                   &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
    static int reserved;
    static struct resource reserve[MAXRESERVE];

    for (;;) {
        unsigned int io_start, io_num;
        int x = reserved;

        if (get_option(&str, &io_start) != 2)
            break;
        if (get_option(&str, &io_num) == 0)
            break;
        if (x < MAXRESERVE) {
            struct resource *res = reserve + x;
            res->name = "reserved";
            res->start = io_start;
            res->end = io_start + io_num - 1;
            res->flags = IORESOURCE_BUSY;
            res->child = NULL;
            if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
                reserved = x+1;
        }
    }
    return 1;
}

__setup("reserve=", reserve_setup);
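
/*
 * Example: booting with "reserve=0x300,32" marks ports 0x300-0x31f busy so
 * that no driver can claim them; a start address of 0x10000 or above is
 * reserved from iomem_resource instead of ioport_resource. Up to
 * MAXRESERVE start,size pairs are accepted.
 */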

/*
 * Check whether the requested [addr, addr + size) range spans across the
 * boundary of any slot in the iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
    struct resource *p = &iomem_resource;
    int err = 0;
    loff_t l;

    read_lock(&resource_lock);
    for (p = p->child; p ; p = r_next(NULL, p, &l)) {
        /*
         * We can probably skip the resources without
         * IORESOURCE_IO attribute?
         */
        if (p->start >= addr + size)
            continue;
        if (p->end < addr)
            continue;
        if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
            PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
            continue;
        /*
         * if a resource is "BUSY", it's not a hardware resource
         * but a driver mapping of such a resource; we don't want
         * to warn for those; some drivers legitimately map only
         * partial hardware resources. (example: vesafb)
         */
        if (p->flags & IORESOURCE_BUSY)
            continue;

        printk(KERN_WARNING "resource map sanity check conflict: "
               "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
               (unsigned long long)addr,
               (unsigned long long)(addr + size - 1),
               (unsigned long long)p->start,
               (unsigned long long)p->end,
               p->name);
        err = -1;
        break;
    }
    read_unlock(&resource_lock);

    return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * check if an address is reserved in the iomem resource tree
 * returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
    struct resource *p = &iomem_resource;
    int err = 0;
    loff_t l;
    int size = PAGE_SIZE;

    if (!strict_iomem_checks)
        return 0;

    addr = addr & PAGE_MASK;

    read_lock(&resource_lock);
    for (p = p->child; p ; p = r_next(NULL, p, &l)) {
        /*
         * We can probably skip the resources without
         * IORESOURCE_IO attribute?
         */
        if (p->start >= addr + size)
            break;
        if (p->end < addr)
            continue;
        if (p->flags & IORESOURCE_BUSY &&
             p->flags & IORESOURCE_EXCLUSIVE) {
            err = 1;
            break;
        }
    }
    read_unlock(&resource_lock);

    return err;
}

static int __init strict_iomem(char *str)
{
    if (strstr(str, "relaxed"))
        strict_iomem_checks = 0;
    if (strstr(str, "strict"))
        strict_iomem_checks = 1;
    return 1;
}

__setup("iomem=", strict_iomem);