1 | /* |
2 | * linux/kernel/resource.c |
3 | * |
4 | * Copyright (C) 1999 Linus Torvalds |
5 | * Copyright (C) 1999 Martin Mares <mj@ucw.cz> |
6 | * |
7 | * Arbitrary resource management. |
8 | */ |
9 | |
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
11 | |
12 | #include <linux/export.h> |
13 | #include <linux/errno.h> |
14 | #include <linux/ioport.h> |
15 | #include <linux/init.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/spinlock.h> |
18 | #include <linux/fs.h> |
19 | #include <linux/proc_fs.h> |
20 | #include <linux/sched.h> |
21 | #include <linux/seq_file.h> |
22 | #include <linux/device.h> |
23 | #include <linux/pfn.h> |
24 | #include <asm/io.h> |
25 | |
26 | |
27 | struct resource ioport_resource = { |
28 | .name = "PCI IO", |
29 | .start = 0, |
30 | .end = IO_SPACE_LIMIT, |
31 | .flags = IORESOURCE_IO, |
32 | }; |
33 | EXPORT_SYMBOL(ioport_resource); |
34 | |
35 | struct resource iomem_resource = { |
36 | .name = "PCI mem", |
37 | .start = 0, |
38 | .end = -1, |
39 | .flags = IORESOURCE_MEM, |
40 | }; |
41 | EXPORT_SYMBOL(iomem_resource); |
42 | |
43 | /* constraints to be met while allocating resources */ |
44 | struct resource_constraint { |
45 | resource_size_t min, max, align; |
46 | resource_size_t (*alignf)(void *, const struct resource *, |
47 | resource_size_t, resource_size_t); |
48 | void *alignf_data; |
49 | }; |
50 | |
51 | static DEFINE_RWLOCK(resource_lock); |
52 | |
53 | static void *r_next(struct seq_file *m, void *v, loff_t *pos) |
54 | { |
55 | struct resource *p = v; |
56 | (*pos)++; |
57 | if (p->child) |
58 | return p->child; |
59 | while (!p->sibling && p->parent) |
60 | p = p->parent; |
61 | return p->sibling; |
62 | } |
63 | |
64 | #ifdef CONFIG_PROC_FS |
65 | |
66 | enum { MAX_IORES_LEVEL = 5 }; |
67 | |
68 | static void *r_start(struct seq_file *m, loff_t *pos) |
69 | __acquires(resource_lock) |
70 | { |
71 | struct resource *p = m->private; |
72 | loff_t l = 0; |
73 | read_lock(&resource_lock); |
74 | for (p = p->child; p && l < *pos; p = r_next(m, p, &l)) |
75 | ; |
76 | return p; |
77 | } |
78 | |
79 | static void r_stop(struct seq_file *m, void *v) |
80 | __releases(resource_lock) |
81 | { |
82 | read_unlock(&resource_lock); |
83 | } |
84 | |
85 | static int r_show(struct seq_file *m, void *v) |
86 | { |
87 | struct resource *root = m->private; |
88 | struct resource *r = v, *p; |
89 | int width = root->end < 0x10000 ? 4 : 8; |
90 | int depth; |
91 | |
92 | for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent) |
93 | if (p->parent == root) |
94 | break; |
95 | seq_printf(m, "%*s%0*llx-%0*llx : %s\n", |
96 | depth * 2, "", |
97 | width, (unsigned long long) r->start, |
98 | width, (unsigned long long) r->end, |
99 | r->name ? r->name : "<BAD>"); |
100 | return 0; |
101 | } |
102 | |
103 | static const struct seq_operations resource_op = { |
104 | .start = r_start, |
105 | .next = r_next, |
106 | .stop = r_stop, |
107 | .show = r_show, |
108 | }; |
109 | |
110 | static int ioports_open(struct inode *inode, struct file *file) |
111 | { |
112 | int res = seq_open(file, &resource_op); |
113 | if (!res) { |
114 | struct seq_file *m = file->private_data; |
115 | m->private = &ioport_resource; |
116 | } |
117 | return res; |
118 | } |
119 | |
120 | static int iomem_open(struct inode *inode, struct file *file) |
121 | { |
122 | int res = seq_open(file, &resource_op); |
123 | if (!res) { |
124 | struct seq_file *m = file->private_data; |
125 | m->private = &iomem_resource; |
126 | } |
127 | return res; |
128 | } |
129 | |
130 | static const struct file_operations proc_ioports_operations = { |
131 | .open = ioports_open, |
132 | .read = seq_read, |
133 | .llseek = seq_lseek, |
134 | .release = seq_release, |
135 | }; |
136 | |
137 | static const struct file_operations proc_iomem_operations = { |
138 | .open = iomem_open, |
139 | .read = seq_read, |
140 | .llseek = seq_lseek, |
141 | .release = seq_release, |
142 | }; |
143 | |
144 | static int __init ioresources_init(void) |
145 | { |
146 | proc_create("ioports", 0, NULL, &proc_ioports_operations); |
147 | proc_create("iomem", 0, NULL, &proc_iomem_operations); |
148 | return 0; |
149 | } |
150 | __initcall(ioresources_init); |
151 | |
152 | #endif /* CONFIG_PROC_FS */ |
153 | |
154 | /* Return the conflict entry if you can't request it */ |
155 | static struct resource * __request_resource(struct resource *root, struct resource *new) |
156 | { |
157 | resource_size_t start = new->start; |
158 | resource_size_t end = new->end; |
159 | struct resource *tmp, **p; |
160 | |
161 | if (end < start) |
162 | return root; |
163 | if (start < root->start) |
164 | return root; |
165 | if (end > root->end) |
166 | return root; |
167 | p = &root->child; |
168 | for (;;) { |
169 | tmp = *p; |
170 | if (!tmp || tmp->start > end) { |
171 | new->sibling = tmp; |
172 | *p = new; |
173 | new->parent = root; |
174 | return NULL; |
175 | } |
176 | p = &tmp->sibling; |
177 | if (tmp->end < start) |
178 | continue; |
179 | return tmp; |
180 | } |
181 | } |
182 | |
183 | static int __release_resource(struct resource *old) |
184 | { |
185 | struct resource *tmp, **p; |
186 | |
187 | p = &old->parent->child; |
188 | for (;;) { |
189 | tmp = *p; |
190 | if (!tmp) |
191 | break; |
192 | if (tmp == old) { |
193 | *p = tmp->sibling; |
194 | old->parent = NULL; |
195 | return 0; |
196 | } |
197 | p = &tmp->sibling; |
198 | } |
199 | return -EINVAL; |
200 | } |
201 | |
202 | static void __release_child_resources(struct resource *r) |
203 | { |
204 | struct resource *tmp, *p; |
205 | resource_size_t size; |
206 | |
207 | p = r->child; |
208 | r->child = NULL; |
209 | while (p) { |
210 | tmp = p; |
211 | p = p->sibling; |
212 | |
213 | tmp->parent = NULL; |
214 | tmp->sibling = NULL; |
215 | __release_child_resources(tmp); |
216 | |
217 | printk(KERN_DEBUG "release child resource %pR\n", tmp); |
218 | /* need to restore size, and keep flags */ |
219 | size = resource_size(tmp); |
220 | tmp->start = 0; |
221 | tmp->end = size - 1; |
222 | } |
223 | } |
224 | |
225 | void release_child_resources(struct resource *r) |
226 | { |
227 | write_lock(&resource_lock); |
228 | __release_child_resources(r); |
229 | write_unlock(&resource_lock); |
230 | } |
231 | |
232 | /** |
233 | * request_resource_conflict - request and reserve an I/O or memory resource |
234 | * @root: root resource descriptor |
235 | * @new: resource descriptor desired by caller |
236 | * |
237 | * Returns NULL on success, or a pointer to the conflicting resource on error. |
238 | */ |
239 | struct resource *request_resource_conflict(struct resource *root, struct resource *new) |
240 | { |
241 | struct resource *conflict; |
242 | |
243 | write_lock(&resource_lock); |
244 | conflict = __request_resource(root, new); |
245 | write_unlock(&resource_lock); |
246 | return conflict; |
247 | } |
248 | |
249 | /** |
250 | * request_resource - request and reserve an I/O or memory resource |
251 | * @root: root resource descriptor |
252 | * @new: resource descriptor desired by caller |
253 | * |
254 | * Returns 0 for success, negative error code on error. |
255 | */ |
256 | int request_resource(struct resource *root, struct resource *new) |
257 | { |
258 | struct resource *conflict; |
259 | |
260 | conflict = request_resource_conflict(root, new); |
261 | return conflict ? -EBUSY : 0; |
262 | } |
263 | |
264 | EXPORT_SYMBOL(request_resource); |
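
/*
 * Example (an illustrative sketch, not part of this file): a driver claiming
 * a fixed MMIO window directly from iomem_resource.  The "foo" name and the
 * address range are assumptions for illustration only.
 *
 *	static struct resource foo_regs = {
 *		.name  = "foo registers",
 *		.start = 0xfeb00000,
 *		.end   = 0xfeb00fff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_regs))
 *		pr_err("foo: MMIO range already claimed\n");
 *
 * On teardown the same descriptor would be handed back with
 * release_resource(&foo_regs).
 */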
265 | |
266 | /** |
267 | * release_resource - release a previously reserved resource |
268 | * @old: resource pointer |
269 | */ |
270 | int release_resource(struct resource *old) |
271 | { |
272 | int retval; |
273 | |
274 | write_lock(&resource_lock); |
275 | retval = __release_resource(old); |
276 | write_unlock(&resource_lock); |
277 | return retval; |
278 | } |
279 | |
280 | EXPORT_SYMBOL(release_resource); |
281 | |
282 | #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY) |
283 | /* |
284 | * Finds the lowest memory resource that exists within [res->start, res->end). |
285 | * The caller must specify res->start, res->end, res->flags and "name". |
286 | * If found, returns 0 and res is overwritten; if not found, returns -1. |
287 | */ |
288 | static int find_next_system_ram(struct resource *res, char *name) |
289 | { |
290 | resource_size_t start, end; |
291 | struct resource *p; |
292 | |
293 | BUG_ON(!res); |
294 | |
295 | start = res->start; |
296 | end = res->end; |
297 | BUG_ON(start >= end); |
298 | |
299 | read_lock(&resource_lock); |
300 | for (p = iomem_resource.child; p ; p = p->sibling) { |
301 | /* system ram is just marked as IORESOURCE_MEM */ |
302 | if (p->flags != res->flags) |
303 | continue; |
304 | if (name && strcmp(p->name, name)) |
305 | continue; |
306 | if (p->start > end) { |
307 | p = NULL; |
308 | break; |
309 | } |
310 | if ((p->end >= start) && (p->start < end)) |
311 | break; |
312 | } |
313 | read_unlock(&resource_lock); |
314 | if (!p) |
315 | return -1; |
316 | /* copy data */ |
317 | if (res->start < p->start) |
318 | res->start = p->start; |
319 | if (res->end > p->end) |
320 | res->end = p->end; |
321 | return 0; |
322 | } |
323 | |
324 | /* |
325 | * This function calls the callback against all memory ranges of "System RAM", |
326 | * which are marked as IORESOURCE_MEM and IORESOURCE_BUSY. |
327 | * For now, this function only handles "System RAM". |
328 | */ |
329 | int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, |
330 | void *arg, int (*func)(unsigned long, unsigned long, void *)) |
331 | { |
332 | struct resource res; |
333 | unsigned long pfn, end_pfn; |
334 | u64 orig_end; |
335 | int ret = -1; |
336 | |
337 | res.start = (u64) start_pfn << PAGE_SHIFT; |
338 | res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; |
339 | res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
340 | orig_end = res.end; |
341 | while ((res.start < res.end) && |
342 | (find_next_system_ram(&res, "System RAM") >= 0)) { |
343 | pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT; |
344 | end_pfn = (res.end + 1) >> PAGE_SHIFT; |
345 | if (end_pfn > pfn) |
346 | ret = (*func)(pfn, end_pfn - pfn, arg); |
347 | if (ret) |
348 | break; |
349 | res.start = res.end + 1; |
350 | res.end = orig_end; |
351 | } |
352 | return ret; |
353 | } |
354 | |
355 | #endif |
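
/*
 * Example (illustrative, with a hypothetical callback name): counting how
 * many pages of a physical range are registered as "System RAM".  The
 * callback returns 0 so the walk continues over every matching range.
 *
 *	static int count_ram_pages(unsigned long pfn, unsigned long nr_pages,
 *				   void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long ram_pages = 0;
 *
 *	walk_system_ram_range(start_pfn, nr_pages, &ram_pages, count_ram_pages);
 */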
356 | |
357 | static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg) |
358 | { |
359 | return 1; |
360 | } |
361 | /* |
362 | * This generic page_is_ram() returns true if the specified address is |
363 | * registered as "System RAM" in the iomem_resource list. |
364 | */ |
365 | int __weak page_is_ram(unsigned long pfn) |
366 | { |
367 | return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; |
368 | } |
369 | |
370 | void __weak arch_remove_reservations(struct resource *avail) |
371 | { |
372 | } |
373 | |
374 | static resource_size_t simple_align_resource(void *data, |
375 | const struct resource *avail, |
376 | resource_size_t size, |
377 | resource_size_t align) |
378 | { |
379 | return avail->start; |
380 | } |
381 | |
382 | static void resource_clip(struct resource *res, resource_size_t min, |
383 | resource_size_t max) |
384 | { |
385 | if (res->start < min) |
386 | res->start = min; |
387 | if (res->end > max) |
388 | res->end = max; |
389 | } |
390 | |
391 | static bool resource_contains(struct resource *res1, struct resource *res2) |
392 | { |
393 | return res1->start <= res2->start && res1->end >= res2->end; |
394 | } |
395 | |
396 | /* |
397 | * Find empty slot in the resource tree with the given range and |
398 | * alignment constraints |
399 | */ |
400 | static int __find_resource(struct resource *root, struct resource *old, |
401 | struct resource *new, |
402 | resource_size_t size, |
403 | struct resource_constraint *constraint) |
404 | { |
405 | struct resource *this = root->child; |
406 | struct resource tmp = *new, avail, alloc; |
407 | |
408 | tmp.flags = new->flags; |
409 | tmp.start = root->start; |
410 | /* |
411 | * Skip past an allocated resource that starts at 0, since the assignment |
412 | * of this->start - 1 to tmp->end below would cause an underflow. |
413 | */ |
414 | if (this && this->start == root->start) { |
415 | tmp.start = (this == old) ? old->start : this->end + 1; |
416 | this = this->sibling; |
417 | } |
418 | for(;;) { |
419 | if (this) |
420 | tmp.end = (this == old) ? this->end : this->start - 1; |
421 | else |
422 | tmp.end = root->end; |
423 | |
424 | if (tmp.end < tmp.start) |
425 | goto next; |
426 | |
427 | resource_clip(&tmp, constraint->min, constraint->max); |
428 | arch_remove_reservations(&tmp); |
429 | |
430 | /* Check for overflow after ALIGN() */ |
431 | avail = *new; |
432 | avail.start = ALIGN(tmp.start, constraint->align); |
433 | avail.end = tmp.end; |
434 | if (avail.start >= tmp.start) { |
435 | alloc.start = constraint->alignf(constraint->alignf_data, &avail, |
436 | size, constraint->align); |
437 | alloc.end = alloc.start + size - 1; |
438 | if (resource_contains(&avail, &alloc)) { |
439 | new->start = alloc.start; |
440 | new->end = alloc.end; |
441 | return 0; |
442 | } |
443 | } |
444 | |
445 | next: if (!this || this->end == root->end) |
446 | break; |
447 | |
448 | if (this != old) |
449 | tmp.start = this->end + 1; |
450 | this = this->sibling; |
451 | } |
452 | return -EBUSY; |
453 | } |
454 | |
455 | /* |
456 | * Find empty slot in the resource tree given range and alignment. |
457 | */ |
458 | static int find_resource(struct resource *root, struct resource *new, |
459 | resource_size_t size, |
460 | struct resource_constraint *constraint) |
461 | { |
462 | return __find_resource(root, NULL, new, size, constraint); |
463 | } |
464 | |
465 | /** |
466 | * reallocate_resource - allocate a slot in the resource tree given range & alignment. |
467 | * The resource will be relocated if the new size cannot be accommodated at |
468 | * the current location. |
469 | * |
470 | * @root: root resource descriptor |
471 | * @old: resource descriptor desired by caller |
472 | * @newsize: new size of the resource descriptor |
473 | * @constraint: the size and alignment constraints to be met. |
474 | */ |
475 | int reallocate_resource(struct resource *root, struct resource *old, |
476 | resource_size_t newsize, |
477 | struct resource_constraint *constraint) |
478 | { |
479 | int err=0; |
480 | struct resource new = *old; |
481 | struct resource *conflict; |
482 | |
483 | write_lock(&resource_lock); |
484 | |
485 | if ((err = __find_resource(root, old, &new, newsize, constraint))) |
486 | goto out; |
487 | |
488 | if (resource_contains(&new, old)) { |
489 | old->start = new.start; |
490 | old->end = new.end; |
491 | goto out; |
492 | } |
493 | |
494 | if (old->child) { |
495 | err = -EBUSY; |
496 | goto out; |
497 | } |
498 | |
499 | if (resource_contains(old, &new)) { |
500 | old->start = new.start; |
501 | old->end = new.end; |
502 | } else { |
503 | __release_resource(old); |
504 | *old = new; |
505 | conflict = __request_resource(root, old); |
506 | BUG_ON(conflict); |
507 | } |
508 | out: |
509 | write_unlock(&resource_lock); |
510 | return err; |
511 | } |
512 | |
513 | |
514 | /** |
515 | * allocate_resource - allocate an empty slot in the resource tree given range & alignment. |
516 | * The resource will be reallocated with a new size if it was already allocated. |
517 | * @root: root resource descriptor |
518 | * @new: resource descriptor desired by caller |
519 | * @size: requested resource region size |
520 | * @min: minimum boundary to allocate |
521 | * @max: maximum boundary to allocate |
522 | * @align: alignment requested, in bytes |
523 | * @alignf: alignment function, optional, called if not NULL |
524 | * @alignf_data: arbitrary data to pass to the @alignf function |
525 | */ |
526 | int allocate_resource(struct resource *root, struct resource *new, |
527 | resource_size_t size, resource_size_t min, |
528 | resource_size_t max, resource_size_t align, |
529 | resource_size_t (*alignf)(void *, |
530 | const struct resource *, |
531 | resource_size_t, |
532 | resource_size_t), |
533 | void *alignf_data) |
534 | { |
535 | int err; |
536 | struct resource_constraint constraint; |
537 | |
538 | if (!alignf) |
539 | alignf = simple_align_resource; |
540 | |
541 | constraint.min = min; |
542 | constraint.max = max; |
543 | constraint.align = align; |
544 | constraint.alignf = alignf; |
545 | constraint.alignf_data = alignf_data; |
546 | |
547 | if (new->parent) { |
548 | /* resource is already allocated, try reallocating with |
549 | the new constraints */ |
550 | return reallocate_resource(root, new, size, &constraint); |
551 | } |
552 | |
553 | write_lock(&resource_lock); |
554 | err = find_resource(root, new, size, &constraint); |
555 | if (err >= 0 && __request_resource(root, new)) |
556 | err = -EBUSY; |
557 | write_unlock(&resource_lock); |
558 | return err; |
559 | } |
560 | |
561 | EXPORT_SYMBOL(allocate_resource); |
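
/*
 * Example (a minimal sketch; the device name, size and limits are
 * assumptions): asking for any free, 4 KiB sized, 4 KiB aligned window in
 * iomem_resource between 1 MiB and 4 GiB, using the default alignment
 * function (alignf == NULL).
 *
 *	static struct resource foo_win = {
 *		.name  = "foo window",
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (allocate_resource(&iomem_resource, &foo_win, 0x1000,
 *			      0x100000, 0xffffffff, 0x1000, NULL, NULL))
 *		pr_err("foo: no free MMIO window found\n");
 *
 * If foo_win already had a parent, the call would instead go through
 * reallocate_resource() and try to resize or relocate it in place.
 */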
562 | |
563 | /** |
564 | * lookup_resource - find an existing resource by a resource start address |
565 | * @root: root resource descriptor |
566 | * @start: resource start address |
567 | * |
568 | * Returns a pointer to the resource if found, NULL otherwise |
569 | */ |
570 | struct resource *lookup_resource(struct resource *root, resource_size_t start) |
571 | { |
572 | struct resource *res; |
573 | |
574 | read_lock(&resource_lock); |
575 | for (res = root->child; res; res = res->sibling) { |
576 | if (res->start == start) |
577 | break; |
578 | } |
579 | read_unlock(&resource_lock); |
580 | |
581 | return res; |
582 | } |
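
/*
 * Example (illustrative): finding the descriptor of an I/O port region whose
 * start address is already known, e.g. from firmware data.  Only direct
 * children of the given root are searched.
 *
 *	struct resource *res = lookup_resource(&ioport_resource, 0x60);
 *	if (res)
 *		pr_info("port 0x60 belongs to %s\n", res->name);
 */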
583 | |
584 | /* |
585 | * Insert a resource into the resource tree. If successful, return NULL, |
586 | * otherwise return the conflicting resource (compare to __request_resource()) |
587 | */ |
588 | static struct resource * __insert_resource(struct resource *parent, struct resource *new) |
589 | { |
590 | struct resource *first, *next; |
591 | |
592 | for (;; parent = first) { |
593 | first = __request_resource(parent, new); |
594 | if (!first) |
595 | return first; |
596 | |
597 | if (first == parent) |
598 | return first; |
599 | if (WARN_ON(first == new)) /* duplicated insertion */ |
600 | return first; |
601 | |
602 | if ((first->start > new->start) || (first->end < new->end)) |
603 | break; |
604 | if ((first->start == new->start) && (first->end == new->end)) |
605 | break; |
606 | } |
607 | |
608 | for (next = first; ; next = next->sibling) { |
609 | /* Partial overlap? Bad, and unfixable */ |
610 | if (next->start < new->start || next->end > new->end) |
611 | return next; |
612 | if (!next->sibling) |
613 | break; |
614 | if (next->sibling->start > new->end) |
615 | break; |
616 | } |
617 | |
618 | new->parent = parent; |
619 | new->sibling = next->sibling; |
620 | new->child = first; |
621 | |
622 | next->sibling = NULL; |
623 | for (next = first; next; next = next->sibling) |
624 | next->parent = new; |
625 | |
626 | if (parent->child == first) { |
627 | parent->child = new; |
628 | } else { |
629 | next = parent->child; |
630 | while (next->sibling != first) |
631 | next = next->sibling; |
632 | next->sibling = new; |
633 | } |
634 | return NULL; |
635 | } |
636 | |
637 | /** |
638 | * insert_resource_conflict - Inserts resource in the resource tree |
639 | * @parent: parent of the new resource |
640 | * @new: new resource to insert |
641 | * |
642 | * Returns NULL on success, or the conflicting resource if it can't be inserted. |
643 | * |
644 | * This function is equivalent to request_resource_conflict when no conflict |
645 | * happens. If a conflict happens, and the conflicting resources |
646 | * entirely fit within the range of the new resource, then the new |
647 | * resource is inserted and the conflicting resources become children of |
648 | * the new resource. |
649 | */ |
650 | struct resource *insert_resource_conflict(struct resource *parent, struct resource *new) |
651 | { |
652 | struct resource *conflict; |
653 | |
654 | write_lock(&resource_lock); |
655 | conflict = __insert_resource(parent, new); |
656 | write_unlock(&resource_lock); |
657 | return conflict; |
658 | } |
659 | |
660 | /** |
661 | * insert_resource - Inserts a resource in the resource tree |
662 | * @parent: parent of the new resource |
663 | * @new: new resource to insert |
664 | * |
665 | * Returns 0 on success, -EBUSY if the resource can't be inserted. |
666 | */ |
667 | int insert_resource(struct resource *parent, struct resource *new) |
668 | { |
669 | struct resource *conflict; |
670 | |
671 | conflict = insert_resource_conflict(parent, new); |
672 | return conflict ? -EBUSY : 0; |
673 | } |
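
/*
 * Example (a sketch with made-up values): registering a firmware-described
 * region that may already contain previously requested sub-ranges.  Unlike
 * request_resource(), insert_resource() succeeds in that case and adopts the
 * existing entries as children of the new resource.
 *
 *	static struct resource fw_region = {
 *		.name  = "firmware tables",
 *		.start = 0xe0000000,
 *		.end   = 0xe00fffff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &fw_region))
 *		pr_warn("firmware tables region could not be inserted\n");
 */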
674 | |
675 | /** |
676 | * insert_resource_expand_to_fit - Insert a resource into the resource tree |
677 | * @root: root resource descriptor |
678 | * @new: new resource to insert |
679 | * |
680 | * Insert a resource into the resource tree, possibly expanding it in order |
681 | * to make it encompass any conflicting resources. |
682 | */ |
683 | void insert_resource_expand_to_fit(struct resource *root, struct resource *new) |
684 | { |
685 | if (new->parent) |
686 | return; |
687 | |
688 | write_lock(&resource_lock); |
689 | for (;;) { |
690 | struct resource *conflict; |
691 | |
692 | conflict = __insert_resource(root, new); |
693 | if (!conflict) |
694 | break; |
695 | if (conflict == root) |
696 | break; |
697 | |
698 | /* Ok, expand resource to cover the conflict, then try again .. */ |
699 | if (conflict->start < new->start) |
700 | new->start = conflict->start; |
701 | if (conflict->end > new->end) |
702 | new->end = conflict->end; |
703 | |
704 | printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); |
705 | } |
706 | write_unlock(&resource_lock); |
707 | } |
708 | |
709 | /** |
710 | * adjust_resource - modify a resource's start and size |
711 | * @res: resource to modify |
712 | * @start: new start value |
713 | * @size: new size |
714 | * |
715 | * Given an existing resource, change its start and size to match the |
716 | * arguments. Returns 0 on success, -EBUSY if it can't fit. |
717 | * Existing children of the resource are assumed to be immutable. |
718 | */ |
719 | int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size) |
720 | { |
721 | struct resource *tmp, *parent = res->parent; |
722 | resource_size_t end = start + size - 1; |
723 | int result = -EBUSY; |
724 | |
725 | write_lock(&resource_lock); |
726 | |
727 | if (!parent) |
728 | goto skip; |
729 | |
730 | if ((start < parent->start) || (end > parent->end)) |
731 | goto out; |
732 | |
733 | if (res->sibling && (res->sibling->start <= end)) |
734 | goto out; |
735 | |
736 | tmp = parent->child; |
737 | if (tmp != res) { |
738 | while (tmp->sibling != res) |
739 | tmp = tmp->sibling; |
740 | if (start <= tmp->end) |
741 | goto out; |
742 | } |
743 | |
744 | skip: |
745 | for (tmp = res->child; tmp; tmp = tmp->sibling) |
746 | if ((tmp->start < start) || (tmp->end > end)) |
747 | goto out; |
748 | |
749 | res->start = start; |
750 | res->end = end; |
751 | result = 0; |
752 | |
753 | out: |
754 | write_unlock(&resource_lock); |
755 | return result; |
756 | } |
757 | EXPORT_SYMBOL(adjust_resource); |
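
/*
 * Example (illustrative; the base address and sizes are assumptions): growing
 * a window that is already in the tree, say 0xc0000000-0xc00fffff, from
 * 1 MiB to 2 MiB in place.  The call fails with -EBUSY if the new range
 * would collide with a sibling or would no longer cover one of the children.
 *
 *	if (adjust_resource(win, 0xc0000000, 0x200000))
 *		pr_warn("cannot grow window %pR\n", win);
 */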
758 | |
759 | static void __init __reserve_region_with_split(struct resource *root, |
760 | resource_size_t start, resource_size_t end, |
761 | const char *name) |
762 | { |
763 | struct resource *parent = root; |
764 | struct resource *conflict; |
765 | struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC); |
766 | struct resource *next_res = NULL; |
767 | |
768 | if (!res) |
769 | return; |
770 | |
771 | res->name = name; |
772 | res->start = start; |
773 | res->end = end; |
774 | res->flags = IORESOURCE_BUSY; |
775 | |
776 | while (1) { |
777 | |
778 | conflict = __request_resource(parent, res); |
779 | if (!conflict) { |
780 | if (!next_res) |
781 | break; |
782 | res = next_res; |
783 | next_res = NULL; |
784 | continue; |
785 | } |
786 | |
787 | /* conflict covers the whole area */ |
788 | if (conflict->start <= res->start && |
789 | conflict->end >= res->end) { |
790 | kfree(res); |
791 | WARN_ON(next_res); |
792 | break; |
793 | } |
794 | |
795 | /* failed, split and try again */ |
796 | if (conflict->start > res->start) { |
797 | end = res->end; |
798 | res->end = conflict->start - 1; |
799 | if (conflict->end < end) { |
800 | next_res = kzalloc(sizeof(*next_res), |
801 | GFP_ATOMIC); |
802 | if (!next_res) { |
803 | kfree(res); |
804 | break; |
805 | } |
806 | next_res->name = name; |
807 | next_res->start = conflict->end + 1; |
808 | next_res->end = end; |
809 | next_res->flags = IORESOURCE_BUSY; |
810 | } |
811 | } else { |
812 | res->start = conflict->end + 1; |
813 | } |
814 | } |
815 | |
816 | } |
817 | |
818 | void __init reserve_region_with_split(struct resource *root, |
819 | resource_size_t start, resource_size_t end, |
820 | const char *name) |
821 | { |
822 | int abort = 0; |
823 | |
824 | write_lock(&resource_lock); |
825 | if (root->start > start || root->end < end) { |
826 | pr_err("requested range [0x%llx-0x%llx] not in root %pr\n", |
827 | (unsigned long long)start, (unsigned long long)end, |
828 | root); |
829 | if (start > root->end || end < root->start) |
830 | abort = 1; |
831 | else { |
832 | if (end > root->end) |
833 | end = root->end; |
834 | if (start < root->start) |
835 | start = root->start; |
836 | pr_err("fixing request to [0x%llx-0x%llx]\n", |
837 | (unsigned long long)start, |
838 | (unsigned long long)end); |
839 | } |
840 | dump_stack(); |
841 | } |
842 | if (!abort) |
843 | __reserve_region_with_split(root, start, end, name); |
844 | write_unlock(&resource_lock); |
845 | } |
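
/*
 * Example (a boot-time sketch with made-up addresses): reserving a range that
 * may partially overlap entries already in the tree.  Any overlapping piece
 * is skipped and the remaining pieces are inserted as separate "bootmem"
 * entries, each marked IORESOURCE_BUSY.
 *
 *	reserve_region_with_split(&iomem_resource, 0x1000000, 0x1ffffff,
 *				  "bootmem");
 */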
846 | |
847 | /** |
848 | * resource_alignment - calculate resource's alignment |
849 | * @res: resource pointer |
850 | * |
851 | * Returns alignment on success, 0 (invalid alignment) on failure. |
852 | */ |
853 | resource_size_t resource_alignment(struct resource *res) |
854 | { |
855 | switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { |
856 | case IORESOURCE_SIZEALIGN: |
857 | return resource_size(res); |
858 | case IORESOURCE_STARTALIGN: |
859 | return res->start; |
860 | default: |
861 | return 0; |
862 | } |
863 | } |
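
/*
 * Worked example (illustrative values): a resource flagged
 * IORESOURCE_SIZEALIGN spanning 0x100-0x1ff yields an alignment of 0x100
 * (its size); one flagged IORESOURCE_STARTALIGN starting at 0x4000 yields
 * 0x4000; with neither flag set the result is 0, i.e. no valid alignment.
 */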
864 | |
865 | /* |
866 | * This is compatibility stuff for IO resources. |
867 | * |
868 | * Note how this, unlike the above, knows about |
869 | * the IO flag meanings (busy etc). |
870 | * |
871 | * request_region creates a new busy region. |
872 | * |
873 | * check_region returns non-zero if the area is already busy. |
874 | * |
875 | * release_region releases a matching busy region. |
876 | */ |
877 | |
878 | static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait); |
879 | |
880 | /** |
881 | * __request_region - create a new busy resource region |
882 | * @parent: parent resource descriptor |
883 | * @start: resource start address |
884 | * @n: resource region size |
885 | * @name: reserving caller's ID string |
886 | * @flags: IO resource flags |
887 | */ |
888 | struct resource * __request_region(struct resource *parent, |
889 | resource_size_t start, resource_size_t n, |
890 | const char *name, int flags) |
891 | { |
892 | DECLARE_WAITQUEUE(wait, current); |
893 | struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); |
894 | |
895 | if (!res) |
896 | return NULL; |
897 | |
898 | res->name = name; |
899 | res->start = start; |
900 | res->end = start + n - 1; |
901 | res->flags = IORESOURCE_BUSY; |
902 | res->flags |= flags; |
903 | |
904 | write_lock(&resource_lock); |
905 | |
906 | for (;;) { |
907 | struct resource *conflict; |
908 | |
909 | conflict = __request_resource(parent, res); |
910 | if (!conflict) |
911 | break; |
912 | if (conflict != parent) { |
913 | parent = conflict; |
914 | if (!(conflict->flags & IORESOURCE_BUSY)) |
915 | continue; |
916 | } |
917 | if (conflict->flags & flags & IORESOURCE_MUXED) { |
918 | add_wait_queue(&muxed_resource_wait, &wait); |
919 | write_unlock(&resource_lock); |
920 | set_current_state(TASK_UNINTERRUPTIBLE); |
921 | schedule(); |
922 | remove_wait_queue(&muxed_resource_wait, &wait); |
923 | write_lock(&resource_lock); |
924 | continue; |
925 | } |
926 | /* Uhhuh, that didn't work out.. */ |
927 | kfree(res); |
928 | res = NULL; |
929 | break; |
930 | } |
931 | write_unlock(&resource_lock); |
932 | return res; |
933 | } |
934 | EXPORT_SYMBOL(__request_region); |
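
/*
 * Example (a minimal sketch): drivers normally reach this through the
 * request_region()/request_mem_region() wrappers from <linux/ioport.h>,
 * which pass ioport_resource or iomem_resource as the parent.  The port
 * base, size and "foo" name below are assumptions.
 *
 *	if (!request_region(0x220, 16, "foo"))
 *		return -EBUSY;
 *
 *	(use the ports, then on teardown:)
 *
 *	release_region(0x220, 16);
 */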
935 | |
936 | /** |
937 | * __check_region - check if a resource region is busy or free |
938 | * @parent: parent resource descriptor |
939 | * @start: resource start address |
940 | * @n: resource region size |
941 | * |
942 | * Returns 0 if the region is free at the moment it is checked, |
943 | * returns %-EBUSY if the region is busy. |
944 | * |
945 | * NOTE: |
946 | * This function is deprecated because its use is racy. |
947 | * Even if it returns 0, a subsequent call to request_region() |
948 | * may fail because another driver etc. just allocated the region. |
949 | * Do NOT use it. It will be removed from the kernel. |
950 | */ |
951 | int __check_region(struct resource *parent, resource_size_t start, |
952 | resource_size_t n) |
953 | { |
954 | struct resource * res; |
955 | |
956 | res = __request_region(parent, start, n, "check-region", 0); |
957 | if (!res) |
958 | return -EBUSY; |
959 | |
960 | release_resource(res); |
961 | kfree(res); |
962 | return 0; |
963 | } |
964 | EXPORT_SYMBOL(__check_region); |
965 | |
966 | /** |
967 | * __release_region - release a previously reserved resource region |
968 | * @parent: parent resource descriptor |
969 | * @start: resource start address |
970 | * @n: resource region size |
971 | * |
972 | * The described resource region must match a currently busy region. |
973 | */ |
974 | void __release_region(struct resource *parent, resource_size_t start, |
975 | resource_size_t n) |
976 | { |
977 | struct resource **p; |
978 | resource_size_t end; |
979 | |
980 | p = &parent->child; |
981 | end = start + n - 1; |
982 | |
983 | write_lock(&resource_lock); |
984 | |
985 | for (;;) { |
986 | struct resource *res = *p; |
987 | |
988 | if (!res) |
989 | break; |
990 | if (res->start <= start && res->end >= end) { |
991 | if (!(res->flags & IORESOURCE_BUSY)) { |
992 | p = &res->child; |
993 | continue; |
994 | } |
995 | if (res->start != start || res->end != end) |
996 | break; |
997 | *p = res->sibling; |
998 | write_unlock(&resource_lock); |
999 | if (res->flags & IORESOURCE_MUXED) |
1000 | wake_up(&muxed_resource_wait); |
1001 | kfree(res); |
1002 | return; |
1003 | } |
1004 | p = &res->sibling; |
1005 | } |
1006 | |
1007 | write_unlock(&resource_lock); |
1008 | |
1009 | printk(KERN_WARNING "Trying to free nonexistent resource " |
1010 | "<%016llx-%016llx>\n", (unsigned long long)start, |
1011 | (unsigned long long)end); |
1012 | } |
1013 | EXPORT_SYMBOL(__release_region); |
1014 | |
1015 | /* |
1016 | * Managed region resource |
1017 | */ |
1018 | struct region_devres { |
1019 | struct resource *parent; |
1020 | resource_size_t start; |
1021 | resource_size_t n; |
1022 | }; |
1023 | |
1024 | static void devm_region_release(struct device *dev, void *res) |
1025 | { |
1026 | struct region_devres *this = res; |
1027 | |
1028 | __release_region(this->parent, this->start, this->n); |
1029 | } |
1030 | |
1031 | static int devm_region_match(struct device *dev, void *res, void *match_data) |
1032 | { |
1033 | struct region_devres *this = res, *match = match_data; |
1034 | |
1035 | return this->parent == match->parent && |
1036 | this->start == match->start && this->n == match->n; |
1037 | } |
1038 | |
1039 | struct resource * __devm_request_region(struct device *dev, |
1040 | struct resource *parent, resource_size_t start, |
1041 | resource_size_t n, const char *name) |
1042 | { |
1043 | struct region_devres *dr = NULL; |
1044 | struct resource *res; |
1045 | |
1046 | dr = devres_alloc(devm_region_release, sizeof(struct region_devres), |
1047 | GFP_KERNEL); |
1048 | if (!dr) |
1049 | return NULL; |
1050 | |
1051 | dr->parent = parent; |
1052 | dr->start = start; |
1053 | dr->n = n; |
1054 | |
1055 | res = __request_region(parent, start, n, name, 0); |
1056 | if (res) |
1057 | devres_add(dev, dr); |
1058 | else |
1059 | devres_free(dr); |
1060 | |
1061 | return res; |
1062 | } |
1063 | EXPORT_SYMBOL(__devm_request_region); |
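
/*
 * Example (illustrative; the platform device and "foo" name are
 * assumptions): the devm_request_mem_region() wrapper from <linux/ioport.h>
 * resolves to __devm_request_region() with iomem_resource as the parent.
 * The region is released automatically when the device is unbound, so no
 * explicit release is needed on the error or remove paths.
 *
 *	if (!devm_request_mem_region(&pdev->dev, res->start,
 *				     resource_size(res), "foo"))
 *		return -EBUSY;
 */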
1064 | |
1065 | void __devm_release_region(struct device *dev, struct resource *parent, |
1066 | resource_size_t start, resource_size_t n) |
1067 | { |
1068 | struct region_devres match_data = { parent, start, n }; |
1069 | |
1070 | __release_region(parent, start, n); |
1071 | WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match, |
1072 | &match_data)); |
1073 | } |
1074 | EXPORT_SYMBOL(__devm_release_region); |
1075 | |
1076 | /* |
1077 | * Called from init/main.c to reserve IO ports. |
1078 | */ |
1079 | #define MAXRESERVE 4 |
1080 | static int __init reserve_setup(char *str) |
1081 | { |
1082 | static int reserved; |
1083 | static struct resource reserve[MAXRESERVE]; |
1084 | |
1085 | for (;;) { |
1086 | unsigned int io_start, io_num; |
1087 | int x = reserved; |
1088 | |
1089 | if (get_option (&str, &io_start) != 2) |
1090 | break; |
1091 | if (get_option (&str, &io_num) == 0) |
1092 | break; |
1093 | if (x < MAXRESERVE) { |
1094 | struct resource *res = reserve + x; |
1095 | res->name = "reserved"; |
1096 | res->start = io_start; |
1097 | res->end = io_start + io_num - 1; |
1098 | res->flags = IORESOURCE_BUSY; |
1099 | res->child = NULL; |
1100 | if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0) |
1101 | reserved = x+1; |
1102 | } |
1103 | } |
1104 | return 1; |
1105 | } |
1106 | |
1107 | __setup("reserve=", reserve_setup); |
1108 | |
1109 | /* |
1110 | * Check whether the requested addr and size span more than one slot in the |
1111 | * iomem resource tree. |
1112 | */ |
1113 | int iomem_map_sanity_check(resource_size_t addr, unsigned long size) |
1114 | { |
1115 | struct resource *p = &iomem_resource; |
1116 | int err = 0; |
1117 | loff_t l; |
1118 | |
1119 | read_lock(&resource_lock); |
1120 | for (p = p->child; p ; p = r_next(NULL, p, &l)) { |
1121 | /* |
1122 | * We can probably skip the resources without |
1123 | * IORESOURCE_IO attribute? |
1124 | */ |
1125 | if (p->start >= addr + size) |
1126 | continue; |
1127 | if (p->end < addr) |
1128 | continue; |
1129 | if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && |
1130 | PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1)) |
1131 | continue; |
1132 | /* |
1133 | * if a resource is "BUSY", it's not a hardware resource |
1134 | * but a driver mapping of such a resource; we don't want |
1135 | * to warn for those; some drivers legitimately map only |
1136 | * partial hardware resources. (example: vesafb) |
1137 | */ |
1138 | if (p->flags & IORESOURCE_BUSY) |
1139 | continue; |
1140 | |
1141 | printk(KERN_WARNING "resource map sanity check conflict: " |
1142 | "0x%llx 0x%llx 0x%llx 0x%llx %s\n", |
1143 | (unsigned long long)addr, |
1144 | (unsigned long long)(addr + size - 1), |
1145 | (unsigned long long)p->start, |
1146 | (unsigned long long)p->end, |
1147 | p->name); |
1148 | err = -1; |
1149 | break; |
1150 | } |
1151 | read_unlock(&resource_lock); |
1152 | |
1153 | return err; |
1154 | } |
1155 | |
1156 | #ifdef CONFIG_STRICT_DEVMEM |
1157 | static int strict_iomem_checks = 1; |
1158 | #else |
1159 | static int strict_iomem_checks; |
1160 | #endif |
1161 | |
1162 | /* |
1163 | * Check if an address is reserved in the iomem resource tree; |
1164 | * returns 1 if reserved, 0 if not. |
1165 | */ |
1166 | int iomem_is_exclusive(u64 addr) |
1167 | { |
1168 | struct resource *p = &iomem_resource; |
1169 | int err = 0; |
1170 | loff_t l; |
1171 | int size = PAGE_SIZE; |
1172 | |
1173 | if (!strict_iomem_checks) |
1174 | return 0; |
1175 | |
1176 | addr = addr & PAGE_MASK; |
1177 | |
1178 | read_lock(&resource_lock); |
1179 | for (p = p->child; p ; p = r_next(NULL, p, &l)) { |
1180 | /* |
1181 | * We can probably skip the resources without |
1182 | * IORESOURCE_IO attribute? |
1183 | */ |
1184 | if (p->start >= addr + size) |
1185 | break; |
1186 | if (p->end < addr) |
1187 | continue; |
1188 | if (p->flags & IORESOURCE_BUSY && |
1189 | p->flags & IORESOURCE_EXCLUSIVE) { |
1190 | err = 1; |
1191 | break; |
1192 | } |
1193 | } |
1194 | read_unlock(&resource_lock); |
1195 | |
1196 | return err; |
1197 | } |
1198 | |
1199 | static int __init strict_iomem(char *str) |
1200 | { |
1201 | if (strstr(str, "relaxed")) |
1202 | strict_iomem_checks = 0; |
1203 | if (strstr(str, "strict")) |
1204 | strict_iomem_checks = 1; |
1205 | return 1; |
1206 | } |
1207 | |
1208 | __setup("iomem=", strict_iomem); |
1209 |