/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
			resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}
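
/*
 * For illustration: r_show() indents each node two spaces per tree level
 * and pads addresses to four digits for the port tree or eight for the
 * memory tree, so a /proc/iomem excerpt looks like this (addresses and
 * names are made up):
 *
 *	fe000000-fe7fffff : PCI Bus 0000:01
 *	  fe000000-fe03ffff : foo-ctrl
 */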

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *tmp, **p;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			*p = tmp->sibling;
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
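
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * hypothetical driver claiming a fixed MMIO window directly under
 * iomem_resource. The "foo-ctrl" name and the addresses are invented.
 *
 *	static struct resource foo_res = {
 *		.name	= "foo-ctrl",
 *		.start	= 0xfebf0000,
 *		.end	= 0xfebf0fff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_res))
 *		return -EBUSY;	 (another resource overlaps the range)
 */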

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Finds the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end, res->flags and "name".
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res, char *name)
{
	resource_size_t start, end;
	struct resource *p;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		/* system ram is just marked as IORESOURCE_MEM */
		if (p->flags != res->flags)
			continue;
		if (name && strcmp(p->name, name))
			continue;
		if (p->start > end) {
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;
	}
	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}

/*
 * This function calls the callback against all memory ranges of "System RAM"
 * which are marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 * For now, this function is only for "System RAM".
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct resource res;
	unsigned long pfn, end_pfn;
	u64 orig_end;
	int ret = -1;

	res.start = (u64) start_pfn << PAGE_SHIFT;
	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) &&
		(find_next_system_ram(&res, "System RAM") >= 0)) {
		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end_pfn = (res.end + 1) >> PAGE_SHIFT;
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}
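
/*
 * Example usage (sketch, with invented names): counting "System RAM"
 * pages with a trivial callback. max_pfn is assumed to be the usual
 * globally visible highest page frame number.
 *
 *	static int count_ram(unsigned long pfn, unsigned long nr_pages,
 *			     void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;	 (a non-zero return stops the walk)
 *	}
 *
 *	unsigned long nr_ram = 0;
 *	walk_system_ram_range(0, max_pfn, &nr_ram, count_ram);
 */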

#endif

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}
/*
 * This generic page_is_ram() returns true if the specified address is
 * registered as "System RAM" in the iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

static bool resource_contains(struct resource *res1, struct resource *res2)
{
	return res1->start <= res2->start && res1->end >= res2->end;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
			   struct resource *new,
			   resource_size_t size,
			   struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.flags = new->flags;
	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail = *new;
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		if (avail.start >= tmp.start) {
			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
					size, constraint->align);
			alloc.end = alloc.start + size - 1;
			if (resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be satisfied at
 *	the current location.
 *
 * @root: root resource descriptor
 * @old: resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
int reallocate_resource(struct resource *root, struct resource *old,
			resource_size_t newsize,
			struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	if ((err = __find_resource(root, old, &new, newsize, constraint)))
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}


/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already
 *	allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/* resource is already allocated, try reallocating with
		   the new constraints */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
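
/*
 * Example usage (sketch, invented resource and bounds): ask the allocator
 * for a free 4 KiB slot below 4 GiB with 4 KiB alignment. Passing a NULL
 * alignf falls back to simple_align_resource(), which just proposes the
 * start of each candidate gap.
 *
 *	static struct resource foo_window = {
 *		.name	= "foo-window",
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	err = allocate_resource(&iomem_resource, &foo_window, 0x1000,
 *				0, 0xffffffff, 0x1000, NULL, NULL);
 */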

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, the conflicting resource if the resource
 * can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
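
/*
 * Example usage (sketch, invented range): unlike request_resource(),
 * insert_resource() tolerates entries that already sit wholly inside the
 * new range; they are re-parented under it rather than reported as
 * conflicts.
 *
 *	static struct resource rom_res = {
 *		.name	= "System ROM",
 *		.start	= 0xf0000,
 *		.end	= 0xfffff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &rom_res))
 *		pr_warn("System ROM range busy\n");
 */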

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk(KERN_INFO "Expanded resource %s due to conflict with %s\n",
		       new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	if ((start < parent->start) || (end > parent->end))
		goto out;

	for (tmp = res->child; tmp; tmp = tmp->sibling) {
		if ((tmp->start < start) || (tmp->end > end))
			goto out;
	}

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

	res->start = start;
	res->end = end;
	result = 0;

out:
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
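
/*
 * Example usage (sketch, reusing the invented foo_res from the earlier
 * example): grow a region in place. The call fails with -EBUSY if the new
 * extent would orphan a child or collide with a sibling or the parent's
 * bounds.
 *
 *	if (adjust_resource(&foo_res, foo_res.start,
 *			    2 * resource_size(&foo_res)))
 *		printk(KERN_WARNING "cannot grow foo-ctrl window\n");
 */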

static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;

	conflict = __request_resource(parent, res);
	if (!conflict)
		return;

	/* failed, split and try again */
	kfree(res);

	/* conflict covered whole area */
	if (conflict->start <= start && conflict->end >= end)
		return;

	if (conflict->start > start)
		__reserve_region_with_split(root, start, conflict->start-1, name);
	if (conflict->end < end)
		__reserve_region_with_split(root, conflict->end+1, end, name);
}

void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	write_lock(&resource_lock);
	__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;
	res->flags = IORESOURCE_BUSY;
	res->flags |= flags;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			parent = conflict;
			if (!(conflict->flags & IORESOURCE_BUSY))
				continue;
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		kfree(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);
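
/*
 * Example usage (sketch, invented addresses): drivers normally reach this
 * primitive through the request_region()/request_mem_region() wrappers
 * from <linux/ioport.h>.
 *
 *	if (!request_mem_region(0xfebf0000, 0x1000, "foo"))
 *		return -EBUSY;
 *	...
 *	release_mem_region(0xfebf0000, 0x1000);
 */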

/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it.  It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
			resource_size_t n)
{
	struct resource *res;

	res = __request_region(parent, start, n, "check-region", 0);
	if (!res)
		return -EBUSY;

	release_resource(res);
	kfree(res);
	return 0;
}
EXPORT_SYMBOL(__check_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
			resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

/*
 * Managed region resource
 */
struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);
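
/*
 * Example usage (sketch): the managed variant is normally reached through
 * the devm_request_mem_region() wrapper from <linux/ioport.h>; the region
 * is released automatically when the device is unbound, so no explicit
 * release_mem_region() is needed. Here res is assumed to be the device's
 * platform MMIO resource and "foo" is an invented name.
 *
 *	if (!devm_request_mem_region(&pdev->dev, res->start,
 *				     resource_size(res), "foo"))
 *		return -EBUSY;
 */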

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);
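
/*
 * Example: booting with "reserve=0x300,32" marks I/O ports 0x300-0x31f
 * busy under ioport_resource; a start address of 0x10000 or above is
 * reserved under iomem_resource instead.
 */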

/*
 * Check whether the requested addr and size span more than any single
 * slot in the iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource map sanity check conflict: "
		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       (unsigned long long)p->start,
		       (unsigned long long)p->end,
		       p->name);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is reserved in the iomem resource tree.
 * Returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		if (p->flags & IORESOURCE_BUSY &&
		    p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

__setup("iomem=", strict_iomem);