/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name = "PCI IO",
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.flags = IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name = "PCI mem",
	.start = 0,
	.end = -1,
	.flags = IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
			resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

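/*
 * r_next() walks the resource tree in pre-order: descend to the first child
 * if there is one, otherwise move to the next sibling, backing out of fully
 * visited subtrees.  It is shared by the /proc seq_file iterators and the
 * sanity-check walkers further down.
 */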
static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

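	/*
	 * Work out how deeply this entry is nested below the root; each
	 * level indents the output by two spaces.
	 */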
	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start = r_start,
	.next = r_next,
	.stop = r_stop,
	.show = r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

static const struct file_operations proc_ioports_operations = {
	.open = ioports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations proc_iomem_operations = {
	.open = iomem_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

/* Return the conflict entry if you can't request it */
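/*
 * The children of every resource are kept in a singly linked sibling list,
 * sorted by address and non-overlapping.  __request_resource() scans that
 * list and links @new into the first gap that can hold it completely; it
 * returns NULL on success, the overlapping sibling on conflict, or @root
 * itself if @new does not fit inside the root range.
 */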
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *tmp, **p;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			*p = tmp->sibling;
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, or the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
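
/*
 * Typical use, as a minimal sketch (the "foo" device and addresses below are
 * hypothetical):
 *
 *	static struct resource foo_res = {
 *		.name  = "foo-ctrl",
 *		.start = 0xfed00000,
 *		.end   = 0xfed00fff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_res))
 *		return -EBUSY;
 *
 * and release_resource(&foo_res) once the region is no longer needed.
 */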

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Finds the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end, res->flags and "name".
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res, char *name)
{
	resource_size_t start, end;
	struct resource *p;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		/* system ram is just marked as IORESOURCE_MEM */
		if (p->flags != res->flags)
			continue;
		if (name && strcmp(p->name, name))
			continue;
		if (p->start > end) {
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;
	}
	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}

/*
 * This function calls the callback against all memory ranges of "System RAM"
 * which are marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 * Now, this function is only for "System RAM".
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct resource res;
	unsigned long pfn, end_pfn;
	u64 orig_end;
	int ret = -1;

	res.start = (u64) start_pfn << PAGE_SHIFT;
	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	orig_end = res.end;
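	/*
	 * pfn is rounded up and end_pfn rounded down, so partial pages at
	 * either end of a "System RAM" range are never passed to func.
	 */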
	while ((res.start < res.end) &&
		(find_next_system_ram(&res, "System RAM") >= 0)) {
		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end_pfn = (res.end + 1) >> PAGE_SHIFT;
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}

#endif

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}
/*
 * This generic page_is_ram() returns true if the specified address is
 * registered as "System RAM" in the iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

static bool resource_contains(struct resource *res1, struct resource *res2)
{
	return res1->start <= res2->start && res1->end >= res2->end;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
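/*
 * The search walks the gaps between @root's existing children: each gap is
 * clipped to the [min, max] constraint and the alignment callback picks a
 * candidate start within it.  When @old is non-NULL its own range is treated
 * as free, so an existing resource can be grown or moved in place.
 */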
static int __find_resource(struct resource *root, struct resource *old,
			   struct resource *new,
			   resource_size_t size,
			   struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.flags = new->flags;
	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail = *new;
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		if (avail.start >= tmp.start) {
			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
					size, constraint->align);
			alloc.end = alloc.start + size - 1;
			if (resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}
		if (!this)
			break;
		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 * The resource will be relocated if the new size cannot be accommodated at the
 * current location.
 *
 * @root: root resource descriptor
 * @old: resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
int reallocate_resource(struct resource *root, struct resource *old,
			resource_size_t newsize,
			struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	if ((err = __find_resource(root, old, &new, newsize, constraint)))
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}


/**
 * allocate_resource - allocate an empty slot in the resource tree given range & alignment.
 * The resource will be reallocated with a new size if it was already allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate within
 * @max: maximum boundary to allocate within
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/* resource is already allocated, try reallocating with
		   the new constraints */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

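	/*
	 * All siblings from @first through @next fit inside @new: re-parent
	 * them under @new and splice @new into their old position.
	 */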
	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or the conflicting resource if the resource can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	if ((start < parent->start) || (end > parent->end))
		goto out;

	for (tmp = res->child; tmp; tmp = tmp->sibling) {
		if ((tmp->start < start) || (tmp->end > end))
			goto out;
	}

	if (res->sibling && (res->sibling->start <= end))
		goto out;

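	/* The preceding sibling, if any, must end before the new start. */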
	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

	res->start = start;
	res->end = end;
	result = 0;

out:
	write_unlock(&resource_lock);
	return result;
}

static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;

	conflict = __request_resource(parent, res);
	if (!conflict)
		return;

	/* failed, split and try again */
	kfree(res);

	/* conflict covered whole area */
	if (conflict->start <= start && conflict->end >= end)
		return;

	if (conflict->start > start)
		__reserve_region_with_split(root, start, conflict->start-1, name);
	if (conflict->end < end)
		__reserve_region_with_split(root, conflict->end+1, end, name);
}

void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	write_lock(&resource_lock);
	__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

EXPORT_SYMBOL(adjust_resource);

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */
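
/*
 * Typical use of the region API, as a minimal sketch (the "foo" name and the
 * port range below are hypothetical):
 *
 *	if (!request_region(0x220, 8, "foo"))
 *		return -EBUSY;
 *	...
 *	release_region(0x220, 8);
 *
 * request_mem_region()/release_mem_region() work the same way for memory
 * ranges under iomem_resource.
 */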

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;
	res->flags = IORESOURCE_BUSY;
	res->flags |= flags;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			parent = conflict;
			if (!(conflict->flags & IORESOURCE_BUSY))
				continue;
		}
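		/*
		 * A muxed region may be shared between drivers: wait until
		 * the current holder releases it, then retry the request.
		 */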
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		kfree(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);

/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it. It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
		   resource_size_t n)
{
	struct resource *res;

	res = __request_region(parent, start, n, "check-region", 0);
	if (!res)
		return -EBUSY;

	release_resource(res);
	kfree(res);
	return 0;
}
EXPORT_SYMBOL(__check_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

/*
 * Managed region resource
 */
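/*
 * Regions requested through __devm_request_region() (normally via the
 * devm_request_region()/devm_request_mem_region() wrappers) are released
 * automatically by devres when the owning device is unbound.
 */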
struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Called from init/main.c to reserve IO ports.
 */
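/*
 * Format: reserve=<start>,<size>[,<start>,<size>...]; each range whose start
 * is at or above 0x10000 is reserved in iomem_resource, everything below
 * that in ioport_resource.
 */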
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);

/*
 * Check whether the requested addr and size span more than any single slot in
 * the iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource map sanity check conflict: "
		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       (unsigned long long)p->start,
		       (unsigned long long)p->end,
		       p->name);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * check if an address is reserved in the iomem resource tree
 * returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		if (p->flags & IORESOURCE_BUSY &&
		    p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

__setup("iomem=", strict_iomem);