Root/
1 | /* |
2 | * probe.c - PCI detection and setup code |
3 | */ |
4 | |
5 | #include <linux/kernel.h> |
6 | #include <linux/delay.h> |
7 | #include <linux/init.h> |
8 | #include <linux/pci.h> |
9 | #include <linux/slab.h> |
10 | #include <linux/module.h> |
11 | #include <linux/cpumask.h> |
12 | #include <linux/pci-aspm.h> |
13 | #include <asm-generic/pci-bridge.h> |
14 | #include "pci.h" |
15 | |
16 | #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ |
17 | #define CARDBUS_RESERVE_BUSNR 3 |
18 | |
/* Default bus-number aperture: the full 00-ff bus range of a PCI segment. */
struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};
25 | |
26 | /* Ugh. Need to stop exporting this to modules. */ |
27 | LIST_HEAD(pci_root_buses); |
28 | EXPORT_SYMBOL(pci_root_buses); |
29 | |
30 | static LIST_HEAD(pci_domain_busn_res_list); |
31 | |
/*
 * Per-domain bus-number resource.  One entry per PCI domain is kept on
 * pci_domain_busn_res_list so get_pci_domain_busn_res() can hand out a
 * single [bus 00-ff] aperture per domain.
 */
struct pci_domain_busn_res {
	struct list_head list;	/* link on pci_domain_busn_res_list */
	struct resource res;	/* the bus-number aperture itself */
	int domain_nr;		/* PCI domain this entry belongs to */
};
37 | |
38 | static struct resource *get_pci_domain_busn_res(int domain_nr) |
39 | { |
40 | struct pci_domain_busn_res *r; |
41 | |
42 | list_for_each_entry(r, &pci_domain_busn_res_list, list) |
43 | if (r->domain_nr == domain_nr) |
44 | return &r->res; |
45 | |
46 | r = kzalloc(sizeof(*r), GFP_KERNEL); |
47 | if (!r) |
48 | return NULL; |
49 | |
50 | r->domain_nr = domain_nr; |
51 | r->res.start = 0; |
52 | r->res.end = 0xff; |
53 | r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED; |
54 | |
55 | list_add_tail(&r->list, &pci_domain_busn_res_list); |
56 | |
57 | return &r->res; |
58 | } |
59 | |
/* bus_find_device() match callback: accept the first device seen. */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
64 | |
/*
 * Some device drivers need to know whether PCI has been initialized.
 * Basically, we consider PCI uninitialized when there is no device
 * to be found on the pci_bus_type bus.
 */
70 | int no_pci_devices(void) |
71 | { |
72 | struct device *dev; |
73 | int no_devices; |
74 | |
75 | dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything); |
76 | no_devices = (dev == NULL); |
77 | put_device(dev); |
78 | return no_devices; |
79 | } |
80 | EXPORT_SYMBOL(no_pci_devices); |
81 | |
82 | /* |
83 | * PCI Bus Class |
84 | */ |
85 | static void release_pcibus_dev(struct device *dev) |
86 | { |
87 | struct pci_bus *pci_bus = to_pci_bus(dev); |
88 | |
89 | if (pci_bus->bridge) |
90 | put_device(pci_bus->bridge); |
91 | pci_bus_remove_resources(pci_bus); |
92 | pci_release_bus_of_node(pci_bus); |
93 | kfree(pci_bus); |
94 | } |
95 | |
/* sysfs class backing /sys/class/pci_bus entries. */
static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_attrs	= pcibus_dev_attrs,
};
101 | |
/* Register the pci_bus class early (postcore), before buses are scanned. */
static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
107 | |
108 | static u64 pci_size(u64 base, u64 maxbase, u64 mask) |
109 | { |
110 | u64 size = mask & maxbase; /* Find the significant bits */ |
111 | if (!size) |
112 | return 0; |
113 | |
114 | /* Get the lowest of them to find the decode size, and |
115 | from that the extent. */ |
116 | size = (size & ~(size-1)) - 1; |
117 | |
118 | /* base == maxbase can be valid only if the BAR has |
119 | already been programmed with all 1s. */ |
120 | if (base == maxbase && ((base | size) & mask) != mask) |
121 | return 0; |
122 | |
123 | return size; |
124 | } |
125 | |
126 | static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) |
127 | { |
128 | u32 mem_type; |
129 | unsigned long flags; |
130 | |
131 | if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { |
132 | flags = bar & ~PCI_BASE_ADDRESS_IO_MASK; |
133 | flags |= IORESOURCE_IO; |
134 | return flags; |
135 | } |
136 | |
137 | flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; |
138 | flags |= IORESOURCE_MEM; |
139 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
140 | flags |= IORESOURCE_PREFETCH; |
141 | |
142 | mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK; |
143 | switch (mem_type) { |
144 | case PCI_BASE_ADDRESS_MEM_TYPE_32: |
145 | break; |
146 | case PCI_BASE_ADDRESS_MEM_TYPE_1M: |
147 | /* 1M mem BAR treated as 32-bit BAR */ |
148 | break; |
149 | case PCI_BASE_ADDRESS_MEM_TYPE_64: |
150 | flags |= IORESOURCE_MEM_64; |
151 | break; |
152 | default: |
153 | /* mem unknown type treated as 32-bit BAR */ |
154 | break; |
155 | } |
156 | return flags; |
157 | } |
158 | |
/**
 * __pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Sizes the BAR by writing all ones to it, reading the mask back and
 * restoring the original value, then fills in @res with the decoded
 * bus region.  On any failure @res->flags is cleared.
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u16 orig_cmd;
	struct pci_bus_region region;
	bool bar_too_big = false, bar_disabled = false;

	/* ROM BARs keep an enable bit; probe only their address bits. */
	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		pci_write_config_word(dev, PCI_COMMAND,
			orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
	}

	res->name = pci_name(dev);

	/* Sizing sequence: save, write all ones, read mask back, restore. */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		/* Ordinary BAR: decode its type from the register itself. */
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		/* Expansion ROM: preserve the enable bit in the flags. */
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		/* 64-bit BAR: size the upper dword at pos + 4 as well. */
		u64 l64 = l;
		u64 sz64 = sz;
		u64 mask64 = mask | (u64)~0 << 32;

		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		/* A >4GB BAR cannot be represented on 32-bit resource_size_t. */
		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
			bar_too_big = true;
			goto fail;
		}

		if ((sizeof(resource_size_t) < 8) && l) {
			/* Address above 32-bit boundary; disable the BAR */
			pci_write_config_dword(dev, pos, 0);
			pci_write_config_dword(dev, pos + 4, 0);
			region.start = 0;
			region.end = sz64;
			pcibios_bus_to_resource(dev, res, &region);
			bar_disabled = true;
		} else {
			region.start = l64;
			region.end = l64 + sz64;
			pcibios_bus_to_resource(dev, res, &region);
		}
	} else {
		/* 32-bit BAR. */
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		region.start = l;
		region.end = l + sz;
		pcibios_bus_to_resource(dev, res, &region);
	}

	goto out;


fail:
	res->flags = 0;
out:
	/* Re-enable decoding before any printks. */
	if (!dev->mmio_always_on)
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (bar_too_big)
		dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
	if (res->flags && !bar_disabled)
		dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
287 | |
288 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) |
289 | { |
290 | unsigned int pos, reg; |
291 | |
292 | for (pos = 0; pos < howmany; pos++) { |
293 | struct resource *res = &dev->resource[pos]; |
294 | reg = PCI_BASE_ADDRESS_0 + (pos << 2); |
295 | pos += __pci_read_base(dev, pci_bar_unknown, res, reg); |
296 | } |
297 | |
298 | if (rom) { |
299 | struct resource *res = &dev->resource[PCI_ROM_RESOURCE]; |
300 | dev->rom_base_reg = rom; |
301 | res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH | |
302 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE | |
303 | IORESOURCE_SIZEALIGN; |
304 | __pci_read_base(dev, pci_bar_mem32, res, rom); |
305 | } |
306 | } |
307 | |
/* Read the bridge's I/O window registers into child resource 0. */
static void __devinit pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	/* The low registers hold bits 15:12 (or 15:10 for 1K) shifted down. */
	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	/* A 32-bit window carries its upper 16 address bits separately. */
	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	/* base > limit means the window is disabled. */
	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
347 | |
/* Read the bridge's non-prefetchable memory window into child resource 1. */
static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	/* The registers hold address bits 31:20; granularity is 1MB. */
	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	/* base > limit means the window is disabled. */
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;	/* limit covers a full 1MB block */
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
369 | |
/* Read the bridge's prefetchable memory window into child resource 2. */
static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	/* The low registers hold address bits 31:20; granularity is 1MB. */
	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	/* A 64-bit window carries the upper 32 address bits separately. */
	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((unsigned long) mem_base_hi) << 32;
			limit |= ((unsigned long) mem_limit_hi) << 32;
#else
			/* 32-bit kernel cannot represent a >4GB window. */
			if (mem_base_hi || mem_limit_hi) {
				dev_err(&dev->dev, "can't handle 64-bit "
					"address space for bridge\n");
				return;
			}
#endif
		}
	}
	/* base > limit means the window is disabled. */
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
			IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;	/* limit covers a full 1MB block */
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
419 | |
/*
 * Read all three bridge windows (I/O, memory, prefetchable memory) of
 * the bridge leading to @child, pointing the child's window resources
 * at the bridge device's own bridge resources.  For transparent
 * (subtractive-decode) bridges, the parent's resources are additionally
 * attached to the child.
 */
void __devinit pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	/* Alias the child's window resources to the bridge's resources. */
	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	/* A transparent bridge also forwards everything its parent decodes. */
	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
453 | |
454 | static struct pci_bus * pci_alloc_bus(void) |
455 | { |
456 | struct pci_bus *b; |
457 | |
458 | b = kzalloc(sizeof(*b), GFP_KERNEL); |
459 | if (b) { |
460 | INIT_LIST_HEAD(&b->node); |
461 | INIT_LIST_HEAD(&b->children); |
462 | INIT_LIST_HEAD(&b->devices); |
463 | INIT_LIST_HEAD(&b->slots); |
464 | INIT_LIST_HEAD(&b->resources); |
465 | b->max_bus_speed = PCI_SPEED_UNKNOWN; |
466 | b->cur_bus_speed = PCI_SPEED_UNKNOWN; |
467 | } |
468 | return b; |
469 | } |
470 | |
471 | static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b) |
472 | { |
473 | struct pci_host_bridge *bridge; |
474 | |
475 | bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); |
476 | if (bridge) { |
477 | INIT_LIST_HEAD(&bridge->windows); |
478 | bridge->bus = b; |
479 | } |
480 | |
481 | return bridge; |
482 | } |
483 | |
/* Bus speeds indexed by the 4-bit mode field of the PCI-X status register. */
static unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};
502 | |
/* Link speeds indexed by the 4-bit speed field of PCIe LNKCAP/LNKSTA. */
static unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
521 | |
/* Cache the current link speed from a PCIe Link Status register value. */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & 0xf];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
527 | |
/* AGP rates indexed by the table slot computed in agp_speed(). */
static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};
535 | |
536 | static enum pci_bus_speed agp_speed(int agp3, int agpstat) |
537 | { |
538 | int index = 0; |
539 | |
540 | if (agpstat & 4) |
541 | index = 3; |
542 | else if (agpstat & 2) |
543 | index = 2; |
544 | else if (agpstat & 1) |
545 | index = 1; |
546 | else |
547 | goto out; |
548 | |
549 | if (agp3) { |
550 | index += 2; |
551 | if (index == 5) |
552 | index = 0; |
553 | } |
554 | |
555 | out: |
556 | return agp_speeds[index]; |
557 | } |
558 | |
559 | |
/*
 * Fill in bus->max_bus_speed and bus->cur_bus_speed from whichever
 * capability the bridge exposes: AGP, PCI-X (returns early) or PCIe.
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		/* bit 3 selects the AGP3 encoding, bits 0-2 the rate */
		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;
		/* PCI-X bridge secondary status, offset 2 in the capability */
		pci_read_config_word(bridge, pos + 2, &status);

		if (status & 0x8000) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & 0x4000) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & 0x0002) {
			if (((status >> 12) & 0x3) == 2) {
				max = PCI_SPEED_133MHz_PCIX_ECC;
			} else {
				max = PCI_SPEED_133MHz_PCIX;
			}
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf];

		return;
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	if (pos) {
		u32 linkcap;
		u16 linksta;

		pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];

		pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
616 | |
617 | |
/*
 * Allocate a child bus under @parent, behind @bridge (may be NULL for
 * a bus with no visible bridge device), with bus number @busnr.  The
 * bus device is initialized but NOT registered here; that happens
 * later in pci_bus_add_devices().  Returns NULL on allocation failure.
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet. This device will get
	 * registered later in pci_bus_add_devices()
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;	/* provisional; fixed up after scanning */

	if (!bridge)
		return child;

	/* Wire the child bus to its bridge device. */
	child->self = bridge;
	child->bridge = get_device(&bridge->dev);	/* dropped in release_pcibus_dev() */
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

	return child;
}
668 | |
669 | struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr) |
670 | { |
671 | struct pci_bus *child; |
672 | |
673 | child = pci_alloc_child_bus(parent, dev, busnr); |
674 | if (child) { |
675 | down_write(&pci_bus_sem); |
676 | list_add_tail(&child->node, &parent->children); |
677 | up_write(&pci_bus_sem); |
678 | } |
679 | return child; |
680 | } |
681 | |
682 | static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max) |
683 | { |
684 | struct pci_bus *parent = child->parent; |
685 | |
686 | /* Attempts to fix that up are really dangerous unless |
687 | we're going to re-assign all bus numbers. */ |
688 | if (!pcibios_assign_all_busses()) |
689 | return; |
690 | |
691 | while (parent->parent && parent->busn_res.end < max) { |
692 | parent->busn_res.end = max; |
693 | pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max); |
694 | parent = parent->parent; |
695 | } |
696 | } |
697 | |
698 | /* |
699 | * If it's a bridge, configure it and scan the bus behind it. |
700 | * For CardBus bridges, we don't scan behind as the devices will |
701 | * be handled by the bridge driver itself. |
702 | * |
703 | * We need to process bridges in two passes -- first we scan those |
704 | * already configured by the BIOS and after we are done with all of |
705 | * them, we proceed to assigning numbers to the remaining buses in |
706 | * order to avoid overlaps between old and new bus numbers. |
707 | */ |
/*
 * Scan the bus behind bridge @dev on @bus.  @max is the highest bus
 * number seen so far; the (possibly raised) value is returned.
 * Pass 0 handles firmware-configured bridges; pass 1 assigns numbers
 * to the rest (and to CardBus bridges).
 */
int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/* primary/secondary/subordinate share one config dword */
	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	/* Quirk: some bridges hard-wire the primary bus register to 0. */
	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number)) {
		dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * If we already got to this bus through a different bridge,
		 * don't re-add it. This can happen with the i450NX chipset.
		 *
		 * However, we continue to descend down the hierarchy and
		 * scan remaining child buses.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->busn_res.end > max)
			max = child->busn_res.end;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, ++max);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max, 0xff);
		}
		/* Rebuild the primary/secondary/subordinate dword. */
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary) << 0)
		      | ((unsigned int)(child->busn_res.start) << 8)
		      | ((unsigned int)(child->busn_res.end) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			/*
			 * Adjust subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the bios
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				/* Stop if a reserved number is already taken. */
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				/* Does any ancestor window end inside the reservation? */
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s "
				"hidden behind%s bridge %s %pR\n",
				&child->busn_res,
				(bus->number > child->busn_res.end &&
				 bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				dev_name(&bus->dev),
				&bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	/* Restore the bridge control register saved at entry. */
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}
908 | |
909 | /* |
910 | * Read interrupt line and base address registers. |
911 | * The architecture-dependent code can tweak these, of course. |
912 | */ |
913 | static void pci_read_irq(struct pci_dev *dev) |
914 | { |
915 | unsigned char irq; |
916 | |
917 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq); |
918 | dev->pin = irq; |
919 | if (irq) |
920 | pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); |
921 | dev->irq = irq; |
922 | } |
923 | |
924 | void set_pcie_port_type(struct pci_dev *pdev) |
925 | { |
926 | int pos; |
927 | u16 reg16; |
928 | |
929 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
930 | if (!pos) |
931 | return; |
932 | pdev->is_pcie = 1; |
933 | pdev->pcie_cap = pos; |
934 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); |
935 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; |
936 | pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16); |
937 | pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; |
938 | } |
939 | |
940 | void set_pcie_hotplug_bridge(struct pci_dev *pdev) |
941 | { |
942 | int pos; |
943 | u16 reg16; |
944 | u32 reg32; |
945 | |
946 | pos = pci_pcie_cap(pdev); |
947 | if (!pos) |
948 | return; |
949 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); |
950 | if (!(reg16 & PCI_EXP_FLAGS_SLOT)) |
951 | return; |
952 | pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, ®32); |
953 | if (reg32 & PCI_EXP_SLTCAP_HPC) |
954 | pdev->is_hotplug_bridge = 1; |
955 | } |
956 | |
957 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
958 | |
959 | /** |
960 | * pci_setup_device - fill in class and map information of a device |
961 | * @dev: the device structure to fill |
962 | * |
963 | * Initialize the device structure with information about the device's |
964 | * vendor,class,memory and IO-space addresses,IRQ lines etc. |
965 | * Called at initialisation of the PCI subsystem and by CardBus services. |
966 | * Returns 0 on success and negative if unknown type of device (not normal, |
967 | * bridge or CardBus). |
968 | */ |
969 | int pci_setup_device(struct pci_dev *dev) |
970 | { |
971 | u32 class; |
972 | u8 hdr_type; |
973 | struct pci_slot *slot; |
974 | int pos = 0; |
975 | struct pci_bus_region region; |
976 | struct resource *res; |
977 | |
978 | if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type)) |
979 | return -EIO; |
980 | |
981 | dev->sysdata = dev->bus->sysdata; |
982 | dev->dev.parent = dev->bus->bridge; |
983 | dev->dev.bus = &pci_bus_type; |
984 | dev->hdr_type = hdr_type & 0x7f; |
985 | dev->multifunction = !!(hdr_type & 0x80); |
986 | dev->error_state = pci_channel_io_normal; |
987 | set_pcie_port_type(dev); |
988 | |
989 | list_for_each_entry(slot, &dev->bus->slots, list) |
990 | if (PCI_SLOT(dev->devfn) == slot->number) |
991 | dev->slot = slot; |
992 | |
993 | /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) |
994 | set this higher, assuming the system even supports it. */ |
995 | dev->dma_mask = 0xffffffff; |
996 | |
997 | dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), |
998 | dev->bus->number, PCI_SLOT(dev->devfn), |
999 | PCI_FUNC(dev->devfn)); |
1000 | |
1001 | pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); |
1002 | dev->revision = class & 0xff; |
1003 | dev->class = class >> 8; /* upper 3 bytes */ |
1004 | |
1005 | dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n", |
1006 | dev->vendor, dev->device, dev->hdr_type, dev->class); |
1007 | |
1008 | /* need to have dev->class ready */ |
1009 | dev->cfg_size = pci_cfg_space_size(dev); |
1010 | |
1011 | /* "Unknown power state" */ |
1012 | dev->current_state = PCI_UNKNOWN; |
1013 | |
1014 | /* Early fixups, before probing the BARs */ |
1015 | pci_fixup_device(pci_fixup_early, dev); |
1016 | /* device class may be changed after fixup */ |
1017 | class = dev->class >> 8; |
1018 | |
1019 | switch (dev->hdr_type) { /* header type */ |
1020 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ |
1021 | if (class == PCI_CLASS_BRIDGE_PCI) |
1022 | goto bad; |
1023 | pci_read_irq(dev); |
1024 | pci_read_bases(dev, 6, PCI_ROM_ADDRESS); |
1025 | pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor); |
1026 | pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device); |
1027 | |
1028 | /* |
1029 | * Do the ugly legacy mode stuff here rather than broken chip |
1030 | * quirk code. Legacy mode ATA controllers have fixed |
1031 | * addresses. These are not always echoed in BAR0-3, and |
1032 | * BAR0-3 in a few cases contain junk! |
1033 | */ |
1034 | if (class == PCI_CLASS_STORAGE_IDE) { |
1035 | u8 progif; |
1036 | pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); |
1037 | if ((progif & 1) == 0) { |
1038 | region.start = 0x1F0; |
1039 | region.end = 0x1F7; |
1040 | res = &dev->resource[0]; |
1041 | res->flags = LEGACY_IO_RESOURCE; |
1042 | pcibios_bus_to_resource(dev, res, ®ion); |
1043 | region.start = 0x3F6; |
1044 | region.end = 0x3F6; |
1045 | res = &dev->resource[1]; |
1046 | res->flags = LEGACY_IO_RESOURCE; |
1047 | pcibios_bus_to_resource(dev, res, ®ion); |
1048 | } |
1049 | if ((progif & 4) == 0) { |
1050 | region.start = 0x170; |
1051 | region.end = 0x177; |
1052 | res = &dev->resource[2]; |
1053 | res->flags = LEGACY_IO_RESOURCE; |
1054 | pcibios_bus_to_resource(dev, res, ®ion); |
1055 | region.start = 0x376; |
1056 | region.end = 0x376; |
1057 | res = &dev->resource[3]; |
1058 | res->flags = LEGACY_IO_RESOURCE; |
1059 | pcibios_bus_to_resource(dev, res, ®ion); |
1060 | } |
1061 | } |
1062 | break; |
1063 | |
1064 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ |
1065 | if (class != PCI_CLASS_BRIDGE_PCI) |
1066 | goto bad; |
1067 | /* The PCI-to-PCI bridge spec requires that subtractive |
1068 | decoding (i.e. transparent) bridge must have programming |
1069 | interface code of 0x01. */ |
1070 | pci_read_irq(dev); |
1071 | dev->transparent = ((dev->class & 0xff) == 1); |
1072 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
1073 | set_pcie_hotplug_bridge(dev); |
1074 | pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); |
1075 | if (pos) { |
1076 | pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor); |
1077 | pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device); |
1078 | } |
1079 | break; |
1080 | |
1081 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
1082 | if (class != PCI_CLASS_BRIDGE_CARDBUS) |
1083 | goto bad; |
1084 | pci_read_irq(dev); |
1085 | pci_read_bases(dev, 1, 0); |
1086 | pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor); |
1087 | pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device); |
1088 | break; |
1089 | |
1090 | default: /* unknown header */ |
1091 | dev_err(&dev->dev, "unknown header type %02x, " |
1092 | "ignoring device\n", dev->hdr_type); |
1093 | return -EIO; |
1094 | |
1095 | bad: |
1096 | dev_err(&dev->dev, "ignoring class %#08x (doesn't match header " |
1097 | "type %02x)\n", dev->class, dev->hdr_type); |
1098 | dev->class = PCI_CLASS_NOT_DEFINED; |
1099 | } |
1100 | |
1101 | /* We found a fine healthy device, go go go... */ |
1102 | return 0; |
1103 | } |
1104 | |
/*
 * Release capability-related state (VPD, SR-IOV, saved-config buffers)
 * allocated for @dev during enumeration.  Called from pci_release_dev()
 * when the last reference to the device is dropped.
 */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1111 | |
1112 | /** |
1113 | * pci_release_dev - free a pci device structure when all users of it are finished. |
1114 | * @dev: device that's been disconnected |
1115 | * |
1116 | * Will be called only by the device core when all users of this pci device are |
1117 | * done. |
1118 | */ |
1119 | static void pci_release_dev(struct device *dev) |
1120 | { |
1121 | struct pci_dev *pci_dev; |
1122 | |
1123 | pci_dev = to_pci_dev(dev); |
1124 | pci_release_capabilities(pci_dev); |
1125 | pci_release_of_node(pci_dev); |
1126 | kfree(pci_dev); |
1127 | } |
1128 | |
1129 | /** |
1130 | * pci_cfg_space_size - get the configuration space size of the PCI device. |
1131 | * @dev: PCI device |
1132 | * |
1133 | * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices |
1134 | * have 4096 bytes. Even if the device is capable, that doesn't mean we can |
1135 | * access it. Maybe we don't have a way to generate extended config space |
1136 | * accesses, or the device is behind a reverse Express bridge. So we try |
1137 | * reading the dword at 0x100 which must either be 0 or a valid extended |
1138 | * capability header. |
1139 | */ |
1140 | int pci_cfg_space_size_ext(struct pci_dev *dev) |
1141 | { |
1142 | u32 status; |
1143 | int pos = PCI_CFG_SPACE_SIZE; |
1144 | |
1145 | if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL) |
1146 | goto fail; |
1147 | if (status == 0xffffffff) |
1148 | goto fail; |
1149 | |
1150 | return PCI_CFG_SPACE_EXP_SIZE; |
1151 | |
1152 | fail: |
1153 | return PCI_CFG_SPACE_SIZE; |
1154 | } |
1155 | |
1156 | int pci_cfg_space_size(struct pci_dev *dev) |
1157 | { |
1158 | int pos; |
1159 | u32 status; |
1160 | u16 class; |
1161 | |
1162 | class = dev->class >> 8; |
1163 | if (class == PCI_CLASS_BRIDGE_HOST) |
1164 | return pci_cfg_space_size_ext(dev); |
1165 | |
1166 | pos = pci_pcie_cap(dev); |
1167 | if (!pos) { |
1168 | pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
1169 | if (!pos) |
1170 | goto fail; |
1171 | |
1172 | pci_read_config_dword(dev, pos + PCI_X_STATUS, &status); |
1173 | if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))) |
1174 | goto fail; |
1175 | } |
1176 | |
1177 | return pci_cfg_space_size_ext(dev); |
1178 | |
1179 | fail: |
1180 | return PCI_CFG_SPACE_SIZE; |
1181 | } |
1182 | |
/*
 * Release callback for the host-bridge struct device: run the platform's
 * optional release_fn, free the resource-window list, then the bridge
 * itself.  Invoked by the driver core when the last reference drops.
 */
static void pci_release_bus_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
1194 | |
1195 | struct pci_dev *alloc_pci_dev(void) |
1196 | { |
1197 | struct pci_dev *dev; |
1198 | |
1199 | dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); |
1200 | if (!dev) |
1201 | return NULL; |
1202 | |
1203 | INIT_LIST_HEAD(&dev->bus_list); |
1204 | |
1205 | return dev; |
1206 | } |
1207 | EXPORT_SYMBOL(alloc_pci_dev); |
1208 | |
/*
 * pci_bus_read_dev_vendor_id - read a device's vendor/device ID dword,
 * coping with Configuration Request Retry Status (CRS)
 * @bus: bus to probe
 * @devfn: device/function number
 * @l: output; receives the raw 32-bit vendor/device ID dword
 * @crs_timeout: total milliseconds to keep retrying while the device
 *		 returns CRS; 0 means do not retry at all
 *
 * Returns true (and fills *l) if a device answered with a plausible ID,
 * false if the slot looks empty, the config read failed, or the device
 * kept returning CRS past @crs_timeout.
 */
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/* Configuration request Retry Status */
	while (*l == 0xffff0001) {
		if (!crs_timeout)
			return false;

		/* exponential backoff: 1, 2, 4, ... ms between re-reads */
		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Device hasn't responded within crs_timeout ms?  Give up. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
					"responding\n", pci_domain_nr(bus),
					bus->number, PCI_SLOT(devfn),
					PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1244 | |
1245 | /* |
1246 | * Read the config data for a PCI device, sanity-check it |
1247 | * and fill in the dev structure... |
1248 | */ |
1249 | static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) |
1250 | { |
1251 | struct pci_dev *dev; |
1252 | u32 l; |
1253 | |
1254 | if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000)) |
1255 | return NULL; |
1256 | |
1257 | dev = alloc_pci_dev(); |
1258 | if (!dev) |
1259 | return NULL; |
1260 | |
1261 | dev->bus = bus; |
1262 | dev->devfn = devfn; |
1263 | dev->vendor = l & 0xffff; |
1264 | dev->device = (l >> 16) & 0xffff; |
1265 | |
1266 | pci_set_of_node(dev); |
1267 | |
1268 | if (pci_setup_device(dev)) { |
1269 | kfree(dev); |
1270 | return NULL; |
1271 | } |
1272 | |
1273 | return dev; |
1274 | } |
1275 | |
/*
 * Discover and set up the standard capabilities (MSI, PM, VPD, ARI,
 * SR-IOV, ACS) of a freshly scanned device.  Called once per device
 * from pci_device_add().  NOTE(review): the call order looks deliberate
 * (save buffers are allocated before PM/VPD init) — confirm before
 * reordering.
 */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);
	platform_pci_wakeup_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_enable_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}
1300 | |
/**
 * pci_device_add - driver-core init for a discovered device and bus linkage
 * @dev: device to add (freshly returned by pci_scan_device())
 * @bus: bus the device lives on
 *
 * Initializes the embedded struct device, applies header fixups and
 * capability discovery, then links the device onto @bus->devices.  The
 * device is not yet bound to a driver; pci_bus_add_devices() handles that.
 */
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;
	/* reference held on behalf of the bus list entry added below */
	pci_dev_get(dev);

	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);
}
1334 | |
1335 | struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) |
1336 | { |
1337 | struct pci_dev *dev; |
1338 | |
1339 | dev = pci_get_slot(bus, devfn); |
1340 | if (dev) { |
1341 | pci_dev_put(dev); |
1342 | return dev; |
1343 | } |
1344 | |
1345 | dev = pci_scan_device(bus, devfn); |
1346 | if (!dev) |
1347 | return NULL; |
1348 | |
1349 | pci_device_add(dev, bus); |
1350 | |
1351 | return dev; |
1352 | } |
1353 | EXPORT_SYMBOL(pci_scan_single_device); |
1354 | |
1355 | static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn) |
1356 | { |
1357 | u16 cap; |
1358 | unsigned pos, next_fn; |
1359 | |
1360 | if (!dev) |
1361 | return 0; |
1362 | |
1363 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); |
1364 | if (!pos) |
1365 | return 0; |
1366 | pci_read_config_word(dev, pos + 4, &cap); |
1367 | next_fn = cap >> 8; |
1368 | if (next_fn <= fn) |
1369 | return 0; |
1370 | return next_fn; |
1371 | } |
1372 | |
/* Traditional multifunction scan order: functions 1..7, then wrap to 0
 * which terminates the caller's loop. */
static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
{
	return (fn + 1) & 7;
}
1377 | |
1378 | static unsigned no_next_fn(struct pci_dev *dev, unsigned fn) |
1379 | { |
1380 | return 0; |
1381 | } |
1382 | |
1383 | static int only_one_child(struct pci_bus *bus) |
1384 | { |
1385 | struct pci_dev *parent = bus->self; |
1386 | |
1387 | if (!parent || !pci_is_pcie(parent)) |
1388 | return 0; |
1389 | if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT) |
1390 | return 1; |
1391 | if (parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM && |
1392 | !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS)) |
1393 | return 1; |
1394 | return 0; |
1395 | } |
1396 | |
1397 | /** |
1398 | * pci_scan_slot - scan a PCI slot on a bus for devices. |
1399 | * @bus: PCI bus to scan |
1400 | * @devfn: slot number to scan (must have zero function.) |
1401 | * |
1402 | * Scan a PCI slot on the specified PCI bus for devices, adding |
1403 | * discovered devices to the @bus->devices list. New devices |
1404 | * will not have is_added set. |
1405 | * |
1406 | * Returns the number of new devices found. |
1407 | */ |
1408 | int pci_scan_slot(struct pci_bus *bus, int devfn) |
1409 | { |
1410 | unsigned fn, nr = 0; |
1411 | struct pci_dev *dev; |
1412 | unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn; |
1413 | |
1414 | if (only_one_child(bus) && (devfn > 0)) |
1415 | return 0; /* Already scanned the entire slot */ |
1416 | |
1417 | dev = pci_scan_single_device(bus, devfn); |
1418 | if (!dev) |
1419 | return 0; |
1420 | if (!dev->is_added) |
1421 | nr++; |
1422 | |
1423 | if (pci_ari_enabled(bus)) |
1424 | next_fn = next_ari_fn; |
1425 | else if (dev->multifunction) |
1426 | next_fn = next_trad_fn; |
1427 | |
1428 | for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) { |
1429 | dev = pci_scan_single_device(bus, devfn + fn); |
1430 | if (dev) { |
1431 | if (!dev->is_added) |
1432 | nr++; |
1433 | dev->multifunction = 1; |
1434 | } |
1435 | } |
1436 | |
1437 | /* only one slot has pcie device */ |
1438 | if (bus->self && nr) |
1439 | pcie_aspm_init_link_state(bus->self); |
1440 | |
1441 | return nr; |
1442 | } |
1443 | |
/*
 * pcie_find_smpss - pci_walk_bus() callback computing the smallest MPSS
 * @dev: current device in the walk
 * @data: pointer to a u8 holding the running minimum (MPS encoding)
 *
 * Clamps *data to the smallest Max Payload Size Supported seen so far,
 * and forces it to 0 (128 bytes) when a hotplug bridge deeper than the
 * root port is present (see the comment below for why).
 */
static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/* For PCIE hotplug enabled slots not connected directly to a
	 * PCI-E root port, there can be problems when hotplugging
	 * devices.  This is due to the possibility of hotplugging a
	 * device into the fabric with a smaller MPS that the devices
	 * currently running have configured.  Modifying the MPS on the
	 * running devices could cause a fatal bus error due to an
	 * incoming frame being larger than the newly configured MPS.
	 * To work around this, the MPS for the entire fabric must be
	 * set to the minimum size.  Any devices hotplugged into this
	 * fabric will have the minimum MPS set.  If the PCI hotplug
	 * slot is directly connected to the root port and there are not
	 * other devices on the fabric (which seems to be the most
	 * common case), then this is not an issue and MPS discovery
	 * will occur as normal.
	 */
	if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
	     (dev->bus->self &&
	      dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)))
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}
1476 | |
/*
 * pcie_write_mps - program a device's Max Payload Size
 * @dev: device to configure
 * @mps: requested payload size in bytes (i.e. 128 << encoding)
 *
 * In PCIE_BUS_PERFORMANCE mode the requested value is ignored and a
 * per-device maximum is derived instead (see inline comment).
 */
static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}
1504 | |
/*
 * pcie_write_mrrs - program a device's Max Read Request Size
 * @dev: device to configure
 *
 * Only active in PCIE_BUS_PERFORMANCE mode; starts from the device's
 * configured MPS and halves until the hardware accepts the value.
 */
static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For Max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps.
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a "
			"safe value.  If problems are experienced, try running "
			"with pci=pcie_bus_safe.\n");
}
1541 | |
1542 | static int pcie_bus_configure_set(struct pci_dev *dev, void *data) |
1543 | { |
1544 | int mps, orig_mps; |
1545 | |
1546 | if (!pci_is_pcie(dev)) |
1547 | return 0; |
1548 | |
1549 | mps = 128 << *(u8 *)data; |
1550 | orig_mps = pcie_get_mps(dev); |
1551 | |
1552 | pcie_write_mps(dev, mps); |
1553 | pcie_write_mrrs(dev); |
1554 | |
1555 | dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), " |
1556 | "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss, |
1557 | orig_mps, pcie_get_readrq(dev)); |
1558 | |
1559 | return 0; |
1560 | } |
1561 | |
1562 | /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down, |
1563 | * parents then children fashion. If this changes, then this code will not |
1564 | * work as designed. |
1565 | */ |
1566 | void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) |
1567 | { |
1568 | u8 smpss; |
1569 | |
1570 | if (!pci_is_pcie(bus->self)) |
1571 | return; |
1572 | |
1573 | if (pcie_bus_config == PCIE_BUS_TUNE_OFF) |
1574 | return; |
1575 | |
1576 | /* FIXME - Peer to peer DMA is possible, though the endpoint would need |
1577 | * to be aware to the MPS of the destination. To work around this, |
1578 | * simply force the MPS of the entire system to the smallest possible. |
1579 | */ |
1580 | if (pcie_bus_config == PCIE_BUS_PEER2PEER) |
1581 | smpss = 0; |
1582 | |
1583 | if (pcie_bus_config == PCIE_BUS_SAFE) { |
1584 | smpss = mpss; |
1585 | |
1586 | pcie_find_smpss(bus->self, &smpss); |
1587 | pci_walk_bus(bus, pcie_find_smpss, &smpss); |
1588 | } |
1589 | |
1590 | pcie_bus_configure_set(bus->self, &smpss); |
1591 | pci_walk_bus(bus, pcie_bus_configure_set, &smpss); |
1592 | } |
1593 | EXPORT_SYMBOL_GPL(pcie_bus_configure_settings); |
1594 | |
/*
 * pci_scan_child_bus - scan every slot on @bus and recurse behind bridges
 * @bus: bus to scan
 *
 * Probes all 32 device slots, reserves bus numbers for SR-IOV, runs the
 * arch's per-bus fixups once, then walks any PCI-to-PCI/CardBus bridges
 * in two passes (pass 0 and pass 1, as interpreted by pci_scan_bridge()).
 *
 * Returns the highest subordinate bus number discovered.
 */
unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		if (pci_is_root_bus(bus))
			bus->is_added = 1;
	}

	for (pass=0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
1637 | |
/*
 * pci_create_root_bus - create and register a root PCI bus and host bridge
 * @parent: parent device of the host bridge (may be NULL)
 * @bus: number of the new root bus
 * @ops: config-space access methods for the bus
 * @sysdata: arch-private data attached to the bus
 * @resources: list of pci_host_bridge_windows; entries are moved onto the
 *	       bridge's window list
 *
 * Registers a pci_host_bridge device and an (empty) root pci_bus beneath
 * it, transfers the initial resource windows onto the bus, and links the
 * bus into pci_root_buses.  Returns the new bus, or NULL if the bus
 * number is already known or any allocation/registration step fails.
 */
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct pci_host_bridge_window *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;


	b = pci_alloc_bus();
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_bus_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&bridge->dev);
	if (error)
		goto bridge_dev_reg_err;
	/* the bus holds a reference on its host bridge */
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	b->number = b->busn_res.start = bus;

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	list_for_each_entry_safe(window, n, resources, list) {
		list_move_tail(&window->list, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			/* log the CPU-to-bus address translation, if any */
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	/* drop the reference taken via get_device() above, then tear down */
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
bridge_dev_reg_err:
	kfree(bridge);
err_out:
	kfree(b);
	return NULL;
}
1735 | |
/*
 * pci_bus_insert_busn_res - insert @b's bus-number aperture into the tree
 * @b: bus whose busn_res is being registered
 * @bus: first bus number covered
 * @bus_max: last bus number covered
 *
 * Root buses insert under the per-domain [bus 00-ff] resource; child
 * buses nest under their parent's busn_res.  Returns 1 on success,
 * 0 if the range conflicts with an existing entry.
 */
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = insert_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res, conflict->name, conflict);
	else
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: %pR is inserted under %s%pR\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res);

	return conflict == NULL;
}
1767 | |
/*
 * pci_bus_update_busn_res_end - move the end of @b's busn_res to @bus_max
 * @b: bus to adjust
 * @bus_max: new last bus number (must be >= res->start)
 *
 * Returns 0 on success or a negative errno from adjust_resource().
 * If the resource was never inserted into the tree, (re)inserts it with
 * the new end.
 */
int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR end %s updated to %02x\n",
		   &old_res, ret ? "can not be" : "is", bus_max);

	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}
1789 | |
1790 | void pci_bus_release_busn_res(struct pci_bus *b) |
1791 | { |
1792 | struct resource *res = &b->busn_res; |
1793 | int ret; |
1794 | |
1795 | if (!res->flags || !res->parent) |
1796 | return; |
1797 | |
1798 | ret = release_resource(res); |
1799 | dev_printk(KERN_DEBUG, &b->dev, |
1800 | "busn_res: %pR %s released\n", |
1801 | res, ret ? "can not be" : "is"); |
1802 | } |
1803 | |
/*
 * pci_scan_root_bus - create a root bus, scan it, and add its devices
 * @parent: parent device of the host bridge (may be NULL)
 * @bus: number of the new root bus
 * @ops: config-space access methods
 * @sysdata: arch-private data attached to the bus
 * @resources: initial resource windows (consumed by pci_create_root_bus())
 *
 * If @resources contains no IORESOURCE_BUS window, a default [bus-ff]
 * range is inserted and later trimmed to the highest bus number found.
 * Returns the new bus, or NULL on failure.
 */
struct pci_bus * __devinit pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	struct pci_host_bridge_window *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	/* did the caller supply a bus-number aperture? */
	list_for_each_entry(window, resources, list)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	if (!found) {
		dev_info(&b->dev,
			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			 bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	/* shrink the provisional [bus-ff] range to what was actually found */
	if (!found)
		pci_bus_update_busn_res_end(b, max);

	pci_bus_add_devices(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_root_bus);
1838 | |
1839 | /* Deprecated; use pci_scan_root_bus() instead */ |
1840 | struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent, |
1841 | int bus, struct pci_ops *ops, void *sysdata) |
1842 | { |
1843 | LIST_HEAD(resources); |
1844 | struct pci_bus *b; |
1845 | |
1846 | pci_add_resource(&resources, &ioport_resource); |
1847 | pci_add_resource(&resources, &iomem_resource); |
1848 | pci_add_resource(&resources, &busn_resource); |
1849 | b = pci_create_root_bus(parent, bus, ops, sysdata, &resources); |
1850 | if (b) |
1851 | pci_scan_child_bus(b); |
1852 | else |
1853 | pci_free_resource_list(&resources); |
1854 | return b; |
1855 | } |
1856 | EXPORT_SYMBOL(pci_scan_bus_parented); |
1857 | |
1858 | struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, |
1859 | void *sysdata) |
1860 | { |
1861 | LIST_HEAD(resources); |
1862 | struct pci_bus *b; |
1863 | |
1864 | pci_add_resource(&resources, &ioport_resource); |
1865 | pci_add_resource(&resources, &iomem_resource); |
1866 | pci_add_resource(&resources, &busn_resource); |
1867 | b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources); |
1868 | if (b) { |
1869 | pci_scan_child_bus(b); |
1870 | pci_bus_add_devices(b); |
1871 | } else { |
1872 | pci_free_resource_list(&resources); |
1873 | } |
1874 | return b; |
1875 | } |
1876 | EXPORT_SYMBOL(pci_scan_bus); |
1877 | |
1878 | #ifdef CONFIG_HOTPLUG |
1879 | /** |
1880 | * pci_rescan_bus_bridge_resize - scan a PCI bus for devices. |
1881 | * @bridge: PCI bridge for the bus to scan |
1882 | * |
1883 | * Scan a PCI bus and child buses for new devices, add them, |
1884 | * and enable them, resizing bridge mmio/io resource if necessary |
1885 | * and possible. The caller must ensure the child devices are already |
1886 | * removed for resizing to occur. |
1887 | * |
1888 | * Returns the max number of subordinate bus discovered. |
1889 | */ |
1890 | unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge) |
1891 | { |
1892 | unsigned int max; |
1893 | struct pci_bus *bus = bridge->subordinate; |
1894 | |
1895 | max = pci_scan_child_bus(bus); |
1896 | |
1897 | pci_assign_unassigned_bridge_resources(bridge); |
1898 | |
1899 | pci_bus_add_devices(bus); |
1900 | |
1901 | return max; |
1902 | } |
1903 | |
1904 | EXPORT_SYMBOL(pci_add_new_bus); |
1905 | EXPORT_SYMBOL(pci_scan_slot); |
1906 | EXPORT_SYMBOL(pci_scan_bridge); |
1907 | EXPORT_SYMBOL_GPL(pci_scan_child_bus); |
1908 | #endif |
1909 | |
1910 | static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b) |
1911 | { |
1912 | const struct pci_dev *a = to_pci_dev(d_a); |
1913 | const struct pci_dev *b = to_pci_dev(d_b); |
1914 | |
1915 | if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1; |
1916 | else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1; |
1917 | |
1918 | if (a->bus->number < b->bus->number) return -1; |
1919 | else if (a->bus->number > b->bus->number) return 1; |
1920 | |
1921 | if (a->devfn < b->devfn) return -1; |
1922 | else if (a->devfn > b->devfn) return 1; |
1923 | |
1924 | return 0; |
1925 | } |
1926 | |
/*
 * Re-sort the pci_bus_type device list into (domain, bus, devfn) order,
 * i.e. breadth-first, using pci_sort_bf_cmp() above.
 */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
1931 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9