/*
 * Broadcom specific AMBA
 * PCI Host
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/slab.h>
#include <linux/bcma/bcma.h>
#include <linux/pci.h>
#include <linux/module.h>

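/* A PCI(e) host exposes the internal bus through BAR0, which is far too
 * small to map every core at once. A core's register and wrapper space is
 * therefore reached through sliding windows that are repositioned by
 * writing the BCMA_PCI_BAR0_WIN/BAR0_WIN2 PCI config words. */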
static void bcma_host_pci_switch_core(struct bcma_device *core)
{
	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
			       core->addr);
	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
			       core->wrap);
	core->bus->mapped_core = core;
	bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
}

/* Provides access to the requested core. Returns base offset that has to be
 * used. It makes use of fixed windows when possible. */
static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
{
	switch (core->id.id) {
	case BCMA_CORE_CHIPCOMMON:
		return 3 * BCMA_CORE_SIZE;
	case BCMA_CORE_PCIE:
		return 2 * BCMA_CORE_SIZE;
	}

	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return 0;
}

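/* The simple register accessors below translate a core-relative offset into
 * an offset within BAR0, switching the sliding window to the target core
 * whenever no fixed window covers it. */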
static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	return ioread8(core->bus->mmio + offset);
}

static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	return ioread16(core->bus->mmio + offset);
}

static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	return ioread32(core->bus->mmio + offset);
}

static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
				 u8 value)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	iowrite8(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
				  u16 value)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	iowrite16(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
				  u32 value)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	iowrite32(value, core->bus->mmio + offset);
}

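/* Optional block I/O: stream a whole buffer to/from a single core register
 * with the string MMIO helpers. The core is switched at most once up front;
 * count is in bytes and has to be a multiple of reg_width for the 16- and
 * 32-bit variants (hence the WARN_ONs). */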
#ifdef CONFIG_BCMA_BLOCKIO
void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
			      size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		ioread8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);
		ioread16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);
		ioread32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}

void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
			       size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		iowrite8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);
		iowrite16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);
		iowrite32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}
#endif

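/* aread32/awrite32 operate on the core's AMBA wrapper (agent) registers,
 * which the BAR0_WIN2 window maps at BAR0 + 1 * BCMA_CORE_SIZE. */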
static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
				   u32 value)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

const struct bcma_host_ops bcma_host_pci_ops = {
	.read8 = bcma_host_pci_read8,
	.read16 = bcma_host_pci_read16,
	.read32 = bcma_host_pci_read32,
	.write8 = bcma_host_pci_write8,
	.write16 = bcma_host_pci_write16,
	.write32 = bcma_host_pci_write32,
#ifdef CONFIG_BCMA_BLOCKIO
	.block_read = bcma_host_pci_block_read,
	.block_write = bcma_host_pci_block_write,
#endif
	.aread32 = bcma_host_pci_aread32,
	.awrite32 = bcma_host_pci_awrite32,
};

static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
					 const struct pci_device_id *id)
{
	struct bcma_bus *bus;
	int err = -ENOMEM;
	const char *name;
	u32 val;

	/* Alloc */
	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		goto out;

	/* Basic PCI configuration */
	err = pci_enable_device(dev);
	if (err)
		goto err_kfree_bus;

	name = dev_name(&dev->dev);
	if (dev->driver && dev->driver->name)
		name = dev->driver->name;
	err = pci_request_regions(dev, name);
	if (err)
		goto err_pci_disable;
	pci_set_master(dev);

	/* Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(dev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

	/* SSB needed additional powering up, do we have any AMBA PCI cards? */
	if (!pci_is_pcie(dev))
		bcma_err(bus, "PCI card detected, report problems.\n");

	/* Map MMIO */
	err = -ENOMEM;
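	/* ~0UL as maxlen lets pci_iomap() map the whole of BAR0. */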
	bus->mmio = pci_iomap(dev, 0, ~0UL);
	if (!bus->mmio)
		goto err_pci_release_regions;

	/* Host specific */
	bus->host_pci = dev;
	bus->hosttype = BCMA_HOSTTYPE_PCI;
	bus->ops = &bcma_host_pci_ops;

	bus->boardinfo.vendor = bus->host_pci->subsystem_vendor;
	bus->boardinfo.type = bus->host_pci->subsystem_device;

	/* Register */
	err = bcma_bus_register(bus);
	if (err)
		goto err_pci_unmap_mmio;

	pci_set_drvdata(dev, bus);

out:
	return err;

err_pci_unmap_mmio:
	pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
	pci_release_regions(dev);
err_pci_disable:
	pci_disable_device(dev);
err_kfree_bus:
	kfree(bus);
	return err;
}

static void __devexit bcma_host_pci_remove(struct pci_dev *dev)
{
	struct bcma_bus *bus = pci_get_drvdata(dev);

	bcma_bus_unregister(bus);
	pci_iounmap(dev, bus->mmio);
	pci_release_regions(dev);
	pci_disable_device(dev);
	kfree(bus);
	pci_set_drvdata(dev, NULL);
}

#ifdef CONFIG_PM
static int bcma_host_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct bcma_bus *bus = pci_get_drvdata(pdev);

	bus->mapped_core = NULL;

	return bcma_bus_suspend(bus);
}

static int bcma_host_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct bcma_bus *bus = pci_get_drvdata(pdev);

	return bcma_bus_resume(bus);
}

static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
			 bcma_host_pci_resume);
#define BCMA_PM_OPS (&bcma_pm_ops)

#else /* CONFIG_PM */

#define BCMA_PM_OPS NULL

#endif /* CONFIG_PM */

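/* PCI device IDs of BCMA-based Broadcom chips driven through this bridge.
 * Note that 43224 is a decimal literal (0xa8d8 in hex). */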
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);

static struct pci_driver bcma_pci_bridge_driver = {
	.name = "bcma-pci-bridge",
	.id_table = bcma_pci_bridge_tbl,
	.probe = bcma_host_pci_probe,
	.remove = __devexit_p(bcma_host_pci_remove),
	.driver.pm = BCMA_PM_OPS,
};

int __init bcma_host_pci_init(void)
{
	return pci_register_driver(&bcma_pci_bridge_driver);
}

void __exit bcma_host_pci_exit(void)
{
	pci_unregister_driver(&bcma_pci_bridge_driver);
}