/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static DEFINE_RAW_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

static int dca_providers_blocked;

static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pci_bus *bus = pdev->bus;

        while (bus->parent)
                bus = bus->parent;

        return bus;
}

static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
        struct dca_domain *domain;

        domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
        if (!domain)
                return NULL;

        INIT_LIST_HEAD(&domain->dca_providers);
        domain->pci_rc = rc;

        return domain;
}

static void dca_free_domain(struct dca_domain *domain)
{
        list_del(&domain->node);
        kfree(domain);
}

static int dca_provider_ioat_ver_3_0(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
                ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

static void unregister_dca_providers(void)
{
        struct dca_provider *dca, *_dca;
        struct list_head unregistered_providers;
        struct dca_domain *domain;
        unsigned long flags;

        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_REMOVE, NULL);

        INIT_LIST_HEAD(&unregistered_providers);

        raw_spin_lock_irqsave(&dca_lock, flags);

        if (list_empty(&dca_domains)) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return;
        }

        /* at this point only one domain in the list is expected */
        domain = list_first_entry(&dca_domains, struct dca_domain, node);

        list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
                list_move(&dca->node, &unregistered_providers);

        dca_free_domain(domain);

        raw_spin_unlock_irqrestore(&dca_lock, flags);

        list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
                dca_sysfs_remove_provider(dca);
                list_del(&dca->node);
        }
}

static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
        struct dca_domain *domain;

        list_for_each_entry(domain, &dca_domains, node)
                if (domain->pci_rc == rc)
                        return domain;

        return NULL;
}

static struct dca_domain *dca_get_domain(struct device *dev)
{
        struct pci_bus *rc;
        struct dca_domain *domain;

        rc = dca_pci_rc_from_dev(dev);
        domain = dca_find_domain(rc);

        if (!domain) {
                if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
                        dca_providers_blocked = 1;
        }

        return domain;
}

static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
        struct dca_provider *dca;
        struct pci_bus *rc;
        struct dca_domain *domain;

        if (dev) {
                rc = dca_pci_rc_from_dev(dev);
                domain = dca_find_domain(rc);
                if (!domain)
                        return NULL;
        } else {
                if (!list_empty(&dca_domains))
                        domain = list_first_entry(&dca_domains,
                                                  struct dca_domain,
                                                  node);
                else
                        return NULL;
        }

        list_for_each_entry(dca, &domain->dca_providers, node)
                if ((!dev) || (dca->ops->dev_managed(dca, dev)))
                        return dca;

        return NULL;
}

/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
        struct dca_provider *dca;
        int err, slot = -ENODEV;
        unsigned long flags;
        struct pci_bus *pci_rc;
        struct dca_domain *domain;

        if (!dev)
                return -EFAULT;

        raw_spin_lock_irqsave(&dca_lock, flags);

        /* check if the requester has not been added already */
        dca = dca_find_provider_by_dev(dev);
        if (dca) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -EEXIST;
        }

        pci_rc = dca_pci_rc_from_dev(dev);
        domain = dca_find_domain(pci_rc);
        if (!domain) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }

        list_for_each_entry(dca, &domain->dca_providers, node) {
                slot = dca->ops->add_requester(dca, dev);
                if (slot >= 0)
                        break;
        }

        raw_spin_unlock_irqrestore(&dca_lock, flags);

        if (slot < 0)
                return slot;

        err = dca_sysfs_add_req(dca, dev, slot);
        if (err) {
                raw_spin_lock_irqsave(&dca_lock, flags);
                if (dca == dca_find_provider_by_dev(dev))
                        dca->ops->remove_requester(dca, dev);
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
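
/*
 * Illustrative sketch, not part of the original dca-core.c: roughly how a
 * hypothetical client driver ("foo" and its helpers are assumed names) might
 * attach its PCI device to whatever provider serves the same root complex.
 * Only dca_add_requester() and dca_remove_requester() are the real API
 * exported above; the example is guarded out so it is never built.
 */
#ifdef DCA_EXAMPLE_CLIENT
static int foo_enable_dca(struct pci_dev *pdev)
{
        int err;

        /* ask the provider for this device's root complex to track it */
        err = dca_add_requester(&pdev->dev);
        if (err)        /* -ENODEV: no provider yet, -EEXIST: already added */
                return err;

        /* a real driver would now fetch tags and program its hardware */
        return 0;
}

static void foo_disable_dca(struct pci_dev *pdev)
{
        dca_remove_requester(&pdev->dev);
}
#endif /* DCA_EXAMPLE_CLIENT */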

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
        struct dca_provider *dca;
        int slot;
        unsigned long flags;

        if (!dev)
                return -EFAULT;

        raw_spin_lock_irqsave(&dca_lock, flags);
        dca = dca_find_provider_by_dev(dev);
        if (!dca) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }
        slot = dca->ops->remove_requester(dca, dev);
        raw_spin_unlock_irqrestore(&dca_lock, flags);

        if (slot < 0)
                return slot;

        dca_sysfs_remove_req(dca, slot);

        return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_common_get_tag(struct device *dev, int cpu)
{
        struct dca_provider *dca;
        u8 tag;
        unsigned long flags;

        raw_spin_lock_irqsave(&dca_lock, flags);

        dca = dca_find_provider_by_dev(dev);
        if (!dca) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }
        tag = dca->ops->get_tag(dca, dev, cpu);

        raw_spin_unlock_irqrestore(&dca_lock, flags);
        return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 *      for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
        if (!dev)
                return -EFAULT;

        return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
        struct device *dev = NULL;

        return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);
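
/*
 * Illustrative sketch, not part of the original dca-core.c: a client would
 * normally look up the tag for the CPU that will consume the DMA data and
 * program it into a per-queue register.  foo_current_dca_tag() is an assumed
 * helper name; only dca3_get_tag() is the real API, and the example is
 * guarded out so it is never built.
 */
#ifdef DCA_EXAMPLE_CLIENT
static u8 foo_current_dca_tag(struct pci_dev *pdev)
{
        int cpu = get_cpu();    /* stay on this CPU while asking for its tag */
        u8 tag = dca3_get_tag(&pdev->dev, cpu);

        put_cpu();
        return tag;             /* the caller writes this tag into its hardware */
}
#endif /* DCA_EXAMPLE_CLIENT */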

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
{
        struct dca_provider *dca;
        int alloc_size;

        alloc_size = (sizeof(*dca) + priv_size);
        dca = kzalloc(alloc_size, GFP_KERNEL);
        if (!dca)
                return NULL;
        dca->ops = ops;

        return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);

/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
        kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
        int err;
        unsigned long flags;
        struct dca_domain *domain, *newdomain = NULL;

        raw_spin_lock_irqsave(&dca_lock, flags);
        if (dca_providers_blocked) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }
        raw_spin_unlock_irqrestore(&dca_lock, flags);

        err = dca_sysfs_add_provider(dca, dev);
        if (err)
                return err;

        raw_spin_lock_irqsave(&dca_lock, flags);
        domain = dca_get_domain(dev);
        if (!domain) {
                struct pci_bus *rc;

                if (dca_providers_blocked) {
                        raw_spin_unlock_irqrestore(&dca_lock, flags);
                        dca_sysfs_remove_provider(dca);
                        unregister_dca_providers();
                        return -ENODEV;
                }

                raw_spin_unlock_irqrestore(&dca_lock, flags);
                rc = dca_pci_rc_from_dev(dev);
                newdomain = dca_allocate_domain(rc);
                if (!newdomain)
                        return -ENODEV;
                raw_spin_lock_irqsave(&dca_lock, flags);
                /* Recheck, we might have raced after dropping the lock */
                domain = dca_get_domain(dev);
                if (!domain) {
                        domain = newdomain;
                        newdomain = NULL;
                        list_add(&domain->node, &dca_domains);
                }
        }
        list_add(&dca->node, &domain->dca_providers);
        raw_spin_unlock_irqrestore(&dca_lock, flags);

        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_ADD, NULL);
        kfree(newdomain);
        return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);

/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
        unsigned long flags;
        struct pci_bus *pci_rc;
        struct dca_domain *domain;

        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_REMOVE, NULL);

        raw_spin_lock_irqsave(&dca_lock, flags);

        list_del(&dca->node);

        pci_rc = dca_pci_rc_from_dev(dev);
        domain = dca_find_domain(pci_rc);
        if (list_empty(&domain->dca_providers))
                dca_free_domain(domain);

        raw_spin_unlock_irqrestore(&dca_lock, flags);

        dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);
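
/*
 * Illustrative sketch, not part of the original dca-core.c: the provider side
 * of the interface.  A hypothetical I/O engine driver ("foo") would supply a
 * dca_ops table and register it against its PCI device.  The foo_dca_*
 * callbacks are assumed names; only alloc_dca_provider(),
 * register_dca_provider(), unregister_dca_provider() and free_dca_provider()
 * are the real API exported above, and the example is guarded out so it is
 * never built.
 */
#ifdef DCA_EXAMPLE_PROVIDER
static int foo_dca_add_requester(struct dca_provider *dca, struct device *dev);
static int foo_dca_remove_requester(struct dca_provider *dca, struct device *dev);
static u8 foo_dca_get_tag(struct dca_provider *dca, struct device *dev, int cpu);
static int foo_dca_dev_managed(struct dca_provider *dca, struct device *dev);

static struct dca_ops foo_dca_ops = {
        .add_requester          = foo_dca_add_requester,
        .remove_requester       = foo_dca_remove_requester,
        .get_tag                = foo_dca_get_tag,
        .dev_managed            = foo_dca_dev_managed,
};

static struct dca_provider *foo_register_dca(struct pci_dev *pdev, int priv_size)
{
        struct dca_provider *dca;

        /* priv_size bytes are allocated right behind struct dca_provider */
        dca = alloc_dca_provider(&foo_dca_ops, priv_size);
        if (!dca)
                return NULL;

        if (register_dca_provider(dca, &pdev->dev)) {
                free_dca_provider(dca);
                return NULL;
        }
        return dca;
}

static void foo_unregister_dca(struct dca_provider *dca, struct pci_dev *pdev)
{
        unregister_dca_provider(dca, &pdev->dev);
        free_dca_provider(dca);
}
#endif /* DCA_EXAMPLE_PROVIDER */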

/**
 * dca_register_notify - register a client's notifier callback
 */
void dca_register_notify(struct notifier_block *nb)
{
        blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 */
void dca_unregister_notify(struct notifier_block *nb)
{
        blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
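
/*
 * Illustrative sketch, not part of the original dca-core.c: a client driver
 * typically registers a notifier so it can issue dca_add_requester() when a
 * provider appears and stop using tags when one goes away.  foo_dca_notify()
 * and the foo device handling are assumptions; DCA_PROVIDER_ADD/REMOVE and
 * dca_register_notify()/dca_unregister_notify() are the real API, and the
 * example is guarded out so it is never built.
 */
#ifdef DCA_EXAMPLE_CLIENT
static int foo_dca_notify(struct notifier_block *nb, unsigned long event,
                          void *data)
{
        switch (event) {
        case DCA_PROVIDER_ADD:
                /* a provider showed up: call dca_add_requester() for our devices */
                break;
        case DCA_PROVIDER_REMOVE:
                /* provider going away: call dca_remove_requester(), stop tagging */
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block foo_dca_notifier = {
        .notifier_call = foo_dca_notify,
};

/* during driver init:  dca_register_notify(&foo_dca_notifier);   */
/* during driver exit:  dca_unregister_notify(&foo_dca_notifier); */
#endif /* DCA_EXAMPLE_CLIENT */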

static int __init dca_init(void)
{
        pr_info("dca service started, version %s\n", DCA_VERSION);
        return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
        dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);