/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "core.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			5
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
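
/*
 * A client announces the ABI version it implements via FW_CDEV_IOC_GET_INFO
 * before using version-dependent features; handle_request() below, for
 * example, only emits FW_CDEV_EVENT_REQUEST2 to clients that declared
 * version >= FW_CDEV_VERSION_EVENT_REQUEST2.
 */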

struct client {
        u32 version;
        struct fw_device *device;

        spinlock_t lock;
        bool in_shutdown;
        struct idr resource_idr;
        struct list_head event_list;
        wait_queue_head_t wait;
        wait_queue_head_t tx_flush_wait;
        u64 bus_reset_closure;

        struct fw_iso_context *iso_context;
        u64 iso_closure;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;
        bool buffer_is_mapped;

        struct list_head phy_receiver_link;
        u64 phy_receiver_closure;

        struct list_head link;
        struct kref kref;
};
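
/*
 * Reference counting, as implemented below: client->kref is held by the
 * open file, by each resource registered in client->resource_idr, and by
 * each iso_resource work item queued via schedule_iso_resource().
 */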

static inline void client_get(struct client *client)
{
        kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
        struct client *client = container_of(kref, struct client, kref);

        fw_device_put(client->device);
        kfree(client);
}

static void client_put(struct client *client)
{
        kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
                                             struct client_resource *);
struct client_resource {
        client_resource_release_fn_t release;
        int handle;
};

struct address_handler_resource {
        struct client_resource resource;
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
};

struct outbound_transaction_resource {
        struct client_resource resource;
        struct fw_transaction transaction;
};

struct inbound_transaction_resource {
        struct client_resource resource;
        struct fw_card *card;
        struct fw_request *request;
        void *data;
        size_t length;
};

struct descriptor_resource {
        struct client_resource resource;
        struct fw_descriptor descriptor;
        u32 data[0];
};

struct iso_resource {
        struct client_resource resource;
        struct client *client;
        /* Schedule work and access todo only with client->lock held. */
        struct delayed_work work;
        enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
              ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
        int generation;
        u64 channels;
        s32 bandwidth;
        struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
        client_get(r->client);
        if (!queue_delayed_work(fw_workqueue, &r->work, delay))
                client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
        if (resource->release == release_iso_resource)
                schedule_iso_resource(container_of(resource,
                                        struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};

struct bus_reset_event {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
        struct event event;
        struct client *client;
        struct outbound_transaction_resource r;
        struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
        struct event event;
        union {
                struct fw_cdev_event_request request;
                struct fw_cdev_event_request2 request2;
        } req;
};

struct iso_interrupt_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
        struct event event;
        struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
        struct event event;
        struct client *client;
        struct fw_packet p;
        struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
        struct event event;
        struct fw_cdev_event_phy_packet phy_packet;
};

#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
        if (is_compat_task())
                return compat_ptr(value);
        else
                return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
        if (is_compat_task())
                return ptr_to_compat(ptr);
        else
                return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
        return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */
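
/*
 * Why compat_ptr() above: a 32-bit process stores pointers as 32-bit
 * values in the u64 ABI fields, and compat_ptr() converts them correctly
 * on all architectures (on s390, for instance, it also clears the 31-bit
 * addressing bit), so a plain cast would not be equivalent.
 */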

static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;

        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        if (fw_device_is_shutdown(device)) {
                fw_device_put(device);
                return -ENODEV;
        }

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL) {
                fw_device_put(device);
                return -ENOMEM;
        }

        client->device = device;
        spin_lock_init(&client->lock);
        idr_init(&client->resource_idr);
        INIT_LIST_HEAD(&client->event_list);
        init_waitqueue_head(&client->wait);
        init_waitqueue_head(&client->tx_flush_wait);
        INIT_LIST_HEAD(&client->phy_receiver_link);
        INIT_LIST_HEAD(&client->link);
        kref_init(&client->kref);

        file->private_data = client;

        return nonseekable_open(inode, file);
}

static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                kfree(event);
        else
                list_add_tail(&event->link, &client->event_list);
        spin_unlock_irqrestore(&client->lock, flags);

        wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
                         char __user *buffer, size_t count)
{
        struct event *event;
        size_t size, total;
        int i, ret;

        ret = wait_event_interruptible(client->wait,
                        !list_empty(&client->event_list) ||
                        fw_device_is_shutdown(client->device));
        if (ret < 0)
                return ret;

        if (list_empty(&client->event_list) &&
            fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irq(&client->lock);
        event = list_first_entry(&client->event_list, struct event, link);
        list_del(&event->link);
        spin_unlock_irq(&client->lock);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        ret = -EFAULT;
                        goto out;
                }
                total += size;
        }
        ret = total;

 out:
        kfree(event);

        return ret;
}
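
/*
 * Each read() returns at most one event.  A minimal sketch of a userspace
 * event loop (hypothetical example, not part of this driver):
 *
 *	union fw_cdev_event e;
 *
 *	while (read(fd, &e, sizeof(e)) > 0) {
 *		switch (e.common.type) {
 *		case FW_CDEV_EVENT_BUS_RESET:
 *			handle_reset(&e.bus_reset);
 *			break;
 *		case FW_CDEV_EVENT_RESPONSE:
 *			handle_response(&e.response);
 *			break;
 *		}
 *	}
 *
 * where handle_reset() and handle_response() are user-defined.  Events with
 * a variable-length payload need a buffer larger than
 * sizeof(union fw_cdev_event).
 */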

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
                                 size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                                 struct client *client)
{
        struct fw_card *card = client->device->card;

        spin_lock_irq(&card->lock);

        event->closure = client->bus_reset_closure;
        event->type = FW_CDEV_EVENT_BUS_RESET;
        event->generation = client->device->generation;
        event->node_id = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id = card->bm_node_id;
        event->irm_node_id = card->irm_node->node_id;
        event->root_node_id = card->root_node->node_id;

        spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
                            void (*callback)(struct client *client))
{
        struct client *c;

        mutex_lock(&device->client_list_mutex);
        list_for_each_entry(c, &device->client_list, link)
                callback(c);
        mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
        schedule_if_iso_resource(p);

        return 0;
}

static void queue_bus_reset_event(struct client *client)
{
        struct bus_reset_event *e;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL) {
                fw_notice(client->device->card, "out of memory when allocating event\n");
                return;
        }

        fill_bus_reset_event(&e->reset, client);

        queue_event(client, &e->event,
                    &e->reset, sizeof(e->reset), NULL, 0);

        spin_lock_irq(&client->lock);
        idr_for_each(&client->resource_idr, schedule_reallocations, client);
        spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}

union ioctl_arg {
        struct fw_cdev_get_info			get_info;
        struct fw_cdev_send_request		send_request;
        struct fw_cdev_allocate			allocate;
        struct fw_cdev_deallocate		deallocate;
        struct fw_cdev_send_response		send_response;
        struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
        struct fw_cdev_add_descriptor		add_descriptor;
        struct fw_cdev_remove_descriptor	remove_descriptor;
        struct fw_cdev_create_iso_context	create_iso_context;
        struct fw_cdev_queue_iso		queue_iso;
        struct fw_cdev_start_iso		start_iso;
        struct fw_cdev_stop_iso			stop_iso;
        struct fw_cdev_get_cycle_timer		get_cycle_timer;
        struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
        struct fw_cdev_send_stream_packet	send_stream_packet;
        struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
        struct fw_cdev_send_phy_packet		send_phy_packet;
        struct fw_cdev_receive_phy_packets	receive_phy_packets;
        struct fw_cdev_set_iso_channels		set_iso_channels;
        struct fw_cdev_flush_iso		flush_iso;
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_info *a = &arg->get_info;
        struct fw_cdev_event_bus_reset bus_reset;
        unsigned long ret = 0;

        client->version = a->version;
        a->version = FW_CDEV_KERNEL_VERSION;
        a->card = client->device->card->index;

        down_read(&fw_device_rwsem);

        if (a->rom != 0) {
                size_t want = a->rom_length;
                size_t have = client->device->config_rom_length * 4;

                ret = copy_to_user(u64_to_uptr(a->rom),
                                   client->device->config_rom, min(want, have));
        }
        a->rom_length = client->device->config_rom_length * 4;

        up_read(&fw_device_rwsem);

        if (ret != 0)
                return -EFAULT;

        mutex_lock(&client->device->client_list_mutex);

        client->bus_reset_closure = a->bus_reset_closure;
        if (a->bus_reset != 0) {
                fill_bus_reset_event(&bus_reset, client);
                ret = copy_to_user(u64_to_uptr(a->bus_reset),
                                   &bus_reset, sizeof(bus_reset));
        }
        if (ret == 0 && list_empty(&client->link))
                list_add_tail(&client->link, &client->device->client_list);

        mutex_unlock(&client->device->client_list_mutex);

        return ret ? -EFAULT : 0;
}
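
/*
 * A minimal userspace sketch of the version/info handshake handled above
 * (hypothetical example, not part of this driver):
 *
 *	struct fw_cdev_get_info info = { .version = 4 };
 *	int fd = open("/dev/fw0", O_RDWR);
 *
 *	if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) == 0)
 *		printf("kernel ABI %u, card %u\n", info.version, info.card);
 *
 * .version carries the ABI version the client implements on the way in and
 * the kernel's FW_CDEV_KERNEL_VERSION on the way out.
 */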

static int add_client_resource(struct client *client,
                               struct client_resource *resource, gfp_t gfp_mask)
{
        unsigned long flags;
        int ret;

 retry:
        if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
                return -ENOMEM;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                ret = -ECANCELED;
        else
                ret = idr_get_new(&client->resource_idr, resource,
                                  &resource->handle);
        if (ret >= 0) {
                client_get(client);
                schedule_if_iso_resource(resource);
        }
        spin_unlock_irqrestore(&client->lock, flags);

        if (ret == -EAGAIN)
                goto retry;

        return ret < 0 ? ret : 0;
}
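
/*
 * Note on the retry loop above: with this old idr API, idr_pre_get()
 * preallocates memory and idr_get_new() can still fail with -EAGAIN when
 * another thread consumed the preallocation, so the allocation is retried
 * until it either succeeds or fails for a different reason.  (Later kernels
 * replaced this pattern with idr_alloc().)
 */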

static int release_client_resource(struct client *client, u32 handle,
                                   client_resource_release_fn_t release,
                                   struct client_resource **return_resource)
{
        struct client_resource *resource;

        spin_lock_irq(&client->lock);
        if (client->in_shutdown)
                resource = NULL;
        else
                resource = idr_find(&client->resource_idr, handle);
        if (resource && resource->release == release)
                idr_remove(&client->resource_idr, handle);
        spin_unlock_irq(&client->lock);

        if (!(resource && resource->release == release))
                return -EINVAL;

        if (return_resource)
                *return_resource = resource;
        else
                resource->release(client, resource);

        client_put(client);

        return 0;
}

static void release_transaction(struct client *client,
                                struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode,
                                 void *payload, size_t length, void *data)
{
        struct outbound_transaction_event *e = data;
        struct fw_cdev_event_response *rsp = &e->response;
        struct client *client = e->client;
        unsigned long flags;

        if (length < rsp->length)
                rsp->length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(rsp->data, payload, rsp->length);

        spin_lock_irqsave(&client->lock, flags);
        idr_remove(&client->resource_idr, e->r.resource.handle);
        if (client->in_shutdown)
                wake_up(&client->tx_flush_wait);
        spin_unlock_irqrestore(&client->lock, flags);

        rsp->type = FW_CDEV_EVENT_RESPONSE;
        rsp->rcode = rcode;

        /*
         * In the case that sizeof(*rsp) doesn't align with the position of the
         * data, and the read is short, preserve an extra copy of the data
         * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
         * for short reads and some apps depended on it, this is both safe
         * and prudent for compatibility.
         */
        if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
                queue_event(client, &e->event, rsp, sizeof(*rsp),
                            rsp->data, rsp->length);
        else
                queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
                            NULL, 0);

        /* Drop the idr's reference */
        client_put(client);
}

static int init_request(struct client *client,
                        struct fw_cdev_send_request *request,
                        int destination_id, int speed)
{
        struct outbound_transaction_event *e;
        int ret;

        if (request->tcode != TCODE_STREAM_DATA &&
            (request->length > 4096 || request->length > 512 << speed))
                return -EIO;

        if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
            request->length < 4)
                return -EINVAL;

        e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        e->client = client;
        e->response.length = request->length;
        e->response.closure = request->closure;

        if (request->data &&
            copy_from_user(e->response.data,
                           u64_to_uptr(request->data), request->length)) {
                ret = -EFAULT;
                goto failed;
        }

        e->r.resource.release = release_transaction;
        ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
        if (ret < 0)
                goto failed;

        fw_send_request(client->device->card, &e->r.transaction,
                        request->tcode, destination_id, request->generation,
                        speed, request->offset, e->response.data,
                        request->length, complete_transaction, e);
        return 0;

 failed:
        kfree(e);

        return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
        switch (arg->send_request.tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_READ_QUADLET_REQUEST:
        case TCODE_READ_BLOCK_REQUEST:
        case TCODE_LOCK_MASK_SWAP:
        case TCODE_LOCK_COMPARE_SWAP:
        case TCODE_LOCK_FETCH_ADD:
        case TCODE_LOCK_LITTLE_ADD:
        case TCODE_LOCK_BOUNDED_ADD:
        case TCODE_LOCK_WRAP_ADD:
        case TCODE_LOCK_VENDOR_DEPENDENT:
                break;
        default:
                return -EINVAL;
        }

        return init_request(client, &arg->send_request, client->device->node_id,
                            client->device->max_speed);
}

static inline bool is_fcp_request(struct fw_request *request)
{
        return request == NULL;
}
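
/*
 * The core hands FCP writes (to the FCP command/response registers) to
 * address handlers with a NULL fw_request because it has already sent the
 * response itself; such inbound requests only carry data and must not be
 * answered via fw_send_response().
 */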

static void release_request(struct client *client,
                            struct client_resource *resource)
{
        struct inbound_transaction_resource *r = container_of(resource,
                        struct inbound_transaction_resource, resource);

        if (is_fcp_request(r->request))
                kfree(r->data);
        else
                fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

        fw_card_put(r->card);
        kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
                           int tcode, int destination, int source,
                           int generation, unsigned long long offset,
                           void *payload, size_t length, void *callback_data)
{
        struct address_handler_resource *handler = callback_data;
        struct inbound_transaction_resource *r;
        struct inbound_transaction_event *e;
        size_t event_size0;
        void *fcp_frame = NULL;
        int ret;

        /* card may be different from handler->client->device->card */
        fw_card_get(card);

        r = kmalloc(sizeof(*r), GFP_ATOMIC);
        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (r == NULL || e == NULL) {
                fw_notice(card, "out of memory when allocating event\n");
                goto failed;
        }
        r->card = card;
        r->request = request;
        r->data = payload;
        r->length = length;

        if (is_fcp_request(request)) {
                /*
                 * FIXME: Let core-transaction.c manage a
                 * single reference-counted copy?
                 */
                fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
                if (fcp_frame == NULL)
                        goto failed;

                r->data = fcp_frame;
        }

        r->resource.release = release_request;
        ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
        if (ret < 0)
                goto failed;

        if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
                struct fw_cdev_event_request *req = &e->req.request;

                if (tcode & 0x10)
                        tcode = TCODE_LOCK_REQUEST;

                req->type = FW_CDEV_EVENT_REQUEST;
                req->tcode = tcode;
                req->offset = offset;
                req->length = length;
                req->handle = r->resource.handle;
                req->closure = handler->closure;
                event_size0 = sizeof(*req);
        } else {
                struct fw_cdev_event_request2 *req = &e->req.request2;

                req->type = FW_CDEV_EVENT_REQUEST2;
                req->tcode = tcode;
                req->offset = offset;
                req->source_node_id = source;
                req->destination_node_id = destination;
                req->card = card->index;
                req->generation = generation;
                req->length = length;
                req->handle = r->resource.handle;
                req->closure = handler->closure;
                event_size0 = sizeof(*req);
        }

        queue_event(handler->client, &e->event,
                    &e->req, event_size0, r->data, length);
        return;

 failed:
        kfree(r);
        kfree(e);
        kfree(fcp_frame);

        if (!is_fcp_request(request))
                fw_send_response(card, request, RCODE_CONFLICT_ERROR);

        fw_card_put(card);
}

static void release_address_handler(struct client *client,
                                    struct client_resource *resource)
{
        struct address_handler_resource *r =
                container_of(resource, struct address_handler_resource, resource);

        fw_core_remove_address_handler(&r->handler);
        kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_allocate *a = &arg->allocate;
        struct address_handler_resource *r;
        struct fw_address_region region;
        int ret;

        r = kmalloc(sizeof(*r), GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        region.start = a->offset;
        if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
                region.end = a->offset + a->length;
        else
                region.end = a->region_end;

        r->handler.length = a->length;
        r->handler.address_callback = handle_request;
        r->handler.callback_data = r;
        r->closure = a->closure;
        r->client = client;

        ret = fw_core_add_address_handler(&r->handler, &region);
        if (ret < 0) {
                kfree(r);
                return ret;
        }
        a->offset = r->handler.offset;

        r->resource.release = release_address_handler;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                release_address_handler(client, &r->resource);
                return ret;
        }
        a->handle = r->resource.handle;

        return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->deallocate.handle,
                                       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_response *a = &arg->send_response;
        struct client_resource *resource;
        struct inbound_transaction_resource *r;
        int ret = 0;

        if (release_client_resource(client, a->handle,
                                    release_request, &resource) < 0)
                return -EINVAL;

        r = container_of(resource, struct inbound_transaction_resource,
                         resource);
        if (is_fcp_request(r->request))
                goto out;

        if (a->length != fw_get_response_length(r->request)) {
                ret = -EINVAL;
                kfree(r->request);
                goto out;
        }
        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
                ret = -EFAULT;
                kfree(r->request);
                goto out;
        }
        fw_send_response(r->card, r->request, a->rcode);
 out:
        fw_card_put(r->card);
        kfree(r);

        return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
        fw_schedule_bus_reset(client->device->card, true,
                        arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
        return 0;
}

static void release_descriptor(struct client *client,
                               struct client_resource *resource)
{
        struct descriptor_resource *r =
                container_of(resource, struct descriptor_resource, resource);

        fw_core_remove_descriptor(&r->descriptor);
        kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
        struct descriptor_resource *r;
        int ret;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        if (a->length > 256)
                return -EINVAL;

        r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
                ret = -EFAULT;
                goto failed;
        }

        r->descriptor.length = a->length;
        r->descriptor.immediate = a->immediate;
        r->descriptor.key = a->key;
        r->descriptor.data = r->data;

        ret = fw_core_add_descriptor(&r->descriptor);
        if (ret < 0)
                goto failed;

        r->resource.release = release_descriptor;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                fw_core_remove_descriptor(&r->descriptor);
                goto failed;
        }
        a->handle = r->resource.handle;

        return 0;
 failed:
        kfree(r);

        return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->remove_descriptor.handle,
                                       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
                         size_t header_length, void *header, void *data)
{
        struct client *client = data;
        struct iso_interrupt_event *e;

        e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
        if (e == NULL) {
                fw_notice(context->card, "out of memory when allocating event\n");
                return;
        }
        e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
        e->interrupt.closure = client->iso_closure;
        e->interrupt.cycle = cycle;
        e->interrupt.header_length = header_length;
        memcpy(e->interrupt.header, header, header_length);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
                            dma_addr_t completed, void *data)
{
        struct client *client = data;
        struct iso_interrupt_mc_event *e;

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL) {
                fw_notice(context->card, "out of memory when allocating event\n");
                return;
        }
        e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
        e->interrupt.closure = client->iso_closure;
        e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
                                                      completed);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt), NULL, 0);
}

static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
{
        if (context->type == FW_ISO_CONTEXT_TRANSMIT)
                return DMA_TO_DEVICE;
        else
                return DMA_FROM_DEVICE;
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
        struct fw_iso_context *context;
        fw_iso_callback_t cb;
        int ret;

        BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
                                        FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

        switch (a->type) {
        case FW_ISO_CONTEXT_TRANSMIT:
                if (a->speed > SCODE_3200 || a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE:
                if (a->header_size < 4 || (a->header_size & 3) ||
                    a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                cb = (fw_iso_callback_t)iso_mc_callback;
                break;

        default:
                return -EINVAL;
        }

        context = fw_iso_context_create(client->device->card, a->type,
                        a->channel, a->speed, a->header_size, cb, client);
        if (IS_ERR(context))
                return PTR_ERR(context);

        /* We only support one context at this time. */
        spin_lock_irq(&client->lock);
        if (client->iso_context != NULL) {
                spin_unlock_irq(&client->lock);
                fw_iso_context_destroy(context);

                return -EBUSY;
        }
        if (!client->buffer_is_mapped) {
                ret = fw_iso_buffer_map_dma(&client->buffer,
                                            client->device->card,
                                            iso_dma_direction(context));
                if (ret < 0) {
                        spin_unlock_irq(&client->lock);
                        fw_iso_context_destroy(context);

                        return ret;
                }
                client->buffer_is_mapped = true;
        }
        client->iso_closure = a->closure;
        client->iso_context = context;
        spin_unlock_irq(&client->lock);

        a->handle = 0;

        return 0;
}

static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
        struct fw_iso_context *ctx = client->iso_context;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
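
/*
 * These decode the control word that userspace packs with the matching
 * FW_CDEV_ISO_* macros from linux/firewire-cdev.h, e.g. (sketch):
 *
 *	p->control = FW_CDEV_ISO_PAYLOAD_LENGTH(len)
 *		   | FW_CDEV_ISO_TAG(1)
 *		   | FW_CDEV_ISO_SY(0)
 *		   | FW_CDEV_ISO_INTERRUPT;
 */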

static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_queue_iso *a = &arg->queue_iso;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, transmit_header_bytes = 0;
        u32 control;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        /*
         * If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we set up the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the a->data pointer is ignored.
         */
        payload = (unsigned long)a->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (a->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
                return -EINVAL;

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
        if (!access_ok(VERIFY_READ, p, a->size))
                return -EFAULT;

        end = (void __user *)p + a->size;
        count = 0;
        while (p < end) {
                if (get_user(control, &p->control))
                        return -EFAULT;
                u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
                u.packet.interrupt = GET_INTERRUPT(control);
                u.packet.skip = GET_SKIP(control);
                u.packet.tag = GET_TAG(control);
                u.packet.sy = GET_SY(control);
                u.packet.header_length = GET_HEADER_LENGTH(control);

                switch (ctx->type) {
                case FW_ISO_CONTEXT_TRANSMIT:
                        if (u.packet.header_length & 3)
                                return -EINVAL;
                        transmit_header_bytes = u.packet.header_length;
                        break;

                case FW_ISO_CONTEXT_RECEIVE:
                        if (u.packet.header_length == 0 ||
                            u.packet.header_length % ctx->header_size != 0)
                                return -EINVAL;
                        break;

                case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                        if (u.packet.payload_length == 0 ||
                            u.packet.payload_length & 3)
                                return -EINVAL;
                        break;
                }

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[transmit_header_bytes / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, transmit_header_bytes))
                        return -EFAULT;
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }
        fw_iso_context_queue_flush(ctx);

        a->size -= uptr_to_u64(p) - a->packets;
        a->packets = uptr_to_u64(p);
        a->data = client->vm_start + payload;

        return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_start_iso *a = &arg->start_iso;

        BUILD_BUG_ON(
            FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
            FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
            (a->tags == 0 || a->tags > 15 || a->sync > 15))
                return -EINVAL;

        return fw_iso_context_start(client->iso_context,
                                    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_stop_iso *a = &arg->stop_iso;

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_stop(client->iso_context);
}

static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_flush_iso *a = &arg->flush_iso;

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_flush_completions(client->iso_context);
}

static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
        struct fw_card *card = client->device->card;
        struct timespec ts = {0, 0};
        u32 cycle_time;
        int ret = 0;

        local_irq_disable();

        cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

        switch (a->clk_id) {
        case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
        case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
        case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
        default:
                ret = -EINVAL;
        }

        local_irq_enable();

        a->tv_sec = ts.tv_sec;
        a->tv_nsec = ts.tv_nsec;
        a->cycle_timer = cycle_time;

        return ret;
}
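
/*
 * The returned cycle_timer follows the layout of the CSR CYCLE_TIME
 * register (IEEE 1394): bits 31-25 count seconds, bits 24-12 count the
 * 8000-per-second isochronous cycles, and bits 11-0 count 24.576 MHz
 * ticks within a cycle.  A decoding sketch:
 *
 *	sec          =  cycle_timer >> 25;
 *	cycles       = (cycle_timer >> 12) & 0x1fff;
 *	cycle_offset =  cycle_timer & 0xfff;
 */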

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
        struct fw_cdev_get_cycle_timer2 ct2;

        ct2.clk_id = CLOCK_REALTIME;
        ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

        a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
        a->cycle_timer = ct2.cycle_timer;

        return 0;
}

static void iso_resource_work(struct work_struct *work)
{
        struct iso_resource_event *e;
        struct iso_resource *r =
                        container_of(work, struct iso_resource, work.work);
        struct client *client = r->client;
        int generation, channel, bandwidth, todo;
        bool skip, free, success;

        spin_lock_irq(&client->lock);
        generation = client->device->generation;
        todo = r->todo;
        /* Allow 1000ms grace period for other reallocations. */
        if (todo == ISO_RES_ALLOC &&
            time_before64(get_jiffies_64(),
                          client->device->card->reset_jiffies + HZ)) {
                schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
                skip = true;
        } else {
                /* We could be called twice within the same generation. */
                skip = todo == ISO_RES_REALLOC &&
                       r->generation == generation;
        }
        free = todo == ISO_RES_DEALLOC ||
               todo == ISO_RES_ALLOC_ONCE ||
               todo == ISO_RES_DEALLOC_ONCE;
        r->generation = generation;
        spin_unlock_irq(&client->lock);

        if (skip)
                goto out;

        bandwidth = r->bandwidth;

        fw_iso_resource_manage(client->device->card, generation,
                        r->channels, &channel, &bandwidth,
                        todo == ISO_RES_ALLOC ||
                        todo == ISO_RES_REALLOC ||
                        todo == ISO_RES_ALLOC_ONCE);
        /*
         * Is this generation outdated already?  As long as this resource sticks
         * in the idr, it will be scheduled again for a newer generation or at
         * shutdown.
         */
        if (channel == -EAGAIN &&
            (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
                goto out;

        success = channel >= 0 || bandwidth > 0;

        spin_lock_irq(&client->lock);
        /*
         * Transit from allocation to reallocation, except if the client
         * requested deallocation in the meantime.
         */
        if (r->todo == ISO_RES_ALLOC)
                r->todo = ISO_RES_REALLOC;
        /*
         * Allocation or reallocation failure?  Pull this resource out of the
         * idr and prepare for deletion, unless the client is shutting down.
         */
        if (r->todo == ISO_RES_REALLOC && !success &&
            !client->in_shutdown &&
            idr_find(&client->resource_idr, r->resource.handle)) {
                idr_remove(&client->resource_idr, r->resource.handle);
                client_put(client);
                free = true;
        }
        spin_unlock_irq(&client->lock);

        if (todo == ISO_RES_ALLOC && channel >= 0)
                r->channels = 1ULL << channel;

        if (todo == ISO_RES_REALLOC && success)
                goto out;

        if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
                e = r->e_alloc;
                r->e_alloc = NULL;
        } else {
                e = r->e_dealloc;
                r->e_dealloc = NULL;
        }
        e->iso_resource.handle = r->resource.handle;
        e->iso_resource.channel = channel;
        e->iso_resource.bandwidth = bandwidth;

        queue_event(client, &e->event,
                    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

        if (free) {
                cancel_delayed_work(&r->work);
                kfree(r->e_alloc);
                kfree(r->e_dealloc);
                kfree(r);
        }
 out:
        client_put(client);
}

static void release_iso_resource(struct client *client,
                                 struct client_resource *resource)
{
        struct iso_resource *r =
                container_of(resource, struct iso_resource, resource);

        spin_lock_irq(&client->lock);
        r->todo = ISO_RES_DEALLOC;
        schedule_iso_resource(r, 0);
        spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
                struct fw_cdev_allocate_iso_resource *request, int todo)
{
        struct iso_resource_event *e1, *e2;
        struct iso_resource *r;
        int ret;

        if ((request->channels == 0 && request->bandwidth == 0) ||
            request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
            request->bandwidth < 0)
                return -EINVAL;

        r  = kmalloc(sizeof(*r), GFP_KERNEL);
        e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
        e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
        if (r == NULL || e1 == NULL || e2 == NULL) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_DELAYED_WORK(&r->work, iso_resource_work);
        r->client = client;
        r->todo = todo;
        r->generation = -1;
        r->channels = request->channels;
        r->bandwidth = request->bandwidth;
        r->e_alloc = e1;
        r->e_dealloc = e2;

        e1->iso_resource.closure = request->closure;
        e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
        e2->iso_resource.closure = request->closure;
        e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

        if (todo == ISO_RES_ALLOC) {
                r->resource.release = release_iso_resource;
                ret = add_client_resource(client, &r->resource, GFP_KERNEL);
                if (ret < 0)
                        goto fail;
        } else {
                r->resource.release = NULL;
                r->resource.handle = -1;
                schedule_iso_resource(r, 0);
        }
        request->handle = r->resource.handle;

        return 0;
 fail:
        kfree(r);
        kfree(e1);
        kfree(e2);

        return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
                                       union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
                                         union ioctl_arg *arg)
{
        return release_client_resource(client,
                        arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
                                            union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
                                              union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
        return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
                                        union ioctl_arg *arg)
{
        struct fw_cdev_send_request *a = &arg->send_request;

        switch (a->tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
                break;
        default:
                return -EINVAL;
        }

        /* Security policy: Only allow accesses to Units Space. */
        if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
                return -EACCES;

        return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
        struct fw_cdev_send_request request;
        int dest;

        if (a->speed > client->device->card->link_speed ||
            a->length > 1024 << a->speed)
                return -EIO;

        if (a->tag > 3 || a->channel > 63 || a->sy > 15)
                return -EINVAL;

        dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
        request.tcode = TCODE_STREAM_DATA;
        request.length = a->length;
        request.closure = a->closure;
        request.data = a->data;
        request.generation = a->generation;

        return init_request(client, &request, dest, a->speed);
}

static void outbound_phy_packet_callback(struct fw_packet *packet,
                                         struct fw_card *card, int status)
{
        struct outbound_phy_packet_event *e =
                container_of(packet, struct outbound_phy_packet_event, p);

        switch (status) {
        /* expected: */
        case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
        /* should never happen with PHY packets: */
        case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
        case ACK_BUSY_X:
        case ACK_BUSY_A:
        case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
        case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
        case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
        /* stale generation; cancelled; on certain controllers: no ack */
        default:		e->phy_packet.rcode = status;		break;
        }
        e->phy_packet.data[0] = packet->timestamp;

        queue_event(e->client, &e->event, &e->phy_packet,
                    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
        client_put(e->client);
}

static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
        struct fw_card *card = client->device->card;
        struct outbound_phy_packet_event *e;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        client_get(client);
        e->client = client;
        e->p.speed = SCODE_100;
        e->p.generation = a->generation;
        e->p.header[0] = TCODE_LINK_INTERNAL << 4;
        e->p.header[1] = a->data[0];
        e->p.header[2] = a->data[1];
        e->p.header_length = 12;
        e->p.callback = outbound_phy_packet_callback;
        e->phy_packet.closure = a->closure;
        e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT;
        if (is_ping_packet(a->data))
                e->phy_packet.length = 4;

        card->driver->send_request(card, &e->p);

        return 0;
}

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
        struct fw_card *card = client->device->card;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        spin_lock_irq(&card->lock);

        list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
        client->phy_receiver_closure = a->closure;

        spin_unlock_irq(&card->lock);

        return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
        struct client *client;
        struct inbound_phy_packet_event *e;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);

        list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
                e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
                if (e == NULL) {
                        fw_notice(card, "out of memory when allocating event\n");
                        break;
                }
                e->phy_packet.closure = client->phy_receiver_closure;
                e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
                e->phy_packet.rcode = RCODE_COMPLETE;
                e->phy_packet.length = 8;
                e->phy_packet.data[0] = p->header[1];
                e->phy_packet.data[1] = p->header[2];
                queue_event(client, &e->event,
                            &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
        }

        spin_unlock_irqrestore(&card->lock, flags);
}

static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
        [0x00] = ioctl_get_info,
        [0x01] = ioctl_send_request,
        [0x02] = ioctl_allocate,
        [0x03] = ioctl_deallocate,
        [0x04] = ioctl_send_response,
        [0x05] = ioctl_initiate_bus_reset,
        [0x06] = ioctl_add_descriptor,
        [0x07] = ioctl_remove_descriptor,
        [0x08] = ioctl_create_iso_context,
        [0x09] = ioctl_queue_iso,
        [0x0a] = ioctl_start_iso,
        [0x0b] = ioctl_stop_iso,
        [0x0c] = ioctl_get_cycle_timer,
        [0x0d] = ioctl_allocate_iso_resource,
        [0x0e] = ioctl_deallocate_iso_resource,
        [0x0f] = ioctl_allocate_iso_resource_once,
        [0x10] = ioctl_deallocate_iso_resource_once,
        [0x11] = ioctl_get_speed,
        [0x12] = ioctl_send_broadcast_request,
        [0x13] = ioctl_send_stream_packet,
        [0x14] = ioctl_get_cycle_timer2,
        [0x15] = ioctl_send_phy_packet,
        [0x16] = ioctl_receive_phy_packets,
        [0x17] = ioctl_set_iso_channels,
        [0x18] = ioctl_flush_iso,
};

static int dispatch_ioctl(struct client *client,
                          unsigned int cmd, void __user *arg)
{
        union ioctl_arg buffer;
        int ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        if (_IOC_TYPE(cmd) != '#' ||
            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
            _IOC_SIZE(cmd) > sizeof(buffer))
                return -ENOTTY;

        if (_IOC_DIR(cmd) == _IOC_READ)
                memset(&buffer, 0, _IOC_SIZE(cmd));

        if (_IOC_DIR(cmd) & _IOC_WRITE)
                if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;

        ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
        if (ret < 0)
                return ret;

        if (_IOC_DIR(cmd) & _IOC_READ)
                if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;

        return ret;
}
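
/*
 * dispatch_ioctl() relies on the standard encoding of ioctl numbers:
 * _IOC_TYPE() is the '#' magic, _IOC_NR() indexes ioctl_handlers[], and
 * _IOC_SIZE()/_IOC_DIR() drive the argument copy.  The command numbers in
 * linux/firewire-cdev.h are built accordingly, e.g.:
 *
 *	#define FW_CDEV_IOC_GET_INFO \
 *		_IOWR('#', 0x00, struct fw_cdev_get_info)
 */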

static long fw_device_op_ioctl(struct file *file,
                               unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
                                      unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct client *client = file->private_data;
        unsigned long size;
        int page_count, ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        /* FIXME: We could support multiple buffers, but we don't. */
        if (client->buffer.pages != NULL)
                return -EBUSY;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        client->vm_start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;
        page_count = size >> PAGE_SHIFT;
        if (size & ~PAGE_MASK)
                return -EINVAL;

        ret = fw_iso_buffer_alloc(&client->buffer, page_count);
        if (ret < 0)
                return ret;

        spin_lock_irq(&client->lock);
        if (client->iso_context) {
                ret = fw_iso_buffer_map_dma(&client->buffer,
                                client->device->card,
                                iso_dma_direction(client->iso_context));
                client->buffer_is_mapped = (ret == 0);
        }
        spin_unlock_irq(&client->lock);
        if (ret < 0)
                goto fail;

        ret = fw_iso_buffer_map_vma(&client->buffer, vma);
        if (ret < 0)
                goto fail;

        return 0;
 fail:
        fw_iso_buffer_destroy(&client->buffer, client->device->card);
        return ret;
}
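
/*
 * Userspace maps the iso buffer once per file and then refers to packet
 * payloads by addresses inside that mapping.  A minimal sketch
 * (hypothetical example, not part of this driver):
 *
 *	size_t size = 16 * 4096;
 *	void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 * fw_cdev_queue_iso.data then points into [buf, buf + size).
 */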

static int is_outbound_transaction_resource(int id, void *p, void *data)
{
        struct client_resource *resource = p;

        return resource->release == release_transaction;
}

static int has_outbound_transactions(struct client *client)
{
        int ret;

        spin_lock_irq(&client->lock);
        ret = idr_for_each(&client->resource_idr,
                           is_outbound_transaction_resource, NULL);
        spin_unlock_irq(&client->lock);

        return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
        struct client_resource *resource = p;
        struct client *client = data;

        resource->release(client, resource);
        client_put(client);

        return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct event *event, *next_event;

        spin_lock_irq(&client->device->card->lock);
        list_del(&client->phy_receiver_link);
        spin_unlock_irq(&client->device->card->lock);

        mutex_lock(&client->device->client_list_mutex);
        list_del(&client->link);
        mutex_unlock(&client->device->client_list_mutex);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        /* Freeze client->resource_idr and client->event_list */
        spin_lock_irq(&client->lock);
        client->in_shutdown = true;
        spin_unlock_irq(&client->lock);

        wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

        idr_for_each(&client->resource_idr, shutdown_resource, client);
        idr_remove_all(&client->resource_idr);
        idr_destroy(&client->resource_idr);

        list_for_each_entry_safe(event, next_event, &client->event_list, link)
                kfree(event);

        client_put(client);

        return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
        struct client *client = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &client->wait, pt);

        if (fw_device_is_shutdown(client->device))
                mask |= POLLHUP | POLLERR;
        if (!list_empty(&client->event_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}

const struct file_operations fw_device_ops = {
        .owner		= THIS_MODULE,
        .llseek		= no_llseek,
        .open		= fw_device_op_open,
        .read		= fw_device_op_read,
        .unlocked_ioctl	= fw_device_op_ioctl,
        .mmap		= fw_device_op_mmap,
        .release	= fw_device_op_release,
        .poll		= fw_device_op_poll,
#ifdef CONFIG_COMPAT
        .compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};