/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
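
/*
 * Userspace usage, as a minimal illustrative sketch (not part of this
 * file): a command is described by a struct sg_io_v4 from <linux/bsg.h>
 * and submitted to a /dev/bsg/* node.  The guard, protocol and
 * subprotocol fields are mandatory, as enforced by bsg_validate_sgv4_hdr()
 * below:
 *
 *        struct sg_io_v4 hdr = { 0 };
 *
 *        hdr.guard = 'Q';
 *        hdr.protocol = BSG_PROTOCOL_SCSI;
 *        hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *        hdr.request = (__u64)(unsigned long)cdb;        // SCSI CDB
 *        hdr.request_len = cdb_len;
 *
 * Submission is either asynchronous, via write()/read() of one or more
 * such headers (see bsg_write() and bsg_read()), or synchronous via the
 * SG_IO ioctl (see bsg_ioctl()).
 */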
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
#define BSG_VERSION "0.4"

struct bsg_device {
        struct request_queue *queue;
        spinlock_t lock;
        struct list_head busy_list;     /* commands submitted, not yet done */
        struct list_head done_list;     /* commands done, not yet reaped */
        struct hlist_node dev_list;
        atomic_t ref_count;
        int queued_cmds;
        int done_cmds;
        wait_queue_head_t wq_done;
        wait_queue_head_t wq_free;
        char name[20];
        int max_queue;                  /* depth, settable via SG_SET_COMMAND_Q */
        unsigned long flags;
};

enum {
        BSG_F_BLOCK = 1,
};

#define BSG_DEFAULT_CMDS 64
#define BSG_MAX_DEVS 32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE 8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
        struct bsg_device *bd;
        struct list_head list;
        struct request *rq;
        struct bio *bio;
        struct bio *bidi_bio;
        int err;
        struct sg_io_v4 hdr;
        char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
        struct bsg_device *bd = bc->bd;
        unsigned long flags;

        kmem_cache_free(bsg_cmd_cachep, bc);

        spin_lock_irqsave(&bd->lock, flags);
        bd->queued_cmds--;
        spin_unlock_irqrestore(&bd->lock, flags);

        wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
        struct bsg_command *bc = ERR_PTR(-EINVAL);

        spin_lock_irq(&bd->lock);

        if (bd->queued_cmds >= bd->max_queue)
                goto out;

        bd->queued_cmds++;
        spin_unlock_irq(&bd->lock);

        bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
        if (unlikely(!bc)) {
                spin_lock_irq(&bd->lock);
                bd->queued_cmds--;
                bc = ERR_PTR(-ENOMEM);
                goto out;
        }

        bc->bd = bd;
        INIT_LIST_HEAD(&bc->list);
        dprintk("%s: returning free cmd %p\n", bd->name, bc);
        return bc;
out:
        spin_unlock_irq(&bd->lock);
        return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
        return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

static int bsg_io_schedule(struct bsg_device *bd)
{
        DEFINE_WAIT(wait);
        int ret = 0;

        spin_lock_irq(&bd->lock);

        BUG_ON(bd->done_cmds > bd->queued_cmds);

        /*
         * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
         * work to do", even though we return -ENOSPC after this same test
         * during bsg_write() -- there, it means our buffer can't have more
         * bsg_commands added to it, thus has no space left.
         */
        if (bd->done_cmds == bd->queued_cmds) {
                ret = -ENODATA;
                goto unlock;
        }

        if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
                ret = -EAGAIN;
                goto unlock;
        }

        prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
        spin_unlock_irq(&bd->lock);
        io_schedule();
        finish_wait(&bd->wq_done, &wait);

        return ret;
unlock:
        spin_unlock_irq(&bd->lock);
        return ret;
}

static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
                                struct sg_io_v4 *hdr, struct bsg_device *bd,
                                fmode_t has_write_perm)
{
        if (hdr->request_len > BLK_MAX_CDB) {
                rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
                if (!rq->cmd)
                        return -ENOMEM;
        }

        if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
                           hdr->request_len))
                return -EFAULT;

        if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
                if (blk_verify_command(rq->cmd, has_write_perm))
                        return -EPERM;
        } else if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        /*
         * fill in request structure
         */
        rq->cmd_len = hdr->request_len;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;

        rq->timeout = msecs_to_jiffies(hdr->timeout);
        if (!rq->timeout)
                rq->timeout = q->sg_timeout;
        if (!rq->timeout)
                rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
        if (rq->timeout < BLK_MIN_SG_TIMEOUT)
                rq->timeout = BLK_MIN_SG_TIMEOUT;

        return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
        int ret = 0;

        if (hdr->guard != 'Q')
                return -EINVAL;

        switch (hdr->protocol) {
        case BSG_PROTOCOL_SCSI:
                switch (hdr->subprotocol) {
                case BSG_SUB_PROTOCOL_SCSI_CMD:
                case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
                        break;
                default:
                        ret = -EINVAL;
                }
                break;
        default:
                ret = -EINVAL;
        }

        *rw = hdr->dout_xfer_len ? WRITE : READ;
        return ret;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
            u8 *sense)
{
        struct request_queue *q = bd->queue;
        struct request *rq, *next_rq = NULL;
        int ret, rw;
        unsigned int dxfer_len;
        void __user *dxferp = NULL;
        struct bsg_class_device *bcd = &q->bsg_dev;

        /* if the LLD has been removed then the bsg_unregister_queue will
         * eventually be called and the class_dev was freed, so we can no
         * longer use this request_queue. Return no such address.
         */
        if (!bcd->class_dev)
                return ERR_PTR(-ENXIO);

        dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
                hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
                hdr->din_xfer_len);

        ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
        if (ret)
                return ERR_PTR(ret);

        /*
         * map scatter-gather elements separately and string them to request
         */
        rq = blk_get_request(q, rw, GFP_KERNEL);
        if (!rq)
                return ERR_PTR(-ENOMEM);
        ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
        if (ret)
                goto out;

        if (rw == WRITE && hdr->din_xfer_len) {
                if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
                        ret = -EOPNOTSUPP;
                        goto out;
                }

                next_rq = blk_get_request(q, READ, GFP_KERNEL);
                if (!next_rq) {
                        ret = -ENOMEM;
                        goto out;
                }
                rq->next_rq = next_rq;
                next_rq->cmd_type = rq->cmd_type;

                dxferp = (void __user *)(unsigned long)hdr->din_xferp;
                ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
                                      hdr->din_xfer_len, GFP_KERNEL);
                if (ret)
                        goto out;
        }

        if (hdr->dout_xfer_len) {
                dxfer_len = hdr->dout_xfer_len;
                dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
        } else if (hdr->din_xfer_len) {
                dxfer_len = hdr->din_xfer_len;
                dxferp = (void __user *)(unsigned long)hdr->din_xferp;
        } else
                dxfer_len = 0;

        if (dxfer_len) {
                ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
                                      GFP_KERNEL);
                if (ret)
                        goto out;
        }

        rq->sense = sense;
        rq->sense_len = 0;

        return rq;
out:
        if (rq->cmd != rq->__cmd)
                kfree(rq->cmd);
        blk_put_request(rq);
        if (next_rq) {
                blk_rq_unmap_user(next_rq->bio);
                blk_put_request(next_rq);
        }
        return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
        struct bsg_command *bc = rq->end_io_data;
        struct bsg_device *bd = bc->bd;
        unsigned long flags;

        dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
                bd->name, rq, bc, bc->bio, uptodate);

        bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

        spin_lock_irqsave(&bd->lock, flags);
        list_move_tail(&bc->list, &bd->done_list);
        bd->done_cmds++;
        spin_unlock_irqrestore(&bd->lock, flags);

        wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
                            struct bsg_command *bc, struct request *rq)
{
        int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

        /*
         * add bc command to busy queue and submit rq for io
         */
        bc->rq = rq;
        bc->bio = rq->bio;
        if (rq->next_rq)
                bc->bidi_bio = rq->next_rq->bio;
        bc->hdr.duration = jiffies;
        spin_lock_irq(&bd->lock);
        list_add_tail(&bc->list, &bd->busy_list);
        spin_unlock_irq(&bd->lock);

        dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

        rq->end_io_data = bc;
        blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
        struct bsg_command *bc = NULL;

        spin_lock_irq(&bd->lock);
        if (bd->done_cmds) {
                bc = list_first_entry(&bd->done_list, struct bsg_command, list);
                list_del(&bc->list);
                bd->done_cmds--;
        }
        spin_unlock_irq(&bd->lock);

        return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
        struct bsg_command *bc;
        int ret;

        do {
                bc = bsg_next_done_cmd(bd);
                if (bc)
                        break;

                if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
                        bc = ERR_PTR(-EAGAIN);
                        break;
                }

                ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
                if (ret) {
                        bc = ERR_PTR(-ERESTARTSYS);
                        break;
                }
        } while (1);

        dprintk("%s: returning done %p\n", bd->name, bc);

        return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
                                    struct bio *bio, struct bio *bidi_bio)
{
        int ret = 0;

        dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
        /*
         * fill in all the output members
         */
        hdr->device_status = rq->errors & 0xff;
        hdr->transport_status = host_byte(rq->errors);
        hdr->driver_status = driver_byte(rq->errors);
        hdr->info = 0;
        if (hdr->device_status || hdr->transport_status || hdr->driver_status)
                hdr->info |= SG_INFO_CHECK;
        hdr->response_len = 0;

        if (rq->sense_len && hdr->response) {
                int len = min_t(unsigned int, hdr->max_response_len,
                                rq->sense_len);

                ret = copy_to_user((void __user *)(unsigned long)hdr->response,
                                   rq->sense, len);
                if (!ret)
                        hdr->response_len = len;
                else
                        ret = -EFAULT;
        }

        if (rq->next_rq) {
                hdr->dout_resid = rq->resid_len;
                hdr->din_resid = rq->next_rq->resid_len;
                blk_rq_unmap_user(bidi_bio);
                blk_put_request(rq->next_rq);
        } else if (rq_data_dir(rq) == READ)
                hdr->din_resid = rq->resid_len;
        else
                hdr->dout_resid = rq->resid_len;

        /*
         * If the request generated a negative error number, return it
         * (providing we aren't already returning an error); if it's
         * just a protocol response (i.e. non negative), that gets
         * processed above.
         */
        if (!ret && rq->errors < 0)
                ret = rq->errors;

        blk_rq_unmap_user(bio);
        if (rq->cmd != rq->__cmd)
                kfree(rq->cmd);
        blk_put_request(rq);

        return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
        struct bsg_command *bc;
        int ret, tret;

        dprintk("%s: entered\n", bd->name);

        /*
         * wait for all commands to complete
         */
        ret = 0;
        do {
                ret = bsg_io_schedule(bd);
                /*
                 * look for -ENODATA specifically -- we'll sometimes get
                 * -ERESTARTSYS when we've taken a signal, but we can't
                 * return until we're done freeing the queue, so ignore
                 * it.  The signal will get handled when we're done freeing
                 * the bsg_device.
                 */
        } while (ret != -ENODATA);

        /*
         * discard done commands
         */
        ret = 0;
        do {
                spin_lock_irq(&bd->lock);
                if (!bd->queued_cmds) {
                        spin_unlock_irq(&bd->lock);
                        break;
                }
                spin_unlock_irq(&bd->lock);

                bc = bsg_get_done_cmd(bd);
                if (IS_ERR(bc))
                        break;

                tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
                                                bc->bidi_bio);
                if (!ret)
                        ret = tret;

                bsg_free_command(bc);
        } while (1);

        return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
           const struct iovec *iov, ssize_t *bytes_read)
{
        struct bsg_command *bc;
        int nr_commands, ret;

        if (count % sizeof(struct sg_io_v4))
                return -EINVAL;

        ret = 0;
        nr_commands = count / sizeof(struct sg_io_v4);
        while (nr_commands) {
                bc = bsg_get_done_cmd(bd);
                if (IS_ERR(bc)) {
                        ret = PTR_ERR(bc);
                        break;
                }

                /*
                 * this is the only case where we need to copy data back
                 * after completing the request.  so do that here,
                 * bsg_complete_work() cannot do that for us
                 */
                ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
                                               bc->bidi_bio);

                if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
                        ret = -EFAULT;

                bsg_free_command(bc);

                if (ret)
                        break;

                buf += sizeof(struct sg_io_v4);
                *bytes_read += sizeof(struct sg_io_v4);
                nr_commands--;
        }

        return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
        if (file->f_flags & O_NONBLOCK)
                clear_bit(BSG_F_BLOCK, &bd->flags);
        else
                set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
        if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
                return 1;

        return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        struct bsg_device *bd = file->private_data;
        int ret;
        ssize_t bytes_read;

        dprintk("%s: read %Zd bytes\n", bd->name, count);

        bsg_set_block(bd, file);

        bytes_read = 0;
        ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
        *ppos = bytes_read;

        if (!bytes_read || err_block_err(ret))
                bytes_read = ret;

        return bytes_read;
}
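
/*
 * Asynchronous userspace usage of the read/write path above, as a minimal
 * illustrative sketch (not part of this file; error handling omitted).
 * Up to max_queue headers may be outstanding at once (adjustable with the
 * SG_SET_COMMAND_Q ioctl); write() returns without waiting for completion,
 * while read() sleeps for a completed command unless O_NONBLOCK is set:
 *
 *        write(fd, &hdr, sizeof(hdr));        // submit one sg_io_v4
 *        ...
 *        read(fd, &hdr, sizeof(hdr));         // reap one completed sg_io_v4
 *
 * The reaped header carries the completion status and residual counts
 * filled in by blk_complete_sgv4_hdr_rq() above.
 */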

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
                       size_t count, ssize_t *bytes_written,
                       fmode_t has_write_perm)
{
        struct bsg_command *bc;
        struct request *rq;
        int ret, nr_commands;

        if (count % sizeof(struct sg_io_v4))
                return -EINVAL;

        nr_commands = count / sizeof(struct sg_io_v4);
        rq = NULL;
        bc = NULL;
        ret = 0;
        while (nr_commands) {
                struct request_queue *q = bd->queue;

                bc = bsg_alloc_command(bd);
                if (IS_ERR(bc)) {
                        ret = PTR_ERR(bc);
                        bc = NULL;
                        break;
                }

                if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
                        ret = -EFAULT;
                        break;
                }

                /*
                 * get a request, fill in the blanks, and add to request queue
                 */
                rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        rq = NULL;
                        break;
                }

                bsg_add_command(bd, q, bc, rq);
                bc = NULL;
                rq = NULL;
                nr_commands--;
                buf += sizeof(struct sg_io_v4);
                *bytes_written += sizeof(struct sg_io_v4);
        }

        if (bc)
                bsg_free_command(bc);

        return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
        struct bsg_device *bd = file->private_data;
        ssize_t bytes_written;
        int ret;

        dprintk("%s: write %Zd bytes\n", bd->name, count);

        bsg_set_block(bd, file);

        bytes_written = 0;
        ret = __bsg_write(bd, buf, count, &bytes_written,
                          file->f_mode & FMODE_WRITE);

        *ppos = bytes_written;

        /*
         * return bytes written on non-fatal errors
         */
        if (!bytes_written || err_block_err(ret))
                bytes_written = ret;

        dprintk("%s: returning %Zd\n", bd->name, bytes_written);
        return bytes_written;
}

static struct bsg_device *bsg_alloc_device(void)
{
        struct bsg_device *bd;

        bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
        if (unlikely(!bd))
                return NULL;

        spin_lock_init(&bd->lock);

        bd->max_queue = BSG_DEFAULT_CMDS;

        INIT_LIST_HEAD(&bd->busy_list);
        INIT_LIST_HEAD(&bd->done_list);
        INIT_HLIST_NODE(&bd->dev_list);

        init_waitqueue_head(&bd->wq_free);
        init_waitqueue_head(&bd->wq_done);
        return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
        struct bsg_class_device *bcd =
                container_of(kref, struct bsg_class_device, ref);
        struct device *parent = bcd->parent;

        if (bcd->release)
                bcd->release(bcd->parent);

        put_device(parent);
}

static int bsg_put_device(struct bsg_device *bd)
{
        int ret = 0, do_free;
        struct request_queue *q = bd->queue;

        mutex_lock(&bsg_mutex);

        do_free = atomic_dec_and_test(&bd->ref_count);
        if (!do_free) {
                mutex_unlock(&bsg_mutex);
                goto out;
        }

        hlist_del(&bd->dev_list);
        mutex_unlock(&bsg_mutex);

        dprintk("%s: tearing down\n", bd->name);

        /*
         * close can always block
         */
        set_bit(BSG_F_BLOCK, &bd->flags);

        /*
         * correct error detection baddies here again.  it's the responsibility
         * of the app to properly reap commands before close() if it wants
         * fool-proof error detection
         */
        ret = bsg_complete_all_commands(bd);

        kfree(bd);
out:
        kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
        if (do_free)
                blk_put_queue(q);
        return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
                                         struct request_queue *rq,
                                         struct file *file)
{
        struct bsg_device *bd;
#ifdef BSG_DEBUG
        unsigned char buf[32];
#endif
        if (!blk_get_queue(rq))
                return ERR_PTR(-ENXIO);

        bd = bsg_alloc_device();
        if (!bd) {
                blk_put_queue(rq);
                return ERR_PTR(-ENOMEM);
        }

        bd->queue = rq;

        bsg_set_block(bd, file);

        atomic_set(&bd->ref_count, 1);
        mutex_lock(&bsg_mutex);
        hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

        strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
        dprintk("bound to <%s>, max queue %d\n",
                format_dev_t(buf, inode->i_rdev), bd->max_queue);

        mutex_unlock(&bsg_mutex);
        return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
        struct bsg_device *bd;

        mutex_lock(&bsg_mutex);

        hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
                if (bd->queue == q) {
                        atomic_inc(&bd->ref_count);
                        goto found;
                }
        }
        bd = NULL;
found:
        mutex_unlock(&bsg_mutex);
        return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
        struct bsg_device *bd;
        struct bsg_class_device *bcd;

        /*
         * find the class device
         */
        mutex_lock(&bsg_mutex);
        bcd = idr_find(&bsg_minor_idr, iminor(inode));
        if (bcd)
                kref_get(&bcd->ref);
        mutex_unlock(&bsg_mutex);

        if (!bcd)
                return ERR_PTR(-ENODEV);

        bd = __bsg_get_device(iminor(inode), bcd->queue);
        if (bd)
                return bd;

        bd = bsg_add_device(inode, bcd->queue, file);
        if (IS_ERR(bd))
                kref_put(&bcd->ref, bsg_kref_release_function);

        return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
        struct bsg_device *bd;

        bd = bsg_get_device(inode, file);

        if (IS_ERR(bd))
                return PTR_ERR(bd);

        file->private_data = bd;
        return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
        struct bsg_device *bd = file->private_data;

        file->private_data = NULL;
        return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
        struct bsg_device *bd = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &bd->wq_done, wait);
        poll_wait(file, &bd->wq_free, wait);

        spin_lock_irq(&bd->lock);
        if (!list_empty(&bd->done_list))
                mask |= POLLIN | POLLRDNORM;
        if (bd->queued_cmds < bd->max_queue)
                mask |= POLLOUT;
        spin_unlock_irq(&bd->lock);

        return mask;
}
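
/*
 * The poll semantics above pair naturally with the asynchronous
 * write()/read() model; a minimal illustrative sketch (not part of this
 * file):
 *
 *        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *        poll(&pfd, 1, -1);
 *        if (pfd.revents & POLLIN)      // a completed sg_io_v4 can be read()
 *                ...
 *        if (pfd.revents & POLLOUT)     // the queue has room for a write()
 *                ...
 */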

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct bsg_device *bd = file->private_data;
        int __user *uarg = (int __user *) arg;
        int ret;

        switch (cmd) {
                /*
                 * our own ioctls
                 */
        case SG_GET_COMMAND_Q:
                return put_user(bd->max_queue, uarg);
        case SG_SET_COMMAND_Q: {
                int queue;

                if (get_user(queue, uarg))
                        return -EFAULT;
                if (queue < 1)
                        return -EINVAL;

                spin_lock_irq(&bd->lock);
                bd->max_queue = queue;
                spin_unlock_irq(&bd->lock);
                return 0;
        }

        /*
         * SCSI/sg ioctls
         */
        case SG_GET_VERSION_NUM:
        case SCSI_IOCTL_GET_IDLUN:
        case SCSI_IOCTL_GET_BUS_NUMBER:
        case SG_SET_TIMEOUT:
        case SG_GET_TIMEOUT:
        case SG_GET_RESERVED_SIZE:
        case SG_SET_RESERVED_SIZE:
        case SG_EMULATED_HOST:
        case SCSI_IOCTL_SEND_COMMAND: {
                void __user *uarg = (void __user *) arg;
                return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
        }
        case SG_IO: {
                struct request *rq;
                struct bio *bio, *bidi_bio = NULL;
                struct sg_io_v4 hdr;
                int at_head;
                u8 sense[SCSI_SENSE_BUFFERSIZE];

                if (copy_from_user(&hdr, uarg, sizeof(hdr)))
                        return -EFAULT;

                rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);

                bio = rq->bio;
                if (rq->next_rq)
                        bidi_bio = rq->next_rq->bio;

                at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
                blk_execute_rq(bd->queue, NULL, rq, at_head);
                ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

                if (copy_to_user(uarg, &hdr, sizeof(hdr)))
                        return -EFAULT;

                return ret;
        }
        /*
         * block device ioctls
         */
        default:
#if 0
                return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
                return -ENOTTY;
#endif
        }
}
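
/*
 * Synchronous userspace usage of the SG_IO path above, as a minimal
 * illustrative sketch (not part of this file; error handling omitted).
 * The six-byte TEST UNIT READY CDB is just an example command:
 *
 *        unsigned char cdb[6] = { 0 };        // TEST UNIT READY
 *        struct sg_io_v4 hdr = { 0 };
 *
 *        hdr.guard = 'Q';
 *        hdr.protocol = BSG_PROTOCOL_SCSI;
 *        hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *        hdr.request = (__u64)(unsigned long)cdb;
 *        hdr.request_len = sizeof(cdb);
 *        hdr.timeout = 30000;                 // milliseconds
 *
 *        ioctl(fd, SG_IO, &hdr);              // blocks until completion
 */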

static const struct file_operations bsg_fops = {
        .read           = bsg_read,
        .write          = bsg_write,
        .poll           = bsg_poll,
        .open           = bsg_open,
        .release        = bsg_release,
        .unlocked_ioctl = bsg_ioctl,
        .owner          = THIS_MODULE,
        .llseek         = default_llseek,
};

void bsg_unregister_queue(struct request_queue *q)
{
        struct bsg_class_device *bcd = &q->bsg_dev;

        if (!bcd->class_dev)
                return;

        mutex_lock(&bsg_mutex);
        idr_remove(&bsg_minor_idr, bcd->minor);
        if (q->kobj.sd)
                sysfs_remove_link(&q->kobj, "bsg");
        device_unregister(bcd->class_dev);
        bcd->class_dev = NULL;
        kref_put(&bcd->ref, bsg_kref_release_function);
        mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *parent,
                       const char *name, void (*release)(struct device *))
{
        struct bsg_class_device *bcd;
        dev_t dev;
        int ret;
        struct device *class_dev = NULL;
        const char *devname;

        if (name)
                devname = name;
        else
                devname = dev_name(parent);

        /*
         * we need a proper transport to send commands, not a stacked device
         */
        if (!q->request_fn)
                return 0;

        bcd = &q->bsg_dev;
        memset(bcd, 0, sizeof(*bcd));

        mutex_lock(&bsg_mutex);

        ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
        if (ret < 0) {
                if (ret == -ENOSPC) {
                        printk(KERN_ERR "bsg: too many bsg devices\n");
                        ret = -EINVAL;
                }
                goto unlock;
        }

        bcd->minor = ret;
        bcd->queue = q;
        bcd->parent = get_device(parent);
        bcd->release = release;
        kref_init(&bcd->ref);
        dev = MKDEV(bsg_major, bcd->minor);
        class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
        if (IS_ERR(class_dev)) {
                ret = PTR_ERR(class_dev);
                goto put_dev;
        }
        bcd->class_dev = class_dev;

        if (q->kobj.sd) {
                ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
                if (ret)
                        goto unregister_class_dev;
        }

        mutex_unlock(&bsg_mutex);
        return 0;

unregister_class_dev:
        device_unregister(class_dev);
put_dev:
        put_device(parent);
        idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
        mutex_unlock(&bsg_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
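
/*
 * Kernel-side usage, as a minimal sketch: a transport class or LLD with a
 * request_fn-based queue exposes it through bsg by calling (the names
 * "my_q", "my_dev" and "my_release" are hypothetical):
 *
 *        ret = bsg_register_queue(my_q, my_dev, NULL, my_release);
 *
 * and tears it down again with bsg_unregister_queue(my_q).  Passing NULL
 * for the name makes the /dev/bsg node inherit dev_name(my_dev).
 */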

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, umode_t *mode)
{
        return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

static int __init bsg_init(void)
{
        int ret, i;
        dev_t devid;

        bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
                                sizeof(struct bsg_command), 0, 0, NULL);
        if (!bsg_cmd_cachep) {
                printk(KERN_ERR "bsg: failed creating slab cache\n");
                return -ENOMEM;
        }

        for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
                INIT_HLIST_HEAD(&bsg_device_list[i]);

        bsg_class = class_create(THIS_MODULE, "bsg");
        if (IS_ERR(bsg_class)) {
                ret = PTR_ERR(bsg_class);
                goto destroy_kmemcache;
        }
        bsg_class->devnode = bsg_devnode;

        ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
        if (ret)
                goto destroy_bsg_class;

        bsg_major = MAJOR(devid);

        cdev_init(&bsg_cdev, &bsg_fops);
        ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
        if (ret)
                goto unregister_chrdev;

        printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
               " loaded (major %d)\n", bsg_major);
        return 0;
unregister_chrdev:
        unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
        class_destroy(bsg_class);
destroy_kmemcache:
        kmem_cache_destroy(bsg_cmd_cachep);
        return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);