/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
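
/*
 * Illustrative userspace sketch (not part of this driver): one synchronous
 * SG_IO round trip through a bsg node. The device path and the INQUIRY CDB
 * are assumptions for the example, not anything bsg mandates.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/bsg.h>
 *	#include <scsi/sg.h>
 *
 *	int main(void)
 *	{
 *		unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  // INQUIRY
 *		unsigned char buf[96], sense[32];
 *		struct sg_io_v4 hdr;
 *		int fd = open("/dev/bsg/0:0:0:0", O_RDWR);  // assumed node
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&hdr, 0, sizeof(hdr));
 *		hdr.guard = 'Q';
 *		hdr.protocol = BSG_PROTOCOL_SCSI;
 *		hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *		hdr.request = (__u64)(unsigned long)cdb;
 *		hdr.request_len = sizeof(cdb);
 *		hdr.din_xferp = (__u64)(unsigned long)buf;
 *		hdr.din_xfer_len = sizeof(buf);
 *		hdr.response = (__u64)(unsigned long)sense;
 *		hdr.max_response_len = sizeof(sense);
 *		hdr.timeout = 10000;  // ms; see blk_fill_sgv4_hdr_rq()
 *		if (ioctl(fd, SG_IO, &hdr) < 0)
 *			return 1;
 *		printf("device_status %u\n", hdr.device_status);
 *		return 0;
 *	}
 */
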
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t has_write_perm)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}
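
/*
 * For reference, the minimal header shape the check above accepts: a
 * hedged caller-side sketch (any further fields are transport specific):
 *
 *	struct sg_io_v4 hdr = {
 *		.guard       = 'Q',
 *		.protocol    = BSG_PROTOCOL_SCSI,
 *		.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD,  // or ..._SCSI_TRANSPORT
 *	};
 *
 * Any other guard/protocol/subprotocol combination fails with -EINVAL, and
 * a non-zero dout_xfer_len selects the WRITE data direction.
 */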

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
	    u8 *sense)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and chain them to the request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void *)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
				      GFP_KERNEL);
		if (ret)
			goto out;
	}

	rq->sense = sense;
	rq->sense_len = 0;

	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion callback from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
				rq->sense_len);

		ret = copy_to_user((void *)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->resid_len;
		hdr->din_resid = rq->next_rq->resid_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->resid_len;
	else
		hdr->dout_resid = rq->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it. The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written,
		       fmode_t has_write_perm)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * error detection here is best-effort; it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	int ret;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	ret = blk_get_queue(rq);
	if (ret)
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	lock_kernel();
	bd = bsg_get_device(inode, file);
	unlock_kernel();

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	/* writable only while there is room to queue another command */
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
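
/*
 * Illustrative userspace sketch of the async path served by the read,
 * write and poll hooks above: queue a command with write(), wait for
 * POLLIN, reap it with read(). 'fd' is assumed to be an open bsg node
 * and 'hdr' a populated struct sg_io_v4 as in the SG_IO example near the
 * top of this file; reads and writes must be multiples of sizeof(hdr).
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
 *		return -1;
 *	if (poll(&pfd, 1, -1) < 0)
 *		return -1;
 *	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
 *		return -1;
 *	// hdr now holds the completed command, sense data included
 */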

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
	/*
	 * our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;
		u8 sense[SCSI_SENSE_BUFFERSIZE];

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
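
/*
 * The per-open queue depth handled above is adjustable from userspace;
 * a hedged sketch ('fd' is an assumed open bsg node):
 *
 *	int depth;
 *
 *	if (ioctl(fd, SG_GET_COMMAND_Q, &depth) < 0)	// BSG_DEFAULT_CMDS (64)
 *		return -1;
 *	depth = 128;
 *	if (ioctl(fd, SG_SET_COMMAND_Q, &depth) < 0)	// rejects depth < 1
 *		return -1;
 */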

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
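
/*
 * Hedged sketch of how a transport driver might hook in (the names below
 * are illustrative, not from this file): register once the parent device
 * exists, unregister on teardown. Note that a queue without a request_fn
 * (i.e. a stacked device) makes registration a successful no-op.
 *
 *	static void my_dev_release(struct device *dev)
 *	{
 *		// release resources the caller pinned for bsg's benefit
 *	}
 *
 *	ret = bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
 *				 NULL, my_dev_release);	// NULL name => dev_name(parent)
 *	...
 *	bsg_unregister_queue(sdev->request_queue);
 */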

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, mode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
					   sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);