/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);

/*
 * iblock_attach_hba(): Part of the se_subsystem_api template. IBLOCK has no
 * physical HBA to set up, so only report the driver and core versions.
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return ib_dev;
}

static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	fmode_t mode;
	int ret = -EINVAL;

	if (!ib_dev) {
		pr_err("Unable to locate struct iblock_dev parameter\n");
		return ERR_PTR(ret);
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset()\n");
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("IBLOCK: Created bio_set()\n");
	/*
	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
	 * has already been set before "echo 1 > $HBA/$DEV/enable" can proceed.
	 */
	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto failed;
	}
	/*
	 * Set up the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = bdev_get_queue(bd);
	limits = &dev_limits.limits;
	limits->logical_block_size = bdev_logical_block_size(bd);
	limits->max_hw_sectors = UINT_MAX;
	limits->max_sectors = UINT_MAX;
	dev_limits.hw_queue_depth = q->nr_requests;
	dev_limits.queue_depth = q->nr_requests;

	ib_dev->ibd_bd = bd;

	dev = transport_add_device_to_core_hba(hba,
			&iblock_template, se_dev, dev_flags, ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);
	if (!dev)
		goto failed;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI and TRIM
	 * in ATA, in which case the thin provisioning (TPE=1) attributes
	 * are filled in.
	 */
	if (blk_queue_discard(q)) {
		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code.
		 */
		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}

	if (blk_queue_nonrot(q))
		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

	return dev;

failed:
	if (ib_dev->ibd_bio_set) {
		bioset_free(ib_dev->ibd_bio_set);
		ib_dev->ibd_bio_set = NULL;
	}
	ib_dev->ibd_bd = NULL;
	return ERR_PTR(ret);
}

static void iblock_free_device(void *p)
{
	struct iblock_dev *ib_dev = p;

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}

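/*
 * Scale the last LBA reported by READ CAPACITY from the backing device's
 * logical block size to the block size exported to the initiator. A worked
 * example with hypothetical sizes: a 1 MiB device with 512-byte logical
 * blocks has last LBA 2047; exported with a 4096-byte block size this
 * becomes 2047 >> 3 = 255.
 */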
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

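/*
 * Completion callback for the flush bio issued by iblock_execute_sync_cache().
 * bi_private (cmd) is only set when the initiator did not request Immediate
 * completion, i.e. when the command is still waiting on this flush.
 */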
static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err) {
			cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		} else {
			target_complete_cmd(cmd, SAM_STAT_GOOD);
		}
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle LBA ranges and must
 * always flush the whole cache.
 */
static int iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

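/*
 * UNMAP parameter list layout, as parsed below: bytes 0-1 carry the data
 * length (dl) and bytes 2-3 the block descriptor data length (bd_dl); the
 * 16-byte block descriptors start at offset 8, each holding an 8-byte
 * starting LBA and a 4-byte number of logical blocks.
 */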
static int iblock_execute_unmap(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ibd = dev->dev_ptr;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	int ret = 0;
	int dl, bd_dl;

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		return -EINVAL;
	}

	buf = transport_kmap_data_sg(cmd);

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		ret = -EINVAL;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
			ret = -EINVAL;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
			ret = -EINVAL;
			goto err;
		}

		ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
					   GFP_KERNEL, 0);
		if (ret < 0) {
			pr_err("blkdev_issue_discard() failed: %d\n",
					ret);
			goto err;
		}

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

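/*
 * WRITE SAME is serviced here by discarding the covered LBA range outright
 * rather than writing the payload pattern block by block, so the result
 * relies on the backing device's discard behavior for the affected blocks.
 */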
static int iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
	int ret;

	ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
				   spc_get_write_same_sectors(cmd), GFP_KERNEL,
				   0);
	if (ret < 0) {
		pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
		return ret;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};
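
/*
 * Device parameters arrive as a comma/newline separated string written to
 * the configfs control attribute. A hypothetical example (device path made
 * up for illustration):
 *
 *   echo "udev_path=/dev/sdb,readonly=1" > $HBA/$DEV/control
 *   echo 1 > $HBA/$DEV/enable
 */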

static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
					       struct se_subsystem_dev *se_dev,
					       const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, " UDEV PATH: %s",
				ibd->ibd_udev_path);
	bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly);

	bl += sprintf(b + bl, " ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ibd) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

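/*
 * Called once per completed bio and once from iblock_execute_rw() after all
 * bios have been submitted; the command completes only when ibr->pending
 * drops to zero, so a partially submitted command cannot complete early.
 */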
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_destructor(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;

	bio_free(bio, ib_dev->ibd_bio_set);
}

static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to;
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
	return bio;
}

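/*
 * Submit every queued bio under a single blk plug so the block layer can
 * batch and merge the requests before they reach the request queue.
 */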
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

static int iblock_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend not to have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		     (cmd->se_cmd_flags & SCF_FUA)))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
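	 * For example (illustrative numbers): LBA 100 with a 4096-byte
	 * exported block size maps to 512-byte sector 800 (100 << 3).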
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOSYS;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

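	/*
	 * Start with two references on ibr->pending: one for the first bio
	 * and one held by this submission path itself, dropped by the final
	 * iblock_complete_cmd() call below once everything is submitted.
	 */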
	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 * length of the S/G list entry this will cause an endless
		 * loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
	return -ENOMEM;
}

static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed err is still zero.
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct spc_ops iblock_spc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

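/*
 * sbc_parse_cdb() decodes the incoming CDB and dispatches READ/WRITE,
 * SYNCHRONIZE CACHE, WRITE SAME and UNMAP to the iblock_spc_ops callbacks
 * above.
 */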
static int iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_spc_ops);
}

static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);