/*******************************************************************************
 * Filename: target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * (c) Copyright 2005-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_file.h"

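/* FD_DEV(): translate a generic se_device back into the fd_dev that embeds it */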
static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
        return container_of(dev, struct fd_dev, dev);
}

/* fd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct fd_host *fd_host;

        fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
        if (!fd_host) {
                pr_err("Unable to allocate memory for struct fd_host\n");
                return -ENOMEM;
        }

        fd_host->fd_host_id = host_id;

        hba->hba_ptr = fd_host;

        pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
                " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
                TARGET_CORE_MOD_VERSION);
        pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
                " MaxSectors: %u\n",
                hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

        return 0;
}

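/* fd_detach_hba(): (Part of se_subsystem_api_t template)
 *
 * Release the fd_host allocated in fd_attach_hba().
 */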
static void fd_detach_hba(struct se_hba *hba)
{
        struct fd_host *fd_host = hba->hba_ptr;

        pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
                " Target Core\n", hba->hba_id, fd_host->fd_host_id);

        kfree(fd_host);
        hba->hba_ptr = NULL;
}

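/* fd_alloc_device(): allocate the FILEIO private device data; the backing
 * file itself is only opened later in fd_configure_device().
 */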
static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
        struct fd_dev *fd_dev;
        struct fd_host *fd_host = hba->hba_ptr;

        fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
        if (!fd_dev) {
                pr_err("Unable to allocate memory for struct fd_dev\n");
                return NULL;
        }

        fd_dev->fd_host = fd_host;

        pr_debug("FILEIO: Allocated fd_dev for %p\n", name);

        return &fd_dev->dev;
}

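/* fd_configure_device(): open the backing file (O_DSYNC unless buffered I/O
 * was requested) and derive block size and capacity either from an underlying
 * struct block_device or from the fd_dev_size= configfs parameter.
 */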
static int fd_configure_device(struct se_device *dev)
{
        struct fd_dev *fd_dev = FD_DEV(dev);
        struct fd_host *fd_host = dev->se_hba->hba_ptr;
        struct file *file;
        struct inode *inode = NULL;
        int flags, ret = -EINVAL;

        if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
                pr_err("Missing fd_dev_name=\n");
                return -EINVAL;
        }

        /*
         * Use O_DSYNC by default instead of O_SYNC to forgo syncing
         * of pure timestamp updates.
         */
        flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

        /*
         * Optionally allow fd_buffered_io=1 to be enabled for people
         * who want to use the fs buffer cache as a WriteCache mechanism.
         *
         * This means that in the event of a hard failure, there is a risk
         * of silent data-loss if the SCSI client has *not* performed a
         * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
         * to write-out the entire device cache.
         */
        if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
                pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
                flags &= ~O_DSYNC;
        }

        file = filp_open(fd_dev->fd_dev_name, flags, 0600);
        if (IS_ERR(file)) {
                pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
                ret = PTR_ERR(file);
                goto fail;
        }
        fd_dev->fd_file = file;
        /*
         * If using a block backend with this struct file, we extract
         * fd_dev->fd_[block,dev]_size from struct block_device.
         *
         * Otherwise, we use the passed fd_dev_size= from configfs.
         */
        inode = file->f_mapping->host;
        if (S_ISBLK(inode->i_mode)) {
                struct request_queue *q = bdev_get_queue(inode->i_bdev);
                unsigned long long dev_size;

                dev->dev_attrib.hw_block_size =
                        bdev_logical_block_size(inode->i_bdev);
                dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);

                /*
                 * Determine the number of bytes from i_size_read() minus
                 * one (1) logical sector from underlying struct block_device.
                 * Note that fd_dev->fd_block_size has not been assigned yet
                 * at this point, so use the hw_block_size derived just above.
                 */
                dev_size = (i_size_read(file->f_mapping->host) -
                        dev->dev_attrib.hw_block_size);

                pr_debug("FILEIO: Using size: %llu bytes from struct"
                        " block_device blocks: %llu logical_block_size: %d\n",
                        dev_size, div_u64(dev_size, dev->dev_attrib.hw_block_size),
                        dev->dev_attrib.hw_block_size);
        } else {
                if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
                        pr_err("FILEIO: Missing fd_dev_size="
                                " parameter, and no backing struct"
                                " block_device\n");
                        goto fail;
                }

                dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
                dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
        }

        fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;

        dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

        if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
                pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
                        " with FDBD_HAS_BUFFERED_IO_WCE\n");
                dev->dev_attrib.emulate_write_cache = 1;
        }

        fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
        fd_dev->fd_queue_depth = dev->queue_depth;
        /*
         * Limit WRITE_SAME w/ UNMAP=0 emulation to 0x1000 (4096) Number of
         * LBAs (NoLB), based upon struct iovec limit for vfs_writev()
         */
        dev->dev_attrib.max_write_same_len = 0x1000;

        pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
                " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
                fd_dev->fd_dev_name, fd_dev->fd_dev_size);

        return 0;
fail:
        if (fd_dev->fd_file) {
                filp_close(fd_dev->fd_file, NULL);
                fd_dev->fd_file = NULL;
        }
        return ret;
}

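/* fd_free_device(): close the backing struct file and free the fd_dev */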
static void fd_free_device(struct se_device *dev)
{
        struct fd_dev *fd_dev = FD_DEV(dev);

        if (fd_dev->fd_file) {
                filp_close(fd_dev->fd_file, NULL);
                fd_dev->fd_file = NULL;
        }

        kfree(fd_dev);
}

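/*
 * fd_do_rw(): map each scatterlist entry into an iovec with kmap(), then
 * submit the whole vector via vfs_readv()/vfs_writev() at the byte offset
 * derived from the LBA.  Returns 1 on success or a negative errno.
 */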
static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
                u32 sgl_nents, int is_write)
{
        struct se_device *se_dev = cmd->se_dev;
        struct fd_dev *dev = FD_DEV(se_dev);
        struct file *fd = dev->fd_file;
        struct scatterlist *sg;
        struct iovec *iov;
        mm_segment_t old_fs;
        loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
        int ret = 0, i;

        iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
        if (!iov) {
                pr_err("Unable to allocate fd_do_rw iov[]\n");
                return -ENOMEM;
        }

        for_each_sg(sgl, sg, sgl_nents, i) {
                iov[i].iov_len = sg->length;
                iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
        }

        old_fs = get_fs();
        set_fs(get_ds());

        if (is_write)
                ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
        else
                ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);

        set_fs(old_fs);

        for_each_sg(sgl, sg, sgl_nents, i)
                kunmap(sg_page(sg));

        kfree(iov);

        if (is_write) {
                if (ret < 0 || ret != cmd->data_length) {
                        pr_err("%s() write returned %d\n", __func__, ret);
                        return (ret < 0 ? ret : -EINVAL);
                }
        } else {
                /*
                 * Return zeros and GOOD status even if the READ did not return
                 * the expected virt_size for struct file w/o a backing struct
                 * block_device.
                 */
                if (S_ISBLK(file_inode(fd)->i_mode)) {
                        if (ret < 0 || ret != cmd->data_length) {
                                pr_err("%s() returned %d, expecting %u for "
                                        "S_ISBLK\n", __func__, ret,
                                        cmd->data_length);
                                return (ret < 0 ? ret : -EINVAL);
                        }
                } else {
                        if (ret < 0) {
                                pr_err("%s() returned %d for non S_ISBLK\n",
                                        __func__, ret);
                                return ret;
                        }
                }
        }
        return 1;
}

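/*
 * fd_execute_sync_cache(): implement SYNCHRONIZE_CACHE by flushing either the
 * entire backing file or the byte range described by the CDB with
 * vfs_fsync_range().
 */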
static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct fd_dev *fd_dev = FD_DEV(dev);
        int immed = (cmd->t_task_cdb[1] & 0x2);
        loff_t start, end;
        int ret;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op
         */
        if (immed)
                target_complete_cmd(cmd, SAM_STAT_GOOD);

        /*
         * Determine if we will be flushing the entire device.
         */
        if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
                start = 0;
                end = LLONG_MAX;
        } else {
                start = cmd->t_task_lba * dev->dev_attrib.block_size;
                if (cmd->data_length)
                        end = start + cmd->data_length;
                else
                        end = LLONG_MAX;
        }

        ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
        if (ret != 0)
                pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

        if (immed)
                return 0;

        if (ret)
                target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
        else
                target_complete_cmd(cmd, SAM_STAT_GOOD);

        return 0;
}

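/*
 * fd_setup_write_same_buf(): replicate the single received WRITE_SAME block
 * into a local buffer of at most PAGE_SIZE bytes, which is then reused for
 * every iovec of the emulated write.
 */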
static unsigned char *
fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
                unsigned int len)
{
        struct se_device *se_dev = cmd->se_dev;
        unsigned int block_size = se_dev->dev_attrib.block_size;
        unsigned int i = 0, end;
        unsigned char *buf, *p, *kmap_buf;

        buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
        if (!buf) {
                pr_err("Unable to allocate fd_execute_write_same buf\n");
                return NULL;
        }

        kmap_buf = kmap(sg_page(sg)) + sg->offset;
        if (!kmap_buf) {
                pr_err("kmap() failed in fd_setup_write_same\n");
                kfree(buf);
                return NULL;
        }
        /*
         * Fill local *buf to contain multiple WRITE_SAME blocks up to
         * min(len, PAGE_SIZE)
         */
        p = buf;
        end = min_t(unsigned int, len, PAGE_SIZE);

        while (i < end) {
                memcpy(p, kmap_buf, block_size);

                i += block_size;
                p += block_size;
        }
        kunmap(sg_page(sg));

        return buf;
}

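/*
 * fd_execute_write_same(): emulate WRITE_SAME by pointing each iovec at the
 * buffer built by fd_setup_write_same_buf() and issuing a single vfs_writev().
 */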
static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
        struct se_device *se_dev = cmd->se_dev;
        struct fd_dev *fd_dev = FD_DEV(se_dev);
        struct file *f = fd_dev->fd_file;
        struct scatterlist *sg;
        struct iovec *iov;
        mm_segment_t old_fs;
        sector_t nolb = sbc_get_write_same_sectors(cmd);
        loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
        unsigned int len, len_tmp, iov_num;
        int i, rc;
        unsigned char *buf;

        if (!nolb) {
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }
        sg = &cmd->t_data_sg[0];

        if (cmd->t_data_nents > 1 ||
            sg->length != cmd->se_dev->dev_attrib.block_size) {
                pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
                        " block_size: %u\n", cmd->t_data_nents, sg->length,
                        cmd->se_dev->dev_attrib.block_size);
                return TCM_INVALID_CDB_FIELD;
        }

        len = len_tmp = nolb * se_dev->dev_attrib.block_size;
        iov_num = DIV_ROUND_UP(len, PAGE_SIZE);

        buf = fd_setup_write_same_buf(cmd, sg, len);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        iov = vzalloc(sizeof(struct iovec) * iov_num);
        if (!iov) {
                pr_err("Unable to allocate fd_execute_write_same iovecs\n");
                kfree(buf);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        /*
         * Map the single fabric received scatterlist block now populated
         * in *buf into each iovec for I/O submission.
         */
        for (i = 0; i < iov_num; i++) {
                iov[i].iov_base = buf;
                iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
                len_tmp -= iov[i].iov_len;
        }

        old_fs = get_fs();
        set_fs(get_ds());
        rc = vfs_writev(f, &iov[0], iov_num, &pos);
        set_fs(old_fs);

        vfree(iov);
        kfree(buf);

        if (rc < 0 || rc != len) {
                pr_err("vfs_writev() returned %d for write same\n", rc);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

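/*
 * fd_execute_rw(): dispatch READ/WRITE payloads to fd_do_rw(), adding an
 * explicit vfs_fsync_range() for WRITEs carrying the FUA bit when
 * emulate_fua_write is enabled.
 */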
static sense_reason_t
fd_execute_rw(struct se_cmd *cmd)
{
        struct scatterlist *sgl = cmd->t_data_sg;
        u32 sgl_nents = cmd->t_data_nents;
        enum dma_data_direction data_direction = cmd->data_direction;
        struct se_device *dev = cmd->se_dev;
        int ret = 0;

        /*
         * Call vectorized fileio functions to map struct scatterlist
         * physical memory addresses to struct iovec virtual memory.
         */
        if (data_direction == DMA_FROM_DEVICE) {
                ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
        } else {
                ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
                /*
                 * Perform an implicit vfs_fsync_range() for fd_do_rw() write
                 * ops for SCSI WRITEs with Forced Unit Access (FUA) set.
                 * Allow this to happen independent of WCE=0 setting.
                 */
                if (ret > 0 &&
                    dev->dev_attrib.emulate_fua_write > 0 &&
                    (cmd->se_cmd_flags & SCF_FUA)) {
                        struct fd_dev *fd_dev = FD_DEV(dev);
                        loff_t start = cmd->t_task_lba *
                                dev->dev_attrib.block_size;
                        loff_t end = start + cmd->data_length;

                        vfs_fsync_range(fd_dev->fd_file, start, end, 1);
                }
        }

        if (ret < 0)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        if (ret)
                target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

enum {
        Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
        {Opt_fd_dev_name, "fd_dev_name=%s"},
        {Opt_fd_dev_size, "fd_dev_size=%s"},
        {Opt_fd_buffered_io, "fd_buffered_io=%d"},
        {Opt_err, NULL}
};

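/*
 * fd_set_configfs_dev_params(): parse the comma-separated control string
 * written through configfs, for example (illustrative path and size):
 *
 *	fd_dev_name=/tmp/fileio0.img,fd_dev_size=4194304,fd_buffered_io=1
 */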
static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct fd_dev *fd_dev = FD_DEV(dev);
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_fd_dev_name:
                        if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
                                        FD_MAX_DEV_NAME) == 0) {
                                ret = -EINVAL;
                                break;
                        }
                        pr_debug("FILEIO: Referencing Path: %s\n",
                                fd_dev->fd_dev_name);
                        fd_dev->fbd_flags |= FBDF_HAS_PATH;
                        break;
                case Opt_fd_dev_size:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
                        kfree(arg_p);
                        if (ret < 0) {
                                pr_err("strict_strtoull() failed for"
                                        " fd_dev_size=\n");
                                goto out;
                        }
                        pr_debug("FILEIO: Referencing Size: %llu"
                                " bytes\n", fd_dev->fd_dev_size);
                        fd_dev->fbd_flags |= FBDF_HAS_SIZE;
                        break;
                case Opt_fd_buffered_io:
                        match_int(args, &arg);
                        if (arg != 1) {
                                pr_err("bogus fd_buffered_io=%d value\n", arg);
                                ret = -EINVAL;
                                goto out;
                        }

                        pr_debug("FILEIO: Using buffered I/O"
                                " operations for struct fd_dev\n");

                        fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct fd_dev *fd_dev = FD_DEV(dev);
        ssize_t bl = 0;

        bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
        bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
                fd_dev->fd_dev_name, fd_dev->fd_dev_size,
                (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
                "Buffered-WCE" : "O_DSYNC");
        return bl;
}

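/*
 * fd_get_blocks(): report the number of logical blocks backing this device,
 * re-reading the inode size for block devices so that underlying resize
 * operations are reflected.
 */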
static sector_t fd_get_blocks(struct se_device *dev)
{
        struct fd_dev *fd_dev = FD_DEV(dev);
        struct file *f = fd_dev->fd_file;
        struct inode *i = f->f_mapping->host;
        unsigned long long dev_size;
        /*
         * When using a file that references an underlying struct block_device,
         * ensure dev_size is always based on the current inode size in order
         * to handle underlying block_device resize operations.
         */
        if (S_ISBLK(i->i_mode))
                dev_size = (i_size_read(i) - fd_dev->fd_block_size);
        else
                dev_size = fd_dev->fd_dev_size;

        return div_u64(dev_size, dev->dev_attrib.block_size);
}

static struct sbc_ops fd_sbc_ops = {
        .execute_rw = fd_execute_rw,
        .execute_sync_cache = fd_execute_sync_cache,
        .execute_write_same = fd_execute_write_same,
};

static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &fd_sbc_ops);
}

static struct se_subsystem_api fileio_template = {
        .name = "fileio",
        .inquiry_prod = "FILEIO",
        .inquiry_rev = FD_VERSION,
        .owner = THIS_MODULE,
        .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba = fd_attach_hba,
        .detach_hba = fd_detach_hba,
        .alloc_device = fd_alloc_device,
        .configure_device = fd_configure_device,
        .free_device = fd_free_device,
        .parse_cdb = fd_parse_cdb,
        .set_configfs_dev_params = fd_set_configfs_dev_params,
        .show_configfs_dev_params = fd_show_configfs_dev_params,
        .get_device_type = sbc_get_device_type,
        .get_blocks = fd_get_blocks,
};

static int __init fileio_module_init(void)
{
        return transport_subsystem_register(&fileio_template);
}

static void __exit fileio_module_exit(void)
{
        transport_subsystem_release(&fileio_template);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);