/*******************************************************************************
 * Filename: target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

static struct se_subsystem_api rd_mcp_template;

/* rd_attach_hba(): (Part of the se_subsystem_api template)
 *
 * Allocate and initialize the per-HBA ramdisk context.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

/* rd_release_device_space():
 *
 * Free every backing page and scatterlist table owned by the ramdisk device.
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 i, j, page_count = 0, sg_per_table;
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	sg_table = rd_dev->sg_table_array;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}

		kfree(sg);
	}

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

/* rd_build_device_space():
 *
 * Allocate rd_page_count backing pages, chunked into scatterlist tables of
 * at most max_sg_per_table entries each.  On failure the caller is expected
 * to unwind the partial allocation via rd_release_device_space().
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -EINVAL;
	}
	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}
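/*
 * Sizing sketch (assuming the usual definitions from target_core_rd.h:
 * RD_MAX_ALLOCATION_SIZE of 65536 and a 32-byte struct scatterlist on
 * 64-bit without CONFIG_DEBUG_SG): max_sg_per_table comes out to 2048
 * entries, i.e. 8 MiB of backing store per table with 4 KiB pages.  A
 * device with rd_pages=4096 thus fills two tables of 2048 entries each.
 * Note that the "+ 1" in the sg_tables computation over-allocates one
 * zeroed table descriptor whenever the page count divides evenly; that
 * is harmless because rd_release_device_space() skips a zeroed entry
 * (its rd_sg_count is 0 and kfree(NULL) is a no-op).
 */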

static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return rd_dev;
}
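/*
 * A sketch of the two-phase setup flow (assuming the usual TCM configfs
 * sequencing): rd_allocate_virtdevice() runs when the device's configfs
 * group is created and only allocates the bare struct rd_dev;
 * rd_set_configfs_dev_params() then records rd_pages, and the backing
 * pages are not allocated until rd_create_virtdevice() runs when the
 * device is enabled.
 */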

static struct se_device *rd_create_virtdevice(struct se_hba *hba,
		struct se_subsystem_dev *se_dev, void *p)
{
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct rd_dev *rd_dev = p;
	struct rd_host *rd_host = hba->hba_ptr;
	int dev_flags = 0, ret;
	char prod[16], rev[4];

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	snprintf(prod, 16, "RAMDISK-MCP");
	snprintf(rev, 4, "%s", RD_MCP_VERSION);

	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
	dev_limits.limits.max_hw_sectors = UINT_MAX;
	dev_limits.limits.max_sectors = UINT_MAX;
	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba,
			&rd_mcp_template, se_dev, dev_flags, rd_dev,
			&dev_limits, prod, rev);
	if (!dev) {
		/* ret is still 0 here; without this, ERR_PTR(0) == NULL */
		ret = -ENOMEM;
		goto fail;
	}

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return dev;

fail:
	rd_release_device_space(rd_dev);
	return ERR_PTR(ret);
}

static void rd_free_device(void *p)
{
	struct rd_dev *rd_dev = p;

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

/* Return the scatterlist table whose page range covers @page, or NULL. */
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	u32 i;
	struct rd_dev_sg_table *sg_table;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

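/*
 * rd_execute_rw() maps the command's starting LBA onto the backing pages
 * and then copies between the initiator scatterlist and the ramdisk pages
 * with sg_miter.  A worked example of the mapping (hypothetical numbers,
 * assuming a 512-byte block size and 4 KiB pages): LBA 9 gives a byte
 * offset of 9 * 512 = 4608, i.e. rd_page 1 and rd_offset 512.  Each pass
 * of the loop copies min(current initiator fragment, remainder of the
 * current rd page) bytes, then advances whichever side ran out.
 */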
static int rd_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = se_dev->dev_ptr;
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;

	/* Convert the starting LBA into a (page, offset) pair. */
	tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return -EINVAL;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		len = min((u32)m.length, src_len);
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return -EINVAL;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_rd_pages, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page,
	ssize_t count)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			ret = match_int(args, &arg);
			if (ret)
				break;
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
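/*
 * A typical invocation through configfs (illustrative path; the HBA and
 * device directory names are whatever the administrator created):
 *
 *   echo rd_pages=65536 > \
 *	/sys/kernel/config/target/core/rd_mcp_0/ramdisk0/control
 *
 * With 4 KiB pages that requests a 256 MiB ramdisk.
 */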

static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t rd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
			" SG_table_count: %u\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count);
	return bl;
}

static u32 rd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 rd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = dev->dev_ptr;
	/* Widen before multiplying so the byte count cannot overflow u32. */
	unsigned long long blocks_long =
		((unsigned long long)rd_dev->rd_page_count * PAGE_SIZE /
		 dev->se_sub_dev->se_dev_attrib.block_size) - 1;

	return blocks_long;
}
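/*
 * Worked example (hypothetical numbers): rd_pages=65536 with 4 KiB pages
 * and a 512-byte logical block size gives 65536 * 4096 / 512 = 524288
 * blocks, so rd_get_blocks() returns 524287, the index of the last LBA,
 * which is what the target core reports for READ CAPACITY.
 */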

static struct spc_ops rd_spc_ops = {
	.execute_rw	= rd_execute_rw,
};

/* Delegate CDB decode to the generic SBC library; this backend only
 * supplies the data-movement callback through rd_spc_ops.
 */
static int rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_spc_ops);
}

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_allocate_virtdevice,
	.create_virtdevice	= rd_create_virtdevice,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
};

int __init rd_module_init(void)
{
	return transport_subsystem_register(&rd_mcp_template);
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}