/* -*- linux-c -*-
 * viodasd.c
 * Authors: Dave Boutcher <boutcher@us.ibm.com>
 *          Ryan Arnold <ryanarn@us.ibm.com>
 *          Colin Devilbiss <devilbis@us.ibm.com>
 *          Stephen Rothwell
 *
 * (C) Copyright 2000-2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * This routine provides access to disk space (termed "DASD" in historical
 * IBM terms) owned and managed by an OS/400 partition running on the
 * same box as this Linux partition.
 *
 * All disk operations are performed by sending messages back and forth to
 * the OS/400 partition.
 */

#define pr_fmt(fmt) "viod: " fmt

#include <linux/major.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/uaccess.h>
#include <asm/vio.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/vio.h>
#include <asm/firmware.h>

MODULE_DESCRIPTION("iSeries Virtual DASD");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");

/*
 * We only support 7 partitions per physical disk....so with minor
 * numbers 0-255 we get a maximum of 32 disks.
 */
#define VIOD_GENHD_NAME "iseries/vd"

#define VIOD_VERS "1.64"

enum {
	PARTITION_SHIFT = 3,
	MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS,
	MAX_DISK_NAME = FIELD_SIZEOF(struct gendisk, disk_name)
};

static DEFINE_SPINLOCK(viodasd_spinlock);

#define VIOMAXREQ 16

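/* Map a viodasd_device pointer back to its index in viodasd_devices[] */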
#define DEVICE_NO(cell) ((struct viodasd_device *)(cell) - &viodasd_devices[0])

struct viodasd_waitevent {
	struct completion com;
	int rc;
	u16 sub_result;
	int max_disk; /* open */
};

static const struct vio_error_entry viodasd_err_table[] = {
	{ 0x0201, EINVAL, "Invalid Range" },
	{ 0x0202, EINVAL, "Invalid Token" },
	{ 0x0203, EIO, "DMA Error" },
	{ 0x0204, EIO, "Use Error" },
	{ 0x0205, EIO, "Release Error" },
	{ 0x0206, EINVAL, "Invalid Disk" },
	{ 0x0207, EBUSY, "Can't Lock" },
	{ 0x0208, EIO, "Already Locked" },
	{ 0x0209, EIO, "Already Unlocked" },
	{ 0x020A, EIO, "Invalid Arg" },
	{ 0x020B, EIO, "Bad IFS File" },
	{ 0x020C, EROFS, "Read Only Device" },
	{ 0x02FF, EIO, "Internal Error" },
	{ 0x0000, 0, NULL },
};

/*
 * Figure out the biggest I/O request (in sectors) we can accept
 */
#define VIODASD_MAXSECTORS (4096 / 512 * VIOMAXBLOCKDMA)

/*
 * Number of disk I/O requests we've sent to OS/400
 */
static int num_req_outstanding;

/*
 * This is our internal structure for keeping track of disk devices
 */
struct viodasd_device {
	u16 cylinders;
	u16 tracks;
	u16 sectors;
	u16 bytes_per_sector;
	u64 size;
	int read_only;
	spinlock_t q_lock;
	struct gendisk *disk;
	struct device *dev;
} viodasd_devices[MAX_DISKNO];

/*
 * External open entry point.
 */
static int viodasd_open(struct block_device *bdev, fmode_t mode)
{
	struct viodasd_device *d = bdev->bd_disk->private_data;
	HvLpEvent_Rc hvrc;
	struct viodasd_waitevent we;
	u16 flags = 0;

	if (d->read_only) {
		if (mode & FMODE_WRITE)
			return -EROFS;
		flags = vioblockflags_ro;
	}

	init_completion(&we.com);

	/* Send the open event to OS/400 */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockopen,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&we, VIOVERSION << 16,
			((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		pr_warning("HV open failed %d\n", (int)hvrc);
		return -EIO;
	}

	wait_for_completion(&we.com);

	/* Check the return code */
	if (we.rc != 0) {
		const struct vio_error_entry *err =
			vio_lookup_rc(viodasd_err_table, we.sub_result);

		pr_warning("bad rc opening disk: %d:0x%04x (%s)\n",
			   (int)we.rc, we.sub_result, err->msg);
		return -EIO;
	}

	return 0;
}

/*
 * External release entry point.
 */
static int viodasd_release(struct gendisk *disk, fmode_t mode)
{
	struct viodasd_device *d = disk->private_data;
	HvLpEvent_Rc hvrc;

	/* Send the event to OS/400. We DON'T expect a response */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockclose,
			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			0, VIOVERSION << 16,
			((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */,
			0, 0, 0);
	if (hvrc != 0)
		pr_warning("HV close call failed %d\n", (int)hvrc);
	return 0;
}

/*
 * External getgeo entry point.
 */
static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;
	struct viodasd_device *d = disk->private_data;

	geo->sectors = d->sectors ? d->sectors : 32;
	geo->heads = d->tracks ? d->tracks : 64;
	geo->cylinders = d->cylinders ? d->cylinders :
		get_capacity(disk) / (geo->sectors * geo->heads);

	return 0;
}

/*
 * Our file operations table
 */
static const struct block_device_operations viodasd_fops = {
	.owner = THIS_MODULE,
	.open = viodasd_open,
	.release = viodasd_release,
	.getgeo = viodasd_getgeo,
};

/*
 * End a request
 */
static void viodasd_end_request(struct request *req, int error,
		int num_sectors)
{
	__blk_end_request(req, error, num_sectors << 9);
}

/*
 * Send an actual I/O request to OS/400
 */
static int send_request(struct request *req)
{
	u64 start;
	int direction;
	int nsg;
	u16 viocmd;
	HvLpEvent_Rc hvrc;
	struct vioblocklpevent *bevent;
	struct HvLpEvent *hev;
	struct scatterlist sg[VIOMAXBLOCKDMA];
	int sgindex;
	struct viodasd_device *d;
	unsigned long flags;

	start = (u64)blk_rq_pos(req) << 9;

	if (rq_data_dir(req) == READ) {
		direction = DMA_FROM_DEVICE;
		viocmd = viomajorsubtype_blockio | vioblockread;
	} else {
		direction = DMA_TO_DEVICE;
		viocmd = viomajorsubtype_blockio | vioblockwrite;
	}

	d = req->rq_disk->private_data;

	/* Now build the scatter-gather list */
	sg_init_table(sg, VIOMAXBLOCKDMA);
	nsg = blk_rq_map_sg(req->q, req, sg);
	nsg = dma_map_sg(d->dev, sg, nsg, direction);

	spin_lock_irqsave(&viodasd_spinlock, flags);
	num_req_outstanding++;

	/* This optimization handles a single DMA block */
	if (nsg == 1)
		hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
				HvLpEvent_Type_VirtualIo, viocmd,
				HvLpEvent_AckInd_DoAck,
				HvLpEvent_AckType_ImmediateAck,
				viopath_sourceinst(viopath_hostLp),
				viopath_targetinst(viopath_hostLp),
				(u64)(unsigned long)req, VIOVERSION << 16,
				((u64)DEVICE_NO(d) << 48), start,
				((u64)sg_dma_address(&sg[0])) << 32,
				sg_dma_len(&sg[0]));
	else {
		bevent = (struct vioblocklpevent *)
			vio_get_event_buffer(viomajorsubtype_blockio);
		if (bevent == NULL) {
			pr_warning("error allocating disk event buffer\n");
			goto error_ret;
		}

		/*
		 * Now build up the actual request. Note that we store
		 * the pointer to the request in the correlation
		 * token so we can match the response up later
		 */
		memset(bevent, 0, sizeof(struct vioblocklpevent));
		hev = &bevent->event;
		hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK |
			HV_LP_EVENT_INT;
		hev->xType = HvLpEvent_Type_VirtualIo;
		hev->xSubtype = viocmd;
		hev->xSourceLp = HvLpConfig_getLpIndex();
		hev->xTargetLp = viopath_hostLp;
		hev->xSizeMinus1 =
			offsetof(struct vioblocklpevent, u.rw_data.dma_info) +
			(sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1;
		hev->xSourceInstanceId = viopath_sourceinst(viopath_hostLp);
		hev->xTargetInstanceId = viopath_targetinst(viopath_hostLp);
		hev->xCorrelationToken = (u64)req;
		bevent->version = VIOVERSION;
		bevent->disk = DEVICE_NO(d);
		bevent->u.rw_data.offset = start;

		/*
		 * Copy just the dma information from the sg list
		 * into the request
		 */
		for (sgindex = 0; sgindex < nsg; sgindex++) {
			bevent->u.rw_data.dma_info[sgindex].token =
				sg_dma_address(&sg[sgindex]);
			bevent->u.rw_data.dma_info[sgindex].len =
				sg_dma_len(&sg[sgindex]);
		}

		/* Send the request */
		hvrc = HvCallEvent_signalLpEvent(&bevent->event);
		vio_free_event_buffer(viomajorsubtype_blockio, bevent);
	}

	if (hvrc != HvLpEvent_Rc_Good) {
		pr_warning("error sending disk event to OS/400 (rc %d)\n",
			   (int)hvrc);
		goto error_ret;
	}
	spin_unlock_irqrestore(&viodasd_spinlock, flags);
	return 0;

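	/* Back out the request accounting and DMA mapping on failure */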
error_ret:
	num_req_outstanding--;
	spin_unlock_irqrestore(&viodasd_spinlock, flags);
	dma_unmap_sg(d->dev, sg, nsg, direction);
	return -1;
}

/*
 * This is the external request processing routine
 */
static void do_viodasd_request(struct request_queue *q)
{
	struct request *req;

	/*
	 * If we already have the maximum number of requests
	 * outstanding to OS/400 just bail out. We'll come
	 * back later.
	 */
	while (num_req_outstanding < VIOMAXREQ) {
		req = blk_fetch_request(q);
		if (req == NULL)
			return;
		/* check that request contains a valid command */
		if (!blk_fs_request(req)) {
			viodasd_end_request(req, -EIO, blk_rq_sectors(req));
			continue;
		}
		/* Try sending the request */
		if (send_request(req) != 0)
			viodasd_end_request(req, -EIO, blk_rq_sectors(req));
	}
}

/*
 * Probe a single disk and fill in the viodasd_device structure
 * for it.
 */
static int probe_disk(struct viodasd_device *d)
{
	HvLpEvent_Rc hvrc;
	struct viodasd_waitevent we;
	int dev_no = DEVICE_NO(d);
	struct gendisk *g;
	struct request_queue *q;
	u16 flags = 0;

retry:
	init_completion(&we.com);

	/* Send the open event to OS/400 */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockopen,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&we, VIOVERSION << 16,
			((u64)dev_no << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		pr_warning("bad rc on HV open %d\n", (int)hvrc);
		return 0;
	}

	wait_for_completion(&we.com);

	if (we.rc != 0) {
		if (flags != 0)
			return 0;
		/* try again with read only flag set */
		flags = vioblockflags_ro;
		goto retry;
	}
	if (we.max_disk > (MAX_DISKNO - 1)) {
		printk_once(KERN_INFO pr_fmt("Only examining the first %d of %d disks connected\n"),
			    MAX_DISKNO, we.max_disk + 1);
	}

	/* Send the close event to OS/400. We DON'T expect a response */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockclose,
			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			0, VIOVERSION << 16,
			((u64)dev_no << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		pr_warning("bad rc sending event to OS/400 %d\n", (int)hvrc);
		return 0;
	}

	if (d->dev == NULL) {
		/* this is when we reprobe for new disks */
		if (vio_create_viodasd(dev_no) == NULL) {
			pr_warning("cannot allocate virtual device for disk %d\n",
				   dev_no);
			return 0;
		}
		/*
		 * The vio_create_viodasd will have recursed into this
		 * routine with d->dev set to the new vio device and
		 * will finish the setup of the disk below.
		 */
		return 1;
	}

	/* create the request queue for the disk */
	spin_lock_init(&d->q_lock);
	q = blk_init_queue(do_viodasd_request, &d->q_lock);
	if (q == NULL) {
		pr_warning("cannot allocate queue for disk %d\n", dev_no);
		return 0;
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (g == NULL) {
		pr_warning("cannot allocate disk structure for disk %d\n",
			   dev_no);
		blk_cleanup_queue(q);
		return 0;
	}

	d->disk = g;
	blk_queue_max_segments(q, VIOMAXBLOCKDMA);
	blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
	g->major = VIODASD_MAJOR;
	g->first_minor = dev_no << PARTITION_SHIFT;
	if (dev_no >= 26)
		snprintf(g->disk_name, sizeof(g->disk_name),
			 VIOD_GENHD_NAME "%c%c",
			 'a' + (dev_no / 26) - 1, 'a' + (dev_no % 26));
	else
		snprintf(g->disk_name, sizeof(g->disk_name),
			 VIOD_GENHD_NAME "%c", 'a' + (dev_no % 26));
	g->fops = &viodasd_fops;
	g->queue = q;
	g->private_data = d;
	g->driverfs_dev = d->dev;
	set_capacity(g, d->size >> 9);

	pr_info("disk %d: %lu sectors (%lu MB) CHS=%d/%d/%d sector size %d%s\n",
		dev_no, (unsigned long)(d->size >> 9),
		(unsigned long)(d->size >> 20),
		(int)d->cylinders, (int)d->tracks,
		(int)d->sectors, (int)d->bytes_per_sector,
		d->read_only ? " (RO)" : "");

	/* register us in the global list */
	add_disk(g);
	return 1;
}

/* returns the total number of scatterlist elements converted */
static int block_event_to_scatterlist(const struct vioblocklpevent *bevent,
		struct scatterlist *sg, int *total_len)
{
	int i, numsg;
	const struct rw_data *rw_data = &bevent->u.rw_data;
	static const int offset =
		offsetof(struct vioblocklpevent, u.rw_data.dma_info);
	static const int element_size = sizeof(rw_data->dma_info[0]);

	numsg = ((bevent->event.xSizeMinus1 + 1) - offset) / element_size;
	if (numsg > VIOMAXBLOCKDMA)
		numsg = VIOMAXBLOCKDMA;

	*total_len = 0;
	sg_init_table(sg, VIOMAXBLOCKDMA);
	for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) {
		sg_dma_address(&sg[i]) = rw_data->dma_info[i].token;
		sg_dma_len(&sg[i]) = rw_data->dma_info[i].len;
		*total_len += rw_data->dma_info[i].len;
	}
	return i;
}

/*
 * Restart all queues, starting with the one _after_ the disk given,
 * thus reducing the chance of starvation of higher numbered disks.
 */
static void viodasd_restart_all_queues_starting_from(int first_index)
{
	int i;

	for (i = first_index + 1; i < MAX_DISKNO; ++i)
		if (viodasd_devices[i].disk)
			blk_run_queue(viodasd_devices[i].disk->queue);
	for (i = 0; i <= first_index; ++i)
		if (viodasd_devices[i].disk)
			blk_run_queue(viodasd_devices[i].disk->queue);
}

/*
 * For read and write requests, decrement the number of outstanding requests
 * and free the DMA buffers we allocated.
 */
static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
{
	int num_sg, num_sect, pci_direction, total_len;
	struct request *req;
	struct scatterlist sg[VIOMAXBLOCKDMA];
	struct HvLpEvent *event = &bevent->event;
	unsigned long irq_flags;
	struct viodasd_device *d;
	int error;
	spinlock_t *qlock;

	num_sg = block_event_to_scatterlist(bevent, sg, &total_len);
	num_sect = total_len >> 9;
	if (event->xSubtype == (viomajorsubtype_blockio | vioblockread))
		pci_direction = DMA_FROM_DEVICE;
	else
		pci_direction = DMA_TO_DEVICE;
	req = (struct request *)bevent->event.xCorrelationToken;
	d = req->rq_disk->private_data;

	dma_unmap_sg(d->dev, sg, num_sg, pci_direction);

	/*
	 * Since this is running in interrupt mode, we need to make sure
	 * we're not stepping on any global I/O operations
	 */
	spin_lock_irqsave(&viodasd_spinlock, irq_flags);
	num_req_outstanding--;
	spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);

	error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO;
	if (error) {
		const struct vio_error_entry *err;
		err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
		pr_warning("read/write error %d:0x%04x (%s)\n",
			   event->xRc, bevent->sub_result, err->msg);
		num_sect = blk_rq_sectors(req);
	}
	qlock = req->q->queue_lock;
	spin_lock_irqsave(qlock, irq_flags);
	viodasd_end_request(req, error, num_sect);
	spin_unlock_irqrestore(qlock, irq_flags);

	/* Finally, try to get more requests off of this device's queue */
	viodasd_restart_all_queues_starting_from(DEVICE_NO(d));

	return 0;
}

/* This routine handles incoming block LP events */
static void handle_block_event(struct HvLpEvent *event)
{
	struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
	struct viodasd_waitevent *pwe;

	if (event == NULL)
		/* Notification that a partition went away! */
		return;
	/* First, we should NEVER get an int here...only acks */
	if (hvlpevent_is_int(event)) {
		pr_warning("Yikes! got an int in viodasd event handler!\n");
		if (hvlpevent_need_ack(event)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
	}

	switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
	case vioblockopen:
		/*
		 * Handle a response to an open request. We get all the
		 * disk information in the response, so update it. The
		 * correlation token contains a pointer to a waitevent
		 * structure that has a completion in it. Update the
		 * return code in the waitevent structure and post the
		 * completion to wake up the guy who sent the request
		 */
		pwe = (struct viodasd_waitevent *)event->xCorrelationToken;
		pwe->rc = event->xRc;
		pwe->sub_result = bevent->sub_result;
		if (event->xRc == HvLpEvent_Rc_Good) {
			const struct open_data *data = &bevent->u.open_data;
			struct viodasd_device *device =
				&viodasd_devices[bevent->disk];
			device->read_only =
				bevent->flags & vioblockflags_ro;
			device->size = data->disk_size;
			device->cylinders = data->cylinders;
			device->tracks = data->tracks;
			device->sectors = data->sectors;
			device->bytes_per_sector = data->bytes_per_sector;
			pwe->max_disk = data->max_disk;
		}
		complete(&pwe->com);
		break;
	case vioblockclose:
		break;
	case vioblockread:
	case vioblockwrite:
		viodasd_handle_read_write(bevent);
		break;

	default:
		pr_warning("invalid subtype!\n");
		if (hvlpevent_need_ack(event)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
	}
}

/*
 * Get the driver to reprobe for more disks.
 */
static ssize_t probe_disks(struct device_driver *drv, const char *buf,
		size_t count)
{
	struct viodasd_device *d;

	for (d = viodasd_devices; d < &viodasd_devices[MAX_DISKNO]; d++) {
		if (d->disk == NULL)
			probe_disk(d);
	}
	return count;
}
static DRIVER_ATTR(probe, S_IWUSR, NULL, probe_disks);

static int viodasd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct viodasd_device *d = &viodasd_devices[vdev->unit_address];

	d->dev = &vdev->dev;
	if (!probe_disk(d))
		return -ENODEV;
	return 0;
}

static int viodasd_remove(struct vio_dev *vdev)
{
	struct viodasd_device *d;

	d = &viodasd_devices[vdev->unit_address];
	if (d->disk) {
		del_gendisk(d->disk);
		blk_cleanup_queue(d->disk->queue);
		put_disk(d->disk);
		d->disk = NULL;
	}
	d->dev = NULL;
	return 0;
}

/**
 * viodasd_device_table: Used by vio.c to match devices that we
 * support.
 */
static struct vio_device_id viodasd_device_table[] __devinitdata = {
	{ "block", "IBM,iSeries-viodasd" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, viodasd_device_table);

static struct vio_driver viodasd_driver = {
	.id_table = viodasd_device_table,
	.probe = viodasd_probe,
	.remove = viodasd_remove,
	.driver = {
		.name = "viodasd",
		.owner = THIS_MODULE,
	}
};

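/* Set when the dynamic "probe" attribute must be removed on exit */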
static int need_delete_probe;

/*
 * Initialize the whole device driver. Handle module and non-module
 * versions
 */
static int __init viodasd_init(void)
{
	int rc;

	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
		rc = -ENODEV;
		goto early_fail;
	}

	/* Try to open to our host lp */
	if (viopath_hostLp == HvLpIndexInvalid)
		vio_set_hostlp();

	if (viopath_hostLp == HvLpIndexInvalid) {
		pr_warning("invalid hosting partition\n");
		rc = -EIO;
		goto early_fail;
	}

	pr_info("vers " VIOD_VERS ", hosting partition %d\n", viopath_hostLp);

	/* register the block device */
	rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
	if (rc) {
		pr_warning("Unable to get major number %d for %s\n",
			   VIODASD_MAJOR, VIOD_GENHD_NAME);
		goto early_fail;
	}
	/* Actually open the path to the hosting partition */
	rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
			  VIOMAXREQ + 2);
	if (rc) {
		pr_warning("error opening path to host partition %d\n",
			   viopath_hostLp);
		goto unregister_blk;
	}

	/* Initialize our request handler */
	vio_setHandler(viomajorsubtype_blockio, handle_block_event);

	rc = vio_register_driver(&viodasd_driver);
	if (rc) {
		pr_warning("vio_register_driver failed\n");
		goto unset_handler;
	}

	/*
	 * If this call fails, it just means that we cannot dynamically
	 * add virtual disks, but the driver will still work fine for
	 * all existing disks, so ignore the failure.
	 */
	if (!driver_create_file(&viodasd_driver.driver, &driver_attr_probe))
		need_delete_probe = 1;

	return 0;

unset_handler:
	vio_clearHandler(viomajorsubtype_blockio);
	viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
unregister_blk:
	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
early_fail:
	return rc;
}
module_init(viodasd_init);

void __exit viodasd_exit(void)
{
	if (need_delete_probe)
		driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
	vio_unregister_driver(&viodasd_driver);
	vio_clearHandler(viomajorsubtype_blockio);
	viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
}
module_exit(viodasd_exit);