| 1 | --- a/drivers/scsi/hosts.c |
| 2 | +++ b/drivers/scsi/hosts.c |
| 3 | @@ -107,8 +107,21 @@ scsi_unregister(struct Scsi_Host * sh){ |
| 4 | if (shn) shn->host_registered = 0; |
| 5 | /* else {} : This should not happen, we should panic here... */ |
| 6 | |
| 7 | + /* If we are removing the last host registered, it is safe to reuse |
| 8 | + * its host number (this avoids "holes" at boot time) (DB). |
| 9 | + * It is also safe to reuse the host numbers directly below it that |
| 10 | + * have been released earlier (avoiding further holes in the numbering). |
| 11 | + */ |
| 12 | + if(sh->host_no == max_scsi_hosts - 1) { |
| 13 | + while(--max_scsi_hosts >= next_scsi_host) { |
| 14 | + shpnt = scsi_hostlist; |
| 15 | + while(shpnt && shpnt->host_no != max_scsi_hosts - 1) |
| 16 | + shpnt = shpnt->next; |
| 17 | + if(shpnt) |
| 18 | + break; |
| 19 | + } |
| 20 | + } |
| 21 | next_scsi_host--; |
| 22 | - |
| 23 | kfree((char *) sh); |
| 24 | } |
| 25 | |
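The reclaim walk above is easier to see with concrete numbers. The stand-alone model below is purely illustrative (the array, counts, and host numbers are invented, not taken from the patch); it mirrors the patch's loop against a plain array instead of scsi_hostlist.

```c
/* Illustrative model of the host-number reclaim walk in scsi_unregister().
 * Hosts 0, 1 and 2 are still registered; host 3 was released earlier and
 * host 4 (the highest) is the one being unregistered now. */
#include <stdio.h>

int main(void)
{
	int still_registered[] = { 1, 1, 1, 0, 0 };
	int max_scsi_hosts = 5;	/* next host number to hand out */
	int next_scsi_host = 4;	/* registered hosts, not yet decremented */

	/* same walk as the patch: step back over trailing freed numbers */
	while (--max_scsi_hosts >= next_scsi_host) {
		if (still_registered[max_scsi_hosts - 1])
			break;
	}
	next_scsi_host--;

	/* prints "3 3": numbers 3 and 4 will be reused by the next adapter */
	printf("%d %d\n", max_scsi_hosts, next_scsi_host);
	return 0;
}
```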
| 26 | --- a/drivers/usb/hcd.c |
| 27 | +++ b/drivers/usb/hcd.c |
| 28 | @@ -1105,7 +1105,8 @@ static int hcd_submit_urb (struct urb *u |
| 29 | break; |
| 30 | case PIPE_BULK: |
| 31 | allowed |= USB_DISABLE_SPD | USB_QUEUE_BULK |
| 32 | - | USB_ZERO_PACKET | URB_NO_INTERRUPT; |
| 33 | + | USB_ZERO_PACKET | URB_NO_INTERRUPT |
| 34 | + | URB_NO_TRANSFER_DMA_MAP; |
| 35 | break; |
| 36 | case PIPE_INTERRUPT: |
| 37 | allowed |= USB_DISABLE_SPD; |
| 38 | @@ -1212,7 +1213,8 @@ static int hcd_submit_urb (struct urb *u |
| 39 | urb->setup_packet, |
| 40 | sizeof (struct usb_ctrlrequest), |
| 41 | PCI_DMA_TODEVICE); |
| 42 | - if (urb->transfer_buffer_length != 0) |
| 43 | + if (urb->transfer_buffer_length != 0 |
| 44 | + && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) |
| 45 | urb->transfer_dma = pci_map_single ( |
| 46 | hcd->pdev, |
| 47 | urb->transfer_buffer, |
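The hcd_submit_urb() changes above only make the core skip its own pci_map_single() when the submitter declares the buffer already mapped. A minimal sketch of the driver side follows, assuming kernel context (`<linux/usb.h>`); the helper name, device, endpoint and completion handler are hypothetical, and only URB_NO_TRANSFER_DMA_MAP and transfer_dma come from this patch.

```c
/* Sketch only (hypothetical helper): submit a bulk OUT urb whose data
 * buffer the caller has already mapped with pci_map_single(), so usbcore
 * must not map it again.  The completion handler is expected to
 * usb_free_urb() the urb when it runs. */
static int submit_premapped_bulk(struct usb_device *udev, int ep_out,
				 dma_addr_t buf_dma, int buf_len,
				 void (*done)(struct urb *), void *ctx)
{
	struct urb *urb = usb_alloc_urb(0);
	int status;

	if (!urb)
		return -ENOMEM;
	urb->dev = udev;
	urb->pipe = usb_sndbulkpipe(udev, ep_out);
	urb->transfer_dma = buf_dma;	/* hc uses this; transfer_buffer stays NULL */
	urb->transfer_buffer_length = buf_len;
	urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
	urb->complete = done;
	urb->context = ctx;
	status = usb_submit_urb(urb);
	if (status)
		usb_free_urb(urb);
	return status;
}
```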
| 48 | --- a/drivers/usb/host/ehci-hcd.c |
| 49 | +++ b/drivers/usb/host/ehci-hcd.c |
| 50 | @@ -399,6 +399,27 @@ static int ehci_start (struct usb_hcd *h |
| 51 | ehci_mem_cleanup (ehci); |
| 52 | return retval; |
| 53 | } |
| 54 | + |
| 55 | +{ |
| 56 | + u8 misc_reg; |
| 57 | + u32 vendor_id; |
| 58 | + |
| 59 | + pci_read_config_dword (ehci->hcd.pdev, PCI_VENDOR_ID, &vendor_id); |
| 60 | + if (vendor_id == 0x31041106) { |
| 61 | + /* VIA 6212 */ |
| 62 | + printk(KERN_INFO "EHCI: Enabling VIA 6212 workarounds\n"); |
| 63 | + pci_read_config_byte(ehci->hcd.pdev, 0x49, &misc_reg); |
| 64 | + misc_reg &= ~0x20; |
| 65 | + pci_write_config_byte(ehci->hcd.pdev, 0x49, misc_reg); |
| 66 | + pci_read_config_byte(ehci->hcd.pdev, 0x49, &misc_reg); |
| 67 | + |
| 68 | + pci_read_config_byte(ehci->hcd.pdev, 0x4b, &misc_reg); |
| 69 | + misc_reg |= 0x20; |
| 70 | + pci_write_config_byte(ehci->hcd.pdev, 0x4b, misc_reg); |
| 71 | + pci_read_config_byte(ehci->hcd.pdev, 0x4b, &misc_reg); |
| 72 | + } |
| 73 | +} |
| 74 | + |
| 75 | writel (INTR_MASK, &ehci->regs->intr_enable); |
| 76 | writel (ehci->periodic_dma, &ehci->regs->frame_list); |
| 77 | |
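One note on the magic constant in the hunk above: a 32-bit read at PCI_VENDOR_ID returns the device ID in the upper 16 bits and the vendor ID in the lower 16, so 0x31041106 is device 0x3104 from vendor 0x1106 (VIA). A more explicit form of the same test is sketched below; the helper name is invented and is not part of the patch.

```c
/* Sketch only: an equivalent, more explicit spelling of the vendor check
 * performed above (assumes <linux/pci.h>). */
static int is_via_ehci_quirk_target(struct pci_dev *pdev)
{
	u16 vendor, device;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device);
	return vendor == 0x1106 && device == 0x3104;	/* the VIA EHCI targeted above */
}
```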
| 78 | --- a/drivers/usb/host/ehci-q.c |
| 79 | +++ b/drivers/usb/host/ehci-q.c |
| 80 | @@ -791,6 +791,8 @@ static void qh_link_async (struct ehci_h |
| 81 | writel (cmd, &ehci->regs->command); |
| 82 | ehci->hcd.state = USB_STATE_RUNNING; |
| 83 | /* posted write need not be known to HC yet ... */ |
| 84 | + |
| 85 | + timer_action (ehci, TIMER_IO_WATCHDOG); |
| 86 | } |
| 87 | } |
| 88 | |
| 89 | --- a/drivers/usb/host/usb-uhci.c |
| 90 | +++ b/drivers/usb/host/usb-uhci.c |
| 91 | @@ -3034,6 +3034,21 @@ uhci_pci_probe (struct pci_dev *dev, con |
| 92 | |
| 93 | pci_set_master(dev); |
| 94 | |
| 95 | + { |
| 96 | + u8 misc_reg; |
| 97 | + u32 vendor_id; |
| 98 | + |
| 99 | + pci_read_config_dword (dev, PCI_VENDOR_ID, &vendor_id); |
| 100 | + if (vendor_id == 0x30381106) { |
| 101 | + /* VIA 6212 */ |
| 102 | + printk(KERN_INFO "UHCI: Enabling VIA 6212 workarounds\n"); |
| 103 | + pci_read_config_byte(dev, 0x41, &misc_reg); |
| 104 | + misc_reg &= ~0x10; |
| 105 | + pci_write_config_byte(dev, 0x41, misc_reg); |
| 106 | + pci_read_config_byte(dev, 0x41, &misc_reg); |
| 107 | + } |
| 108 | + } |
| 109 | + |
| 110 | /* Search for the IO base address.. */ |
| 111 | for (i = 0; i < 6; i++) { |
| 112 | |
| 113 | --- a/drivers/usb/storage/transport.c |
| 114 | +++ b/drivers/usb/storage/transport.c |
| 115 | @@ -54,6 +54,22 @@ |
| 116 | #include <linux/sched.h> |
| 117 | #include <linux/errno.h> |
| 118 | #include <linux/slab.h> |
| 119 | +#include <linux/pci.h> |
| 120 | +#include "../hcd.h" |
| 121 | + |
| 122 | +/* These direction values mirror the PCI_DMA_* definitions in pci.h, |
| 123 | + * so they can be passed straight through to the pci_* DMA calls */ |
| 124 | +enum dma_data_direction { |
| 125 | + DMA_BIDIRECTIONAL = 0, |
| 126 | + DMA_TO_DEVICE = 1, |
| 127 | + DMA_FROM_DEVICE = 2, |
| 128 | + DMA_NONE = 3, |
| 129 | +}; |
| 130 | + |
| 131 | +#define dma_map_sg(d,s,n,dir) pci_map_sg(d,s,n,dir) |
| 132 | +#define dma_unmap_sg(d,s,n,dir) pci_unmap_sg(d,s,n,dir) |
| 133 | + |
| 134 | + |
| 135 | |
| 136 | /*********************************************************************** |
| 137 | * Helper routines |
| 138 | @@ -554,6 +570,543 @@ int usb_stor_transfer_partial(struct us_ |
| 139 | return US_BULK_TRANSFER_SHORT; |
| 140 | } |
| 141 | |
| 142 | +/*-------------------------------------------------------------------*/ |
| 143 | +/** |
| 144 | + * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist |
| 145 | + * @dev: device to which the scatterlist will be mapped |
| 146 | + * @pipe: endpoint defining the mapping direction |
| 147 | + * @sg: the scatterlist to unmap |
| 148 | + * @n_hw_ents: the positive return value from usb_buffer_map_sg |
| 149 | + * |
| 150 | + * Reverses the effect of usb_buffer_map_sg(). |
| 151 | + */ |
| 152 | +static void usb_buffer_unmap_sg (struct usb_device *dev, unsigned pipe, |
| 153 | + struct scatterlist *sg, int n_hw_ents) |
| 154 | +{ |
| 155 | + struct usb_bus *bus; |
| 156 | + struct usb_hcd *hcd; |
| 157 | + struct pci_dev *pdev; |
| 158 | + |
| 159 | + if (!dev |
| 160 | + || !(bus = dev->bus) |
| 161 | + || !(hcd = bus->hcpriv) |
| 162 | + || !(pdev = hcd->pdev) |
| 163 | + || !pdev->dma_mask) |
| 164 | + return; |
| 165 | + |
| 166 | + dma_unmap_sg (pdev, sg, n_hw_ents, |
| 167 | + usb_pipein (pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); |
| 168 | +} |
| 169 | + |
| 170 | +/** |
| 171 | + * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint |
| 172 | + * @dev: device to which the scatterlist will be mapped |
| 173 | + * @pipe: endpoint defining the mapping direction |
| 174 | + * @sg: the scatterlist to map |
| 175 | + * @nents: the number of entries in the scatterlist |
| 176 | + * |
| 177 | + * Return value is either < 0 (indicating no buffers could be mapped), or |
| 178 | + * the number of DMA mapping array entries in the scatterlist. |
| 179 | + * |
| 180 | + * The caller is responsible for placing the resulting DMA addresses from |
| 181 | + * the scatterlist into URB transfer buffer pointers, and for setting the |
| 182 | + * URB_NO_TRANSFER_DMA_MAP transfer flag in each of those URBs. |
| 183 | + * |
| 184 | + * Top I/O rates come from queuing URBs, instead of waiting for each one |
| 185 | + * to complete before starting the next I/O. This is particularly easy |
| 186 | + * to do with scatterlists. Just allocate and submit one URB for each DMA |
| 187 | + * mapping entry returned, stopping on the first error or when all succeed. |
| 188 | + * Better yet, use the usb_sg_*() calls, which do that (and more) for you. |
| 189 | + * |
| 190 | + * This call would normally be used when translating scatterlist requests, |
| 191 | + * rather than usb_buffer_map(), since on some hardware (with IOMMUs) it |
| 192 | + * may be able to coalesce mappings for improved I/O efficiency. |
| 193 | + * |
| 194 | + * Reverse the effect of this call with usb_buffer_unmap_sg(). |
| 195 | + */ |
| 196 | +static int usb_buffer_map_sg (struct usb_device *dev, unsigned pipe, |
| 197 | + struct scatterlist *sg, int nents) |
| 198 | +{ |
| 199 | + struct usb_bus *bus; |
| 200 | + struct usb_hcd *hcd; |
| 201 | + struct pci_dev *pdev; |
| 202 | + |
| 203 | + if (!dev |
| 204 | + || usb_pipecontrol (pipe) |
| 205 | + || !(bus = dev->bus) |
| 206 | + || !(hcd = bus->hcpriv) |
| 207 | + || !(pdev = hcd->pdev) |
| 208 | + || !pdev->dma_mask) |
| 209 | + return -1; |
| 210 | + |
| 211 | + // FIXME generic api broken like pci, can't report errors |
| 212 | + return dma_map_sg (pdev, sg, nents, |
| 213 | + usb_pipein (pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); |
| 214 | +} |
| 215 | + |
| 216 | +static void sg_clean (struct usb_sg_request *io) |
| 217 | +{ |
| 218 | + struct usb_hcd *hcd = io->dev->bus->hcpriv; |
| 219 | + struct pci_dev *pdev = hcd->pdev; |
| 220 | + |
| 221 | + if (io->urbs) { |
| 222 | + while (io->entries--) |
| 223 | + usb_free_urb (io->urbs [io->entries]); |
| 224 | + kfree (io->urbs); |
| 225 | + io->urbs = 0; |
| 226 | + } |
| 227 | + if (pdev->dma_mask != 0) |
| 228 | + usb_buffer_unmap_sg (io->dev, io->pipe, io->sg, io->nents); |
| 229 | + io->dev = 0; |
| 230 | +} |
| 231 | + |
| 232 | +static void sg_complete (struct urb *urb) |
| 233 | +{ |
| 234 | + struct usb_sg_request *io = (struct usb_sg_request *) urb->context; |
| 235 | + |
| 236 | + spin_lock (&io->lock); |
| 237 | + |
| 238 | + /* In 2.5 we require hcds' endpoint queues not to progress after fault |
| 239 | + * reports, until the completion callback (this!) returns. That lets |
| 240 | + * device driver code (like this routine) unlink queued urbs first, |
| 241 | + * if it needs to, since the HC won't work on them at all. So it's |
| 242 | + * not possible for page N+1 to overwrite page N, and so on. |
| 243 | + * |
| 244 | + * That's only for "hard" faults; "soft" faults (unlinks) sometimes |
| 245 | + * complete before the HCD can get requests away from hardware, |
| 246 | + * though never during cleanup after a hard fault. |
| 247 | + */ |
| 248 | + if (io->status |
| 249 | + && (io->status != -ECONNRESET |
| 250 | + || urb->status != -ECONNRESET) |
| 251 | + && urb->actual_length) { |
| 252 | + US_DEBUGP("Error: %s ep%d%s scatterlist error %d/%d\n", |
| 253 | + io->dev->devpath, |
| 254 | + usb_pipeendpoint (urb->pipe), |
| 255 | + usb_pipein (urb->pipe) ? "in" : "out", |
| 256 | + urb->status, io->status); |
| 257 | + // BUG (); |
| 258 | + } |
| 259 | + |
| 260 | + if (urb->status && urb->status != -ECONNRESET) { |
| 261 | + int i, found, status; |
| 262 | + |
| 263 | + io->status = urb->status; |
| 264 | + |
| 265 | + /* the previous urbs, and this one, completed already. |
| 266 | + * unlink pending urbs so they won't rx/tx bad data. |
| 267 | + */ |
| 268 | + for (i = 0, found = 0; i < io->entries; i++) { |
| 269 | + if (!io->urbs [i]) |
| 270 | + continue; |
| 271 | + if (found) { |
| 272 | + status = usb_unlink_urb (io->urbs [i]); |
| 273 | + if (status != -EINPROGRESS && status != -EBUSY) |
| 274 | + US_DEBUGP("Error: %s, unlink --> %d\n", __FUNCTION__, status); |
| 275 | + } else if (urb == io->urbs [i]) |
| 276 | + found = 1; |
| 277 | + } |
| 278 | + } |
| 279 | + urb->dev = 0; |
| 280 | + |
| 281 | + /* on the last completion, signal usb_sg_wait() */ |
| 282 | + io->bytes += urb->actual_length; |
| 283 | + io->count--; |
| 284 | + if (!io->count) |
| 285 | + complete (&io->complete); |
| 286 | + |
| 287 | + spin_unlock (&io->lock); |
| 288 | +} |
| 289 | + |
| 290 | +/** |
| 291 | + * usb_sg_init - initializes scatterlist-based bulk/interrupt I/O request |
| 292 | + * @io: request block being initialized. until usb_sg_wait() returns, |
| 293 | + * treat this as a pointer to an opaque block of memory, |
| 294 | + * @dev: the usb device that will send or receive the data |
| 295 | + * @pipe: endpoint "pipe" used to transfer the data |
| 296 | + * @period: polling rate for interrupt endpoints, in frames or |
| 297 | + * (for high speed endpoints) microframes; ignored for bulk |
| 298 | + * @sg: scatterlist entries |
| 299 | + * @nents: how many entries in the scatterlist |
| 300 | + * @length: how many bytes to send from the scatterlist, or zero to |
| 301 | + * send every byte identified in the list. |
| 302 | + * @mem_flags: SLAB_* flags affecting memory allocations in this call |
| 303 | + * |
| 304 | + * Returns zero for success, else a negative errno value. This initializes a |
| 305 | + * scatter/gather request, allocating resources such as I/O mappings and urb |
| 306 | + * memory (except maybe memory used by USB controller drivers). |
| 307 | + * |
| 308 | + * The request must be issued using usb_sg_wait(), which waits for the I/O to |
| 309 | + * complete (or to be canceled) and then cleans up all resources allocated by |
| 310 | + * usb_sg_init(). |
| 311 | + * |
| 312 | + * The request may be canceled with usb_sg_cancel(), either before or after |
| 313 | + * usb_sg_wait() is called. |
| 314 | + */ |
| 315 | +int usb_sg_init ( |
| 316 | + struct usb_sg_request *io, |
| 317 | + struct usb_device *dev, |
| 318 | + unsigned pipe, |
| 319 | + unsigned period, |
| 320 | + struct scatterlist *sg, |
| 321 | + int nents, |
| 322 | + size_t length, |
| 323 | + int mem_flags |
| 324 | +) |
| 325 | +{ |
| 326 | + int i; |
| 327 | + int urb_flags; |
| 328 | + int dma; |
| 329 | + struct usb_hcd *hcd; |
| 330 | + |
| 331 | + hcd = dev->bus->hcpriv; |
| 332 | + |
| 333 | + if (!io || !dev || !sg |
| 334 | + || usb_pipecontrol (pipe) |
| 335 | + || usb_pipeisoc (pipe) |
| 336 | + || nents <= 0) |
| 337 | + return -EINVAL; |
| 338 | + |
| 339 | + spin_lock_init (&io->lock); |
| 340 | + io->dev = dev; |
| 341 | + io->pipe = pipe; |
| 342 | + io->sg = sg; |
| 343 | + io->nents = nents; |
| 344 | + |
| 345 | + /* not all host controllers use DMA (like the mainstream pci ones); |
| 346 | + * they can use PIO (sl811) or be software over another transport. |
| 347 | + */ |
| 348 | + dma = (hcd->pdev->dma_mask != 0); |
| 349 | + if (dma) |
| 350 | + io->entries = usb_buffer_map_sg (dev, pipe, sg, nents); |
| 351 | + else |
| 352 | + io->entries = nents; |
| 353 | + |
| 354 | + /* initialize all the urbs we'll use */ |
| 355 | + if (io->entries <= 0) |
| 356 | + return io->entries; |
| 357 | + |
| 358 | + io->count = 0; |
| 359 | + io->urbs = kmalloc (io->entries * sizeof *io->urbs, mem_flags); |
| 360 | + if (!io->urbs) |
| 361 | + goto nomem; |
| 362 | + |
| 363 | + urb_flags = USB_ASYNC_UNLINK | URB_NO_INTERRUPT | URB_NO_TRANSFER_DMA_MAP; |
| 364 | + if (usb_pipein (pipe)) |
| 365 | + urb_flags |= URB_SHORT_NOT_OK; |
| 366 | + |
| 367 | + for (i = 0; i < io->entries; i++, io->count = i) { |
| 368 | + unsigned len; |
| 369 | + |
| 370 | + io->urbs [i] = usb_alloc_urb (0); |
| 371 | + if (!io->urbs [i]) { |
| 372 | + io->entries = i; |
| 373 | + goto nomem; |
| 374 | + } |
| 375 | + |
| 376 | + io->urbs [i]->dev = 0; |
| 377 | + io->urbs [i]->pipe = pipe; |
| 378 | + io->urbs [i]->interval = period; |
| 379 | + io->urbs [i]->transfer_flags = urb_flags; |
| 380 | + |
| 381 | + io->urbs [i]->complete = sg_complete; |
| 382 | + io->urbs [i]->context = io; |
| 383 | + io->urbs [i]->status = -EINPROGRESS; |
| 384 | + io->urbs [i]->actual_length = 0; |
| 385 | + |
| 386 | + if (dma) { |
| 387 | + /* hc may use _only_ transfer_dma */ |
| 388 | + io->urbs [i]->transfer_dma = sg_dma_address (sg + i); |
| 389 | + len = sg_dma_len (sg + i); |
| 390 | + } else { |
| 391 | + /* hc may use _only_ transfer_buffer */ |
| 392 | + io->urbs [i]->transfer_buffer = |
| 393 | + page_address (sg [i].page) + sg [i].offset; |
| 394 | + len = sg [i].length; |
| 395 | + } |
| 396 | + |
| 397 | + if (length) { |
| 398 | + len = min_t (unsigned, len, length); |
| 399 | + length -= len; |
| 400 | + if (length == 0) |
| 401 | + io->entries = i + 1; |
| 402 | + } |
| 403 | + io->urbs [i]->transfer_buffer_length = len; |
| 404 | + } |
| 405 | + io->urbs [--i]->transfer_flags &= ~URB_NO_INTERRUPT; |
| 406 | + |
| 407 | + /* transaction state */ |
| 408 | + io->status = 0; |
| 409 | + io->bytes = 0; |
| 410 | + init_completion (&io->complete); |
| 411 | + return 0; |
| 412 | + |
| 413 | +nomem: |
| 414 | + sg_clean (io); |
| 415 | + return -ENOMEM; |
| 416 | +} |
| 417 | + |
| 418 | +/** |
| 419 | + * usb_sg_cancel - stop scatter/gather i/o issued by usb_sg_wait() |
| 420 | + * @io: request block, initialized with usb_sg_init() |
| 421 | + * |
| 422 | + * This stops a request after it has been started by usb_sg_wait(). |
| 423 | + * It can also prevent one initialized by usb_sg_init() from starting, |
| 424 | + * so that call just frees resources allocated to the request. |
| 425 | + */ |
| 426 | +void usb_sg_cancel (struct usb_sg_request *io) |
| 427 | +{ |
| 428 | + unsigned long flags; |
| 429 | + |
| 430 | + spin_lock_irqsave (&io->lock, flags); |
| 431 | + |
| 432 | + /* shut everything down, if it didn't already */ |
| 433 | + if (!io->status) { |
| 434 | + int i; |
| 435 | + |
| 436 | + io->status = -ECONNRESET; |
| 437 | + for (i = 0; i < io->entries; i++) { |
| 438 | + int retval; |
| 439 | + |
| 440 | + if (!io->urbs [i]->dev) |
| 441 | + continue; |
| 442 | + retval = usb_unlink_urb (io->urbs [i]); |
| 443 | + if (retval != -EINPROGRESS && retval != -EBUSY) |
| 444 | + US_DEBUGP("WARNING: %s, unlink --> %d\n", __FUNCTION__, retval); |
| 445 | + } |
| 446 | + } |
| 447 | + spin_unlock_irqrestore (&io->lock, flags); |
| 448 | +} |
| 449 | + |
| 450 | +/** |
| 451 | + * usb_sg_wait - synchronously execute scatter/gather request |
| 452 | + * @io: request block handle, as initialized with usb_sg_init(). |
| 453 | + * some fields become accessible when this call returns. |
| 454 | + * Context: !in_interrupt () |
| 455 | + * |
| 456 | + * This function blocks until the specified I/O operation completes. It |
| 457 | + * leverages the grouping of the related I/O requests to get good transfer |
| 458 | + * rates, by queueing the requests. At higher speeds, such queuing can |
| 459 | + * significantly improve USB throughput. |
| 460 | + * |
| 461 | + * There are three kinds of completion for this function. |
| 462 | + * (1) success, where io->status is zero. The number of io->bytes |
| 463 | + * transferred is as requested. |
| 464 | + * (2) error, where io->status is a negative errno value. The number |
| 465 | + * of io->bytes transferred before the error is usually less |
| 466 | + * than requested, and can be nonzero. |
| 467 | + * (3) cancelation, a type of error with status -ECONNRESET that |
| 468 | + * is initiated by usb_sg_cancel(). |
| 469 | + * |
| 470 | + * When this function returns, all memory allocated through usb_sg_init() or |
| 471 | + * this call will have been freed. The request block parameter may still be |
| 472 | + * passed to usb_sg_cancel(), or it may be freed. It could also be |
| 473 | + * reinitialized and then reused. |
| 474 | + * |
| 475 | + * Data Transfer Rates: |
| 476 | + * |
| 477 | + * Bulk transfers are valid for full or high speed endpoints. |
| 478 | + * The best full speed data rate is 19 packets of 64 bytes each |
| 479 | + * per frame, or 1216 bytes per millisecond. |
| 480 | + * The best high speed data rate is 13 packets of 512 bytes each |
| 481 | + * per microframe, or 52 KBytes per millisecond. |
| 482 | + * |
| 483 | + * The reason to use interrupt transfers through this API would most likely |
| 484 | + * be to reserve high speed bandwidth, where up to 24 KBytes per millisecond |
| 485 | + * could be transferred. That capability is less useful for low or full |
| 486 | + * speed interrupt endpoints, which allow at most one packet per millisecond, |
| 487 | + * of at most 8 or 64 bytes (respectively). |
| 488 | + */ |
| 489 | +void usb_sg_wait (struct usb_sg_request *io) |
| 490 | +{ |
| 491 | + int i, entries = io->entries; |
| 492 | + |
| 493 | + /* queue the urbs. */ |
| 494 | + spin_lock_irq (&io->lock); |
| 495 | + for (i = 0; i < entries && !io->status; i++) { |
| 496 | + int retval; |
| 497 | + |
| 498 | + io->urbs [i]->dev = io->dev; |
| 499 | + retval = usb_submit_urb (io->urbs [i]); |
| 500 | + |
| 501 | + /* after we submit, let completions or cancelations fire; |
| 502 | + * we handshake using io->status. |
| 503 | + */ |
| 504 | + spin_unlock_irq (&io->lock); |
| 505 | + switch (retval) { |
| 506 | + /* maybe retrying will recover */ |
| 507 | + case -ENXIO: // hc didn't queue this one |
| 508 | + case -EAGAIN: |
| 509 | + case -ENOMEM: |
| 510 | + io->urbs [i]->dev = 0; |
| 511 | + retval = 0; |
| 512 | + i--; |
| 513 | + yield (); |
| 514 | + break; |
| 515 | + |
| 516 | + /* no error? continue immediately. |
| 517 | + * |
| 518 | + * NOTE: to work better with UHCI (4K I/O buffer may |
| 519 | + * need 3K of TDs) it may be good to limit how many |
| 520 | + * URBs are queued at once; N milliseconds? |
| 521 | + */ |
| 522 | + case 0: |
| 523 | + cpu_relax (); |
| 524 | + break; |
| 525 | + |
| 526 | + /* fail any uncompleted urbs */ |
| 527 | + default: |
| 528 | + spin_lock_irq (&io->lock); |
| 529 | + io->count -= entries - i; |
| 530 | + if (io->status == -EINPROGRESS) |
| 531 | + io->status = retval; |
| 532 | + if (io->count == 0) |
| 533 | + complete (&io->complete); |
| 534 | + spin_unlock_irq (&io->lock); |
| 535 | + |
| 536 | + io->urbs [i]->dev = 0; |
| 537 | + io->urbs [i]->status = retval; |
| 538 | + |
| 539 | + US_DEBUGP("%s, submit --> %d\n", __FUNCTION__, retval); |
| 540 | + usb_sg_cancel (io); |
| 541 | + } |
| 542 | + spin_lock_irq (&io->lock); |
| 543 | + if (retval && io->status == -ECONNRESET) |
| 544 | + io->status = retval; |
| 545 | + } |
| 546 | + spin_unlock_irq (&io->lock); |
| 547 | + |
| 548 | + /* OK, yes, this could be packaged as non-blocking. |
| 549 | + * So could the submit loop above ... but it's easier to |
| 550 | + * solve neither problem than to solve both! |
| 551 | + */ |
| 552 | + wait_for_completion (&io->complete); |
| 553 | + |
| 554 | + sg_clean (io); |
| 555 | +} |
| 556 | + |
| 557 | +/* |
| 558 | + * Interpret the results of a URB transfer |
| 559 | + * |
| 560 | + * This function prints appropriate debugging messages, clears halts on |
| 561 | + * non-control endpoints, and translates the status to the corresponding |
| 562 | + * USB_STOR_XFER_xxx return code. |
| 563 | + */ |
| 564 | +static int interpret_urb_result(struct us_data *us, unsigned int pipe, |
| 565 | + unsigned int length, int result, unsigned int partial) |
| 566 | +{ |
| 567 | + US_DEBUGP("Status code %d; transferred %u/%u\n", |
| 568 | + result, partial, length); |
| 569 | + switch (result) { |
| 570 | + |
| 571 | + /* no error code; did we send all the data? */ |
| 572 | + case 0: |
| 573 | + if (partial != length) { |
| 574 | + US_DEBUGP("-- short transfer\n"); |
| 575 | + return USB_STOR_XFER_SHORT; |
| 576 | + } |
| 577 | + |
| 578 | + US_DEBUGP("-- transfer complete\n"); |
| 579 | + return USB_STOR_XFER_GOOD; |
| 580 | + |
| 581 | + /* stalled */ |
| 582 | + case -EPIPE: |
| 583 | + /* for control endpoints (used by CB[I]), a stall indicates |
| 584 | + * a failed command */ |
| 585 | + if (usb_pipecontrol(pipe)) { |
| 586 | + US_DEBUGP("-- stall on control pipe\n"); |
| 587 | + return USB_STOR_XFER_STALLED; |
| 588 | + } |
| 589 | + |
| 590 | + /* for other sorts of endpoint, clear the stall */ |
| 591 | + US_DEBUGP("clearing endpoint halt for pipe 0x%x\n", pipe); |
| 592 | + if (usb_stor_clear_halt(us, pipe) < 0) |
| 593 | + return USB_STOR_XFER_ERROR; |
| 594 | + return USB_STOR_XFER_STALLED; |
| 595 | + |
| 596 | + /* timeout or excessively long NAK */ |
| 597 | + case -ETIMEDOUT: |
| 598 | + US_DEBUGP("-- timeout or NAK\n"); |
| 599 | + return USB_STOR_XFER_ERROR; |
| 600 | + |
| 601 | + /* babble - the device tried to send more than we wanted to read */ |
| 602 | + case -EOVERFLOW: |
| 603 | + US_DEBUGP("-- babble\n"); |
| 604 | + return USB_STOR_XFER_LONG; |
| 605 | + |
| 606 | + /* the transfer was cancelled by abort, disconnect, or timeout */ |
| 607 | + case -ECONNRESET: |
| 608 | + US_DEBUGP("-- transfer cancelled\n"); |
| 609 | + return USB_STOR_XFER_ERROR; |
| 610 | + |
| 611 | + /* short scatter-gather read transfer */ |
| 612 | + case -EREMOTEIO: |
| 613 | + US_DEBUGP("-- short read transfer\n"); |
| 614 | + return USB_STOR_XFER_SHORT; |
| 615 | + |
| 616 | + /* abort or disconnect in progress */ |
| 617 | + case -EIO: |
| 618 | + US_DEBUGP("-- abort or disconnect in progress\n"); |
| 619 | + return USB_STOR_XFER_ERROR; |
| 620 | + |
| 621 | + /* the catch-all error case */ |
| 622 | + default: |
| 623 | + US_DEBUGP("-- unknown error\n"); |
| 624 | + return USB_STOR_XFER_ERROR; |
| 625 | + } |
| 626 | +} |
| 627 | + |
| 628 | +/* |
| 629 | + * Transfer a scatter-gather list via bulk transfer |
| 630 | + * |
| 631 | + * This function does basically the same thing as usb_stor_bulk_msg() |
| 632 | + * above, but it uses the usbcore scatter-gather library. |
| 633 | + */ |
| 634 | +int usb_stor_bulk_transfer_sglist(struct us_data *us, unsigned int pipe, |
| 635 | + struct scatterlist *sg, int num_sg, unsigned int length, |
| 636 | + unsigned int *act_len) |
| 637 | +{ |
| 638 | + int result; |
| 639 | + |
| 640 | + /* don't submit s-g requests during abort/disconnect processing */ |
| 641 | + if (us->flags & ABORTING_OR_DISCONNECTING) |
| 642 | + return USB_STOR_XFER_ERROR; |
| 643 | + |
| 644 | + /* initialize the scatter-gather request block */ |
| 645 | + US_DEBUGP("%s: xfer %u bytes, %d entries\n", __FUNCTION__, |
| 646 | + length, num_sg); |
| 647 | + result = usb_sg_init(&us->current_sg, us->pusb_dev, pipe, 0, |
| 648 | + sg, num_sg, length, SLAB_NOIO); |
| 649 | + if (result) { |
| 650 | + US_DEBUGP("usb_sg_init returned %d\n", result); |
| 651 | + return USB_STOR_XFER_ERROR; |
| 652 | + } |
| 653 | + |
| 654 | + /* since the block has been initialized successfully, it's now |
| 655 | + * okay to cancel it */ |
| 656 | + set_bit(US_FLIDX_SG_ACTIVE, &us->flags); |
| 657 | + |
| 658 | + /* did an abort/disconnect occur during the submission? */ |
| 659 | + if (us->flags & ABORTING_OR_DISCONNECTING) { |
| 660 | + |
| 661 | + /* cancel the request, if it hasn't been cancelled already */ |
| 662 | + if (test_and_clear_bit(US_FLIDX_SG_ACTIVE, &us->flags)) { |
| 663 | + US_DEBUGP("-- cancelling sg request\n"); |
| 664 | + usb_sg_cancel(&us->current_sg); |
| 665 | + } |
| 666 | + } |
| 667 | + |
| 668 | + /* wait for the completion of the transfer */ |
| 669 | + usb_sg_wait(&us->current_sg); |
| 670 | + clear_bit(US_FLIDX_SG_ACTIVE, &us->flags); |
| 671 | + |
| 672 | + result = us->current_sg.status; |
| 673 | + if (act_len) |
| 674 | + *act_len = us->current_sg.bytes; |
| 675 | + return interpret_urb_result(us, pipe, length, result, |
| 676 | + us->current_sg.bytes); |
| 677 | +} |
| 678 | + |
| 679 | /* |
| 680 | * Transfer an entire SCSI command's worth of data payload over the bulk |
| 681 | * pipe. |
| 682 | @@ -569,6 +1122,8 @@ void usb_stor_transfer(Scsi_Cmnd *srb, s |
| 683 | struct scatterlist *sg; |
| 684 | unsigned int total_transferred = 0; |
| 685 | unsigned int transfer_amount; |
| 686 | + unsigned int partial; |
| 687 | + unsigned int pipe; |
| 688 | |
| 689 | /* calculate how much we want to transfer */ |
| 690 | transfer_amount = usb_stor_transfer_length(srb); |
| 691 | @@ -585,23 +1140,34 @@ void usb_stor_transfer(Scsi_Cmnd *srb, s |
| 692 | * make the appropriate requests for each, until done |
| 693 | */ |
| 694 | sg = (struct scatterlist *) srb->request_buffer; |
| 695 | - for (i = 0; i < srb->use_sg; i++) { |
| 696 | - |
| 697 | - /* transfer the lesser of the next buffer or the |
| 698 | - * remaining data */ |
| 699 | - if (transfer_amount - total_transferred >= |
| 700 | - sg[i].length) { |
| 701 | - result = usb_stor_transfer_partial(us, |
| 702 | - sg[i].address, sg[i].length); |
| 703 | - total_transferred += sg[i].length; |
| 704 | - } else |
| 705 | - result = usb_stor_transfer_partial(us, |
| 706 | - sg[i].address, |
| 707 | - transfer_amount - total_transferred); |
| 708 | - |
| 709 | - /* if we get an error, end the loop here */ |
| 710 | - if (result) |
| 711 | - break; |
| 712 | + if (us->pusb_dev->speed == USB_SPEED_HIGH) { |
| 713 | + /* calculate the appropriate pipe information */ |
| 714 | + if (us->srb->sc_data_direction == SCSI_DATA_READ) |
| 715 | + pipe = usb_rcvbulkpipe(us->pusb_dev, us->ep_in); |
| 716 | + else |
| 717 | + pipe = usb_sndbulkpipe(us->pusb_dev, us->ep_out); |
| 718 | + /* use the usb core scatter-gather primitives */ |
| 719 | + result = usb_stor_bulk_transfer_sglist(us, pipe, |
| 720 | + sg, srb->use_sg, transfer_amount, &partial); |
| 721 | + } else { |
| 722 | + for (i = 0; i < srb->use_sg; i++) { |
| 723 | + |
| 724 | + /* transfer the lesser of the next buffer or the |
| 725 | + * remaining data */ |
| 726 | + if (transfer_amount - total_transferred >= |
| 727 | + sg[i].length) { |
| 728 | + result = usb_stor_transfer_partial(us, |
| 729 | + sg[i].address, sg[i].length); |
| 730 | + total_transferred += sg[i].length; |
| 731 | + } else |
| 732 | + result = usb_stor_transfer_partial(us, |
| 733 | + sg[i].address, |
| 734 | + transfer_amount - total_transferred); |
| 735 | + |
| 736 | + /* if we get an error, end the loop here */ |
| 737 | + if (result) |
| 738 | + break; |
| 739 | + } |
| 740 | } |
| 741 | } |
| 742 | else |
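usb_stor_bulk_transfer_sglist() above is the in-tree caller of the new scatter-gather helpers, but a stripped-down sketch of the calling convention may still help. The device, endpoint and scatterlist below are placeholders; only the usb_sg_init()/usb_sg_wait() signatures, SLAB_NOIO, and the status/bytes result fields come from this patch.

```c
/* Sketch only (placeholder names): synchronous bulk-IN of a scatterlist
 * using the helpers added to transport.c above. */
static int read_sglist(struct usb_device *udev, int ep_in,
		       struct scatterlist *sg, int nents, size_t total)
{
	struct usb_sg_request io;
	int result;

	result = usb_sg_init(&io, udev, usb_rcvbulkpipe(udev, ep_in),
			     0 /* period: ignored for bulk */,
			     sg, nents, total, SLAB_NOIO);
	if (result)
		return result;	/* nothing left allocated or mapped on failure */

	usb_sg_wait(&io);	/* blocks; frees everything usb_sg_init() allocated */

	/* status and bytes are the only fields meant for callers */
	if (io.status)
		printk(KERN_DEBUG "sg read failed %d after %u bytes\n",
		       io.status, (unsigned) io.bytes);
	return io.status;
}
```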
| 743 | --- a/drivers/usb/storage/transport.h |
| 744 | +++ b/drivers/usb/storage/transport.h |
| 745 | @@ -127,6 +127,16 @@ struct bulk_cs_wrap { |
| 746 | #define US_BULK_TRANSFER_ABORTED 3 /* transfer canceled */ |
| 747 | |
| 748 | /* |
| 749 | + * usb_stor_bulk_transfer_xxx() return codes, in order of severity |
| 750 | + */ |
| 751 | + |
| 752 | +#define USB_STOR_XFER_GOOD 0 /* good transfer */ |
| 753 | +#define USB_STOR_XFER_SHORT 1 /* transferred less than expected */ |
| 754 | +#define USB_STOR_XFER_STALLED 2 /* endpoint stalled */ |
| 755 | +#define USB_STOR_XFER_LONG 3 /* device tried to send too much */ |
| 756 | +#define USB_STOR_XFER_ERROR 4 /* transfer died in the middle */ |
| 757 | + |
| 758 | +/* |
| 759 | * Transport return codes |
| 760 | */ |
| 761 | |
| 762 | --- a/drivers/usb/storage/usb.h |
| 763 | +++ b/drivers/usb/storage/usb.h |
| 764 | @@ -111,6 +111,60 @@ typedef int (*trans_reset)(struct us_dat |
| 765 | typedef void (*proto_cmnd)(Scsi_Cmnd*, struct us_data*); |
| 766 | typedef void (*extra_data_destructor)(void *); /* extra data destructor */ |
| 767 | |
| 768 | +/* Dynamic flag definitions: used in set_bit() etc. */ |
| 769 | +#define US_FLIDX_URB_ACTIVE 18 /* 0x00040000 current_urb is in use */ |
| 770 | +#define US_FLIDX_SG_ACTIVE 19 /* 0x00080000 current_sg is in use */ |
| 771 | +#define US_FLIDX_ABORTING 20 /* 0x00100000 abort is in progress */ |
| 772 | +#define US_FLIDX_DISCONNECTING 21 /* 0x00200000 disconnect in progress */ |
| 773 | +#define ABORTING_OR_DISCONNECTING ((1UL << US_FLIDX_ABORTING) | \ |
| 774 | + (1UL << US_FLIDX_DISCONNECTING)) |
| 775 | +#define US_FLIDX_RESETTING 22 /* 0x00400000 device reset in progress */ |
| 776 | + |
| 777 | +/* processing state machine states */ |
| 778 | +#define US_STATE_IDLE 1 |
| 779 | +#define US_STATE_RUNNING 2 |
| 780 | +#define US_STATE_RESETTING 3 |
| 781 | +#define US_STATE_ABORTING 4 |
| 782 | + |
| 783 | +/** |
| 784 | + * struct usb_sg_request - support for scatter/gather I/O |
| 785 | + * @status: zero indicates success, else negative errno |
| 786 | + * @bytes: counts bytes transferred. |
| 787 | + * |
| 788 | + * These requests are initialized using usb_sg_init(), and then are used |
| 789 | + * as request handles passed to usb_sg_wait() or usb_sg_cancel(). Most |
| 790 | + * members of the request object aren't for driver access. |
| 791 | + * |
| 792 | + * The status and bytecount values are valid only after usb_sg_wait() |
| 793 | + * returns. If the status is zero, then the bytecount matches the total |
| 794 | + * from the request. |
| 795 | + * |
| 796 | + * After an error completion, drivers may need to clear a halt condition |
| 797 | + * on the endpoint. |
| 798 | + */ |
| 799 | +struct usb_sg_request { |
| 800 | + int status; |
| 801 | + size_t bytes; |
| 802 | + |
| 803 | + /* |
| 804 | + * members below are private to usbcore, |
| 805 | + * and are not provided for driver access! |
| 806 | + */ |
| 807 | + spinlock_t lock; |
| 808 | + |
| 809 | + struct usb_device *dev; |
| 810 | + int pipe; |
| 811 | + struct scatterlist *sg; |
| 812 | + int nents; |
| 813 | + |
| 814 | + int entries; |
| 815 | + struct urb **urbs; |
| 816 | + |
| 817 | + int count; |
| 818 | + struct completion complete; |
| 819 | +}; |
| 820 | + |
| 821 | + |
| 822 | /* we allocate one of these for every device that we remember */ |
| 823 | struct us_data { |
| 824 | struct us_data *next; /* next device */ |
| 825 | @@ -171,6 +225,7 @@ struct us_data { |
| 826 | struct urb *current_urb; /* non-int USB requests */ |
| 827 | struct completion current_done; /* the done flag */ |
| 828 | unsigned int tag; /* tag for bulk CBW/CSW */ |
| 829 | + struct usb_sg_request current_sg; /* scatter-gather req. */ |
| 830 | |
| 831 | /* the semaphore for sleeping the control thread */ |
| 832 | struct semaphore sema; /* to sleep thread on */ |
| 833 | --- a/include/linux/usb.h |
| 834 | +++ b/include/linux/usb.h |
| 835 | @@ -483,6 +483,8 @@ struct usb_driver { |
| 836 | #define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt needed */ |
| 837 | /* ... less overhead for QUEUE_BULK */ |
| 838 | #define USB_TIMEOUT_KILLED 0x1000 // only set by HCD! |
| 839 | +#define URB_NO_TRANSFER_DMA_MAP 0x0400 /* urb->transfer_dma valid on submit */ |
| 840 | +#define URB_NO_SETUP_DMA_MAP 0x0800 /* urb->setup_dma valid on submit */ |
| 841 | |
| 842 | struct iso_packet_descriptor |
| 843 | { |
| 844 | |