Root/target/linux/etrax/files/drivers/usb/host/hc-crisv10.c

1/*
2 *
3 * ETRAX 100LX USB Host Controller Driver
4 *
5 * Copyright (C) 2005 - 2008 Axis Communications AB
6 *
7 * Author: Konrad Eriksson <konrad.eriksson@axis.se>
8 *
9 */
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/moduleparam.h>
15#include <linux/spinlock.h>
16#include <linux/usb.h>
17#include <linux/platform_device.h>
18
19#include <asm/io.h>
20#include <asm/irq.h>
21#include <asm/arch/dma.h>
22#include <asm/arch/io_interface_mux.h>
23
24#include "../core/hcd.h"
25#include "../core/hub.h"
26#include "hc-crisv10.h"
27#include "hc-cris-dbg.h"
28
29
30/***************************************************************************/
31/***************************************************************************/
32/* Host Controller settings */
33/***************************************************************************/
34/***************************************************************************/
35
36#define VERSION "1.00-openwrt_diff-v1"
37#define COPYRIGHT "(c) 2005, 2006 Axis Communications AB"
38#define DESCRIPTION "ETRAX 100LX USB Host Controller"
39
40#define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
41#define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
42#define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR
43
44/* Number of physical ports in Etrax 100LX */
45#define USB_ROOT_HUB_PORTS 2
46
47const char hc_name[] = "hc-crisv10";
48const char product_desc[] = DESCRIPTION;
49
/* The number of epids is, among other things, used for pre-allocating
   ctrl, bulk and isoc EP descriptors (one for each epid).
   Assumed to be > 1 when initializing the DMA lists. */
53#define NBR_OF_EPIDS 32
54
55/* Support interrupt traffic intervals up to 128 ms. */
56#define MAX_INTR_INTERVAL 128
57
58/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
59   table must be "invalid". By this we mean that we shouldn't care about epid
60   attentions for this epid, or at least handle them differently from epid
61   attentions for "valid" epids. This define determines which one to use
62   (don't change it). */
63#define INVALID_EPID 31
/* A special epid for the bulk dummies. */
65#define DUMMY_EPID 30
66
67/* Module settings */
68
69MODULE_DESCRIPTION(DESCRIPTION);
70MODULE_LICENSE("GPL");
71MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");
72
73
74/* Module parameters */
75
76/* 0 = No ports enabled
77   1 = Only port 1 enabled (on board ethernet on devboard)
78   2 = Only port 2 enabled (external connector on devboard)
79   3 = Both ports enabled
80*/
81static unsigned int ports = 3;
82module_param(ports, uint, S_IRUGO);
83MODULE_PARM_DESC(ports, "Bitmask indicating USB ports to use");
84
85
86/***************************************************************************/
87/***************************************************************************/
88/* Shared global variables for this module */
89/***************************************************************************/
90/***************************************************************************/
91
92/* EP descriptor lists for non period transfers. Must be 32-bit aligned. */
93static volatile struct USB_EP_Desc TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
94
95static volatile struct USB_EP_Desc TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
96
97/* EP descriptor lists for period transfers. Must be 32-bit aligned. */
98static volatile struct USB_EP_Desc TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
99static volatile struct USB_SB_Desc TxIntrSB_zout __attribute__ ((aligned (4)));
100
101static volatile struct USB_EP_Desc TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
102static volatile struct USB_SB_Desc TxIsocSB_zout __attribute__ ((aligned (4)));
103
104static volatile struct USB_SB_Desc TxIsocSBList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
105
106/* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
107   causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
108   gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
109   EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
110   in each frame. */
111static volatile struct USB_EP_Desc TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
112
/* List of URB pointers, where each points to the active URB for an epid.
   For Bulk, Ctrl and Intr this is the URB currently added to the DMA lists
   (Isoc URBs are all added directly to the DMA lists).  As soon as a URB
   has completed, the queue is examined: the first URB in the queue is
   removed and moved to activeUrbList, its state changes to STARTED, and
   its transfer(s) get added to the DMA lists (exception: Isoc, where URBs
   enter state STARTED directly and their transfers are added to the DMA
   lists immediately). */
120static struct urb *activeUrbList[NBR_OF_EPIDS];
121
122/* Additional software state info for each epid */
123static struct etrax_epid epid_state[NBR_OF_EPIDS];
124
125/* Timer handles for bulk traffic timer used to avoid DMA bug where DMA stops
126   even if there is new data waiting to be processed */
127static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0);
128static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0);
129
130/* We want the start timer to expire before the eot timer, because the former
131   might start traffic, thus making it unnecessary for the latter to time
132   out. */
133#define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
134#define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */
135
136/* Delay before a URB completion happen when it's scheduled to be delayed */
137#define LATER_TIMER_DELAY (HZ/50) /* 20 ms */
138
139/* Simplifying macros for checking software state info of a epid */
140/* ----------------------------------------------------------------------- */
141#define epid_inuse(epid) epid_state[epid].inuse
142#define epid_out_traffic(epid) epid_state[epid].out_traffic
143#define epid_isoc(epid) (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
144#define epid_intr(epid) (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
145
146
147/***************************************************************************/
148/***************************************************************************/
149/* DEBUG FUNCTIONS */
150/***************************************************************************/
151/***************************************************************************/
152/* Note that these functions are always available in their "__" variants,
153   for use in error situations. The "__" missing variants are controlled by
154   the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
/* Dump all interesting fields of a URB to the console.  The "__" variant is
   always compiled in so it can be called from error paths regardless of the
   USB_DEBUG_URB setting (see the note above). */
static void __dump_urb(struct urb* purb)
{
  struct crisv10_urb_priv *urb_priv = purb->hcpriv;
  int urb_num = -1;
  /* hcpriv is only valid once this driver has accepted the URB; fall back
     to -1 for URBs that were never (or are no longer) enqueued here. */
  if(urb_priv) {
    urb_num = urb_priv->urb_num;
  }
  printk("\nURB:0x%x[%d]\n", (unsigned int)purb, urb_num);
  printk("dev :0x%08lx\n", (unsigned long)purb->dev);
  printk("pipe :0x%08x\n", purb->pipe);
  printk("status :%d\n", purb->status);
  printk("transfer_flags :0x%08x\n", purb->transfer_flags);
  printk("transfer_buffer :0x%08lx\n", (unsigned long)purb->transfer_buffer);
  printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
  printk("actual_length :%d\n", purb->actual_length);
  printk("setup_packet :0x%08lx\n", (unsigned long)purb->setup_packet);
  printk("start_frame :%d\n", purb->start_frame);
  printk("number_of_packets :%d\n", purb->number_of_packets);
  printk("interval :%d\n", purb->interval);
  printk("error_count :%d\n", purb->error_count);
  printk("context :0x%08lx\n", (unsigned long)purb->context);
  printk("complete :0x%08lx\n\n", (unsigned long)purb->complete);
}
178
/* Dump the raw fields of one DMA IN descriptor to the console. */
static void __dump_in_desc(volatile struct USB_IN_Desc *in)
{
  printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
  printk(" sw_len : 0x%04x (%d)\n", in->sw_len, in->sw_len);
  printk(" command : 0x%04x\n", in->command);
  printk(" next : 0x%08lx\n", in->next);
  printk(" buf : 0x%08lx\n", in->buf);
  printk(" hw_len : 0x%04x (%d)\n", in->hw_len, in->hw_len);
  printk(" status : 0x%04x\n\n", in->status);
}
189
190static void __dump_sb_desc(volatile struct USB_SB_Desc *sb)
191{
192  char tt = (sb->command & 0x30) >> 4;
193  char *tt_string;
194
195  switch (tt) {
196  case 0:
197    tt_string = "zout";
198    break;
199  case 1:
200    tt_string = "in";
201    break;
202  case 2:
203    tt_string = "out";
204    break;
205  case 3:
206    tt_string = "setup";
207    break;
208  default:
209    tt_string = "unknown (weird)";
210  }
211
212  printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb);
213  printk(" command:0x%04x (", sb->command);
214  printk("rem:%d ", (sb->command & 0x3f00) >> 8);
215  printk("full:%d ", (sb->command & 0x40) >> 6);
216  printk("tt:%d(%s) ", tt, tt_string);
217  printk("intr:%d ", (sb->command & 0x8) >> 3);
218  printk("eot:%d ", (sb->command & 0x2) >> 1);
219  printk("eol:%d)", sb->command & 0x1);
220  printk(" sw_len:0x%04x(%d)", sb->sw_len, sb->sw_len);
221  printk(" next:0x%08lx", sb->next);
222  printk(" buf:0x%08lx\n", sb->buf);
223}
224
225
/* Dump one EP descriptor, decoding the command-word bit fields. */
static void __dump_ep_desc(volatile struct USB_EP_Desc *ep)
{
  printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep);
  printk(" command:0x%04x (", ep->command);
  printk("ep_id:%d ", (ep->command & 0x1f00) >> 8);
  printk("enable:%d ", (ep->command & 0x10) >> 4);
  printk("intr:%d ", (ep->command & 0x8) >> 3);
  printk("eof:%d ", (ep->command & 0x2) >> 1);
  printk("eol:%d)", ep->command & 0x1);
  printk(" hw_len:0x%04x(%d)", ep->hw_len, ep->hw_len);
  printk(" next:0x%08lx", ep->next);
  printk(" sub:0x%08lx\n", ep->sub);
}
239
/* Walk the EP descriptor list for one pipe type and dump every EP together
   with its chain of SB descriptors.  Descriptor links hold physical
   addresses, hence the phys_to_virt() translations. */
static inline void __dump_ep_list(int pipe_type)
{
  volatile struct USB_EP_Desc *ep;
  volatile struct USB_EP_Desc *first_ep;
  volatile struct USB_SB_Desc *sb;

  /* Pick the head of the list matching the pipe type; silently ignore
     unknown types. */
  switch (pipe_type)
    {
    case PIPE_BULK:
      first_ep = &TxBulkEPList[0];
      break;
    case PIPE_CONTROL:
      first_ep = &TxCtrlEPList[0];
      break;
    case PIPE_INTERRUPT:
      first_ep = &TxIntrEPList[0];
      break;
    case PIPE_ISOCHRONOUS:
      first_ep = &TxIsocEPList[0];
      break;
    default:
      return;
    }
  ep = first_ep;

  printk("\n\nDumping EP list...\n\n");

  do {
    __dump_ep_desc(ep);
    /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
    sb = ep->sub ? phys_to_virt(ep->sub) : 0;
    while (sb) {
      __dump_sb_desc(sb);
      sb = sb->next ? phys_to_virt(sb->next) : 0;
    }
    ep = (volatile struct USB_EP_Desc *)(phys_to_virt(ep->next));

    /* Stop once the next-pointers have led us back to the list head. */
  } while (ep != first_ep);
}
279
/* Dump the decoded contents of R_USB_EPT_DATA for one epid.  The index
   write and data read are done with local interrupts off so the
   R_USB_EPT_INDEX/R_USB_EPT_DATA register pair cannot be disturbed by the
   interrupt handlers in between. */
static inline void __dump_ept_data(int epid)
{
  unsigned long flags;
  __u32 r_usb_ept_data;

  /* Only epids 0..31 exist. */
  if (epid < 0 || epid > 31) {
    printk("Cannot dump ept data for invalid epid %d\n", epid);
    return;
  }

  local_irq_save(flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  /* nop() between index write and data read -- same pattern as the other
     EPT register accesses in this driver. */
  nop();
  r_usb_ept_data = *R_USB_EPT_DATA;
  local_irq_restore(flags);

  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
  if (r_usb_ept_data == 0) {
    /* No need for more detailed printing. */
    return;
  }
  /* Decode the individual bit fields. */
  printk(" valid : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
  printk(" hold : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
  printk(" error_count_in : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
  printk(" t_in : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
  printk(" low_speed : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
  printk(" port : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
  printk(" error_code : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
  printk(" t_out : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
  printk(" error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
  printk(" max_len : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
  printk(" ep : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
  printk(" dev : %d\n", (r_usb_ept_data & 0x0000003f));
}
314
315static inline void __dump_ept_data_iso(int epid)
316{
317  unsigned long flags;
318  __u32 ept_data;
319
320  if (epid < 0 || epid > 31) {
321    printk("Cannot dump ept data for invalid epid %d\n", epid);
322    return;
323  }
324
325  local_irq_save(flags);
326  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
327  nop();
328  ept_data = *R_USB_EPT_DATA_ISO;
329  local_irq_restore(flags);
330
331  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data, epid);
332  if (ept_data == 0) {
333    /* No need for more detailed printing. */
334    return;
335  }
336  printk(" valid : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, valid,
337                        ept_data));
338  printk(" port : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, port,
339                        ept_data));
340  printk(" error_code : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code,
341                        ept_data));
342  printk(" max_len : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len,
343                        ept_data));
344  printk(" ep : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, ep,
345                        ept_data));
346  printk(" dev : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, dev,
347                        ept_data));
348}
349
/* Dump R_USB_EPT_DATA for every hardware epid (0..31). */
static inline void __dump_ept_data_list(void)
{
  int epid;

  printk("Dumping the whole R_USB_EPT_DATA list\n");

  for (epid = 0; epid < 32; epid++) {
    __dump_ept_data(epid);
  }
}
360
361static void debug_epid(int epid) {
362  int i;
363  
364  if(epid_isoc(epid)) {
365    __dump_ept_data_iso(epid);
366  } else {
367    __dump_ept_data(epid);
368  }
369
370  printk("Bulk:\n");
371  for(i = 0; i < 32; i++) {
372    if(IO_EXTRACT(USB_EP_command, epid, TxBulkEPList[i].command) ==
373       epid) {
374      printk("%d: ", i); __dump_ep_desc(&(TxBulkEPList[i]));
375    }
376  }
377
378  printk("Ctrl:\n");
379  for(i = 0; i < 32; i++) {
380    if(IO_EXTRACT(USB_EP_command, epid, TxCtrlEPList[i].command) ==
381       epid) {
382      printk("%d: ", i); __dump_ep_desc(&(TxCtrlEPList[i]));
383    }
384  }
385
386  printk("Intr:\n");
387  for(i = 0; i < MAX_INTR_INTERVAL; i++) {
388    if(IO_EXTRACT(USB_EP_command, epid, TxIntrEPList[i].command) ==
389       epid) {
390      printk("%d: ", i); __dump_ep_desc(&(TxIntrEPList[i]));
391    }
392  }
393  
394  printk("Isoc:\n");
395  for(i = 0; i < 32; i++) {
396    if(IO_EXTRACT(USB_EP_command, epid, TxIsocEPList[i].command) ==
397       epid) {
398      printk("%d: ", i); __dump_ep_desc(&(TxIsocEPList[i]));
399    }
400  }
401
402  __dump_ept_data_list();
403  __dump_ep_list(PIPE_INTERRUPT);
404  printk("\n\n");
405}
406
407
408
409char* hcd_status_to_str(__u8 bUsbStatus) {
410  static char hcd_status_str[128];
411  hcd_status_str[0] = '\0';
412  if(bUsbStatus & IO_STATE(R_USB_STATUS, ourun, yes)) {
413    strcat(hcd_status_str, "ourun ");
414  }
415  if(bUsbStatus & IO_STATE(R_USB_STATUS, perror, yes)) {
416    strcat(hcd_status_str, "perror ");
417  }
418  if(bUsbStatus & IO_STATE(R_USB_STATUS, device_mode, yes)) {
419    strcat(hcd_status_str, "device_mode ");
420  }
421  if(bUsbStatus & IO_STATE(R_USB_STATUS, host_mode, yes)) {
422    strcat(hcd_status_str, "host_mode ");
423  }
424  if(bUsbStatus & IO_STATE(R_USB_STATUS, started, yes)) {
425    strcat(hcd_status_str, "started ");
426  }
427  if(bUsbStatus & IO_STATE(R_USB_STATUS, running, yes)) {
428    strcat(hcd_status_str, "running ");
429  }
430  return hcd_status_str;
431}
432
433
434char* sblist_to_str(struct USB_SB_Desc* sb_desc) {
435  static char sblist_to_str_buff[128];
436  char tmp[32], tmp2[32];
437  sblist_to_str_buff[0] = '\0';
438  while(sb_desc != NULL) {
439    switch(IO_EXTRACT(USB_SB_command, tt, sb_desc->command)) {
440    case 0: sprintf(tmp, "zout"); break;
441    case 1: sprintf(tmp, "in"); break;
442    case 2: sprintf(tmp, "out"); break;
443    case 3: sprintf(tmp, "setup"); break;
444    }
445    sprintf(tmp2, "(%s %d)", tmp, sb_desc->sw_len);
446    strcat(sblist_to_str_buff, tmp2);
447    if(sb_desc->next != 0) {
448      sb_desc = phys_to_virt(sb_desc->next);
449    } else {
450      sb_desc = NULL;
451    }
452  }
453  return sblist_to_str_buff;
454}
455
456char* port_status_to_str(__u16 wPortStatus) {
457  static char port_status_str[128];
458  port_status_str[0] = '\0';
459  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) {
460    strcat(port_status_str, "connected ");
461  }
462  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) {
463    strcat(port_status_str, "enabled ");
464  }
465  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, suspended, yes)) {
466    strcat(port_status_str, "suspended ");
467  }
468  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes)) {
469    strcat(port_status_str, "reset ");
470  }
471  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, speed, full)) {
472    strcat(port_status_str, "full-speed ");
473  } else {
474    strcat(port_status_str, "low-speed ");
475  }
476  return port_status_str;
477}
478
479
480char* endpoint_to_str(struct usb_endpoint_descriptor *ed) {
481  static char endpoint_to_str_buff[128];
482  char tmp[32];
483  int epnum = ed->bEndpointAddress & 0x0F;
484  int dir = ed->bEndpointAddress & 0x80;
485  int type = ed->bmAttributes & 0x03;
486  endpoint_to_str_buff[0] = '\0';
487  sprintf(endpoint_to_str_buff, "ep:%d ", epnum);
488  switch(type) {
489  case 0:
490    sprintf(tmp, " ctrl");
491    break;
492  case 1:
493    sprintf(tmp, " isoc");
494    break;
495  case 2:
496    sprintf(tmp, " bulk");
497    break;
498  case 3:
499    sprintf(tmp, " intr");
500    break;
501  }
502  strcat(endpoint_to_str_buff, tmp);
503  if(dir) {
504    sprintf(tmp, " in");
505  } else {
506    sprintf(tmp, " out");
507  }
508  strcat(endpoint_to_str_buff, tmp);
509
510  return endpoint_to_str_buff;
511}
512
513/* Debug helper functions for Transfer Controller */
/* Debug helper functions for Transfer Controller */
/* Describe a pipe as "dir:<d> type:<t> dev:<n> ep:<n>".  Returns a static
   buffer: not reentrant, only valid until the next call. */
char* pipe_to_str(unsigned int pipe) {
  static char pipe_to_str_buff[128];

  sprintf(pipe_to_str_buff, "dir:%s type:%s dev:%d ep:%d",
          str_dir(pipe), str_type(pipe),
          usb_pipedevice(pipe), usb_pipeendpoint(pipe));
  return pipe_to_str_buff;
}
527
528
#define USB_DEBUG_DESC 1

/* Descriptor dump helpers that compile away when USB_DEBUG_DESC is not
   defined.  The enabled variadic variant must forward its arguments with
   __VA_ARGS__; the previous literal "..." in the expansion is not valid C
   and broke any use of dump_sb_desc().  A dump_ept_data() stub is also
   provided in the disabled branch so callers build in both configurations. */
#ifdef USB_DEBUG_DESC
#define dump_in_desc(x) __dump_in_desc(x)
#define dump_sb_desc(...) __dump_sb_desc(__VA_ARGS__)
#define dump_ep_desc(x) __dump_ep_desc(x)
#define dump_ept_data(x) __dump_ept_data(x)
#else
#define dump_in_desc(...) do {} while (0)
#define dump_sb_desc(...) do {} while (0)
#define dump_ep_desc(...) do {} while (0)
#define dump_ept_data(...) do {} while (0)
#endif
541
542
543/* Uncomment this to enable massive function call trace
544   #define USB_DEBUG_TRACE */
545
546#ifdef USB_DEBUG_TRACE
547#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
548#define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
549#else
550#define DBFENTER do {} while (0)
551#define DBFEXIT do {} while (0)
552#endif
553
/* Panic unless x is DWORD (32-bit) aligned.  Wrapped in do { } while (0) so
   the macro expands to a single statement and is safe in unbraced if/else
   bodies (the previous bare-if form created a dangling-else hazard). */
#define CHECK_ALIGN(x) do { if (((__u32)(x)) & 0x00000003) \
  panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__); } while (0)

/* Most helpful debugging aid */
#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
559
560
561/***************************************************************************/
562/***************************************************************************/
563/* Forward declarations */
564/***************************************************************************/
565/***************************************************************************/
566void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg);
567void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg);
568void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg);
569void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg);
570
571void rh_port_status_change(__u16[]);
572int rh_clear_port_feature(__u8, __u16);
573int rh_set_port_feature(__u8, __u16);
574static void rh_disable_port(unsigned int port);
575
576static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
577                     int timer);
578
579static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
580             int mem_flags);
581static void tc_free_epid(struct usb_host_endpoint *ep);
582static int tc_allocate_epid(void);
583static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status);
584static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
585                int status);
586
587static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
588               int mem_flags);
589static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb);
590
591static int crisv10_usb_check_bandwidth(struct usb_device *dev,struct urb *urb);
592static void crisv10_usb_claim_bandwidth(
593    struct usb_device *dev, struct urb *urb, int bustime, int isoc);
594static void crisv10_usb_release_bandwidth(
595    struct usb_hcd *hcd, int isoc, int bandwidth);
596
597static inline struct urb *urb_list_first(int epid);
598static inline void urb_list_add(struct urb *urb, int epid,
599                      int mem_flags);
600static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid);
601static inline void urb_list_del(struct urb *urb, int epid);
602static inline void urb_list_move_last(struct urb *urb, int epid);
603static inline struct urb *urb_list_next(struct urb *urb, int epid);
604
605int create_sb_for_urb(struct urb *urb, int mem_flags);
606int init_intr_urb(struct urb *urb, int mem_flags);
607
608static inline void etrax_epid_set(__u8 index, __u32 data);
609static inline void etrax_epid_clear_error(__u8 index);
610static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
611                          __u8 toggle);
612static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout);
613static inline __u32 etrax_epid_get(__u8 index);
614
615/* We're accessing the same register position in Etrax so
616   when we do full access the internal difference doesn't matter */
617#define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
618#define etrax_epid_iso_get(index) etrax_epid_get(index)
619
620
621static void tc_dma_process_isoc_urb(struct urb *urb);
622static void tc_dma_process_queue(int epid);
623static void tc_dma_unlink_intr_urb(struct urb *urb);
624static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc);
625static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc);
626
627static void tc_bulk_start_timer_func(unsigned long dummy);
628static void tc_bulk_eot_timer_func(unsigned long dummy);
629
630
631/*************************************************************/
632/*************************************************************/
/* Host Controller Driver block */
634/*************************************************************/
635/*************************************************************/
636
637/* HCD operations */
638static irqreturn_t crisv10_hcd_top_irq(int irq, void*);
639static int crisv10_hcd_reset(struct usb_hcd *);
640static int crisv10_hcd_start(struct usb_hcd *);
641static void crisv10_hcd_stop(struct usb_hcd *);
642#ifdef CONFIG_PM
643static int crisv10_hcd_suspend(struct device *, u32, u32);
644static int crisv10_hcd_resume(struct device *, u32);
645#endif /* CONFIG_PM */
646static int crisv10_hcd_get_frame(struct usb_hcd *);
647
648static int tc_urb_enqueue(struct usb_hcd *, struct urb *, gfp_t mem_flags);
649static int tc_urb_dequeue(struct usb_hcd *, struct urb *, int);
650static void tc_endpoint_disable(struct usb_hcd *, struct usb_host_endpoint *ep);
651
652static int rh_status_data_request(struct usb_hcd *, char *);
653static int rh_control_request(struct usb_hcd *, u16, u16, u16, char*, u16);
654
655#ifdef CONFIG_PM
656static int crisv10_hcd_hub_suspend(struct usb_hcd *);
657static int crisv10_hcd_hub_resume(struct usb_hcd *);
658#endif /* CONFIG_PM */
659#ifdef CONFIG_USB_OTG
660static int crisv10_hcd_start_port_reset(struct usb_hcd *, unsigned);
661#endif /* CONFIG_USB_OTG */
662
/* host controller driver interface */
static const struct hc_driver crisv10_hc_driver =
  {
    .description = hc_name,
    .product_desc = product_desc,
    .hcd_priv_size = sizeof(struct crisv10_hcd),

    /* Attaching IRQ handler manually in probe() */
    /* .irq = crisv10_hcd_irq, */

    /* USB 1.1 (full/low speed) controller. */
    .flags = HCD_USB11,

    /* called to init HCD and root hub */
    .reset = crisv10_hcd_reset,
    .start = crisv10_hcd_start,

    /* cleanly make HCD stop writing memory and doing I/O */
    .stop = crisv10_hcd_stop,

    /* return current frame number */
    .get_frame_number = crisv10_hcd_get_frame,


    /* Manage i/o requests via the Transfer Controller */
    .urb_enqueue = tc_urb_enqueue,
    .urb_dequeue = tc_urb_dequeue,

    /* hw synch, freeing endpoint resources that urb_dequeue can't */
    .endpoint_disable = tc_endpoint_disable,


    /* Root Hub support */
    .hub_status_data = rh_status_data_request,
    .hub_control = rh_control_request,
#ifdef CONFIG_PM
    /* NOTE(review): only crisv10_hcd_hub_suspend/_resume are declared in
       this file; rh_suspend_request/rh_resume_request are not -- confirm
       these symbols exist before building with CONFIG_PM. */
    .hub_suspend = rh_suspend_request,
    .hub_resume = rh_resume_request,
#endif /* CONFIG_PM */
#ifdef CONFIG_USB_OTG
    .start_port_reset = crisv10_hcd_start_port_reset,
#endif /* CONFIG_USB_OTG */
  };
705
706
707/*
708 * conversion between pointers to a hcd and the corresponding
709 * crisv10_hcd
710 */
711
/* Return the driver-private state embedded in the generic usb_hcd
   (allocated via hcd_priv_size in the hc_driver above). */
static inline struct crisv10_hcd *hcd_to_crisv10_hcd(struct usb_hcd *hcd)
{
    return (struct crisv10_hcd *) hcd->hcd_priv;
}
716
/* Inverse of hcd_to_crisv10_hcd(): recover the enclosing usb_hcd from a
   pointer to its hcd_priv area via container_of(). */
static inline struct usb_hcd *crisv10_hcd_to_hcd(struct crisv10_hcd *hcd)
{
    return container_of((void *) hcd, struct usb_hcd, hcd_priv);
}
721
722/* check if specified port is in use */
723static inline int port_in_use(unsigned int port)
724{
725    return ports & (1 << port);
726}
727
728/* number of ports in use */
729static inline unsigned int num_ports(void)
730{
731    unsigned int i, num = 0;
732    for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
733        if (port_in_use(i))
734            num++;
735    return num;
736}
737
738/* map hub port number to the port number used internally by the HC */
739static inline unsigned int map_port(unsigned int port)
740{
741  unsigned int i, num = 0;
742  for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
743    if (port_in_use(i))
744      if (++num == port)
745    return i;
746  return -1;
747}
748
749/* size of descriptors in slab cache */
750#ifndef MAX
751#define MAX(x, y) ((x) > (y) ? (x) : (y))
752#endif
753
754
755/******************************************************************/
756/* Hardware Interrupt functions */
757/******************************************************************/
758
759/* Fast interrupt handler for HC */
/* Fast interrupt handler for HC.  Latches all interrupt-related registers
   into a crisv10_irq_reg snapshot and then dispatches to the specific
   handlers in priority order.  The register read ORDER below matters:
   several reads have clear-on-read side effects (see the comments at each
   read), so this runs with local interrupts disabled. */
static irqreturn_t crisv10_hcd_top_irq(int irq, void *vcd)
{
  struct usb_hcd *hcd = vcd;
  struct crisv10_irq_reg reg;
  __u32 irq_mask;
  unsigned long flags;

  DBFENTER;

  ASSERT(hcd != NULL);
  reg.hcd = hcd;

  /* Turn off other interrupts while handling these sensitive cases */
  local_irq_save(flags);
  
  /* Read out which interrupts are flagged */
  irq_mask = *R_USB_IRQ_MASK_READ;
  reg.r_usb_irq_mask_read = irq_mask;

  /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
     R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
     clears the ourun and perror fields of R_USB_STATUS. */
  reg.r_usb_status = *R_USB_STATUS;
  
  /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
     interrupts. */
  reg.r_usb_epid_attn = *R_USB_EPID_ATTN;
  
  /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
     port_status interrupt. */
  reg.r_usb_rh_port_status_1 = *R_USB_RH_PORT_STATUS_1;
  reg.r_usb_rh_port_status_2 = *R_USB_RH_PORT_STATUS_2;
  
  /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
  /* Note: the lower 11 bits contain the actual frame number, sent with each
     sof. */
  reg.r_usb_fm_number = *R_USB_FM_NUMBER;

  /* Interrupts are handled in order of priority. */
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
    crisv10_hcd_port_status_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
    crisv10_hcd_epid_attn_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
    crisv10_hcd_ctl_status_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
    crisv10_hcd_isoc_eof_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
    /* Update/restart the bulk start timer since obviously the channel is
       running. */
    mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
    /* Update/restart the bulk eot timer since we just received a bulk eot
       interrupt. */
    mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);

    /* Check for finished bulk transfers on epids */
    check_finished_bulk_tx_epids(hcd, 0);
  }
  local_irq_restore(flags);

  DBFEXIT;
  return IRQ_HANDLED;
}
827
828
829void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg) {
830  struct usb_hcd *hcd = reg->hcd;
831  struct crisv10_urb_priv *urb_priv;
832  int epid;
833  DBFENTER;
834
835  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
836    if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {
837      struct urb *urb;
838      __u32 ept_data;
839      int error_code;
840
841      if (epid == DUMMY_EPID || epid == INVALID_EPID) {
842    /* We definitely don't care about these ones. Besides, they are
843       always disabled, so any possible disabling caused by the
844       epid attention interrupt is irrelevant. */
845    continue;
846      }
847
848      if(!epid_inuse(epid)) {
849    irq_err("Epid attention on epid:%d that isn't in use\n", epid);
850    printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
851    debug_epid(epid);
852    continue;
853      }
854
855      /* Note that although there are separate R_USB_EPT_DATA and
856     R_USB_EPT_DATA_ISO registers, they are located at the same address and
857     are of the same size. In other words, this read should be ok for isoc
858     also. */
859      ept_data = etrax_epid_get(epid);
860      error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, ept_data);
861
862      /* Get the active URB for this epid. We blatantly assume
863     that only this URB could have caused the epid attention. */
864      urb = activeUrbList[epid];
865      if (urb == NULL) {
866    irq_err("Attention on epid:%d error:%d with no active URB.\n",
867        epid, error_code);
868    printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
869    debug_epid(epid);
870    continue;
871      }
872
873      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
874      ASSERT(urb_priv);
875
876      /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
877      if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
878
879    /* Isoc traffic doesn't have error_count_in/error_count_out. */
880    if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
881        (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, ept_data) == 3 ||
882         IO_EXTRACT(R_USB_EPT_DATA, error_count_out, ept_data) == 3)) {
883      /* Check if URB allready is marked for late-finish, we can get
884         several 3rd error for Intr traffic when a device is unplugged */
885      if(urb_priv->later_data == NULL) {
886        /* 3rd error. */
887        irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid,
888             str_dir(urb->pipe), str_type(urb->pipe),
889             (unsigned int)urb, urb_priv->urb_num);
890      
891        tc_finish_urb_later(hcd, urb, -EPROTO);
892      }
893
894    } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
895      irq_warn("Perror for epid:%d\n", epid);
896      printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
897      printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
898      __dump_urb(urb);
899      debug_epid(epid);
900
901      if (!(ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
902        /* invalid ep_id */
903        panic("Perror because of invalid epid."
904          " Deconfigured too early?");
905      } else {
906        /* past eof1, near eof, zout transfer, setup transfer */
907        /* Dump the urb and the relevant EP descriptor. */
908        panic("Something wrong with DMA descriptor contents."
909          " Too much traffic inserted?");
910      }
911    } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
912      /* buffer ourun */
913      printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
914      printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
915      __dump_urb(urb);
916      debug_epid(epid);
917
918      panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid);
919    } else {
920      irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid,
921           str_dir(urb->pipe), str_type(urb->pipe));
922      printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
923      __dump_urb(urb);
924      debug_epid(epid);
925    }
926
927      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
928                          stall)) {
929    /* Not really a protocol error, just says that the endpoint gave
930       a stall response. Note that error_code cannot be stall for isoc. */
931    if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
932      panic("Isoc traffic cannot stall");
933    }
934
935    tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid,
936           str_dir(urb->pipe), str_type(urb->pipe), (unsigned int)urb);
937    tc_finish_urb(hcd, urb, -EPIPE);
938
939      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
940                          bus_error)) {
941    /* Two devices responded to a transaction request. Must be resolved
942       by software. FIXME: Reset ports? */
943    panic("Bus error for epid %d."
944          " Two devices responded to transaction request\n",
945          epid);
946
947      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
948                          buffer_error)) {
949    /* DMA overrun or underrun. */
950    irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid,
951         str_dir(urb->pipe), str_type(urb->pipe));
952
953    /* It seems that error_code = buffer_error in
954       R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
955       are the same error. */
956    tc_finish_urb(hcd, urb, -EPROTO);
957      } else {
958      irq_warn("Unknown attention on epid:%d (%s %s)\n", epid,
959           str_dir(urb->pipe), str_type(urb->pipe));
960      dump_ept_data(epid);
961      }
962    }
963  }
964  DBFEXIT;
965}
966
967void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg)
968{
969  __u16 port_reg[USB_ROOT_HUB_PORTS];
970  DBFENTER;
971  port_reg[0] = reg->r_usb_rh_port_status_1;
972  port_reg[1] = reg->r_usb_rh_port_status_2;
973  rh_port_status_change(port_reg);
974  DBFEXIT;
975}
976
/* Bottom-half handler for the isoc end-of-frame (eof) interrupt.
   Scans epids carrying isoc traffic and, for IN isoc URBs queued without
   URB_ISO_ASAP, enables the EP descriptor once the hardware frame counter
   reaches the URB's requested start_frame. */
void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg)
{
  int epid;
  struct urb *urb;
  struct crisv10_urb_priv *urb_priv;

  DBFENTER;

  /* NOTE(review): the loop stops at NBR_OF_EPIDS - 1, so the last epid is
     never scanned -- presumably a reserved epid; confirm. */
  for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {

    /* Only check epids that are in use, is valid and has SB list */
    if (!epid_inuse(epid) || epid == INVALID_EPID ||
        TxIsocEPList[epid].sub == 0 || epid == DUMMY_EPID) {
      /* Nothing here to see. */
      continue;
    }
    ASSERT(epid_isoc(epid));

    /* Get the active URB for this epid (if any). */
    urb = activeUrbList[epid];
    if (urb == 0) {
      isoc_warn("Ignoring NULL urb for epid:%d\n", epid);
      continue;
    }
    /* Only IN traffic is started from here (epid_out_traffic false). */
    if(!epid_out_traffic(epid)) {
      /* Sanity check. */
      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);

      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);

      if (urb_priv->urb_state == NOT_STARTED) {
        /* If ASAP is not set and urb->start_frame is the current frame,
           start the transfer.  The frame counter is the low 11 bits of
           R_USB_FM_NUMBER. */
        if (!(urb->transfer_flags & URB_ISO_ASAP) &&
            (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
          /* EP should not be enabled if we're waiting for start_frame */
          ASSERT((TxIsocEPList[epid].command &
                  IO_STATE(USB_EP_command, enable, yes)) == 0);

          isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid);
          TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);

          /* This urb is now active. */
          urb_priv->urb_state = STARTED;
          continue;
        }
      }
    }
  }

  DBFEXIT;
}
1030
1031void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg)
1032{
1033  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(reg->hcd);
1034
1035  DBFENTER;
1036  ASSERT(crisv10_hcd);
1037
1038/* irq_dbg("ctr_status_irq, controller status: %s\n",
1039      hcd_status_to_str(reg->r_usb_status));*/
1040  
1041  /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
1042     list for the corresponding epid? */
1043  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
1044    panic("USB controller got ourun.");
1045  }
1046  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
1047    
1048    /* Before, etrax_usb_do_intr_recover was called on this epid if it was
1049       an interrupt pipe. I don't see how re-enabling all EP descriptors
1050       will help if there was a programming error. */
1051    panic("USB controller got perror.");
1052  }
1053
1054  /* Keep track of USB Controller, if it's running or not */
1055  if(reg->r_usb_status & IO_STATE(R_USB_STATUS, running, yes)) {
1056    crisv10_hcd->running = 1;
1057  } else {
1058    crisv10_hcd->running = 0;
1059  }
1060  
1061  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
1062    /* We should never operate in device mode. */
1063    panic("USB controller in device mode.");
1064  }
1065
1066  /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
1067     using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
1068  set_bit(HCD_FLAG_SAW_IRQ, &reg->hcd->flags);
1069  
1070  DBFEXIT;
1071}
1072
1073
1074/******************************************************************/
1075/* Host Controller interface functions */
1076/******************************************************************/
1077
1078static inline void crisv10_ready_wait(void) {
1079  volatile int timeout = 10000;
1080  /* Check the busy bit of USB controller in Etrax */
1081  while((*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy)) &&
1082    (timeout-- > 0));
1083}
1084
/* reset host controller (hcd_driver callback).
   NOTE(review): the actual hardware reset command below is commented out,
   so this callback currently only logs and reports success -- confirm this
   is intentional. */
static int crisv10_hcd_reset(struct usb_hcd *hcd)
{
  DBFENTER;
  hcd_dbg(hcd, "reset\n");


  /* Reset the USB interface. */
  /*
  *R_USB_COMMAND =
    IO_STATE(R_USB_COMMAND, port_sel, nop) |
    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
    IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
  nop();
  */
  DBFEXIT;
  return 0;
}
1103
/* start host controller (hcd_driver callback).
   Orders the controller into host-run mode and tells usbcore the HC is
   running.  Always returns 0. */
static int crisv10_hcd_start(struct usb_hcd *hcd)
{
  DBFENTER;
  hcd_dbg(hcd, "start\n");

  /* Wait until the controller's command interface is not busy. */
  crisv10_ready_wait();

  /* Start processing of USB traffic (ctrl_cmd = host_run).  port_sel is
     nop, so no port is targeted -- NOTE(review): port_cmd is still set to
     reset here; presumably inert with port_sel = nop, confirm. */
  *R_USB_COMMAND =
    IO_STATE(R_USB_COMMAND, port_sel, nop) |
    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);

  nop();

  hcd->state = HC_STATE_RUNNING;

  DBFEXIT;
  return 0;
}
1125
/* stop host controller (hcd_driver callback).
   Delegates to crisv10_hcd_reset(), whose hardware reset command is
   currently commented out, so this is effectively a logging no-op. */
static void crisv10_hcd_stop(struct usb_hcd *hcd)
{
  DBFENTER;
  hcd_dbg(hcd, "stop\n");
  crisv10_hcd_reset(hcd);
  DBFEXIT;
}
1134
1135/* return the current frame number */
1136static int crisv10_hcd_get_frame(struct usb_hcd *hcd)
1137{
1138  DBFENTER;
1139  DBFEXIT;
1140  return (*R_USB_FM_NUMBER & 0x7ff);
1141}
1142
1143#ifdef CONFIG_USB_OTG
1144
/* Begin a port reset on behalf of OTG (hcd_driver callback, only built
   with CONFIG_USB_OTG).  Stubbed: no OTG-specific handling implemented. */
static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
{
  return 0; /* no-op for now */
}
1149
1150#endif /* CONFIG_USB_OTG */
1151
1152
1153/******************************************************************/
1154/* Root Hub functions */
1155/******************************************************************/
1156
/* root hub status: the root hub never reports local-power or over-current
   conditions, so both words stay zero. */
static const struct usb_hub_status rh_hub_status =
  {
    .wHubStatus = 0,
    .wHubChange = 0,
  };
1163
/* root hub descriptor template (USB hub class descriptor, type 0x29).
   bNbrPorts (byte 2) is overwritten with the configured port count when
   the descriptor is served in rh_control_request(). */
static const u8 rh_hub_descr[] =
  {
    0x09, /* bDescLength */
    0x29, /* bDescriptorType: hub descriptor */
    USB_ROOT_HUB_PORTS, /* bNbrPorts (patched at request time) */
    0x00, /* wHubCharacteristics, low byte */
    0x00, /* wHubCharacteristics, high byte */
    0x01, /* bPwrOn2pwrGood (in 2 ms units) */
    0x00, /* bHubContrCurrent */
    0x00, /* DeviceRemovable: all ports removable */
    0xff /* PortPwrCtrlMask */
  };
1177
1178/* Actual holder of root hub status*/
1179struct crisv10_rh rh;
1180
1181/* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
1182int rh_init(void) {
1183  int i;
1184  /* Reset port status flags */
1185  for (i = 0; i < USB_ROOT_HUB_PORTS; i++) {
1186    rh.wPortChange[i] = 0;
1187    rh.wPortStatusPrev[i] = 0;
1188  }
1189  return 0;
1190}
1191
1192#define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
1193              (1<<USB_PORT_FEAT_ENABLE)|\
1194              (1<<USB_PORT_FEAT_SUSPEND)|\
1195              (1<<USB_PORT_FEAT_RESET))
1196
1197/* Handle port status change interrupt (called from bottom part interrupt) */
1198void rh_port_status_change(__u16 port_reg[]) {
1199  int i;
1200  __u16 wChange;
1201
1202  for(i = 0; i < USB_ROOT_HUB_PORTS; i++) {
1203    /* Xor out changes since last read, masked for important flags */
1204    wChange = (port_reg[i] & RH_FEAT_MASK) ^ rh.wPortStatusPrev[i];
1205    /* Or changes together with (if any) saved changes */
1206    rh.wPortChange[i] |= wChange;
1207    /* Save new status */
1208    rh.wPortStatusPrev[i] = port_reg[i];
1209
1210    if(wChange) {
1211      rh_dbg("Interrupt port_status change port%d: %s Current-status:%s\n", i+1,
1212         port_status_to_str(wChange),
1213         port_status_to_str(port_reg[i]));
1214    }
1215  }
1216}
1217
1218/* Construct port status change bitmap for the root hub */
1219static int rh_status_data_request(struct usb_hcd *hcd, char *buf)
1220{
1221  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
1222  unsigned int i;
1223
1224  DBFENTER;
1225  /*
1226   * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
1227   * return bitmap indicating ports with status change
1228   */
1229  *buf = 0;
1230  spin_lock(&crisv10_hcd->lock);
1231  for (i = 1; i <= crisv10_hcd->num_ports; i++) {
1232    if (rh.wPortChange[map_port(i)]) {
1233      *buf |= (1 << i);
1234      rh_dbg("rh_status_data_request, change on port %d: %s Current Status: %s\n", i,
1235         port_status_to_str(rh.wPortChange[map_port(i)]),
1236         port_status_to_str(rh.wPortStatusPrev[map_port(i)]));
1237    }
1238  }
1239  spin_unlock(&crisv10_hcd->lock);
1240  DBFEXIT;
1241  return *buf == 0 ? 0 : 1;
1242}
1243
/* Handle a control request for the root hub (called from hcd_driver).
   Emulates the standard hub class requests for the on-chip root hub ports.
   Returns 0 on success or -EPIPE for invalid/unsupported requests. */
static int rh_control_request(struct usb_hcd *hcd,
                  u16 typeReq,
                  u16 wValue,
                  u16 wIndex,
                  char *buf,
                  u16 wLength) {

  struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  int retval = 0;
  int len;
  DBFENTER;

  switch (typeReq) {
  case GetHubDescriptor:
    rh_dbg("GetHubDescriptor\n");
    len = min_t(unsigned int, sizeof rh_hub_descr, wLength);
    memcpy(buf, rh_hub_descr, len);
    /* Patch the real port count (bNbrPorts) over the template value. */
    buf[2] = crisv10_hcd->num_ports;
    break;
  case GetHubStatus:
    rh_dbg("GetHubStatus\n");
    len = min_t(unsigned int, sizeof rh_hub_status, wLength);
    memcpy(buf, &rh_hub_status, len);
    break;
  case GetPortStatus:
    if (!wIndex || wIndex > crisv10_hcd->num_ports)
      goto error;
    rh_dbg("GetportStatus, port:%d change:%s status:%s\n", wIndex,
       port_status_to_str(rh.wPortChange[map_port(wIndex)]),
       port_status_to_str(rh.wPortStatusPrev[map_port(wIndex)]));
    /* Reply is wPortStatus followed by wPortChange, little-endian. */
    *(u16 *) buf = cpu_to_le16(rh.wPortStatusPrev[map_port(wIndex)]);
    *(u16 *) (buf + 2) = cpu_to_le16(rh.wPortChange[map_port(wIndex)]);
    break;
  case SetHubFeature:
    rh_dbg("SetHubFeature\n");
    /* fall through: set and clear of hub features share the handling */
  case ClearHubFeature:
    rh_dbg("ClearHubFeature\n");
    switch (wValue) {
    case C_HUB_OVER_CURRENT:
    case C_HUB_LOCAL_POWER:
      rh_warn("Not implemented hub request:%d \n", typeReq);
      /* not implemented */
      break;
    default:
      goto error;
    }
    break;
  case SetPortFeature:
    if (!wIndex || wIndex > crisv10_hcd->num_ports)
      goto error;
    if(rh_set_port_feature(map_port(wIndex), wValue))
      goto error;
    break;
  case ClearPortFeature:
    if (!wIndex || wIndex > crisv10_hcd->num_ports)
      goto error;
    if(rh_clear_port_feature(map_port(wIndex), wValue))
      goto error;
    break;
  default:
    rh_warn("Unknown hub request: %d\n", typeReq);
  error:
    retval = -EPIPE;
  }
  DBFEXIT;
  return retval;
}
1312
/* SetPortFeature handler for the root hub.  bPort is the zero-based
   physical port index, wFeature the USB_PORT_FEAT_* code.
   Returns 0 on success, -1 for unknown features. */
int rh_set_port_feature(__u8 bPort, __u16 wFeature) {
  __u8 bUsbCommand = 0;
  __u8 reset_cnt;
  switch(wFeature) {
  case USB_PORT_FEAT_RESET:
    rh_dbg("SetPortFeature: reset\n");

    /* An already-enabled port needs the elaborate disable/enable/reset
       workaround below; a disabled port can be reset directly (else
       branch). */
    if (rh.wPortStatusPrev[bPort] &
        IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes))
    {
      __u8 restart_controller = 0;

      if ( (rh.wPortStatusPrev[0] &
            IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
           (rh.wPortStatusPrev[1] &
            IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes)) )
      {
        /* Both ports are enabled. The USB controller will not change state. */
        restart_controller = 0;
      }
      else
      {
        /* Only one port is enabled. The USB controller will change state and
           must be restarted. */
        restart_controller = 1;
      }
      /*
        In ETRAX 100LX it's not possible to reset an enabled root hub port.
        The workaround is to disable and enable the port before resetting it.
        Disabling the port can, if both ports are disabled at once, cause the
        USB controller to change state to HOST_MODE state.
        The USB controller state transition causes a lot of unwanted
        interrupts that must be avoided.
        Disabling the USB controller status and port status interrupts before
        disabling/resetting the port stops these interrupts.

        These actions are performed:
        1. Disable USB controller status and port status interrupts.
        2. Disable the port
        3. Wait for the port to be disabled.
        4. Enable the port.
        5. Wait for the port to be enabled.
        6. Reset the port.
        7. Wait for the reset to end.
        8. Wait for the USB controller entering started state.
        9. Order the USB controller to running state.
        10. Wait for the USB controller reaching running state.
        11. Clear all interrupts generated during the disable/enable/reset
            procedure.
        12. Enable the USB controller status and port status interrupts.
      */

      /* 1. Disable USB controller status and USB port status interrupts. */
      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, ctl_status, clr);
      __asm__ __volatile__ (" nop");
      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, port_status, clr);
      __asm__ __volatile__ (" nop");

      {

        /* Since a root hub port reset shall be 50 ms and the ETRAX 100LX
           root hub port reset is 10 ms we must perform 5 port resets to
           achieve a proper root hub port reset. */
        for (reset_cnt = 0; reset_cnt < 5; reset_cnt ++)
        {
          rh_dbg("Disable Port %d\n", bPort + 1);

          /* 2. Disable the port*/
          if (bPort == 0)
          {
            *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
          }
          else
          {
            *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
          }

          /* 3. Wait for the port to be disabled (unbounded busy-wait on the
             hardware status register). */
          while ( (bPort == 0) ?
                  *R_USB_RH_PORT_STATUS_1 &
                    IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes) :
                  *R_USB_RH_PORT_STATUS_2 &
                    IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes) ) {}

          rh_dbg("Port %d is disabled. Enable it!\n", bPort + 1);

          /* 4. Enable the port. */
          if (bPort == 0)
          {
            *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
          }
          else
          {
            *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
          }

          /* 5. Wait for the port to be enabled again.
             NOTE(review): this polls the 'connected' bit, not 'enabled' --
             presumably connected implies the port came back; confirm. */
          while (!( (bPort == 0) ?
                    *R_USB_RH_PORT_STATUS_1 &
                      IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes) :
                    *R_USB_RH_PORT_STATUS_2 &
                      IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes) ) ) {}

          rh_dbg("Port %d is enabled.\n", bPort + 1);

          /* 6. Reset the port */
          crisv10_ready_wait();
          *R_USB_COMMAND =
            ( (bPort == 0) ?
              IO_STATE(R_USB_COMMAND, port_sel, port1):
              IO_STATE(R_USB_COMMAND, port_sel, port2) ) |
            IO_STATE(R_USB_COMMAND, port_cmd, reset) |
            IO_STATE(R_USB_COMMAND, busy, no) |
            IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
          rh_dbg("Port %d is resetting.\n", bPort + 1);

          /* 7. The USB specification says that we should wait for at least
             10ms for device recover */
          udelay(10500); /* 10,5ms blocking wait */

          crisv10_ready_wait();
        }
      }


      /* Check if the USB controller needs to be restarted. */
      if (restart_controller)
      {
        /* 8. Wait for the USB controller entering started state. */
        while (!(*R_USB_STATUS & IO_STATE(R_USB_STATUS, started, yes))) {}

        /* 9. Order the USB controller to running state. */
        crisv10_ready_wait();
        *R_USB_COMMAND =
          IO_STATE(R_USB_COMMAND, port_sel, nop) |
          IO_STATE(R_USB_COMMAND, port_cmd, reset) |
          IO_STATE(R_USB_COMMAND, busy, no) |
          IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);

        /* 10. Wait for the USB controller reaching running state. */
        while (!(*R_USB_STATUS & IO_STATE(R_USB_STATUS, running, yes))) {}
      }

      /* 11. Clear any controller or port status interrupts before enabling
             the interrupts. */
      {
        u16 dummy;

        /* Clear the port status interrupt of the reset port (the status
           registers are read-to-clear). */
        if (bPort == 0)
        {
          rh_dbg("Clearing port 1 interrupts\n");
          dummy = *R_USB_RH_PORT_STATUS_1;
        }
        else
        {
          rh_dbg("Clearing port 2 interrupts\n");
          dummy = *R_USB_RH_PORT_STATUS_2;
        }

        if (restart_controller)
        {
          /* The USB controller is restarted. Clear all interrupts. */
          rh_dbg("Clearing all interrupts\n");
          dummy = *R_USB_STATUS;
          dummy = *R_USB_RH_PORT_STATUS_1;
          dummy = *R_USB_RH_PORT_STATUS_2;
        }
      }

      /* 12. Enable USB controller status and USB port status interrupts. */
      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
      __asm__ __volatile__ (" nop");
      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, port_status, set);
      __asm__ __volatile__ (" nop");

    }
    else
    {
      /* Simple case: the port is not enabled, so it can be reset directly. */
      bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, reset);
      /* Select which port via the port_sel field */
      bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);

      /* Make sure the controller isn't busy. */
      crisv10_ready_wait();
      /* Send out the actual command to the USB controller */
      *R_USB_COMMAND = bUsbCommand;

      /* Wait a while for controller to first become started after port reset */
      udelay(12000); /* 12ms blocking wait */

      /* Make sure the controller isn't busy. */
      crisv10_ready_wait();

      /* If all enabled ports were disabled the host controller goes down into
         started mode, so we need to bring it back into the running state.
         (This is safe even if it's already in the running state.) */
      *R_USB_COMMAND =
        IO_STATE(R_USB_COMMAND, port_sel, nop) |
        IO_STATE(R_USB_COMMAND, port_cmd, reset) |
        IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
    }

    break;
  case USB_PORT_FEAT_SUSPEND:
    rh_dbg("SetPortFeature: suspend\n");
    bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, suspend);
    goto set;
    break;
  case USB_PORT_FEAT_POWER:
    rh_dbg("SetPortFeature: power\n");
    break;
  case USB_PORT_FEAT_C_CONNECTION:
    rh_dbg("SetPortFeature: c_connection\n");
    break;
  case USB_PORT_FEAT_C_RESET:
    rh_dbg("SetPortFeature: c_reset\n");
    break;
  case USB_PORT_FEAT_C_OVER_CURRENT:
    rh_dbg("SetPortFeature: c_over_current\n");
    break;

  set:
    /* Common tail for features issued as a plain port command. */
    /* Select which port via the port_sel field */
    bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);

    /* Make sure the controller isn't busy. */
    crisv10_ready_wait();
    /* Send out the actual command to the USB controller */
    *R_USB_COMMAND = bUsbCommand;
    break;
  default:
    rh_dbg("SetPortFeature: unknown feature\n");
    return -1;
  }
  return 0;
}
1551
1552int rh_clear_port_feature(__u8 bPort, __u16 wFeature) {
1553  switch(wFeature) {
1554  case USB_PORT_FEAT_ENABLE:
1555    rh_dbg("ClearPortFeature: enable\n");
1556    rh_disable_port(bPort);
1557    break;
1558  case USB_PORT_FEAT_SUSPEND:
1559    rh_dbg("ClearPortFeature: suspend\n");
1560    break;
1561  case USB_PORT_FEAT_POWER:
1562    rh_dbg("ClearPortFeature: power\n");
1563    break;
1564
1565  case USB_PORT_FEAT_C_ENABLE:
1566    rh_dbg("ClearPortFeature: c_enable\n");
1567    goto clear;
1568  case USB_PORT_FEAT_C_SUSPEND:
1569    rh_dbg("ClearPortFeature: c_suspend\n");
1570    goto clear;
1571  case USB_PORT_FEAT_C_CONNECTION:
1572    rh_dbg("ClearPortFeature: c_connection\n");
1573    goto clear;
1574  case USB_PORT_FEAT_C_OVER_CURRENT:
1575    rh_dbg("ClearPortFeature: c_over_current\n");
1576    goto clear;
1577  case USB_PORT_FEAT_C_RESET:
1578    rh_dbg("ClearPortFeature: c_reset\n");
1579    goto clear;
1580  clear:
1581    rh.wPortChange[bPort] &= ~(1 << (wFeature - 16));
1582    break;
1583  default:
1584    rh_dbg("ClearPortFeature: unknown feature\n");
1585    return -1;
1586  }
1587  return 0;
1588}
1589
1590
1591#ifdef CONFIG_PM
/* Handle a suspend request for the root hub (called from hcd_driver).
   Stubbed: suspend is not implemented; always reports success. */
static int rh_suspend_request(struct usb_hcd *hcd)
{
  return 0; /* no-op for now */
}
1597
/* Handle a resume request for the root hub (called from hcd_driver).
   Stubbed: resume is not implemented; always reports success. */
static int rh_resume_request(struct usb_hcd *hcd)
{
  return 0; /* no-op for now */
}
1603#endif /* CONFIG_PM */
1604
1605
1606
/* Wrapper function for workaround port disable registers in USB controller.
   Sets the per-port disable flag, busy-waits (bounded) until the
   interrupt-maintained shadow status stops reporting the port as enabled,
   then clears the disable flag again. */
static void rh_disable_port(unsigned int port) {
  volatile int timeout = 10000;
  volatile char* usb_portx_disable;
  switch(port) {
  case 0:
    usb_portx_disable = R_USB_PORT1_DISABLE;
    break;
  case 1:
    usb_portx_disable = R_USB_PORT2_DISABLE;
    break;
  default:
    /* Invalid port index */
    return;
  }
  /* Set disable flag in special register.  NOTE(review): the PORT1 bit
     layout is written to whichever register was selected -- presumably
     both disable registers share the same layout; confirm. */
  *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
  /* Wait until not enabled anymore.  wPortStatusPrev is updated by the
     port status interrupt path, hence the bounded spin rather than a
     direct hardware poll. */
  while((rh.wPortStatusPrev[port] &
    IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
    (timeout-- > 0));

  /* clear disable flag in special register */
  *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
  rh_info("Physical port %d disabled\n", port+1);
}
1633
1634
1635/******************************************************************/
1636/* Transfer Controller (TC) functions */
1637/******************************************************************/
1638
/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
   dynamically?
   To adjust it dynamically we would have to get an interrupt when we reach
   the end of the rx descriptor list, or when we get close to the end, and
   then allocate more descriptors. */
#define NBR_OF_RX_DESC 512
#define RX_DESC_BUF_SIZE 1024
/* Total DMA IN buffer: NBR_OF_RX_DESC descriptors of RX_DESC_BUF_SIZE each. */
#define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)


/* Local variables for Transfer Controller */
/* --------------------------------------- */

/* This is a circular (double-linked) list of the active urbs for each epid.
   The head is never removed, and new urbs are linked onto the list as
   urb_entry_t elements. Don't reference urb_list directly; use the wrapper
   functions instead (which includes spin_locks) */
static struct list_head urb_list[NBR_OF_EPIDS];

/* Read about the need and usage of this lock in submit_ctrl_urb. */
/* Lock for URB lists for each EPID */
static spinlock_t urb_list_lock;

/* Lock for EPID array register (R_USB_EPT_x) in Etrax */
static spinlock_t etrax_epid_lock;

/* Lock for dma8 sub0 handling */
static spinlock_t etrax_dma8_sub0_lock;

/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
   Since RX_DESC_BUF_SIZE is 1024 is a multiple of 32, all rx buffers will be
   cache aligned. */
static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
static volatile struct USB_IN_Desc RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));

/* Pointers into RxDescList. */
static volatile struct USB_IN_Desc *myNextRxDesc;
static volatile struct USB_IN_Desc *myLastRxDesc;

/* A zout transfer makes a memory access at the address of its buf pointer,
   which means that setting this buf pointer to 0 will cause an access to the
   flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes
   (depending on DMA burst size) transfer.
   Instead, we set it to 1, and point it to this buffer. */
static int zout_buffer[4] __attribute__ ((aligned (4)));

/* Cache for allocating new EP and SB descriptors. */
static struct kmem_cache *usb_desc_cache;

/* Cache for the data allocated in the isoc descr top half. */
static struct kmem_cache *isoc_compl_cache;

/* Cache for the data allocated when delayed finishing of URBs */
static struct kmem_cache *later_data_cache;


/* Counter to keep track of how many Isoc EP we have set up. Used to enable
   and disable iso_eof interrupt. We only need these interrupts when we have
   Isoc data endpoints (consumes CPU cycles).
   FIXME: This could be more fine granular, so this interrupt is only enabled
   when we have an In Isoc URB not URB_ISO_ASAP flagged queued. */
static int isoc_epid_counter;
1701
/* Protecting wrapper functions for R_USB_EPT_x */
/* -------------------------------------------- */

/* Write the full R_USB_EPT_DATA word for endpoint table entry 'index'.
   All R_USB_EPT_x accesses go through the shared index register, so the
   select + access pair is serialized with etrax_epid_lock. */
static inline void etrax_epid_set(__u8 index, __u32 data) {
  unsigned long flags;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  /* The nop presumably lets the index write take effect before the data
     register access -- hardware timing requirement, confirm. */
  nop();
  *R_USB_EPT_DATA = data;
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
}
1712
/* Clear the error counters and error code of endpoint table entry 'index',
   leaving every other field untouched (read-modify-write under the shared
   epid lock). */
static inline void etrax_epid_clear_error(__u8 index) {
  unsigned long flags;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  nop();
  /* Zero error_count_in, error_count_out and error_code in one masked
     write-back. */
  *R_USB_EPT_DATA &=
    ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
      IO_MASK(R_USB_EPT_DATA, error_count_out) |
      IO_MASK(R_USB_EPT_DATA, error_code));
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
}
1724
/* Set the data toggle bit for endpoint table entry 'index'.  A non-zero
   'dirout' selects the OUT toggle field (t_out), otherwise the IN toggle
   (t_in).  Serialized with all other R_USB_EPT_x accesses. */
static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
                                             __u8 toggle) {
  unsigned long flags;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  nop();
  if(dirout) {
    /* Clear, then set, the OUT toggle field. */
    *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
    *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
  } else {
    /* Clear, then set, the IN toggle field. */
    *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
    *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
  }
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
}
1740
1741static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout) {
1742  unsigned long flags;
1743  __u8 toggle;
1744  spin_lock_irqsave(&etrax_epid_lock, flags);
1745  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
1746  nop();
1747  if (dirout) {
1748    toggle = IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
1749  } else {
1750    toggle = IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
1751  }
1752  spin_unlock_irqrestore(&etrax_epid_lock, flags);
1753  return toggle;
1754}
1755
1756
/* Return the full R_USB_EPT_DATA word for endpoint table entry 'index',
   serialized with all other R_USB_EPT_x accesses. */
static inline __u32 etrax_epid_get(__u8 index) {
  unsigned long flags;
  __u32 data;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  nop();
  data = *R_USB_EPT_DATA;
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
  return data;
}
1767
1768
1769
1770
1771/* Main functions for Transfer Controller */
1772/* -------------------------------------- */
1773
1774/* Init structs, memories and lists used by Transfer Controller */
1775int tc_init(struct usb_hcd *hcd) {
1776  int i;
1777  /* Clear software state info for all epids */
1778  memset(epid_state, 0, sizeof(struct etrax_epid) * NBR_OF_EPIDS);
1779
1780  /* Set Invalid and Dummy as being in use and disabled */
1781  epid_state[INVALID_EPID].inuse = 1;
1782  epid_state[DUMMY_EPID].inuse = 1;
1783  epid_state[INVALID_EPID].disabled = 1;
1784  epid_state[DUMMY_EPID].disabled = 1;
1785
1786  /* Clear counter for how many Isoc epids we have sat up */
1787  isoc_epid_counter = 0;
1788
1789  /* Initialize the urb list by initiating a head for each list.
1790     Also reset list hodling active URB for each epid */
1791  for (i = 0; i < NBR_OF_EPIDS; i++) {
1792    INIT_LIST_HEAD(&urb_list[i]);
1793    activeUrbList[i] = NULL;
1794  }
1795
1796  /* Init lock for URB lists */
1797  spin_lock_init(&urb_list_lock);
1798  /* Init lock for Etrax R_USB_EPT register */
1799  spin_lock_init(&etrax_epid_lock);
1800  /* Init lock for Etrax dma8 sub0 handling */
1801  spin_lock_init(&etrax_dma8_sub0_lock);
1802
1803  /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
1804
1805  /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
1806     allocate SB descriptors from this cache. This is ok since
1807     sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
1808  usb_desc_cache = kmem_cache_create("usb_desc_cache",
1809                     sizeof(struct USB_EP_Desc), 0,
1810                     SLAB_HWCACHE_ALIGN, 0);
1811  if(usb_desc_cache == NULL) {
1812    return -ENOMEM;
1813  }
1814
1815  /* Create slab cache for speedy allocation of memory for isoc bottom-half
1816     interrupt handling */
1817  isoc_compl_cache =
1818    kmem_cache_create("isoc_compl_cache",
1819              sizeof(struct crisv10_isoc_complete_data),
1820              0, SLAB_HWCACHE_ALIGN, 0);
1821  if(isoc_compl_cache == NULL) {
1822    return -ENOMEM;
1823  }
1824
1825  /* Create slab cache for speedy allocation of memory for later URB finish
1826     struct */
1827  later_data_cache =
1828    kmem_cache_create("later_data_cache",
1829              sizeof(struct urb_later_data),
1830              0, SLAB_HWCACHE_ALIGN, 0);
1831  if(later_data_cache == NULL) {
1832    return -ENOMEM;
1833  }
1834
1835
1836  /* Initiate the bulk start timer. */
1837  init_timer(&bulk_start_timer);
1838  bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
1839  bulk_start_timer.function = tc_bulk_start_timer_func;
1840  add_timer(&bulk_start_timer);
1841
1842
1843  /* Initiate the bulk eot timer. */
1844  init_timer(&bulk_eot_timer);
1845  bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
1846  bulk_eot_timer.function = tc_bulk_eot_timer_func;
1847  bulk_eot_timer.data = (unsigned long)hcd;
1848  add_timer(&bulk_eot_timer);
1849
1850  return 0;
1851}
1852
1853/* Uninitialize all resources used by Transfer Controller */
1854void tc_destroy(void) {
1855
1856  /* Destroy all slab cache */
1857  kmem_cache_destroy(usb_desc_cache);
1858  kmem_cache_destroy(isoc_compl_cache);
1859  kmem_cache_destroy(later_data_cache);
1860
1861  /* Remove timers */
1862  del_timer(&bulk_start_timer);
1863  del_timer(&bulk_eot_timer);
1864}
1865
/* Restart DMA channel 8 sub-channel 0 (the Bulk EP list, see the
   R_DMA_CH8_SUB0_EP busy-wait in tc_sync_finish_epid) if it has stopped.
   Before restarting, the EP pointer is advanced past any descriptors
   belonging to DUMMY_EPID so the DMA resumes at real traffic.
   Takes etrax_dma8_sub0_lock; safe to call from any context. */
static void restart_dma8_sub0(void) {
  unsigned long flags;
  spin_lock_irqsave(&etrax_dma8_sub0_lock, flags);
  /* Verify that the dma is not running */
  if ((*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd)) == 0) {
    struct USB_EP_Desc *ep = (struct USB_EP_Desc *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
    /* NOTE(review): assumes the circular EP list always contains at least
       one non-dummy descriptor; otherwise this loop would never end. */
    while (DUMMY_EPID == IO_EXTRACT(USB_EP_command, epid, ep->command)) {
      ep = (struct USB_EP_Desc *)phys_to_virt(ep->next);
    }
    /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID. */
    *R_DMA_CH8_SUB0_EP = virt_to_phys(ep);
    /* Restart the DMA */
    *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
  }
  spin_unlock_irqrestore(&etrax_dma8_sub0_lock, flags);
}
1882
1883/* queue an URB with the transfer controller (called from hcd_driver) */
/* queue an URB with the transfer controller (called from hcd_driver).
   Validates the submission, allocates/looks up an epid for the URB's
   endpoint, creates HC-private data and SB descriptor chains, claims
   periodic bandwidth for Intr/Isoc pipes, and finally (with interrupts
   off) links the URB into the per-epid queue and kicks DMA processing.
   Returns 0 on success or a negative errno:
     -ENOENT   controller not running, or endpoint flagged disabled
     -EMSGSIZE In-Isoc URB buffer smaller than the pipe's maxpacket
     -ENOMEM   epid setup or private-data allocation failed
     -ENXIO    URB is already active or already queued (resubmission)
     -ENOSPC   insufficient periodic bandwidth
   NOTE(review): some early error returns (no-dev, isoc-size, resubmission,
   disabled) skip the DBFEXIT trace that other exits perform. */
static int tc_urb_enqueue(struct usb_hcd *hcd,
              struct urb *urb,
              gfp_t mem_flags) {
  int epid;
  int retval;
  int bustime = 0;
  int maxpacket;
  unsigned long flags;
  struct crisv10_urb_priv *urb_priv;
  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  DBFENTER;

  if(!(crisv10_hcd->running)) {
    /* The USB Controller is not running, probably because no device is
       attached. No idea to enqueue URBs then */
    tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
        (unsigned int)urb);
    return -ENOENT;
  }

  maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
  /* Special case check for In Isoc transfers. Specification states that each
     In Isoc transfer consists of one packet and therefore it should fit into
     the transfer-buffer of an URB.
     We do the check here to be sure (an invalid scenario can be produced with
     parameters to the usbtest suite) */
  if(usb_pipeisoc(urb->pipe) && usb_pipein(urb->pipe) &&
     (urb->transfer_buffer_length < maxpacket)) {
    tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb->transfer_buffer_length, maxpacket);
    return -EMSGSIZE;
  }

  /* Check if there is a epid for URBs destination, if not this function
     set up one. */
  epid = tc_setup_epid(urb->ep, urb, mem_flags);
  if (epid < 0) {
    /* NOTE(review): the actual error from tc_setup_epid is discarded and
       -ENOMEM is reported regardless of the cause. */
    tc_err("Failed setup epid:%d for URB:0x%x\n", epid, (unsigned int)urb);
    DBFEXIT;
    return -ENOMEM;
  }

  if(urb == activeUrbList[epid]) {
    tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb);
    return -ENXIO;
  }

  if(urb_list_entry(urb, epid)) {
    tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb);
    return -ENXIO;
  }

  /* If we actively have flaged endpoint as disabled then refuse submition */
  if(epid_state[epid].disabled) {
    return -ENOENT;
  }

  /* Allocate and init HC-private data for URB */
  if(urb_priv_create(hcd, urb, epid, mem_flags) != 0) {
    DBFEXIT;
    return -ENOMEM;
  }
  urb_priv = urb->hcpriv;

  /* Check if there is enough bandwidth for periodic transfer */
  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) {
    /* only check (and later claim) if not already claimed */
    if (urb_priv->bandwidth == 0) {
      bustime = crisv10_usb_check_bandwidth(urb->dev, urb);
      if (bustime < 0) {
        tc_err("Not enough periodic bandwidth\n");
        urb_priv_free(hcd, urb);
        DBFEXIT;
        return -ENOSPC;
      }
    }
  }

  tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
     (unsigned int)urb, urb_priv->urb_num, epid,
     pipe_to_str(urb->pipe), urb->transfer_buffer_length);

  /* Create and link SBs required for this URB */
  retval = create_sb_for_urb(urb, mem_flags);
  if(retval != 0) {
    tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb,
       urb_priv->urb_num);
    urb_priv_free(hcd, urb);
    DBFEXIT;
    return retval;
  }

  /* Init intr EP pool if this URB is a INTR transfer. This pool is later
     used when inserting EPs in the TxIntrEPList. We do the alloc here
     so we can't run out of memory later */
  if(usb_pipeint(urb->pipe)) {
    retval = init_intr_urb(urb, mem_flags);
    if(retval != 0) {
      tc_warn("Failed to init Intr URB\n");
      urb_priv_free(hcd, urb);
      DBFEXIT;
      return retval;
    }
  }

  /* Disable other access when inserting USB */
  local_irq_save(flags);

  /* Claim bandwidth, if needed (bustime is non-zero only when the check
     above ran and succeeded) */
  if(bustime) {
    crisv10_usb_claim_bandwidth(urb->dev,
                                urb,
                                bustime,
                                (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS));
  }
  
  /* Add URB to EP queue */
  urb_list_add(urb, epid, mem_flags);

  if(usb_pipeisoc(urb->pipe)) {
    /* Special processing of Isoc URBs. */
    tc_dma_process_isoc_urb(urb);
  } else {
    /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
    tc_dma_process_queue(epid);
  }

  local_irq_restore(flags);

  DBFEXIT;
  return 0;
}
2015
2016/* remove an URB from the transfer controller queues (called from hcd_driver)*/
/* remove an URB from the transfer controller queues (called from hcd_driver).
   If the URB is merely queued (not active) for Bulk/Ctrl/Intr it is
   finished immediately; otherwise the EP is disabled per pipe type, the
   URB is marked UNLINK, and the actual completion is deferred to
   tc_finish_urb_later(). Always returns 0.
   Runs with local interrupts disabled for the whole body, since a
   descriptor interrupt could otherwise modify the SB list underneath us. */
static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) {
  struct crisv10_urb_priv *urb_priv;
  unsigned long flags;
  int epid;

  DBFENTER;
  /* Disable interrupts here since a descriptor interrupt for the isoc epid
     will modify the sb list. This could possibly be done more granular, but
     urb_dequeue should not be used frequently anyway.
  */
  local_irq_save(flags);

  urb->status = status;
  urb_priv = urb->hcpriv;

  if (!urb_priv) {
    /* This happens if a device driver calls unlink on an urb that
       was never submitted (lazy driver) or if the urb was completed
       while dequeue was being called. */
    tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb);
    local_irq_restore(flags);
    return 0;
  }
  epid = urb_priv->epid;

  tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
      (urb == activeUrbList[epid]) ? "active" : "queued",
      (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
      str_type(urb->pipe), epid, urb->status,
      (urb_priv->later_data) ? "later-sched" : "");

  /* For Bulk, Ctrl and Intr are only one URB active at a time. So any URB
     that isn't active can be dequeued by just removing it from the queue */
  if(usb_pipebulk(urb->pipe) || usb_pipecontrol(urb->pipe) ||
     usb_pipeint(urb->pipe)) {

    /* Check if URB haven't gone further than the queue */
    if(urb != activeUrbList[epid]) {
      ASSERT(urb_priv->later_data == NULL);
      tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
          " (not active)\n", (unsigned int)urb, urb_priv->urb_num,
          str_dir(urb->pipe), str_type(urb->pipe), epid);
      
      /* Finish the URB with error status from USB core */
      tc_finish_urb(hcd, urb, urb->status);
      local_irq_restore(flags);
      return 0;
    }
  }

  /* Set URB status to Unlink for handling when interrupt comes. */
  urb_priv->urb_state = UNLINK;

  /* Differentiate dequeing of Bulk and Ctrl from Isoc and Intr */
  switch(usb_pipetype(urb->pipe)) {
  case PIPE_BULK:
    /* Check if EP still is enabled */
    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
    }
    /* Kicking dummy list out of the party. */
    TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
    break;
  case PIPE_CONTROL:
    /* Check if EP still is enabled */
    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
    }
    break;
  case PIPE_ISOCHRONOUS:
    /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
       finish_isoc_urb(). Because there might the case when URB is dequeued
       but there are other valid URBs waiting */

    /* Check if In Isoc EP still is enabled */
    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
    }
    break;
  case PIPE_INTERRUPT:
    /* Special care is taken for interrupt URBs. EPs are unlinked in
       tc_finish_urb */
    break;
  default:
    break;
  }

  /* Asynchronous unlink, finish the URB later from scheduled or other
     event (data finished, error) */
  tc_finish_urb_later(hcd, urb, urb->status);

  local_irq_restore(flags);
  DBFEXIT;
  return 0;
}
2115
2116
/* Synchronously retire all traffic for an epid: disable its hardware EP
   descriptor(s) per pipe type (busy-waiting until the DMA has moved off
   the descriptor where applicable), then finish the active URB and every
   queued URB with -ENOENT. The epid is temporarily flagged disabled so
   enqueue() rejects new submissions and finish_urb() doesn't restart the
   queue while we drain it. Called from tc_endpoint_disable(). */
static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
  volatile int timeout = 10000;
  struct urb* urb;
  struct crisv10_urb_priv* urb_priv;
  unsigned long flags;
  
  volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
  volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
  volatile struct USB_EP_Desc *next_ep; /* The EP after current. */

  int type = epid_state[epid].type;

  /* Setting this flag will cause enqueue() to return -ENOENT for new
     submitions on this endpoint and finish_urb() wont process queue further */
  epid_state[epid].disabled = 1;

  switch(type) {
  case PIPE_BULK:
    /* Check if EP still is enabled */
    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);

      /* Do busy-wait until DMA not using this EP descriptor anymore */
      while((*R_DMA_CH8_SUB0_EP ==
         virt_to_phys(&TxBulkEPList[epid])) &&
        (timeout-- > 0));

    }
    break;

  case PIPE_CONTROL:
    /* Check if EP still is enabled */
    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);

      /* Do busy-wait until DMA not using this EP descriptor anymore */
      while((*R_DMA_CH8_SUB1_EP ==
         virt_to_phys(&TxCtrlEPList[epid])) &&
        (timeout-- > 0));
    }
    break;

  case PIPE_INTERRUPT:
    local_irq_save(flags);
    /* Disable all Intr EPs belonging to epid. Intr traffic may have
       several EP descriptors linked into the shared TxIntrEPList, so we
       walk the whole circular list and disable every match. */
    first_ep = &TxIntrEPList[0];
    curr_ep = first_ep;
    do {
      next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
      if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
    /* Disable EP */
    next_ep->command &= ~IO_MASK(USB_EP_command, enable);
      }
      curr_ep = phys_to_virt(curr_ep->next);
    } while (curr_ep != first_ep);

    local_irq_restore(flags);
    break;

  case PIPE_ISOCHRONOUS:
    /* Check if EP still is enabled */
    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
      /* The EP was enabled, disable it. */
      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
      
      while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
        (timeout-- > 0));
    }
    break;
  }

  local_irq_save(flags);

  /* Finish if there is active URB for this endpoint */
  if(activeUrbList[epid] != NULL) {
    urb = activeUrbList[epid];
    urb_priv = urb->hcpriv;
    ASSERT(urb_priv);
    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
        (urb == activeUrbList[epid]) ? "active" : "queued",
        (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
        str_type(urb->pipe), epid, urb->status,
        (urb_priv->later_data) ? "later-sched" : "");

    tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
    ASSERT(activeUrbList[epid] == NULL);
  }

  /* Finish any queued URBs for this endpoint. There won't be any resubmitions
     because epid_disabled causes enqueue() to fail for this endpoint */
  while((urb = urb_list_first(epid)) != NULL) {
    urb_priv = urb->hcpriv;
    ASSERT(urb_priv);

    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
        (urb == activeUrbList[epid]) ? "active" : "queued",
        (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
        str_type(urb->pipe), epid, urb->status,
        (urb_priv->later_data) ? "later-sched" : "");

    tc_finish_urb(hcd, urb, -ENOENT);
  }
  /* Re-enable the epid for a possible new endpoint using the same epid */
  epid_state[epid].disabled = 0;
  local_irq_restore(flags);
}
2227
2228/* free resources associated with an endpoint (called from hcd_driver) */
2229static void tc_endpoint_disable(struct usb_hcd *hcd,
2230                struct usb_host_endpoint *ep) {
2231  DBFENTER;
2232  /* Only free epid if it has been allocated. We get two endpoint_disable
2233     requests for ctrl endpoints so ignore the second one */
2234  if(ep->hcpriv != NULL) {
2235    struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2236    int epid = ep_priv->epid;
2237    tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
2238       (unsigned int)ep, (unsigned int)ep->hcpriv,
2239       endpoint_to_str(&(ep->desc)), epid);
2240
2241    tc_sync_finish_epid(hcd, epid);
2242
2243    ASSERT(activeUrbList[epid] == NULL);
2244    ASSERT(list_empty(&urb_list[epid]));
2245
2246    tc_free_epid(ep);
2247  } else {
2248    tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
2249       (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
2250  }
2251  DBFEXIT;
2252}
2253
/* Deferred-work handler that performs the finish scheduled by
   tc_finish_urb_later(). The uld->urb pointer is NULLed by tc_finish_urb()
   if the URB completes by other means first, and the urb_num sequence
   check guards against the URB memory having been reused for a newer
   submission. Frees the urb_later_data back to its cache when done. */
static void tc_finish_urb_later_proc(struct work_struct* work) {
  unsigned long flags;
  struct urb_later_data* uld;

  local_irq_save(flags);
  uld = container_of(work, struct urb_later_data, dws.work);
  if(uld->urb == NULL) {
    late_dbg("Later finish of URB = NULL (allready finished)\n");
  } else {
    struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
    ASSERT(urb_priv);
    if(urb_priv->urb_num == uld->urb_num) {
      late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
           urb_priv->urb_num);
      if(uld->status != uld->urb->status) {
    errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
          uld->urb->status, uld->status);
      }
      /* Sanity: this work item must be the one the URB points back at */
      if(uld != urb_priv->later_data) {
    panic("Scheduled uld not same as URBs uld\n");
      }
      tc_finish_urb(uld->hcd, uld->urb, uld->status);
    } else {
      /* urb_num mismatch: the URB was completed and resubmitted since
         this work was scheduled, so this finish is stale */
      late_warn("Ignoring later finish of URB:0x%x[%d]"
        ", urb_num doesn't match current URB:0x%x[%d]",
        (unsigned int)(uld->urb), uld->urb_num,
        (unsigned int)(uld->urb), urb_priv->urb_num);
    }
  }
  local_irq_restore(flags);
  kmem_cache_free(later_data_cache, uld);
}
2286
2287static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
2288                int status) {
2289  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
2290  struct urb_later_data* uld;
2291
2292  ASSERT(urb_priv);
2293
2294  if(urb_priv->later_data != NULL) {
2295    /* Later-finish allready scheduled for this URB, just update status to
2296       return when finishing later */
2297    errno_dbg("Later-finish schedule change URB status:%d with new"
2298          " status:%d\n", urb_priv->later_data->status, status);
2299    
2300    urb_priv->later_data->status = status;
2301    return;
2302  }
2303
2304  uld = kmem_cache_alloc(later_data_cache, GFP_ATOMIC);
2305  ASSERT(uld);
2306
2307  uld->hcd = hcd;
2308  uld->urb = urb;
2309  uld->urb_num = urb_priv->urb_num;
2310  uld->status = status;
2311
2312  INIT_DELAYED_WORK(&uld->dws, tc_finish_urb_later_proc);
2313  urb_priv->later_data = uld;
2314
2315  /* Schedule the finishing of the URB to happen later */
2316  schedule_delayed_work(&uld->dws, LATER_TIMER_DELAY);
2317}
2318
2319static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
2320                   int status);
2321
/* Complete an URB and hand it back to the USB core.
   Responsibilities, in order: detach the URB from queue/active state,
   cancel any pending later-finish, fix up actual_length, map the finish
   status into urb->status (SHORT_NOT_OK and UNLINK handling), delegate
   Isoc URBs to tc_finish_isoc_urb(), unlink Intr EPs, release periodic
   bandwidth, save the hardware data-toggle back into the USB core state,
   tear down Bulk/Ctrl EP sub/next pointers, free HC-private data, give
   the URB back, and restart queue processing for the epid.
   Must be called with interrupts disabled (callers hold local_irq_save).
   NOTE(review): the local 'urb_num' shadows the file-scope sequence
   counter of the same name; here it is just this URB's number, cached
   because urb_priv is freed before the final log statements. */
static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  int epid;
  char toggle;
  int urb_num;

  DBFENTER;
  ASSERT(urb_priv != NULL);
  epid = urb_priv->epid;
  urb_num = urb_priv->urb_num;

  if(urb != activeUrbList[epid]) {
    if(urb_list_entry(urb, epid)) {
      /* Remove this URB from the list. Only happens when URB are finished
     before having been processed (dequeing) */
      urb_list_del(urb, epid);
    } else {
      tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
          " epid:%d\n", (unsigned int)urb, urb_num, epid);
    }
  }

  /* Cancel any pending later-finish of this URB; the worker checks for
     urb == NULL and will only free its urb_later_data */
  if(urb_priv->later_data) {
    urb_priv->later_data->urb = NULL;
  }

  /* For an IN pipe, we always set the actual length, regardless of whether
     there was an error or not (which means the device driver can use the data
     if it wants to). */
  if(usb_pipein(urb->pipe)) {
    urb->actual_length = urb_priv->rx_offset;
  } else {
    /* Set actual_length for OUT urbs also; the USB mass storage driver seems
       to want that. */
    if (status == 0 && urb->status == -EINPROGRESS) {
      urb->actual_length = urb->transfer_buffer_length;
    } else {
      /* We wouldn't know of any partial writes if there was an error. */
      urb->actual_length = 0;
    }
  }


  /* URB status mangling */
  if(urb->status == -EINPROGRESS) {
    /* The USB core hasn't changed the status, let's set our finish status */
    urb->status = status;

    if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
    usb_pipein(urb->pipe) &&
    (urb->actual_length != urb->transfer_buffer_length)) {
      /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
     max length) is to be treated as an error. */
      errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
        " data:%d\n", (unsigned int)urb, urb_num,
        urb->actual_length);
      urb->status = -EREMOTEIO;
    }

    if(urb_priv->urb_state == UNLINK) {
      /* URB has been requested to be unlinked asynchronously */
      urb->status = -ECONNRESET;
      errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
        (unsigned int)urb, urb_num, urb->status);
    }
  } else {
    /* The USB Core wants to signal some error via the URB, pass it through */
  }

  /* use completely different finish function for Isoc URBs */
  if(usb_pipeisoc(urb->pipe)) {
    /* NOTE(review): this early return skips the DBFEXIT trace below */
    tc_finish_isoc_urb(hcd, urb, status);
    return;
  }

  /* Do special unlinking of EPs for Intr traffic */
  if(usb_pipeint(urb->pipe)) {
    tc_dma_unlink_intr_urb(urb);
  }

  /* Release allocated bandwidth for periodic transfers */
  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
    crisv10_usb_release_bandwidth(hcd,
                                  usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS,
                                  urb_priv->bandwidth);

  /* This URB is active on EP */
  if(urb == activeUrbList[epid]) {
    /* We need to fiddle with the toggle bits because the hardware doesn't do
       it for us. */
    toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
    usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
          usb_pipeout(urb->pipe), toggle);

    /* Checks for Ctrl and Bulk EPs */
    switch(usb_pipetype(urb->pipe)) {
    case PIPE_BULK:
      /* Check so Bulk EP realy is disabled before finishing active URB */
      ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
         IO_STATE(USB_EP_command, enable, no));
      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
     process Bulk EP. */
      TxBulkEPList[epid].sub = 0;
      /* No need to wait for the DMA before changing the next pointer.
     The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
     the last one (INVALID_EPID) for actual traffic. */
      TxBulkEPList[epid].next =
    virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
      break;
    case PIPE_CONTROL:
      /* Check so Ctrl EP realy is disabled before finishing active URB */
      ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
         IO_STATE(USB_EP_command, enable, no));
      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
     process Ctrl EP. */
      TxCtrlEPList[epid].sub = 0;
      break;
    }
  }

  /* Free HC-private URB data*/
  urb_priv_free(hcd, urb);

  if(urb->status) {
    errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
          (unsigned int)urb, urb_num, str_dir(urb->pipe),
          str_type(urb->pipe), urb->actual_length, urb->status);
  } else {
    tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
       (unsigned int)urb, urb_num, str_dir(urb->pipe),
       str_type(urb->pipe), urb->actual_length, urb->status);
  }

  /* If we just finished an active URB, clear active pointer. */
  if (urb == activeUrbList[epid]) {
    /* Make URB not active on EP anymore */
    activeUrbList[epid] = NULL;

    if(urb->status == 0) {
      /* URB finished sucessfully, process queue to see if there are any more
     URBs waiting before we call completion function.*/
      if(crisv10_hcd->running) {
    /* Only process queue if USB controller is running */
    tc_dma_process_queue(epid);
      } else {
    tc_warn("No processing of queue for epid:%d, USB Controller not"
        " running\n", epid);
      }
    }
  }

  /* Hand the URB from HCD to its USB device driver, using its completion
      functions */
  usb_hcd_giveback_urb (hcd, urb, status);

  /* Check the queue once more if the URB returned with error, because we
     didn't do it before the completion function because the specification
     states that the queue should not restart until all it's unlinked
     URBs have been fully retired, with the completion functions run */
  if(crisv10_hcd->running) {
    /* Only process queue if USB controller is running */
    tc_dma_process_queue(epid);
  } else {
    tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
        epid);
  }

  DBFEXIT;
}
2493
/* Isoc-specific URB completion (called only from tc_finish_urb()).
   Marks unprocessed iso frame descriptors with -EPROTO, then either:
   - if the URB was active: promotes the next queued Isoc URB to active
     (its SBs are already in the DMA lists), or, when none is queued,
     disables the Isoc EP (busy-waiting for the DMA to leave it) and
     clears its sub pointer; or
   - if the URB was only queued: for Out Isoc, surgically unlinks this
     URB's SB span (first_sb..last_sb) from the EP's SB chain.
   Finally releases bandwidth, frees HC-private data and gives the URB
   back to the USB core. Callers hold interrupts disabled. */
static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
                   int status) {
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  int epid, i;
  volatile int timeout = 10000;
  int bandwidth = 0;

  ASSERT(urb_priv);
  epid = urb_priv->epid;

  ASSERT(usb_pipeisoc(urb->pipe));

  /* Set that all isoc packets have status and length set before
     completing the urb. */
  for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
    urb->iso_frame_desc[i].actual_length = 0;
    urb->iso_frame_desc[i].status = -EPROTO;
  }

  /* Check if the URB is currently active (done or error) */
  if(urb == activeUrbList[epid]) {
    /* Check if there are another In Isoc URB queued for this epid */
    if (!list_empty(&urb_list[epid])&& !epid_state[epid].disabled) {
      /* Move it from queue to active and mark it started so Isoc transfers
     won't be interrupted.
     All Isoc URBs data transfers are already added to DMA lists so we
     don't have to insert anything in DMA lists here. */
      activeUrbList[epid] = urb_list_first(epid);
      ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
    STARTED;
      urb_list_del(activeUrbList[epid], epid);

      if(urb->status) {
    errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
          " status:%d, new waiting URB:0x%x[%d]\n",
          (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
          str_type(urb->pipe), urb_priv->isoc_packet_counter,
          urb->number_of_packets, urb->status,
          (unsigned int)activeUrbList[epid],
          ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
      }

    } else { /* No other URB queued for this epid */
      if(urb->status) {
    errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
          " status:%d, no new URB waiting\n",
          (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
          str_type(urb->pipe), urb_priv->isoc_packet_counter,
          urb->number_of_packets, urb->status);
      }

      /* Check if EP is still enabled, then shut it down. */
      if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
    isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);

    /* Should only occur for In Isoc EPs where SB isn't consumed. */
    ASSERT(usb_pipein(urb->pipe));

    /* Disable it and wait for it to stop */
    TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
    
    /* Ah, the luxury of busy-wait. */
    while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
          (timeout-- > 0));
      }

      /* Unlink SB to say that epid is finished. */
      TxIsocEPList[epid].sub = 0;
      TxIsocEPList[epid].hw_len = 0;

      /* No URB active for EP anymore */
      activeUrbList[epid] = NULL;
    }
  } else { /* Finishing of not active URB (queued up with SBs thought) */
    isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
          " SB queued but not active\n",
          (unsigned int)urb, str_dir(urb->pipe),
          urb_priv->isoc_packet_counter, urb->number_of_packets,
          urb->status);
    if(usb_pipeout(urb->pipe)) {
      /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
      struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;

      iter_sb = TxIsocEPList[epid].sub ?
    phys_to_virt(TxIsocEPList[epid].sub) : 0;
      prev_sb = 0;

      /* SB that is linked before this URBs first SB */
      while (iter_sb && (iter_sb != urb_priv->first_sb)) {
    prev_sb = iter_sb;
    iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
      }

      if (iter_sb == 0) {
    /* Unlink of the URB currently being transmitted. */
    prev_sb = 0;
    iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
      }

      /* Walk forward to this URB's last SB so we know where the span ends */
      while (iter_sb && (iter_sb != urb_priv->last_sb)) {
    iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
      }

      if (iter_sb) {
    next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
      } else {
    /* This should only happen if the DMA has completed
       processing the SB list for this EP while interrupts
       are disabled. */
    isoc_dbg("Isoc urb not found, already sent?\n");
    next_sb = 0;
      }
      /* Splice the remainder of the chain past this URB's SB span */
      if (prev_sb) {
    prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
      } else {
    TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
      }
    }
  }

  /* Free HC-private URB data (cache bandwidth first, urb_priv is gone
     after the free) */
  bandwidth = urb_priv->bandwidth;
  urb_priv_free(hcd, urb);

  crisv10_usb_release_bandwidth(hcd, usb_pipeisoc(urb->pipe), bandwidth);

  /* Hand the URB from HCD to its USB device driver, using its completion
      functions */
  usb_hcd_giveback_urb (hcd, urb, status);
}
2624
/* Sequence number handed to each newly created URB (see urb_priv_create());
   used to identify URBs in debug output. Wraps at 2^32.
   NOTE(review): not protected against concurrent increment — presumably
   urb_priv_create() is only called under the HCD lock; confirm. */
static __u32 urb_num = 0;
2626
2627/* allocate and initialize URB private data */
2628static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
2629               int mem_flags) {
2630  struct crisv10_urb_priv *urb_priv;
2631  
2632  urb_priv = kmalloc(sizeof *urb_priv, mem_flags);
2633  if (!urb_priv)
2634    return -ENOMEM;
2635  memset(urb_priv, 0, sizeof *urb_priv);
2636
2637  urb_priv->epid = epid;
2638  urb_priv->urb_state = NOT_STARTED;
2639
2640  urb->hcpriv = urb_priv;
2641  /* Assign URB a sequence number, and increment counter */
2642  urb_priv->urb_num = urb_num;
2643  urb_num++;
2644  urb_priv->bandwidth = 0;
2645  return 0;
2646}
2647
2648/* free URB private data */
2649static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
2650  int i;
2651  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
2652  ASSERT(urb_priv != 0);
2653
2654  /* Check it has any SBs linked that needs to be freed*/
2655  if(urb_priv->first_sb != NULL) {
2656    struct USB_SB_Desc *next_sb, *first_sb, *last_sb;
2657    int i = 0;
2658    first_sb = urb_priv->first_sb;
2659    last_sb = urb_priv->last_sb;
2660    ASSERT(last_sb);
2661    while(first_sb != last_sb) {
2662      next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
2663      kmem_cache_free(usb_desc_cache, first_sb);
2664      first_sb = next_sb;
2665      i++;
2666    }
2667    kmem_cache_free(usb_desc_cache, last_sb);
2668    i++;
2669  }
2670
2671  /* Check if it has any EPs in its Intr pool that also needs to be freed */
2672  if(urb_priv->intr_ep_pool_length > 0) {
2673    for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
2674      kfree(urb_priv->intr_ep_pool[i]);
2675    }
2676    /*
2677    tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
2678         urb_priv->intr_ep_pool_length, (unsigned int)urb);
2679    */
2680  }
2681
2682  kfree(urb_priv);
2683  urb->hcpriv = NULL;
2684}
2685
2686static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) {
2687  struct crisv10_ep_priv *ep_priv;
2688  
2689  ep_priv = kmalloc(sizeof *ep_priv, mem_flags);
2690  if (!ep_priv)
2691    return -ENOMEM;
2692  memset(ep_priv, 0, sizeof *ep_priv);
2693
2694  ep->hcpriv = ep_priv;
2695  return 0;
2696}
2697
2698static void ep_priv_free(struct usb_host_endpoint *ep) {
2699  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2700  ASSERT(ep_priv);
2701  kfree(ep_priv);
2702  ep->hcpriv = NULL;
2703}
2704
2705/*
2706 * usb_check_bandwidth():
2707 *
2708 * old_alloc is from host_controller->bandwidth_allocated in microseconds;
2709 * bustime is from calc_bus_time(), but converted to microseconds.
2710 *
2711 * returns <bustime in us> if successful,
2712 * or -ENOSPC if bandwidth request fails.
2713 *
2714 * FIXME:
2715 * This initial implementation does not use Endpoint.bInterval
2716 * in managing bandwidth allocation.
2717 * It probably needs to be expanded to use Endpoint.bInterval.
2718 * This can be done as a later enhancement (correction).
2719 *
2720 * This will also probably require some kind of
2721 * frame allocation tracking...meaning, for example,
2722 * that if multiple drivers request interrupts every 10 USB frames,
2723 * they don't all have to be allocated at
2724 * frame numbers N, N+10, N+20, etc. Some of them could be at
2725 * N+11, N+21, N+31, etc., and others at
2726 * N+12, N+22, N+32, etc.
2727 *
2728 * Similarly for isochronous transfers...
2729 *
2730 * Individual HCDs can schedule more directly ... this logic
2731 * is not correct for high speed transfers.
2732 */
2733static int crisv10_usb_check_bandwidth(
2734  struct usb_device *dev,
2735  struct urb *urb)
2736{
2737  unsigned int pipe = urb->pipe;
2738  long bustime;
2739  int is_in = usb_pipein (pipe);
2740  int is_iso = usb_pipeisoc (pipe);
2741  int old_alloc = dev->bus->bandwidth_allocated;
2742  int new_alloc;
2743
2744  bustime = NS_TO_US (usb_calc_bus_time (dev->speed, is_in, is_iso,
2745                                         usb_maxpacket (dev, pipe, !is_in)));
2746  if (is_iso)
2747    bustime /= urb->number_of_packets;
2748
2749  new_alloc = old_alloc + (int) bustime;
2750  if (new_alloc > FRAME_TIME_MAX_USECS_ALLOC) {
2751    dev_dbg (&dev->dev, "usb_check_bandwidth FAILED: %d + %ld = %d usec\n",
2752             old_alloc, bustime, new_alloc);
2753    bustime = -ENOSPC; /* report error */
2754  }
2755
2756  return bustime;
2757}
2758
2759/**
2760 * usb_claim_bandwidth - records bandwidth for a periodic transfer
2761 * @dev: source/target of request
2762 * @urb: request (urb->dev == dev)
2763 * @bustime: bandwidth consumed, in (average) microseconds per frame
2764 * @isoc: true iff the request is isochronous
2765 *
2766 * HCDs are expected not to overcommit periodic bandwidth, and to record such
2767 * reservations whenever endpoints are added to the periodic schedule.
2768 *
2769 * FIXME averaging per-frame is suboptimal. Better to sum over the HCD's
2770 * entire periodic schedule ... 32 frames for OHCI, 1024 for UHCI, settable
2771 * for EHCI (256/512/1024 frames, default 1024) and have the bus expose how
2772 * large its periodic schedule is.
2773 */
2774static void crisv10_usb_claim_bandwidth(
2775  struct usb_device *dev,
2776  struct urb *urb, int bustime, int isoc)
2777{
2778  dev->bus->bandwidth_allocated += bustime;
2779  if (isoc)
2780    dev->bus->bandwidth_isoc_reqs++;
2781  else
2782    dev->bus->bandwidth_int_reqs++;
2783  struct crisv10_urb_priv *urb_priv;
2784  urb_priv = urb->hcpriv;
2785  urb_priv->bandwidth = bustime;
2786}
2787
2788/**
2789 * usb_release_bandwidth - reverses effect of usb_claim_bandwidth()
2790 * @hcd: host controller
2791 * @isoc: true iff the request is isochronous
2792 * @bandwidth: bandwidth returned
2793 *
2794 * This records that previously allocated bandwidth has been released.
2795 * Bandwidth is released when endpoints are removed from the host controller's
2796 * periodic schedule.
2797 */
2798static void crisv10_usb_release_bandwidth(
2799  struct usb_hcd *hcd,
2800  int isoc,
2801  int bandwidth)
2802{
2803  hcd_to_bus(hcd)->bandwidth_allocated -= bandwidth;
2804  if (isoc)
2805    hcd_to_bus(hcd)->bandwidth_isoc_reqs--;
2806  else
2807    hcd_to_bus(hcd)->bandwidth_int_reqs--;
2808}
2809
2810
2811/* EPID handling functions, managing EP-list in Etrax through wrappers */
2812/* ------------------------------------------------------------------- */
2813
2814/* Sets up a new EPID for an endpoint or returns existing if found */
2815static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
2816             int mem_flags) {
2817  int epid;
2818  char devnum, endpoint, out_traffic, slow;
2819  int maxlen;
2820  __u32 epid_data;
2821  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2822  
2823  DBFENTER;
2824  
2825  /* Check if a valid epid already is setup for this endpoint */
2826  if(ep_priv != NULL) {
2827    return ep_priv->epid;
2828  }
2829
2830  /* We must find and initiate a new epid for this urb. */
2831  epid = tc_allocate_epid();
2832  
2833  if (epid == -1) {
2834    /* Failed to allocate a new epid. */
2835    DBFEXIT;
2836    return epid;
2837  }
2838  
2839  /* We now have a new epid to use. Claim it. */
2840  epid_state[epid].inuse = 1;
2841  
2842  /* Init private data for new endpoint */
2843  if(ep_priv_create(ep, mem_flags) != 0) {
2844    return -ENOMEM;
2845  }
2846  ep_priv = ep->hcpriv;
2847  ep_priv->epid = epid;
2848
2849  devnum = usb_pipedevice(urb->pipe);
2850  endpoint = usb_pipeendpoint(urb->pipe);
2851  slow = (urb->dev->speed == USB_SPEED_LOW);
2852  maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
2853
2854  if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
2855    /* We want both IN and OUT control traffic to be put on the same
2856       EP/SB list. */
2857    out_traffic = 1;
2858  } else {
2859    out_traffic = usb_pipeout(urb->pipe);
2860  }
2861    
2862  if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2863    epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
2864      /* FIXME: Change any to the actual port? */
2865      IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
2866      IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
2867      IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
2868      IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
2869    etrax_epid_iso_set(epid, epid_data);
2870  } else {
2871    epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) |
2872      IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
2873      /* FIXME: Change any to the actual port? */
2874      IO_STATE(R_USB_EPT_DATA, port, any) |
2875      IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
2876      IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
2877      IO_FIELD(R_USB_EPT_DATA, dev, devnum);
2878    etrax_epid_set(epid, epid_data);
2879  }
2880  
2881  epid_state[epid].out_traffic = out_traffic;
2882  epid_state[epid].type = usb_pipetype(urb->pipe);
2883
2884  tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
2885      (unsigned int)ep, epid, devnum, endpoint, maxlen,
2886      str_type(urb->pipe), out_traffic ? "out" : "in",
2887      slow ? "low" : "full");
2888
2889  /* Enable Isoc eof interrupt if we set up the first Isoc epid */
2890  if(usb_pipeisoc(urb->pipe)) {
2891    isoc_epid_counter++;
2892    if(isoc_epid_counter == 1) {
2893      isoc_warn("Enabled Isoc eof interrupt\n");
2894      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
2895    }
2896  }
2897
2898  DBFEXIT;
2899  return epid;
2900}
2901
/* Release the epid used by an endpoint and free its private data.
 *
 * Disables the iso_eof interrupt when the last isoc epid goes away,
 * invalidates the epid in hardware (polling the hold bit first), marks
 * the epid free in the software state table and frees ep->hcpriv.
 * No-op (with a warning) if the endpoint has no private data. */
static void tc_free_epid(struct usb_host_endpoint *ep) {
  unsigned long flags;
  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  int epid;
  /* volatile: the busy-wait below must not be optimized away */
  volatile int timeout = 10000;

  DBFENTER;

  if (ep_priv == NULL) {
    tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep);
    DBFEXIT;
    return;
  }

  epid = ep_priv->epid;

  /* Disable Isoc eof interrupt if we free the last Isoc epid */
  if(epid_isoc(epid)) {
    ASSERT(isoc_epid_counter > 0);
    isoc_epid_counter--;
    if(isoc_epid_counter == 0) {
      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, iso_eof, clr);
      isoc_warn("Disabled Isoc eof interrupt\n");
    }
  }

  /* Take the lock manually instead of via the epid_x_x wrappers,
     because we need to poll the hardware while holding it */
  spin_lock_irqsave(&etrax_epid_lock, flags);
  
  /* Select the epid, then wait for the controller to drop the hold bit
     (bounded by timeout) before touching the data register */
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  nop();
  while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) &&
    (timeout-- > 0));
  /* This will, among other things, set the valid field to 0. */
  *R_USB_EPT_DATA = 0;
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
  
  /* Free resource in software state info list */
  epid_state[epid].inuse = 0;

  /* Free private endpoint data */
  ep_priv_free(ep);
  
  DBFEXIT;
}
2948
2949static int tc_allocate_epid(void) {
2950  int i;
2951  DBFENTER;
2952  for (i = 0; i < NBR_OF_EPIDS; i++) {
2953    if (!epid_inuse(i)) {
2954      DBFEXIT;
2955      return i;
2956    }
2957  }
2958  
2959  tc_warn("Found no free epids\n");
2960  DBFEXIT;
2961  return -1;
2962}
2963
2964
2965/* Wrappers around the list functions (include/linux/list.h). */
2966/* ---------------------------------------------------------- */
2967static inline int __urb_list_empty(int epid) {
2968  int retval;
2969  retval = list_empty(&urb_list[epid]);
2970  return retval;
2971}
2972
2973/* Returns first urb for this epid, or NULL if list is empty. */
2974static inline struct urb *urb_list_first(int epid) {
2975  unsigned long flags;
2976  struct urb *first_urb = 0;
2977  spin_lock_irqsave(&urb_list_lock, flags);
2978  if (!__urb_list_empty(epid)) {
2979    /* Get the first urb (i.e. head->next). */
2980    urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
2981    first_urb = urb_entry->urb;
2982  }
2983  spin_unlock_irqrestore(&urb_list_lock, flags);
2984  return first_urb;
2985}
2986
2987/* Adds an urb_entry last in the list for this epid. */
2988static inline void urb_list_add(struct urb *urb, int epid, int mem_flags) {
2989  unsigned long flags;
2990  urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), mem_flags);
2991  ASSERT(urb_entry);
2992  
2993  urb_entry->urb = urb;
2994  spin_lock_irqsave(&urb_list_lock, flags);
2995  list_add_tail(&urb_entry->list, &urb_list[epid]);
2996  spin_unlock_irqrestore(&urb_list_lock, flags);
2997}
2998
2999/* Search through the list for an element that contains this urb. (The list
3000   is expected to be short and the one we are about to delete will often be
3001   the first in the list.)
3002   Should be protected by spin_locks in calling function */
3003static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) {
3004  struct list_head *entry;
3005  struct list_head *tmp;
3006  urb_entry_t *urb_entry;
3007  
3008  list_for_each_safe(entry, tmp, &urb_list[epid]) {
3009    urb_entry = list_entry(entry, urb_entry_t, list);
3010    ASSERT(urb_entry);
3011    ASSERT(urb_entry->urb);
3012    
3013    if (urb_entry->urb == urb) {
3014      return urb_entry;
3015    }
3016  }
3017  return 0;
3018}
3019
3020/* Same function as above but for global use. Protects list by spinlock */
3021static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) {
3022  unsigned long flags;
3023  urb_entry_t *urb_entry;
3024  spin_lock_irqsave(&urb_list_lock, flags);
3025  urb_entry = __urb_list_entry(urb, epid);
3026  spin_unlock_irqrestore(&urb_list_lock, flags);
3027  return (urb_entry);
3028}
3029
3030/* Delete an urb from the list. */
3031static inline void urb_list_del(struct urb *urb, int epid) {
3032  unsigned long flags;
3033  urb_entry_t *urb_entry;
3034
3035  /* Delete entry and free. */
3036  spin_lock_irqsave(&urb_list_lock, flags);
3037  urb_entry = __urb_list_entry(urb, epid);
3038  ASSERT(urb_entry);
3039
3040  list_del(&urb_entry->list);
3041  spin_unlock_irqrestore(&urb_list_lock, flags);
3042  kfree(urb_entry);
3043}
3044
3045/* Move an urb to the end of the list. */
3046static inline void urb_list_move_last(struct urb *urb, int epid) {
3047  unsigned long flags;
3048  urb_entry_t *urb_entry;
3049  
3050  spin_lock_irqsave(&urb_list_lock, flags);
3051  urb_entry = __urb_list_entry(urb, epid);
3052  ASSERT(urb_entry);
3053
3054  list_del(&urb_entry->list);
3055  list_add_tail(&urb_entry->list, &urb_list[epid]);
3056  spin_unlock_irqrestore(&urb_list_lock, flags);
3057}
3058
3059/* Get the next urb in the list. */
3060static inline struct urb *urb_list_next(struct urb *urb, int epid) {
3061  unsigned long flags;
3062  urb_entry_t *urb_entry;
3063
3064  spin_lock_irqsave(&urb_list_lock, flags);
3065  urb_entry = __urb_list_entry(urb, epid);
3066  ASSERT(urb_entry);
3067
3068  if (urb_entry->list.next != &urb_list[epid]) {
3069    struct list_head *elem = urb_entry->list.next;
3070    urb_entry = list_entry(elem, urb_entry_t, list);
3071    spin_unlock_irqrestore(&urb_list_lock, flags);
3072    return urb_entry->urb;
3073  } else {
3074    spin_unlock_irqrestore(&urb_list_lock, flags);
3075    return NULL;
3076  }
3077}
3078
3079struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc,
3080                  int mem_flags) {
3081  struct USB_EP_Desc *ep_desc;
3082  ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags);
3083  if(ep_desc == NULL)
3084    return NULL;
3085  memset(ep_desc, 0, sizeof(struct USB_EP_Desc));
3086
3087  ep_desc->hw_len = 0;
3088  ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
3089              IO_STATE(USB_EP_command, enable, yes));
3090  if(sb_desc == NULL) {
3091    ep_desc->sub = 0;
3092  } else {
3093    ep_desc->sub = virt_to_phys(sb_desc);
3094  }
3095  return ep_desc;
3096}
3097
/* Token types for SB descriptors (values of the USB_SB_command tt field) */
#define TT_ZOUT 0   /* zero-length OUT (used for status/handshake stages) */
#define TT_IN 1     /* IN token */
#define TT_OUT 2    /* OUT token */
#define TT_SETUP 3  /* SETUP token (control transfers) */

/* Shorthands for commonly OR'ed SB command flags */
#define CMD_EOL IO_STATE(USB_SB_command, eol, yes)   /* end of SB list */
#define CMD_INTR IO_STATE(USB_SB_command, intr, yes) /* raise descriptor interrupt */
#define CMD_FULL IO_STATE(USB_SB_command, full, yes) /* treat as full-length packet */
3106
3107/* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
3108   SBs. Also used by create_sb_in() to avoid same allocation procedure at two
3109   places */
3110struct USB_SB_Desc* create_sb(struct USB_SB_Desc* sb_prev, int tt, void* data,
3111                  int datalen, int mem_flags) {
3112  struct USB_SB_Desc *sb_desc;
3113  sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
3114  if(sb_desc == NULL)
3115    return NULL;
3116  memset(sb_desc, 0, sizeof(struct USB_SB_Desc));
3117
3118  sb_desc->command = IO_FIELD(USB_SB_command, tt, tt) |
3119                     IO_STATE(USB_SB_command, eot, yes);
3120
3121  sb_desc->sw_len = datalen;
3122  if(data != NULL) {
3123    sb_desc->buf = virt_to_phys(data);
3124  } else {
3125    sb_desc->buf = 0;
3126  }
3127  if(sb_prev != NULL) {
3128    sb_prev->next = virt_to_phys(sb_desc);
3129  }
3130  return sb_desc;
3131}
3132
3133/* Creates a copy of an existing SB by allocation space for it and copy
3134   settings */
3135struct USB_SB_Desc* create_sb_copy(struct USB_SB_Desc* sb_orig, int mem_flags) {
3136  struct USB_SB_Desc *sb_desc;
3137  sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
3138  if(sb_desc == NULL)
3139    return NULL;
3140
3141  memcpy(sb_desc, sb_orig, sizeof(struct USB_SB_Desc));
3142  return sb_desc;
3143}
3144
3145/* A specific create_sb function for creation of in SBs. This is due to
3146   that datalen in In SBs shows how many packets we are expecting. It also
3147   sets up the rem field to show if how many bytes we expect in last packet
3148   if it's not a full one */
3149struct USB_SB_Desc* create_sb_in(struct USB_SB_Desc* sb_prev, int datalen,
3150                 int maxlen, int mem_flags) {
3151  struct USB_SB_Desc *sb_desc;
3152  sb_desc = create_sb(sb_prev, TT_IN, NULL,
3153              datalen ? (datalen - 1) / maxlen + 1 : 0, mem_flags);
3154  if(sb_desc == NULL)
3155    return NULL;
3156  sb_desc->command |= IO_FIELD(USB_SB_command, rem, datalen % maxlen);
3157  return sb_desc;
3158}
3159
/* OR additional command flags (e.g. CMD_EOL / CMD_INTR / CMD_FULL) into an
   SB descriptor's command word. */
void set_sb_cmds(struct USB_SB_Desc *sb_desc, __u16 flags) {
  sb_desc->command |= flags;
}
3163
/* Build the SB descriptor chain for an URB according to its pipe type and
   direction, and attach the chain ends to urb_priv->first_sb/last_sb.
   Control URBs get setup/data/status stages; bulk and interrupt URBs get a
   single SB; out isoc URBs get one SB per packet.
   Returns 0 on success, -ENOMEM on allocation failure or -EPIPE for an
   unknown pipe type / zero-packet isoc out URB.
   NOTE(review): on -ENOMEM any SBs already created remain linked from
   urb_priv->first_sb — presumably urb_priv_free() reclaims them; confirm
   with callers. */
int create_sb_for_urb(struct urb *urb, int mem_flags) {
  int is_out = !usb_pipein(urb->pipe);
  int type = usb_pipetype(urb->pipe);
  int maxlen = usb_maxpacket(urb->dev, urb->pipe, is_out);
  int buf_len = urb->transfer_buffer_length;
  void *buf = buf_len > 0 ? urb->transfer_buffer : NULL;
  struct USB_SB_Desc *sb_desc = NULL;

  struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  ASSERT(urb_priv != NULL);

  switch(type) {
  case PIPE_CONTROL:
    /* Setup stage (8-byte setup packet, always full) */
    sb_desc = create_sb(NULL, TT_SETUP, urb->setup_packet, 8, mem_flags);
    if(sb_desc == NULL)
      return -ENOMEM;
    set_sb_cmds(sb_desc, CMD_FULL);

    /* Attach first SB to URB */
    urb_priv->first_sb = sb_desc;

    if (is_out) { /* Out Control URB */
      /* If this Control OUT transfer has an optional data stage we add
     an OUT token before the mandatory IN (status) token */
      if ((buf_len > 0) && buf) {
    sb_desc = create_sb(sb_desc, TT_OUT, buf, buf_len, mem_flags);
    if(sb_desc == NULL)
      return -ENOMEM;
    set_sb_cmds(sb_desc, CMD_FULL);
      }

      /* Status stage */
      /* The data length has to be exactly 1. This is due to a requirement
         of the USB specification that a host must be prepared to receive
         data in the status phase */
      sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
      if(sb_desc == NULL)
    return -ENOMEM;
    } else { /* In control URB */
      /* Data stage */
      sb_desc = create_sb_in(sb_desc, buf_len, maxlen, mem_flags);
      if(sb_desc == NULL)
    return -ENOMEM;

      /* Status stage */
      /* Read comment at zout_buffer declaration for an explanation to this. */
      sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
      if(sb_desc == NULL)
    return -ENOMEM;
      /* Set descriptor interrupt flag for in URBs so we can finish URB after
         zout-packet has been sent */
      set_sb_cmds(sb_desc, CMD_INTR | CMD_FULL);
    }
    /* Set end-of-list flag in last SB */
    set_sb_cmds(sb_desc, CMD_EOL);
    /* Attach last SB to URB */
    urb_priv->last_sb = sb_desc;
    break;

  case PIPE_BULK:
    /* Bulk transfers need a single SB in either direction */
    if (is_out) { /* Out Bulk URB */
      sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
      if(sb_desc == NULL)
    return -ENOMEM;
      /* The full field is set to yes, even if we don't actually check that
     this is a full-length transfer (i.e., that transfer_buffer_length %
     maxlen = 0).
     Setting full prevents the USB controller from sending an empty packet
     in that case. However, if URB_ZERO_PACKET was set we want that. */
      if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
    set_sb_cmds(sb_desc, CMD_FULL);
      }
    } else { /* In Bulk URB */
      sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
      if(sb_desc == NULL)
    return -ENOMEM;
    }
    /* Set end-of-list flag for last SB */
    set_sb_cmds(sb_desc, CMD_EOL);

    /* Attach SB to URB */
    urb_priv->first_sb = sb_desc;
    urb_priv->last_sb = sb_desc;
    break;

  case PIPE_INTERRUPT:
    /* Interrupt transfers also use a single SB */
    if(is_out) { /* Out Intr URB */
      sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
      if(sb_desc == NULL)
    return -ENOMEM;

      /* The full field is set to yes, even if we don't actually check that
     this is a full-length transfer (i.e., that transfer_buffer_length %
     maxlen = 0).
     Setting full prevents the USB controller from sending an empty packet
     in that case. However, if URB_ZERO_PACKET was set we want that. */
      if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
    set_sb_cmds(sb_desc, CMD_FULL);
      }
      /* Only generate TX interrupt if it's a Out URB*/
      set_sb_cmds(sb_desc, CMD_INTR);

    } else { /* In Intr URB */
      sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
      if(sb_desc == NULL)
    return -ENOMEM;
    }
    /* Set end-of-list flag for last SB */
    set_sb_cmds(sb_desc, CMD_EOL);

    /* Attach SB to URB */
    urb_priv->first_sb = sb_desc;
    urb_priv->last_sb = sb_desc;

    break;
  case PIPE_ISOCHRONOUS:
    if(is_out) { /* Out Isoc URB */
      int i;
      if(urb->number_of_packets == 0) {
    tc_err("Can't create SBs for Isoc URB with zero packets\n");
    return -EPIPE;
      }
      /* Create one SB descriptor for each packet and link them together. */
      for(i = 0; i < urb->number_of_packets; i++) {
    if (urb->iso_frame_desc[i].length > 0) {

      sb_desc = create_sb(sb_desc, TT_OUT, urb->transfer_buffer +
                  urb->iso_frame_desc[i].offset,
                  urb->iso_frame_desc[i].length, mem_flags);
      if(sb_desc == NULL)
        return -ENOMEM;

      /* Check if it's a full length packet */
      if (urb->iso_frame_desc[i].length ==
          usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
        set_sb_cmds(sb_desc, CMD_FULL);
      }
      
    } else { /* zero length packet */
      sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
      if(sb_desc == NULL)
        return -ENOMEM;
      set_sb_cmds(sb_desc, CMD_FULL);
    }
    /* Attach first SB descriptor to URB */
    if (i == 0) {
      urb_priv->first_sb = sb_desc;
    }
      }
      /* Set interrupt and end-of-list flags in last SB */
      set_sb_cmds(sb_desc, CMD_INTR | CMD_EOL);
      /* Attach last SB descriptor to URB */
      urb_priv->last_sb = sb_desc;
      tc_dbg("Created %d out SBs for Isoc URB:0x%x\n",
           urb->number_of_packets, (unsigned int)urb);
    } else { /* In Isoc URB */
      /* Actual number of packets is not relevant for periodic in traffic as
     long as it is more than zero. Set to 1 always. */
      sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
      if(sb_desc == NULL)
    return -ENOMEM;
      /* Set end-of-list flags for SB */
      set_sb_cmds(sb_desc, CMD_EOL);

      /* Attach SB to URB */
      urb_priv->first_sb = sb_desc;
      urb_priv->last_sb = sb_desc;
    }
    break;
  default:
    tc_err("Unknown pipe-type\n");
    return -EPIPE;
    break;
  }
  return 0;
}
3341
3342int init_intr_urb(struct urb *urb, int mem_flags) {
3343  struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
3344  struct USB_EP_Desc* ep_desc;
3345  int interval;
3346  int i;
3347  int ep_count;
3348
3349  ASSERT(urb_priv != NULL);
3350  ASSERT(usb_pipeint(urb->pipe));
3351  /* We can't support interval longer than amount of eof descriptors in
3352     TxIntrEPList */
3353  if(urb->interval > MAX_INTR_INTERVAL) {
3354    tc_err("Interrupt interval %dms too big (max: %dms)\n", urb->interval,
3355       MAX_INTR_INTERVAL);
3356    return -EINVAL;
3357  }
3358
3359  /* We assume that the SB descriptors already have been setup */
3360  ASSERT(urb_priv->first_sb != NULL);
3361
3362  /* Round of the interval to 2^n, it is obvious that this code favours
3363     smaller numbers, but that is actually a good thing */
3364  /* FIXME: The "rounding error" for larger intervals will be quite
3365     large. For in traffic this shouldn't be a problem since it will only
3366     mean that we "poll" more often. */
3367  interval = urb->interval;
3368  for (i = 0; interval; i++) {
3369    interval = interval >> 1;
3370  }
3371  urb_priv->interval = 1 << (i - 1);
3372
3373  /* We can only have max interval for Out Interrupt due to that we can only
3374     handle one linked in EP for a certain epid in the Intr descr array at the
3375     time. The USB Controller in the Etrax 100LX continues to process Intr EPs
3376     so we have no way of knowing which one that caused the actual transfer if
3377     we have several linked in. */
3378  if(usb_pipeout(urb->pipe)) {
3379    urb_priv->interval = MAX_INTR_INTERVAL;
3380  }
3381
3382  /* Calculate amount of EPs needed */
3383  ep_count = MAX_INTR_INTERVAL / urb_priv->interval;
3384
3385  for(i = 0; i < ep_count; i++) {
3386    ep_desc = create_ep(urb_priv->epid, urb_priv->first_sb, mem_flags);
3387    if(ep_desc == NULL) {
3388      /* Free any descriptors that we may have allocated before failure */
3389      while(i > 0) {
3390    i--;
3391    kfree(urb_priv->intr_ep_pool[i]);
3392      }
3393      return -ENOMEM;
3394    }
3395    urb_priv->intr_ep_pool[i] = ep_desc;
3396  }
3397  urb_priv->intr_ep_pool_length = ep_count;
3398  return 0;
3399}
3400
3401/* DMA RX/TX functions */
3402/* ----------------------- */
3403
/* Initialise the circular RX descriptor list (NBR_OF_RX_DESC entries over
 * the RxBuf area), set the progress pointers myNextRxDesc/myLastRxDesc,
 * flush the cache, and start DMA channel 9 on the first descriptor. */
static void tc_dma_init_rx_list(void) {
  int i;

  /* Setup descriptor list except last one */
  for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
    RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
    RxDescList[i].command = 0;
    RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
    RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
    RxDescList[i].hw_len = 0;
    RxDescList[i].status = 0;
    
    /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as
       USB_IN_Desc for the relevant fields.) */
    prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);
    
  }
  /* Special handling of last descriptor: mark end-of-list and wrap the
     chain back to descriptor 0, making the list circular */
  RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
  RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
  RxDescList[i].next = virt_to_phys(&RxDescList[0]);
  RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
  RxDescList[i].hw_len = 0;
  RxDescList[i].status = 0;
  
  /* Setup list pointers that show progress in list */
  myNextRxDesc = &RxDescList[0];
  myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
  
  /* Cache must be flushed before the DMA engine reads the descriptors */
  flush_etrax_cache();
  /* Point DMA to first descriptor in list and start it */
  *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
  *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);
}
3438
3439
/* Initialise the circular TX bulk EP descriptor list: one EP per epid plus
 * a pair of disabled dummy EPs after each, then point DMA channel 8
 * sub-channel 0 at the list (without starting it). */
static void tc_dma_init_tx_bulk_list(void) {
  int i;
  volatile struct USB_EP_Desc *epDescr;

  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
    epDescr = &(TxBulkEPList[i]);
    CHECK_ALIGN(epDescr);
    epDescr->hw_len = 0;
    epDescr->command = IO_FIELD(USB_EP_command, epid, i);
    epDescr->sub = 0;
    epDescr->next = virt_to_phys(&TxBulkEPList[i + 1]);

    /* Initiate two EPs, disabled and with the eol flag set. No need for any
       preserved epid. */
    
    /* The first one has the intr flag set so we get an interrupt when the DMA
       channel is about to become disabled. */
    CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
    TxBulkDummyEPList[i][0].hw_len = 0;
    TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
                       IO_STATE(USB_EP_command, eol, yes) |
                       IO_STATE(USB_EP_command, intr, yes));
    TxBulkDummyEPList[i][0].sub = 0;
    TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);
    
    /* The second one. */
    CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
    TxBulkDummyEPList[i][1].hw_len = 0;
    TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
                       IO_STATE(USB_EP_command, eol, yes));
    TxBulkDummyEPList[i][1].sub = 0;
    /* The last dummy's next pointer is the same as the current EP's next pointer. */
    TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
  }

  /* Special handling of last descr in list, make list circular */
  epDescr = &TxBulkEPList[i];
  CHECK_ALIGN(epDescr);
  epDescr->hw_len = 0;
  epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
    IO_FIELD(USB_EP_command, epid, i);
  epDescr->sub = 0;
  epDescr->next = virt_to_phys(&TxBulkEPList[0]);
  
  /* Init DMA sub-channel pointers to last item in each list */
  *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
  /* No point in starting the bulk channel yet.
   *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
}
3489
/* Initialise the circular TX control EP descriptor list (one EP per epid)
 * and point DMA channel 8 sub-channel 1 at it (without starting it). */
static void tc_dma_init_tx_ctrl_list(void) {
  int i;
  volatile struct USB_EP_Desc *epDescr;

  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
    epDescr = &(TxCtrlEPList[i]);
    CHECK_ALIGN(epDescr);
    epDescr->hw_len = 0;
    epDescr->command = IO_FIELD(USB_EP_command, epid, i);
    epDescr->sub = 0;
    epDescr->next = virt_to_phys(&TxCtrlEPList[i + 1]);
  }
  /* Special handling of last descr in list, make list circular */
  epDescr = &TxCtrlEPList[i];
  CHECK_ALIGN(epDescr);
  epDescr->hw_len = 0;
  epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
    IO_FIELD(USB_EP_command, epid, i);
  epDescr->sub = 0;
  epDescr->next = virt_to_phys(&TxCtrlEPList[0]);
  
  /* Init DMA sub-channel pointers to last item in each list */
  *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[i]);
  /* No point in starting the ctrl channel yet.
   *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
}
3516
3517
3518static void tc_dma_init_tx_intr_list(void) {
3519  int i;
3520
3521  TxIntrSB_zout.sw_len = 1;
3522  TxIntrSB_zout.next = 0;
3523  TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
3524  TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
3525               IO_STATE(USB_SB_command, tt, zout) |
3526               IO_STATE(USB_SB_command, full, yes) |
3527               IO_STATE(USB_SB_command, eot, yes) |
3528               IO_STATE(USB_SB_command, eol, yes));
3529  
3530  for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
3531    CHECK_ALIGN(&TxIntrEPList[i]);
3532    TxIntrEPList[i].hw_len = 0;
3533    TxIntrEPList[i].command =
3534      (IO_STATE(USB_EP_command, eof, yes) |
3535       IO_STATE(USB_EP_command, enable, yes) |
3536       IO_FIELD(USB_EP_command, epid, INVALID_EPID));
3537    TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
3538    TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
3539  }
3540
3541  /* Special handling of last descr in list, make list circular */
3542  CHECK_ALIGN(&TxIntrEPList[i]);
3543  TxIntrEPList[i].hw_len = 0;
3544  TxIntrEPList[i].command =
3545    (IO_STATE(USB_EP_command, eof, yes) |
3546     IO_STATE(USB_EP_command, eol, yes) |
3547     IO_STATE(USB_EP_command, enable, yes) |
3548     IO_FIELD(USB_EP_command, epid, INVALID_EPID));
3549  TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
3550  TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);
3551
3552  intr_dbg("Initiated Intr EP descriptor list\n");
3553
3554
3555  /* Connect DMA 8 sub-channel 2 to first in list */
3556  *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
3557}
3558
3559static void tc_dma_init_tx_isoc_list(void) {
3560  int i;
3561
3562  DBFENTER;
3563
3564  /* Read comment at zout_buffer declaration for an explanation to this. */
3565  TxIsocSB_zout.sw_len = 1;
3566  TxIsocSB_zout.next = 0;
3567  TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
3568  TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
3569               IO_STATE(USB_SB_command, tt, zout) |
3570               IO_STATE(USB_SB_command, full, yes) |
3571               IO_STATE(USB_SB_command, eot, yes) |
3572               IO_STATE(USB_SB_command, eol, yes));
3573
3574  /* The last isochronous EP descriptor is a dummy. */
3575  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
3576    CHECK_ALIGN(&TxIsocEPList[i]);
3577    TxIsocEPList[i].hw_len = 0;
3578    TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
3579    TxIsocEPList[i].sub = 0;
3580    TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
3581  }
3582
3583  CHECK_ALIGN(&TxIsocEPList[i]);
3584  TxIsocEPList[i].hw_len = 0;
3585
3586  /* Must enable the last EP descr to get eof interrupt. */
3587  TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
3588                 IO_STATE(USB_EP_command, eof, yes) |
3589                 IO_STATE(USB_EP_command, eol, yes) |
3590                 IO_FIELD(USB_EP_command, epid, INVALID_EPID));
3591  TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
3592  TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);
3593
3594  *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
3595  *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
3596}
3597
3598static int tc_dma_init(struct usb_hcd *hcd) {
3599  tc_dma_init_rx_list();
3600  tc_dma_init_tx_bulk_list();
3601  tc_dma_init_tx_ctrl_list();
3602  tc_dma_init_tx_intr_list();
3603  tc_dma_init_tx_isoc_list();
3604
3605  if (cris_request_dma(USB_TX_DMA_NBR,
3606               "ETRAX 100LX built-in USB (Tx)",
3607               DMA_VERBOSE_ON_ERROR,
3608               dma_usb)) {
3609    err("Could not allocate DMA ch 8 for USB");
3610    return -EBUSY;
3611  }
3612     
3613  if (cris_request_dma(USB_RX_DMA_NBR,
3614               "ETRAX 100LX built-in USB (Rx)",
3615               DMA_VERBOSE_ON_ERROR,
3616               dma_usb)) {
3617    err("Could not allocate DMA ch 9 for USB");
3618    return -EBUSY;
3619  }
3620
3621  *R_IRQ_MASK2_SET =
3622    /* Note that these interrupts are not used. */
3623    IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
3624    /* Sub channel 1 (ctrl) descr. interrupts are used. */
3625    IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
3626    IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
3627    /* Sub channel 3 (isoc) descr. interrupts are used. */
3628    IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
3629  
3630  /* Note that the dma9_descr interrupt is not used. */
3631  *R_IRQ_MASK2_SET =
3632    IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
3633    IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
3634
3635  if (request_irq(ETRAX_USB_RX_IRQ, tc_dma_rx_interrupt, 0,
3636          "ETRAX 100LX built-in USB (Rx)", hcd)) {
3637    err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
3638    return -EBUSY;
3639  }
3640  
3641  if (request_irq(ETRAX_USB_TX_IRQ, tc_dma_tx_interrupt, 0,
3642          "ETRAX 100LX built-in USB (Tx)", hcd)) {
3643    err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
3644    return -EBUSY;
3645  }
3646
3647  return 0;
3648}
3649
/* Release the DMA interrupt handlers and the two USB DMA channels that
   were claimed in tc_dma_init(). */
static void tc_dma_destroy(void) {
  /* NOTE(review): tc_dma_init() registers these handlers with dev_id == hcd,
     but they are freed here with dev_id == NULL.  free_irq() matches on
     dev_id, so this looks like a mismatch -- confirm against the kernel
     version this driver targets before relying on teardown. */
  free_irq(ETRAX_USB_RX_IRQ, NULL);
  free_irq(ETRAX_USB_TX_IRQ, NULL);

  /* Device-name strings must match the ones used in cris_request_dma(). */
  cris_free_dma(USB_TX_DMA_NBR, "ETRAX 100LX built-in USB (Tx)");
  cris_free_dma(USB_RX_DMA_NBR, "ETRAX 100LX built-in USB (Rx)");

}
3658
3659static void tc_dma_link_intr_urb(struct urb *urb);
3660
3661/* Handle processing of Bulk, Ctrl and Intr queues */
/* Start the next queued URB (if any) on a Bulk, Ctrl or Intr epid.
   Takes the first URB off the per-epid queue, makes it the active URB,
   presets the data toggles, attaches its SB list to the corresponding EP
   descriptor and (re)starts the matching DMA sub-channel.  Runs with
   interrupts disabled to protect the EP lists.  Isoc URBs are handled by
   tc_dma_process_isoc_urb() instead (asserted below). */
static void tc_dma_process_queue(int epid) {
  struct urb *urb;
  struct crisv10_urb_priv *urb_priv;
  unsigned long flags;
  char toggle;

  if(epid_state[epid].disabled) {
    /* Don't process any URBs on a disabled endpoint */
    return;
  }

  /* Do not disturb us while fiddling with EPs and epids */
  local_irq_save(flags);

  /* For bulk, Ctrl and Intr can we only have one URB active at a time for
     a specific EP. */
  if(activeUrbList[epid] != NULL) {
    /* An URB is already active on EP, skip checking queue */
    local_irq_restore(flags);
    return;
  }

  urb = urb_list_first(epid);
  if(urb == NULL) {
    /* No URB waiting in EP queue. Nothing do to */
    local_irq_restore(flags);
    return;
  }

  urb_priv = urb->hcpriv;
  ASSERT(urb_priv != NULL);
  ASSERT(urb_priv->urb_state == NOT_STARTED);
  ASSERT(!usb_pipeisoc(urb->pipe));

  /* Remove this URB from the queue and move it to active */
  activeUrbList[epid] = urb;
  urb_list_del(urb, epid);

  urb_priv->urb_state = STARTED;

  /* Reset error counters (regardless of which direction this traffic is). */
  etrax_epid_clear_error(epid);

  /* Special handling of Intr EP lists */
  if(usb_pipeint(urb->pipe)) {
    /* Intr URBs are spliced into the periodic EP ring instead of a
       per-epid descriptor; no toggle preset is done for them here. */
    tc_dma_link_intr_urb(urb);
    local_irq_restore(flags);
    return;
  }

  /* Software must preset the toggle bits for Bulk and Ctrl */
  if(usb_pipecontrol(urb->pipe)) {
    /* Toggle bits are initialized only during setup transaction in a
       CTRL transfer */
    etrax_epid_set_toggle(epid, 0, 0);
    etrax_epid_set_toggle(epid, 1, 0);
  } else {
    /* Bulk: restore the toggle last recorded for this endpoint/direction. */
    toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
               usb_pipeout(urb->pipe));
    etrax_epid_set_toggle(epid, usb_pipeout(urb->pipe), toggle);
  }

  tc_dbg("Added SBs from (URB:0x%x %s %s) to epid %d: %s\n",
     (unsigned int)urb, str_dir(urb->pipe), str_type(urb->pipe), epid,
     sblist_to_str(urb_priv->first_sb));

  /* We start the DMA sub channel without checking if it's running or not,
     because:
     1) If it's already running, issuing the start command is a nop.
     2) We avoid a test-and-set race condition. */
  switch(usb_pipetype(urb->pipe)) {
  case PIPE_BULK:
    /* Assert that the EP descriptor is disabled. */
    ASSERT(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));

    /* Set up and enable the EP descriptor.  sub/hw_len are written before
       the enable bit so the descriptor is consistent when the DMA sees it. */
    TxBulkEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
    TxBulkEPList[epid].hw_len = 0;
    TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);

    /* Check if the dummy list is already with us (if several urbs were queued). */
    if (usb_pipein(urb->pipe) && (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0]))) {
      tc_dbg("Inviting dummy list to the party for urb 0x%lx, epid %d",
         (unsigned long)urb, epid);
      
      /* We don't need to check if the DMA is at this EP or not before changing the
     next pointer, since we will do it in one 32-bit write (EP descriptors are
     32-bit aligned). */
      TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
    }

    restart_dma8_sub0();

    /* Update/restart the bulk start timer since we just started the channel.*/
    mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
    /* Update/restart the bulk eot timer since we just inserted traffic. */
    mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
    break;
  case PIPE_CONTROL:
    /* Assert that the EP descriptor is disabled. */
    ASSERT(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));

    /* Set up and enable the EP descriptor. */
    TxCtrlEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
    TxCtrlEPList[epid].hw_len = 0;
    TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);

    *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
    break;
  }
  local_irq_restore(flags);
}
3774
/* Splice an interrupt URB's pre-allocated EP descriptors into the circular
   TxIntrEPList at the URB's polling interval, then (re)start DMA 8
   sub-channel 2.  Called from tc_dma_process_queue() with interrupts
   disabled.  Out Intr URBs get a single descriptor inserted at the head of
   the ring; In Intr URBs get one descriptor per interval slot. */
static void tc_dma_link_intr_urb(struct urb *urb) {
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  volatile struct USB_EP_Desc *tmp_ep;
  struct USB_EP_Desc *ep_desc;
  int i = 0, epid;
  int pool_idx = 0;

  ASSERT(urb_priv != NULL);
  epid = urb_priv->epid;
  ASSERT(urb_priv->interval > 0);
  ASSERT(urb_priv->intr_ep_pool_length > 0);

  tmp_ep = &TxIntrEPList[0];

  /* Only insert one EP descriptor in list for Out Intr URBs.
     We can only handle Out Intr with interval of 128ms because
     it's not possible to insert several Out Intr EPs because they
     are not consumed by the DMA. */
  if(usb_pipeout(urb->pipe)) {
    ep_desc = urb_priv->intr_ep_pool[0];
    ASSERT(ep_desc);
    /* New descriptor's next is written first, so the ring stays intact at
       every point of the splice. */
    ep_desc->next = tmp_ep->next;
    tmp_ep->next = virt_to_phys(ep_desc);
    i++;
  } else {
    /* Loop through Intr EP descriptor list and insert EP for URB at
       specified interval */
    do {
      /* Each EP descriptor with eof flag sat signals a new frame */
      if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
    /* Insert a EP from URBs EP pool at correct interval */
    if ((i % urb_priv->interval) == 0) {
      ep_desc = urb_priv->intr_ep_pool[pool_idx];
      ASSERT(ep_desc);
      ep_desc->next = tmp_ep->next;
      tmp_ep->next = virt_to_phys(ep_desc);
      pool_idx++;
      ASSERT(pool_idx <= urb_priv->intr_ep_pool_length);
    }
    i++;
      }
      tmp_ep = (struct USB_EP_Desc *)phys_to_virt(tmp_ep->next);
    } while(tmp_ep != &TxIntrEPList[0]);
  }

  intr_dbg("Added SBs to intr epid %d: %s interval:%d (%d EP)\n", epid,
       sblist_to_str(urb_priv->first_sb), urb_priv->interval, pool_idx);

  /* We start the DMA sub channel without checking if it's running or not,
     because:
     1) If it's already running, issuing the start command is a nop.
     2) We avoid a test-and-set race condition. */
  *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
}
3829
/* Attach an Isoc URB's SB list to its epid's EP descriptor and start DMA 8
   sub-channel 3.  If the epid is idle the URB becomes the active one
   immediately; otherwise (Out Isoc only) its SBs are appended to the end of
   the SB list already attached to the EP.  Runs with interrupts disabled to
   protect the EP/SB lists. */
static void tc_dma_process_isoc_urb(struct urb *urb) {
  unsigned long flags;
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  int epid;

  /* Do not disturb us while fiddling with EPs and epids */
  local_irq_save(flags);

  ASSERT(urb_priv);
  ASSERT(urb_priv->first_sb);
  epid = urb_priv->epid;

  if(activeUrbList[epid] == NULL) {
    /* EP is idle, so make this URB active */
    activeUrbList[epid] = urb;
    urb_list_del(urb, epid);
    ASSERT(TxIsocEPList[epid].sub == 0);
    ASSERT(!(TxIsocEPList[epid].command &
         IO_STATE(USB_EP_command, enable, yes)));

    /* Differentiate between In and Out Isoc. Because In SBs are not consumed*/
    if(usb_pipein(urb->pipe)) {
    /* Each EP for In Isoc will have only one SB descriptor, setup when
       submitting the first active urb. We do it here by copying from URBs
       pre-allocated SB. */
      memcpy((void *)&(TxIsocSBList[epid]), urb_priv->first_sb,
         sizeof(TxIsocSBList[epid]));
      TxIsocEPList[epid].hw_len = 0;
      TxIsocEPList[epid].sub = virt_to_phys(&(TxIsocSBList[epid]));
    } else {
      /* For Out Isoc we attach the pre-allocated list of SBs for the URB */
      TxIsocEPList[epid].hw_len = 0;
      TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);

      isoc_dbg("Attached first URB:0x%x[%d] to epid:%d first_sb:0x%x"
           " last_sb::0x%x\n",
           (unsigned int)urb, urb_priv->urb_num, epid,
           (unsigned int)(urb_priv->first_sb),
           (unsigned int)(urb_priv->last_sb));
    }

    if (urb->transfer_flags & URB_ISO_ASAP) {
      /* The isoc transfer should be started as soon as possible. The
     start_frame field is a return value if URB_ISO_ASAP was set. Comparing
     R_USB_FM_NUMBER with a USB Chief trace shows that the first isoc IN
     token is sent 2 frames later. I'm not sure how this affects usage of
     the start_frame field by the device driver, or how it affects things
     when USB_ISO_ASAP is not set, so therefore there's no compensation for
     the 2 frame "lag" here. */
      urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
      TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
      urb_priv->urb_state = STARTED;
      isoc_dbg("URB_ISO_ASAP set, urb->start_frame set to %d\n",
           urb->start_frame);
    } else {
      /* Not started yet. */
      urb_priv->urb_state = NOT_STARTED;
      isoc_warn("urb_priv->urb_state set to NOT_STARTED for URB:0x%x\n",
        (unsigned int)urb);
    }

  } else {
    /* An URB is already active on the EP. Leave URB in queue and let
       finish_isoc_urb process it after current active URB */
    ASSERT(TxIsocEPList[epid].sub != 0);

    if(usb_pipein(urb->pipe)) {
      /* Because there already is a active In URB on this epid we do nothing
         and the finish_isoc_urb() function will handle switching to next URB*/

    } else { /* For Out Isoc, insert new URBs traffic last in SB-list. */
      struct USB_SB_Desc *temp_sb_desc;

      /* Set state STARTED to all Out Isoc URBs added to SB list because we
         don't know how many of them that are finished before descr interrupt*/
      urb_priv->urb_state = STARTED;

      /* Find end of current SB list by looking for SB with eol flag sat */
      temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
      while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
         IO_STATE(USB_SB_command, eol, yes)) {
    ASSERT(temp_sb_desc->next);
    temp_sb_desc = phys_to_virt(temp_sb_desc->next);
      }

      isoc_dbg("Appended URB:0x%x[%d] (first:0x%x last:0x%x) to epid:%d"
           " sub:0x%x eol:0x%x\n",
           (unsigned int)urb, urb_priv->urb_num,
           (unsigned int)(urb_priv->first_sb),
           (unsigned int)(urb_priv->last_sb), epid,
           (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
           (unsigned int)temp_sb_desc);

      /* Next pointer must be set before eol is removed. */
      temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
      /* Clear the previous end of list flag since there is a new in the
     added SB descriptor list. */
      temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);

      if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
    /* The EP went disabled while (or before) we were appending: the DMA
       may have raced past the splice point.  Figure out where it stopped
       and restart if our new SBs were missed. */
    __u32 epid_data;
    /* 8.8.5 in Designer's Reference says we should check for and correct
       any errors in the EP here. That should not be necessary if
       epid_attn is handled correctly, so we assume all is ok. */
    epid_data = etrax_epid_iso_get(epid);
    if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) !=
        IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
      isoc_err("Disabled Isoc EP with error:%d on epid:%d when appending"
           " URB:0x%x[%d]\n",
           IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data), epid,
           (unsigned int)urb, urb_priv->urb_num);
    }

    /* The SB list was exhausted. */
    if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
      /* The new sublist did not get processed before the EP was
         disabled. Setup the EP again. */

      if(virt_to_phys(temp_sb_desc) == TxIsocEPList[epid].sub) {
        isoc_dbg("EP for epid:%d stoped at SB:0x%x before newly inserted"
             ", restarting from this URBs SB:0x%x\n",
             epid, (unsigned int)temp_sb_desc,
             (unsigned int)(urb_priv->first_sb));
        TxIsocEPList[epid].hw_len = 0;
        TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
        urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
        /* Enable the EP again so data gets processed this time */
        TxIsocEPList[epid].command |=
          IO_STATE(USB_EP_command, enable, yes);

      } else {
        /* The EP has been disabled but not at end this URB (god knows
           where). This should generate an epid_attn so we should not be
           here */
        isoc_warn("EP was disabled on sb:0x%x before SB list for"
             " URB:0x%x[%d] got processed\n",
             (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
             (unsigned int)urb, urb_priv->urb_num);
      }
    } else {
      /* This might happend if we are slow on this function and isn't
         an error. */
      isoc_dbg("EP was disabled and finished with SBs from appended"
           " URB:0x%x[%d]\n", (unsigned int)urb, urb_priv->urb_num);
    }
      }
    }
  }
  
  /* Start the DMA sub channel */
  *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);

  local_irq_restore(flags);
}
3984
/* Remove all EP descriptors belonging to an interrupt URB's epid from the
   circular TxIntrEPList, per section 8.8.4 of the Designer's Reference:
   first disable each EP, then unlink it with a single aligned 32-bit write
   of the predecessor's next pointer, busy-waiting until the DMA has moved
   off the unlinked descriptor.
   NOTE(review): assumes the caller serialises against the Tx interrupt
   (e.g. runs with interrupts disabled) -- confirm at call sites.  Also note
   that `timeout` is a single shared budget across all unlinked EPs, so
   later unlinks get less waiting time. */
static void tc_dma_unlink_intr_urb(struct urb *urb) {
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
  volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
  volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
  volatile struct USB_EP_Desc *unlink_ep; /* The one we should remove from
                         the list. */
  int count = 0;
  volatile int timeout = 10000;
  int epid;

  /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the
     List". */
  ASSERT(urb_priv);
  ASSERT(urb_priv->intr_ep_pool_length > 0);
  epid = urb_priv->epid;

  /* First disable all Intr EPs belonging to epid for this URB */
  first_ep = &TxIntrEPList[0];
  curr_ep = first_ep;
  do {
    next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
    if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
      /* Disable EP */
      next_ep->command &= ~IO_MASK(USB_EP_command, enable);
    }
    curr_ep = phys_to_virt(curr_ep->next);
  } while (curr_ep != first_ep);


  /* Now unlink all EPs belonging to this epid from Descr list */
  first_ep = &TxIntrEPList[0];
  curr_ep = first_ep;
  do {
    next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
    if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
      /* This is the one we should unlink. */
      unlink_ep = next_ep;

      /* Actually unlink the EP from the DMA list. */
      curr_ep->next = unlink_ep->next;

      /* Wait until the DMA is no longer at this descriptor. */
      while((*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep)) &&
        (timeout-- > 0));

      count++;
    }
    curr_ep = phys_to_virt(curr_ep->next);
  } while (curr_ep != first_ep);

  /* Each interval slot should have held exactly one EP from the pool;
     warn if the count doesn't add up. */
  if(count != urb_priv->intr_ep_pool_length) {
    intr_warn("Unlinked %d of %d Intr EPs for URB:0x%x[%d]\n", count,
          urb_priv->intr_ep_pool_length, (unsigned int)urb,
          urb_priv->urb_num);
  } else {
    intr_dbg("Unlinked %d of %d interrupt EPs for URB:0x%x\n", count,
         urb_priv->intr_ep_pool_length, (unsigned int)urb);
  }
}
4045
/* Scan all Bulk EP descriptors for Out transfers the DMA has finished
   (descriptor disabled but with a non-zero sub pointer) and complete the
   active URB on each.  Called from both the Tx DMA interrupt path and the
   bulk timeout timer; @timer only selects the log severity.  In Bulk
   transfers are left for the Rx interrupt to complete. */
static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
                            int timer) {
  unsigned long flags;
  int epid;
  struct urb *urb;
  struct crisv10_urb_priv * urb_priv;
  __u32 epid_data;

  /* Protect TxEPList */
  local_irq_save(flags);

  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
    /* A finished EP descriptor is disabled and has a valid sub pointer */
    if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
    (TxBulkEPList[epid].sub != 0)) {

      /* Get the active URB for this epid */
      urb = activeUrbList[epid];
      /* Sanity checks */
      ASSERT(urb);
      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);
      
      /* Only handle finished out Bulk EPs here,
     and let RX interrupt take care of the rest */
      if(!epid_out_traffic(epid)) {
    continue;
      }

      /* NOTE(review): after the guard above, the "%s" below is always
     "Out"; the ternary is kept for symmetry with the log format. */
      if(timer) {
    tc_warn("Found finished %s Bulk epid:%d URB:0x%x[%d] from timeout\n",
        epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
        urb_priv->urb_num);
      } else {
    tc_dbg("Found finished %s Bulk epid:%d URB:0x%x[%d] from interrupt\n",
           epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
           urb_priv->urb_num);
      }

      if(urb_priv->urb_state == UNLINK) {
    /* This Bulk URB is requested to be unlinked, that means that the EP
       has been disabled and we might not have sent all data */
    tc_finish_urb(hcd, urb, urb->status);
    continue;
      }

      ASSERT(urb_priv->urb_state == STARTED);
      if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
    tc_err("Endpoint got disabled before reaching last sb\n");
      }
    
      epid_data = etrax_epid_get(epid);
      if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
      IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
    /* This means that the endpoint has no error, is disabled
       and had inserted traffic, i.e. transfer successfully completed. */
    tc_finish_urb(hcd, urb, 0);
      } else {
    /* Shouldn't happen. We expect errors to be caught by epid
       attention. */
    tc_err("Found disabled bulk EP desc (epid:%d error:%d)\n",
           epid, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
      }
    } else {
      /* EP still enabled or has no sub list attached: nothing finished
     here (despite what the message says, this branch is reached for
     any non-finished epid, not only In Bulk ones). */
      tc_dbg("Ignoring In Bulk epid:%d, let RX interrupt handle it\n", epid);
    }
  }

  local_irq_restore(flags);
}
4116
/* Scan all Ctrl EP descriptors for finished transfers (descriptor disabled
   with a non-zero sub pointer) and complete or flag the active URB.  An In
   Ctrl URB is only completed here if its Rx interrupt has already run
   (ctrl_rx_done); otherwise ctrl_zout_done is set and the Rx interrupt
   finishes it.  Called from the Tx DMA interrupt path. */
static void check_finished_ctrl_tx_epids(struct usb_hcd *hcd) {
  unsigned long flags;
  int epid;
  struct urb *urb;
  struct crisv10_urb_priv * urb_priv;
  __u32 epid_data;

  /* Protect TxEPList */
  local_irq_save(flags);

  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
    if(epid == DUMMY_EPID)
      continue;

    /* A finished EP descriptor is disabled and has a valid sub pointer */
    if (!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
    (TxCtrlEPList[epid].sub != 0)) {
      
      /* Get the active URB for this epid */
      urb = activeUrbList[epid];

      if(urb == NULL) {
    tc_warn("Found finished Ctrl epid:%d with no active URB\n", epid);
    continue;
      }
      
      /* Sanity checks */
      ASSERT(usb_pipein(urb->pipe));
      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);
      if (phys_to_virt(TxCtrlEPList[epid].sub) != urb_priv->last_sb) {
    tc_err("Endpoint got disabled before reaching last sb\n");
      }

      epid_data = etrax_epid_get(epid);
      if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
      IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
    /* This means that the endpoint has no error, is disabled
       and had inserted traffic, i.e. transfer successfully completed. */

    /* Check if RX-interrupt for In Ctrl has been processed before
       finishing the URB */
    if(urb_priv->ctrl_rx_done) {
      tc_dbg("Finishing In Ctrl URB:0x%x[%d] in tx_interrupt\n",
         (unsigned int)urb, urb_priv->urb_num);
      tc_finish_urb(hcd, urb, 0);
    } else {
      /* If we get zout descriptor interrupt before RX was done for a
         In Ctrl transfer, then we flag that and it will be finished
         in the RX-Interrupt */
      urb_priv->ctrl_zout_done = 1;
      tc_dbg("Got zout descr interrupt before RX interrupt\n");
    }
      } else {
    /* Shouldn't happen. We expect errors to be caught by epid
       attention. */
    tc_err("Found disabled Ctrl EP desc (epid:%d URB:0x%x[%d]) error_code:%d\n", epid, (unsigned int)urb, urb_priv->urb_num, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
    __dump_ep_desc(&(TxCtrlEPList[epid]));
    __dump_ept_data(epid);
      }
    }
  }
  local_irq_restore(flags);
}
4181
4182/* This function goes through all epids that are setup for Out Isoc transfers
4183   and marks (isoc_out_done) all queued URBs that the DMA has finished
4184   transfer for.
4185   No URB completetion is done here to make interrupt routine return quickly.
4186   URBs are completed later with help of complete_isoc_bottom_half() that
4187   becomes schedules when this functions is finished. */
static void check_finished_isoc_tx_epids(void) {
  unsigned long flags;
  int epid;
  struct urb *urb;
  struct crisv10_urb_priv * urb_priv;
  struct USB_SB_Desc* sb_desc;
  int epid_done;

  /* Protect TxIsocEPList */
  local_irq_save(flags);

  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
    if (TxIsocEPList[epid].sub == 0 || epid == INVALID_EPID ||
    !epid_out_traffic(epid)) {
      /* Nothing here to see. */
      continue;
    }
    ASSERT(epid_inuse(epid));
    ASSERT(epid_isoc(epid));

    sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
    /* Find the last descriptor of the currently active URB for this ep.
       This is the first descriptor in the sub list marked for a descriptor
       interrupt. */
    while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
      sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0;
    }
    ASSERT(sb_desc);

    isoc_dbg("Descr IRQ checking epid:%d sub:0x%x intr:0x%x\n",
         epid, (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
         (unsigned int)sb_desc);

    urb = activeUrbList[epid];
    if(urb == NULL) {
      isoc_err("Isoc Descr irq on epid:%d with no active URB\n", epid);
      continue;
    }

    /* Walk the URB chain (active URB first, then the queue) and mark each
       URB that precedes the interrupting SB as transmitted. */
    epid_done = 0;
    while(urb && !epid_done) {
      /* Sanity check. */
      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
      ASSERT(usb_pipeout(urb->pipe));
      
      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);
      ASSERT(urb_priv->urb_state == STARTED ||
         urb_priv->urb_state == UNLINK);
      
      if (sb_desc != urb_priv->last_sb) {
    /* This urb has been sent. */
    urb_priv->isoc_out_done = 1;

      } else { /* Found URB that has last_sb as the interrupt reason */

    /* Check if EP has been disabled, meaning that all transfers are done*/
    if(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
      ASSERT((sb_desc->command & IO_MASK(USB_SB_command, eol)) ==
         IO_STATE(USB_SB_command, eol, yes));
      ASSERT(sb_desc->next == 0);
      urb_priv->isoc_out_done = 1;
    } else {
      isoc_dbg("Skipping URB:0x%x[%d] because EP not disabled yet\n",
           (unsigned int)urb, urb_priv->urb_num);
    }
    /* Stop looking any further in queue */
    epid_done = 1;
      }

      if (!epid_done) {
    if(urb == activeUrbList[epid]) {
      urb = urb_list_first(epid);
    } else {
      urb = urb_list_next(urb, epid);
    }
      }
    } /* END: while(urb && !epid_done) */
  }

  local_irq_restore(flags);
}
4270
4271
4272/* This is where the Out Isoc URBs are realy completed. This function is
4273   scheduled from tc_dma_tx_interrupt() when one or more Out Isoc transfers
4274   are done. This functions completes all URBs earlier marked with
4275   isoc_out_done by fast interrupt routine check_finished_isoc_tx_epids() */
4276
static void complete_isoc_bottom_half(struct work_struct* work) {
  struct crisv10_isoc_complete_data *comp_data;
  struct usb_iso_packet_descriptor *packet;
  struct crisv10_urb_priv * urb_priv;
  unsigned long flags;
  struct urb* urb;
  int epid_done;
  int epid;
  int i;

  /* Recover the completion context (allocated by the interrupt path) from
     the embedded work_struct. */
  comp_data = container_of(work, struct crisv10_isoc_complete_data, usb_bh);

  local_irq_save(flags);

  for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
    if(!epid_inuse(epid) || !epid_isoc(epid) || !epid_out_traffic(epid) || epid == DUMMY_EPID) {
      /* Only check valid Out Isoc epids */
      continue;
    }

    isoc_dbg("Isoc bottom-half checking epid:%d, sub:0x%x\n", epid,
         (unsigned int)phys_to_virt(TxIsocEPList[epid].sub));

    /* The descriptor interrupt handler has marked all transmitted Out Isoc
       URBs with isoc_out_done. Now we traverse all epids and for all that
       have out Isoc traffic we traverse its URB list and complete the
       transmitted URBs. */
    epid_done = 0;
    while (!epid_done) {

      /* Get the active urb (if any).  tc_finish_urb() below advances
     activeUrbList, so re-reading it each iteration walks the queue. */
      urb = activeUrbList[epid];
      if (urb == 0) {
    isoc_dbg("No active URB on epid:%d anymore\n", epid);
    epid_done = 1;
    continue;
      }

      /* Sanity check. */
      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
      ASSERT(usb_pipeout(urb->pipe));

      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);

      if (!(urb_priv->isoc_out_done)) {
    /* We have reached URB that isn't flaged done yet, stop traversing. */
    isoc_dbg("Stoped traversing Out Isoc URBs on epid:%d"
         " before not yet flaged URB:0x%x[%d]\n",
         epid, (unsigned int)urb, urb_priv->urb_num);
    epid_done = 1;
    continue;
      }

      /* This urb has been sent. */
      isoc_dbg("Found URB:0x%x[%d] that is flaged isoc_out_done\n",
           (unsigned int)urb, urb_priv->urb_num);

      /* Set ok on transfered packets for this URB and finish it */
      for (i = 0; i < urb->number_of_packets; i++) {
    packet = &urb->iso_frame_desc[i];
    packet->status = 0;
    packet->actual_length = packet->length;
      }
      urb_priv->isoc_packet_counter = urb->number_of_packets;
      tc_finish_urb(comp_data->hcd, urb, 0);

    } /* END: while(!epid_done) */
  } /* END: for(epid...) */

  local_irq_restore(flags);
  /* The context was allocated per work item by the interrupt path; it is
     owned (and freed) here. */
  kmem_cache_free(isoc_compl_cache, comp_data);
}
4350
4351
/* Complete finished Out Intr transfers; called from tc_dma_tx_interrupt().
   For every in-use Out Intr epid that has an active URB, scan the EP
   descriptors between the first and second sof-EP in TxIntrEPList (the
   region where Out Intr EPs are inserted). When the epid's first interrupt
   EP is found it is disabled -- so it is not processed again -- and the
   active URB is finished with status OK. */
static void check_finished_intr_tx_epids(struct usb_hcd *hcd) {
  unsigned long flags;
  int epid;
  struct urb *urb;
  struct crisv10_urb_priv * urb_priv;
  volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
  volatile struct USB_EP_Desc *next_ep; /* The EP after current. */

  /* Protect TxIntrEPList from concurrent interrupt-context access. */
  local_irq_save(flags);

  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
    if(!epid_inuse(epid) || !epid_intr(epid) || !epid_out_traffic(epid)) {
      /* Nothing to see on this epid. Only check valid Out Intr epids */
      continue;
    }

    urb = activeUrbList[epid];
    if(urb == 0) {
      intr_warn("Found Out Intr epid:%d with no active URB\n", epid);
      continue;
    }

    /* Sanity check. */
    ASSERT(usb_pipetype(urb->pipe) == PIPE_INTERRUPT);
    ASSERT(usb_pipeout(urb->pipe));

    urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
    ASSERT(urb_priv);

    /* Go through EPs between first and second sof-EP. It's here Out Intr EPs
       are inserted. */
    curr_ep = &TxIntrEPList[0];
    do {
      next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
      if(next_ep == urb_priv->intr_ep_pool[0]) {
        /* We found the Out Intr EP for this epid */

        /* Disable it so it doesn't get processed again */
        next_ep->command &= ~IO_MASK(USB_EP_command, enable);

        /* Finish the active Out Intr URB with status OK */
        tc_finish_urb(hcd, urb, 0);
      }
      curr_ep = phys_to_virt(curr_ep->next);
    } while (curr_ep != &TxIntrEPList[1]);

  }
  local_irq_restore(flags);
}
4402
4403/* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware intr) */
/* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware
   intr). Each DMA8 subchannel carries one transfer type:
     sub0 = bulk, sub1 = ctrl, sub2 = intr, sub3 = isoc.
   Acknowledges each pending subchannel interrupt and dispatches to the
   matching completion routine; Out Isoc completion is deferred to a
   work-queue bottom half. Always returns IRQ_HANDLED. */
static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc) {
  struct usb_hcd *hcd = (struct usb_hcd*)vhc;
  ASSERT(hcd);

  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
    /* Clear this interrupt */
    *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);
    restart_dma8_sub0();
  }

  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
    /* Clear this interrupt */
    *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);
    check_finished_ctrl_tx_epids(hcd);
  }

  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
    /* Clear this interrupt */
    *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
    check_finished_intr_tx_epids(hcd);
  }

  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
    struct crisv10_isoc_complete_data* comp_data;

    /* Flag done Out Isoc for later completion */
    check_finished_isoc_tx_epids();

    /* Clear this interrupt */
    *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
    /* Schedule bottom half of Out Isoc completion function. This function
       finishes the URBs marked with isoc_out_done */
    comp_data = (struct crisv10_isoc_complete_data*)
      kmem_cache_alloc(isoc_compl_cache, GFP_ATOMIC);
    /* NOTE(review): GFP_ATOMIC allocation can fail; ASSERT is the only
       guard here and a NULL would be dereferenced below -- consider
       handling the failure explicitly. */
    ASSERT(comp_data != NULL);
    comp_data->hcd = hcd;  /* freed by complete_isoc_bottom_half() */

    INIT_WORK(&comp_data->usb_bh, complete_isoc_bottom_half);
    schedule_work(&comp_data->usb_bh);
  }

  return IRQ_HANDLED;
}
4447
4448/* Interrupt handler for DMA9/IRQ25 (called from hardware intr) */
/* Interrupt handler for DMA9/IRQ25 (called from hardware intr).
   Drains the rx descriptor ring: for every descriptor with eop set, the
   received data is copied into the matching epid's active URB and the URB is
   finished when the transfer is complete. Each processed descriptor is then
   recycled to the tail of the ring and the DMA channel restarted.
   Runs with local interrupts disabled because URB completion callbacks are
   invoked from here. Always returns IRQ_HANDLED. */
static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc) {
  unsigned long flags;
  struct urb *urb;
  struct usb_hcd *hcd = (struct usb_hcd*)vhc;
  struct crisv10_urb_priv *urb_priv;
  int epid = 0;
  int real_error;

  ASSERT(hcd);

  /* Clear this interrupt. */
  *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);

  /* The reason we disable interrupts here is that we call the driver's
     callback functions. */
  local_irq_save(flags);

  /* Note that this while loop assumes that all packets span only
     one rx descriptor. */
  while(myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {
    /* The descriptor status tells which epid the packet belongs to. */
    epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
    /* Get the active URB for this epid */
    urb = activeUrbList[epid];

    ASSERT(epid_inuse(epid));
    if (!urb) {
      dma_err("No urb for epid %d in rx interrupt\n", epid);
      goto skip_out;
    }

    /* Check if any errors on epid */
    real_error = 0;
    if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
      __u32 r_usb_ept_data;

      if (usb_pipeisoc(urb->pipe)) {
        r_usb_ept_data = etrax_epid_iso_get(epid);
        if((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
           (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
           (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
          /* Not an error, just a failure to receive an expected iso
             in packet in this frame. This is not documented
             in the designers reference. Continue processing.
          */
        } else real_error = 1;
      } else real_error = 1;
    }

    if(real_error) {
      /* Dump diagnostics and drop this descriptor without touching the
         URB; completion is left to the epid-attention path. */
      dma_err("Error in RX descr on epid:%d for URB 0x%x",
          epid, (unsigned int)urb);
      dump_ept_data(epid);
      dump_in_desc(myNextRxDesc);
      goto skip_out;
    }

    urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
    ASSERT(urb_priv);
    ASSERT(urb_priv->urb_state == STARTED ||
       urb_priv->urb_state == UNLINK);

    if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
        (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
        (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {

      /* We get nodata for empty data transactions, and the rx descriptor's
         hw_len field is not valid in that case. No data to copy in other
         words. */
      if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
        /* No data to copy */
      } else {
        /*
        dma_dbg("Processing RX for URB:0x%x epid:%d (data:%d ofs:%d)\n",
            (unsigned int)urb, epid, myNextRxDesc->hw_len,
            urb_priv->rx_offset);
        */
        /* Only copy data if URB isn't flagged to be unlinked */
        if(urb_priv->urb_state != UNLINK) {
          /* Make sure the data fits in the buffer. */
          if(urb_priv->rx_offset + myNextRxDesc->hw_len
             <= urb->transfer_buffer_length) {

            /* Copy the data to URBs buffer */
            memcpy(urb->transfer_buffer + urb_priv->rx_offset,
               phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
            urb_priv->rx_offset += myNextRxDesc->hw_len;
          } else {
            /* Signal overflow when returning URB */
            urb->status = -EOVERFLOW;
            tc_finish_urb_later(hcd, urb, urb->status);
          }
        }
      }

      /* Check if it was the last packet in the transfer */
      if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
        /* Special handling for In Ctrl URBs. */
        if(usb_pipecontrol(urb->pipe) && usb_pipein(urb->pipe) &&
           !(urb_priv->ctrl_zout_done)) {
          /* Flag that RX part of Ctrl transfer is done. Because zout descr
             interrupt hasn't happened yet the URB will be finished in the
             TX-Interrupt. */
          urb_priv->ctrl_rx_done = 1;
          tc_dbg("Not finishing In Ctrl URB:0x%x from rx_interrupt, waiting"
             " for zout\n", (unsigned int)urb);
        } else {
          tc_finish_urb(hcd, urb, 0);
        }
      }
    } else { /* ISOC RX */
      /*
      isoc_dbg("Processing RX for epid:%d (URB:0x%x) ISOC pipe\n",
           epid, (unsigned int)urb);
      */

      struct usb_iso_packet_descriptor *packet;

      if (urb_priv->urb_state == UNLINK) {
        isoc_warn("Ignoring Isoc Rx data for urb being unlinked.\n");
        goto skip_out;
      } else if (urb_priv->urb_state == NOT_STARTED) {
        isoc_err("What? Got Rx data for Isoc urb that isn't started?\n");
        goto skip_out;
      }

      packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
      ASSERT(packet);
      packet->status = 0;

      if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
        /* We get nodata for empty data transactions, and the rx descriptor's
           hw_len field is not valid in that case. We copy 0 bytes however to
           stay in synch. */
        packet->actual_length = 0;
      } else {
        packet->actual_length = myNextRxDesc->hw_len;
        /* Make sure the data fits in the buffer. */
        ASSERT(packet->actual_length <= packet->length);
        memcpy(urb->transfer_buffer + packet->offset,
               phys_to_virt(myNextRxDesc->buf), packet->actual_length);
        if(packet->actual_length > 0)
          isoc_dbg("Copied %d bytes, packet %d for URB:0x%x[%d]\n",
               packet->actual_length, urb_priv->isoc_packet_counter,
               (unsigned int)urb, urb_priv->urb_num);
      }

      /* Increment the packet counter. */
      urb_priv->isoc_packet_counter++;

      /* Note that we don't care about the eot field in the rx descriptor's
         status. It will always be set for isoc traffic. */
      if (urb->number_of_packets == urb_priv->isoc_packet_counter) {
        /* Complete the urb with status OK. */
        tc_finish_urb(hcd, urb, 0);
      }
    }

  skip_out:
    /* Recycle the descriptor: clear status, make it the new end-of-list,
       clear eol on the previous tail, advance the iterators, flush the
       cache so the DMA engine sees the update, then restart the channel. */
    myNextRxDesc->status = 0;
    myNextRxDesc->command |= IO_MASK(USB_IN_command, eol);
    myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
    myLastRxDesc = myNextRxDesc;
    myNextRxDesc = phys_to_virt(myNextRxDesc->next);
    flush_etrax_cache();
    *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, restart);
  }

  local_irq_restore(flags);

  return IRQ_HANDLED;
}
4620
/* Timer callback guarding against a lost bulk channel start.
   We might enable an EP descriptor behind the current DMA position when
   it's about to decide that there are no more bulk traffic and it should
   stop the bulk channel.
   Therefore we periodically check if the bulk channel is stopped and there
   is an enabled bulk EP descriptor, in which case we start the bulk
   channel. The 'dummy' timer argument is unused. */
static void tc_bulk_start_timer_func(unsigned long dummy) {

  /* A zero cmd field means the bulk DMA subchannel is stopped. */
  if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
    int epid;

    timer_dbg("bulk_start_timer: Bulk DMA channel not running.\n");

    for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
      if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
        timer_warn("Found enabled EP for epid %d, starting bulk channel.\n",
               epid);
        restart_dma8_sub0();

        /* Restart the bulk eot timer since we just started the bulk channel.*/
        mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);

        /* No need to search any further. */
        break;
      }
    }
  } else {
    timer_dbg("bulk_start_timer: Bulk DMA channel running.\n");
  }
}
4651
/* Fallback for a missed bulk eot: a race condition in the top half can make
   us lose a bulk eot interrupt. When this timer fires we "simulate" one by
   forcing a check of the finished bulk tx epids, hopefully correcting the
   situation. The timer data holds the usb_hcd pointer. */
static void tc_bulk_eot_timer_func(unsigned long dummy) {
  struct usb_hcd *host = (struct usb_hcd *)dummy;

  ASSERT(host);
  timer_dbg("bulk_eot_timer timed out.\n");
  /* Behave exactly as if a bulk eot interrupt had arrived. */
  check_finished_bulk_tx_epids(host, 1);
}
4661
4662
4663/*************************************************************/
4664/*************************************************************/
4665/* Device driver block */
4666/*************************************************************/
4667/*************************************************************/
4668
4669/* Forward declarations for device driver functions */
/* Forward declarations for device driver functions */
static int devdrv_hcd_probe(struct device *);
static int devdrv_hcd_remove(struct device *);
#ifdef CONFIG_PM
static int devdrv_hcd_suspend(struct device *, u32, u32);
static int devdrv_hcd_resume(struct device *, u32);
#endif /* CONFIG_PM */

/* The platform device, registered in module_hcd_init(). */
static struct platform_device *devdrv_hc_platform_device;

/* Device driver interface, bound on the platform bus. The .name must match
   the name used for platform_device_register_simple() in module_hcd_init()
   for probe to be called. */
static struct device_driver devdrv_hc_device_driver = {
  .name = (char *) hc_name,
  .bus = &platform_bus_type,

  .probe = devdrv_hcd_probe,
  .remove = devdrv_hcd_remove,

#ifdef CONFIG_PM
  .suspend = devdrv_hcd_suspend,
  .resume = devdrv_hcd_resume,
#endif /* CONFIG_PM */
};
4693
4694/* initialize the host controller and driver */
4695static int __init_or_module devdrv_hcd_probe(struct device *dev)
4696{
4697  struct usb_hcd *hcd;
4698  struct crisv10_hcd *crisv10_hcd;
4699  int retval;
4700
4701  /* Check DMA burst length */
4702  if(IO_EXTRACT(R_BUS_CONFIG, dma_burst, *R_BUS_CONFIG) !=
4703     IO_STATE(R_BUS_CONFIG, dma_burst, burst32)) {
4704    devdrv_err("Invalid DMA burst length in Etrax 100LX,"
4705           " needs to be 32\n");
4706    return -EPERM;
4707  }
4708  
4709  hcd = usb_create_hcd(&crisv10_hc_driver, dev, dev_name(dev));
4710  if (!hcd)
4711    return -ENOMEM;
4712
4713  crisv10_hcd = hcd_to_crisv10_hcd(hcd);
4714  spin_lock_init(&crisv10_hcd->lock);
4715  crisv10_hcd->num_ports = num_ports();
4716  crisv10_hcd->running = 0;
4717
4718  dev_set_drvdata(dev, crisv10_hcd);
4719
4720  devdrv_dbg("ETRAX USB IRQs HC:%d RX:%d TX:%d\n", ETRAX_USB_HC_IRQ,
4721      ETRAX_USB_RX_IRQ, ETRAX_USB_TX_IRQ);
4722
4723  /* Print out chip version read from registers */
4724  int rev_maj = *R_USB_REVISION & IO_MASK(R_USB_REVISION, major);
4725  int rev_min = *R_USB_REVISION & IO_MASK(R_USB_REVISION, minor);
4726  if(rev_min == 0) {
4727    devdrv_info("Etrax 100LX USB Revision %d v1,2\n", rev_maj);
4728  } else {
4729    devdrv_info("Etrax 100LX USB Revision %d v%d\n", rev_maj, rev_min);
4730  }
4731
4732  devdrv_info("Bulk timer interval, start:%d eot:%d\n",
4733          BULK_START_TIMER_INTERVAL,
4734          BULK_EOT_TIMER_INTERVAL);
4735
4736
4737  /* Init root hub data structures */
4738  if(rh_init()) {
4739    devdrv_err("Failed init data for Root Hub\n");
4740    retval = -ENOMEM;
4741  }
4742
4743  if(port_in_use(0)) {
4744    if (cris_request_io_interface(if_usb_1, "ETRAX100LX USB-HCD")) {
4745      printk(KERN_CRIT "usb-host: request IO interface usb1 failed");
4746      retval = -EBUSY;
4747      goto out;
4748    }
4749    devdrv_info("Claimed interface for USB physical port 1\n");
4750  }
4751  if(port_in_use(1)) {
4752    if (cris_request_io_interface(if_usb_2, "ETRAX100LX USB-HCD")) {
4753      /* Free first interface if second failed to be claimed */
4754      if(port_in_use(0)) {
4755    cris_free_io_interface(if_usb_1);
4756      }
4757      printk(KERN_CRIT "usb-host: request IO interface usb2 failed");
4758      retval = -EBUSY;
4759      goto out;
4760    }
4761    devdrv_info("Claimed interface for USB physical port 2\n");
4762  }
4763  
4764  /* Init transfer controller structs and locks */
4765  if((retval = tc_init(hcd)) != 0) {
4766    goto out;
4767  }
4768
4769  /* Attach interrupt functions for DMA and init DMA controller */
4770  if((retval = tc_dma_init(hcd)) != 0) {
4771    goto out;
4772  }
4773
4774  /* Attach the top IRQ handler for USB controller interrupts */
4775  if (request_irq(ETRAX_USB_HC_IRQ, crisv10_hcd_top_irq, 0,
4776          "ETRAX 100LX built-in USB (HC)", hcd)) {
4777    err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
4778    retval = -EBUSY;
4779    goto out;
4780  }
4781
4782  /* iso_eof is only enabled when isoc traffic is running. */
4783  *R_USB_IRQ_MASK_SET =
4784    /* IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) | */
4785    IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
4786    IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
4787    IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
4788    IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
4789
4790
4791  crisv10_ready_wait();
4792  /* Reset the USB interface. */
4793  *R_USB_COMMAND =
4794    IO_STATE(R_USB_COMMAND, port_sel, nop) |
4795    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4796    IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
4797
4798  /* Designer's Reference, p. 8 - 10 says we should Initate R_USB_FM_PSTART to
4799     0x2A30 (10800), to guarantee that control traffic gets 10% of the
4800     bandwidth, and periodic transfer may allocate the rest (90%).
4801     This doesn't work though.
4802     The value 11960 is chosen to be just after the SOF token, with a couple
4803     of bit times extra for possible bit stuffing. */
4804  *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
4805
4806  crisv10_ready_wait();
4807  /* Configure the USB interface as a host controller. */
4808  *R_USB_COMMAND =
4809    IO_STATE(R_USB_COMMAND, port_sel, nop) |
4810    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4811    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
4812
4813
4814  /* Check so controller not busy before enabling ports */
4815  crisv10_ready_wait();
4816
4817  /* Enable selected USB ports */
4818  if(port_in_use(0)) {
4819    *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
4820  } else {
4821    *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
4822  }
4823  if(port_in_use(1)) {
4824    *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
4825  } else {
4826    *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
4827  }
4828
4829  crisv10_ready_wait();
4830  /* Start processing of USB traffic. */
4831  *R_USB_COMMAND =
4832    IO_STATE(R_USB_COMMAND, port_sel, nop) |
4833    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4834    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
4835
4836  /* Do not continue probing initialization before USB interface is done */
4837  crisv10_ready_wait();
4838
4839  /* Register our Host Controller to USB Core
4840   * Finish the remaining parts of generic HCD initialization: allocate the
4841   * buffers of consistent memory, register the bus
4842   * and call the driver's reset() and start() routines. */
4843  retval = usb_add_hcd(hcd, ETRAX_USB_HC_IRQ, IRQF_DISABLED);
4844  if (retval != 0) {
4845    devdrv_err("Failed registering HCD driver\n");
4846    goto out;
4847  }
4848
4849  return 0;
4850
4851 out:
4852  devdrv_hcd_remove(dev);
4853  return retval;
4854}
4855
4856
4857/* cleanup after the host controller and driver */
4858static int __init_or_module devdrv_hcd_remove(struct device *dev)
4859{
4860  struct crisv10_hcd *crisv10_hcd = dev_get_drvdata(dev);
4861  struct usb_hcd *hcd;
4862
4863  if (!crisv10_hcd)
4864    return 0;
4865  hcd = crisv10_hcd_to_hcd(crisv10_hcd);
4866
4867
4868  /* Stop USB Controller in Etrax 100LX */
4869  crisv10_hcd_reset(hcd);
4870
4871  usb_remove_hcd(hcd);
4872  devdrv_dbg("Removed HCD from USB Core\n");
4873
4874  /* Free USB Controller IRQ */
4875  free_irq(ETRAX_USB_HC_IRQ, NULL);
4876
4877  /* Free resources */
4878  tc_dma_destroy();
4879  tc_destroy();
4880
4881
4882  if(port_in_use(0)) {
4883    cris_free_io_interface(if_usb_1);
4884  }
4885  if(port_in_use(1)) {
4886    cris_free_io_interface(if_usb_2);
4887  }
4888
4889  devdrv_dbg("Freed all claimed resources\n");
4890
4891  return 0;
4892}
4893
4894
4895#ifdef CONFIG_PM
4896
4897static int devdrv_hcd_suspend(struct usb_hcd *hcd, u32 state, u32 level)
4898{
4899  return 0; /* no-op for now */
4900}
4901
4902static int devdrv_hcd_resume(struct usb_hcd *hcd, u32 level)
4903{
4904  return 0; /* no-op for now */
4905}
4906
4907#endif /* CONFIG_PM */
4908
4909
4910/*************************************************************/
4911/*************************************************************/
4912/* Module block */
4913/*************************************************************/
4914/*************************************************************/
4915 
4916/* register driver */
4917static int __init module_hcd_init(void)
4918{
4919  
4920  if (usb_disabled())
4921    return -ENODEV;
4922
4923  /* Here we select enabled ports by following defines created from
4924     menuconfig */
4925#ifndef CONFIG_ETRAX_USB_HOST_PORT1
4926  ports &= ~(1<<0);
4927#endif
4928#ifndef CONFIG_ETRAX_USB_HOST_PORT2
4929  ports &= ~(1<<1);
4930#endif
4931
4932  printk(KERN_INFO "%s version "VERSION" "COPYRIGHT"\n", product_desc);
4933
4934  devdrv_hc_platform_device =
4935    platform_device_register_simple((char *) hc_name, 0, NULL, 0);
4936
4937  if (IS_ERR(devdrv_hc_platform_device))
4938    return PTR_ERR(devdrv_hc_platform_device);
4939  return driver_register(&devdrv_hc_device_driver);
4940  /*
4941   * Note that we do not set the DMA mask for the device,
4942   * i.e. we pretend that we will use PIO, since no specific
4943   * allocation routines are needed for DMA buffers. This will
4944   * cause the HCD buffer allocation routines to fall back to
4945   * kmalloc().
4946   */
4947}
4948
4949/* unregister driver */
4950static void __exit module_hcd_exit(void)
4951{
4952  driver_unregister(&devdrv_hc_device_driver);
4953}
4954
4955
/* Module entry/exit hooks: register and unregister the platform driver. */
module_init(module_hcd_init);
module_exit(module_hcd_exit);
4959
4960
