Root/target/linux/lantiq/files/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c

/* ==========================================================================
 * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd_intr.c $
 * $Revision: 1.1.1.1 $
 * $Date: 2009-04-17 06:15:34 $
 * $Change: 553126 $
 *
 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
 * otherwise expressly agreed to in writing between Synopsys and you.
 *
 * The Software IS NOT an item of Licensed Software or Licensed Product under
 * any End User Software License Agreement or Agreement for Licensed Product
 * with Synopsys or any supplement thereto. You are permitted to use and
 * redistribute this Software in source and binary forms, with or without
 * modification, provided that redistributions of source code must retain this
 * notice. You may not view, use, disclose, copy or distribute this file or
 * any information contained herein except pursuant to this license grant from
 * Synopsys. If you do not agree with this notice, including the disclaimer
 * below, then you are not authorized to use the Software.
 *
 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 * ========================================================================== */
#ifndef DWC_DEVICE_ONLY

#include "dwc_otg_driver.h"
#include "dwc_otg_hcd.h"
#include "dwc_otg_regs.h"
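
/*
 * Tuning knobs used by release_channel() below: when deferral_on is non-zero,
 * split transactions that end in a NAK or NYET are deferred via
 * dwc_otg_hcd_qh_deferr() using the corresponding delay instead of being
 * retried immediately. When erratum_usb09_patched is non-zero, no new
 * transactions are queued from release_channel() after a host channel is
 * freed.
 */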
const int erratum_usb09_patched = 0;
const int deferral_on = 1;
const int nak_deferral_delay = 8;
const int nyet_deferral_delay = 1;

/** @file
 * This file contains the implementation of the HCD Interrupt handlers.
 */
46
47/** This function handles interrupts for the HCD. */
48int32_t dwc_otg_hcd_handle_intr (dwc_otg_hcd_t *_dwc_otg_hcd)
49{
50    int retval = 0;
51
52        dwc_otg_core_if_t *core_if = _dwc_otg_hcd->core_if;
53        gintsts_data_t gintsts;
54#ifdef DEBUG
55        dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
56#endif
57
58    /* Check if HOST Mode */
59        if (dwc_otg_is_host_mode(core_if)) {
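        /*
         * dwc_otg_read_core_intr() returns GINTSTS masked by GINTMSK, so only
         * enabled interrupt sources are seen and handled below.
         */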
60        gintsts.d32 = dwc_otg_read_core_intr(core_if);
61        if (!gintsts.d32) {
62            return 0;
63        }
64
65#ifdef DEBUG
66        /* Don't print debug message in the interrupt handler on SOF */
67# ifndef DEBUG_SOF
68        if (gintsts.d32 != DWC_SOF_INTR_MASK)
69# endif
70            DWC_DEBUGPL (DBG_HCD, "\n");
71#endif
72
73#ifdef DEBUG
74# ifndef DEBUG_SOF
75        if (gintsts.d32 != DWC_SOF_INTR_MASK)
76# endif
77            DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", gintsts.d32);
78#endif
79
80                if (gintsts.b.sofintr) {
81            retval |= dwc_otg_hcd_handle_sof_intr (_dwc_otg_hcd);
82                }
83                if (gintsts.b.rxstsqlvl) {
84            retval |= dwc_otg_hcd_handle_rx_status_q_level_intr (_dwc_otg_hcd);
85                }
86                if (gintsts.b.nptxfempty) {
87            retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr (_dwc_otg_hcd);
88        }
89                if (gintsts.b.i2cintr) {
90            /** @todo Implement i2cintr handler. */
91                }
92        if (gintsts.b.portintr) {
93            retval |= dwc_otg_hcd_handle_port_intr (_dwc_otg_hcd);
94        }
95        if (gintsts.b.hcintr) {
96            retval |= dwc_otg_hcd_handle_hc_intr (_dwc_otg_hcd);
97        }
98        if (gintsts.b.ptxfempty) {
99            retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr (_dwc_otg_hcd);
100        }
101#ifdef DEBUG
102# ifndef DEBUG_SOF
103        if (gintsts.d32 != DWC_SOF_INTR_MASK)
104# endif
105        {
106            DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Finished Servicing Interrupts\n");
107            DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n",
108                    dwc_read_reg32(&global_regs->gintsts));
109            DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n",
110                    dwc_read_reg32(&global_regs->gintmsk));
111        }
112#endif
113
114#ifdef DEBUG
115# ifndef DEBUG_SOF
116    if (gintsts.d32 != DWC_SOF_INTR_MASK)
117# endif
118        DWC_DEBUGPL (DBG_HCD, "\n");
119#endif
120
121    }
122
123    return retval;
124}
125
126#ifdef DWC_TRACK_MISSED_SOFS
127#warning Compiling code to track missed SOFs
128#define FRAME_NUM_ARRAY_SIZE 1000
129/**
130 * This function is for debug only.
131 */
132static inline void track_missed_sofs(uint16_t _curr_frame_number) {
133    static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE];
134    static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
135    static int frame_num_idx = 0;
136    static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM;
137    static int dumped_frame_num_array = 0;
138    
139    if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
140        if ((((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) != _curr_frame_number)) {
141            frame_num_array[frame_num_idx] = _curr_frame_number;
142            last_frame_num_array[frame_num_idx++] = last_frame_num;
143        }
144    } else if (!dumped_frame_num_array) {
145        int i;
146        printk(KERN_EMERG USB_DWC "Frame Last Frame\n");
147        printk(KERN_EMERG USB_DWC "----- ----------\n");
148        for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
149            printk(KERN_EMERG USB_DWC "0x%04x 0x%04x\n",
150                   frame_num_array[i], last_frame_num_array[i]);
151        }
152        dumped_frame_num_array = 1;
153    }
154    last_frame_num = _curr_frame_number;
155}
156#endif
157
158/**
159 * Handles the start-of-frame interrupt in host mode. Non-periodic
160 * transactions may be queued to the DWC_otg controller for the current
161 * (micro)frame. Periodic transactions may be queued to the controller for the
162 * next (micro)frame.
163 */
164int32_t dwc_otg_hcd_handle_sof_intr (dwc_otg_hcd_t *_hcd)
165{
166    hfnum_data_t hfnum;
167    struct list_head *qh_entry;
168    dwc_otg_qh_t *qh;
169    dwc_otg_transaction_type_e tr_type;
170    gintsts_data_t gintsts = {.d32 = 0};
171
172    hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum);
173
174#ifdef DEBUG_SOF
175    DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
176#endif
177
178    _hcd->frame_number = hfnum.b.frnum;
179
180#ifdef DEBUG
181    _hcd->frrem_accum += hfnum.b.frrem;
182    _hcd->frrem_samples++;
183#endif
184
185#ifdef DWC_TRACK_MISSED_SOFS
186    track_missed_sofs(_hcd->frame_number);
187#endif
188
189    /* Determine whether any periodic QHs should be executed. */
190    qh_entry = _hcd->periodic_sched_inactive.next;
191    while (qh_entry != &_hcd->periodic_sched_inactive) {
192        qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry);
193        qh_entry = qh_entry->next;
194        if (dwc_frame_num_le(qh->sched_frame, _hcd->frame_number)) {
195            /*
196             * Move QH to the ready list to be executed next
197             * (micro)frame.
198             */
199            list_move(&qh->qh_list_entry, &_hcd->periodic_sched_ready);
200        }
201    }
202
203    tr_type = dwc_otg_hcd_select_transactions(_hcd);
204    if (tr_type != DWC_OTG_TRANSACTION_NONE) {
205        dwc_otg_hcd_queue_transactions(_hcd, tr_type);
206    }
207
208    /* Clear interrupt */
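    /* GINTSTS is write-1-to-clear; writing the sofintr bit acknowledges this SOF. */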
209    gintsts.b.sofintr = 1;
210    dwc_write_reg32(&_hcd->core_if->core_global_regs->gintsts, gintsts.d32);
211
212    return 1;
213}
214
215/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at
216 * least one packet in the Rx FIFO. The packets are moved from the FIFO to
217 * memory if the DWC_otg controller is operating in Slave mode. */
218int32_t dwc_otg_hcd_handle_rx_status_q_level_intr (dwc_otg_hcd_t *_dwc_otg_hcd)
219{
220    host_grxsts_data_t grxsts;
221    dwc_hc_t *hc = NULL;
222
223    DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");
224
225    grxsts.d32 = dwc_read_reg32(&_dwc_otg_hcd->core_if->core_global_regs->grxstsp);
226
227    hc = _dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum];
228
229    /* Packet Status */
230    DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum);
231    DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt);
232    DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid, hc->data_pid_start);
233    DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts);
234    
235    switch (grxsts.b.pktsts) {
236    case DWC_GRXSTS_PKTSTS_IN:
237        /* Read the data into the host buffer. */
238        if (grxsts.b.bcnt > 0) {
239            dwc_otg_read_packet(_dwc_otg_hcd->core_if,
240                        hc->xfer_buff,
241                        grxsts.b.bcnt);
242
243            /* Update the HC fields for the next packet received. */
244            hc->xfer_count += grxsts.b.bcnt;
245            hc->xfer_buff += grxsts.b.bcnt;
246        }
247        
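        /* Fall through: the remaining packet status values require no data handling here. */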
248    case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
249    case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
250    case DWC_GRXSTS_PKTSTS_CH_HALTED:
251        /* Handled in interrupt, just ignore data */
252        break;
253    default:
254        DWC_ERROR ("RX_STS_Q Interrupt: Unknown status %d\n", grxsts.b.pktsts);
255        break;
256    }
257    
258    return 1;
259}
260
261/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
262 * data packets may be written to the FIFO for OUT transfers. More requests
263 * may be written to the non-periodic request queue for IN transfers. This
264 * interrupt is enabled only in Slave mode. */
265int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd)
266{
267    DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n");
268    dwc_otg_hcd_queue_transactions(_dwc_otg_hcd,
269                       DWC_OTG_TRANSACTION_NON_PERIODIC);
270    return 1;
271}
272
273/** This interrupt occurs when the periodic Tx FIFO is half-empty. More data
274 * packets may be written to the FIFO for OUT transfers. More requests may be
275 * written to the periodic request queue for IN transfers. This interrupt is
276 * enabled only in Slave mode. */
277int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd)
278{
279    DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n");
280    dwc_otg_hcd_queue_transactions(_dwc_otg_hcd,
281                       DWC_OTG_TRANSACTION_PERIODIC);
282    return 1;
283}
284
285/** There are multiple conditions that can cause a port interrupt. This function
286 * determines which interrupt conditions have occurred and handles them
287 * appropriately. */
288int32_t dwc_otg_hcd_handle_port_intr (dwc_otg_hcd_t *_dwc_otg_hcd)
289{
290    int retval = 0;
291    hprt0_data_t hprt0;
292    hprt0_data_t hprt0_modify;
293
294    hprt0.d32 = dwc_read_reg32(_dwc_otg_hcd->core_if->host_if->hprt0);
295    hprt0_modify.d32 = dwc_read_reg32(_dwc_otg_hcd->core_if->host_if->hprt0);
296
297    /* Clear appropriate bits in HPRT0 to clear the interrupt bit in
298     * GINTSTS */
299
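    /*
     * The *det/*chng bits in HPRT0 are write-1-to-clear, and writing 1 to
     * prtena would disable the port, so start from a value with all of them
     * zeroed and set only the bits that are acknowledged below.
     */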
300    hprt0_modify.b.prtena = 0;
301    hprt0_modify.b.prtconndet = 0;
302    hprt0_modify.b.prtenchng = 0;
303    hprt0_modify.b.prtovrcurrchng = 0;
304
    /*
     * Port Connect Detected: set the connect status flags and acknowledge
     * the interrupt.
     */
307    if (hprt0.b.prtconndet) {
308        DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
309                "Port Connect Detected--\n", hprt0.d32);
310        _dwc_otg_hcd->flags.b.port_connect_status_change = 1;
311        _dwc_otg_hcd->flags.b.port_connect_status = 1;
312        hprt0_modify.b.prtconndet = 1;
313
        /* B-Device has connected; delete the connection timer. */
        del_timer(&_dwc_otg_hcd->conn_timer);
316
317        /* The Hub driver asserts a reset when it sees port connect
318         * status change flag */
319        retval |= 1;
320    }
321
    /*
     * Port Enable Changed: acknowledge the interrupt. If the port is now
     * disabled, set the internal port_enable_change flag.
     */
324    if (hprt0.b.prtenchng) {
325        DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
326                "Port Enable Changed--\n", hprt0.d32);
327        hprt0_modify.b.prtenchng = 1;
328        if (hprt0.b.prtena == 1) {
329            int do_reset = 0;
330            dwc_otg_core_params_t *params = _dwc_otg_hcd->core_if->core_params;
331            dwc_otg_core_global_regs_t *global_regs = _dwc_otg_hcd->core_if->core_global_regs;
332            dwc_otg_host_if_t *host_if = _dwc_otg_hcd->core_if->host_if;
333
            /* Check whether the PHY clock needs to be adjusted for
             * low-power operation, and adjust it if so. */
336            if (params->host_support_fs_ls_low_power)
337            {
338                gusbcfg_data_t usbcfg;
339
340                usbcfg.d32 = dwc_read_reg32 (&global_regs->gusbcfg);
341
342                if ((hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) ||
343                    (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED))
344                {
345                    /*
346                     * Low power
347                     */
348                    hcfg_data_t hcfg;
349                    if (usbcfg.b.phylpwrclksel == 0) {
350                        /* Set PHY low power clock select for FS/LS devices */
351                        usbcfg.b.phylpwrclksel = 1;
352                        dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
353                        do_reset = 1;
354                    }
355
356                    hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
357
358                    if ((hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) &&
359                        (params->host_ls_low_power_phy_clk ==
360                         DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ))
361                    {
362                        /* 6 MHZ */
363                        DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 6 MHz (Low Power)\n");
364                        if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) {
365                            hcfg.b.fslspclksel = DWC_HCFG_6_MHZ;
366                            dwc_write_reg32(&host_if->host_global_regs->hcfg,
367                                    hcfg.d32);
368                            do_reset = 1;
369                        }
370                    }
371                    else {
372                        /* 48 MHZ */
                        DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 48 MHz\n");
374                        if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) {
375                            hcfg.b.fslspclksel = DWC_HCFG_48_MHZ;
376                            dwc_write_reg32(&host_if->host_global_regs->hcfg,
377                                    hcfg.d32);
378                            do_reset = 1;
379                        }
380                    }
381                }
382                else {
383                    /*
384                     * Not low power
385                     */
386                    if (usbcfg.b.phylpwrclksel == 1) {
387                        usbcfg.b.phylpwrclksel = 0;
388                        dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
389                        do_reset = 1;
390                    }
391                }
392
393                if (do_reset) {
394                    tasklet_schedule(_dwc_otg_hcd->reset_tasklet);
395                }
396            }
397            
398            if (!do_reset) {
                /* Port has been enabled; set the reset change flag. */
400                _dwc_otg_hcd->flags.b.port_reset_change = 1;
401            }
402
403        } else {
404            _dwc_otg_hcd->flags.b.port_enable_change = 1;
405        }
406        retval |= 1;
407    }
408
    /* Overcurrent Change Interrupt */
410    if (hprt0.b.prtovrcurrchng) {
411        DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
412                "Port Overcurrent Changed--\n", hprt0.d32);
413        _dwc_otg_hcd->flags.b.port_over_current_change = 1;
414        hprt0_modify.b.prtovrcurrchng = 1;
415        retval |= 1;
416    }
417
418    /* Clear Port Interrupts */
419    dwc_write_reg32(_dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
420
421    return retval;
422}
423
424
425/** This interrupt indicates that one or more host channels has a pending
426 * interrupt. There are multiple conditions that can cause each host channel
427 * interrupt. This function determines which conditions have occurred for each
428 * host channel interrupt and handles them appropriately. */
429int32_t dwc_otg_hcd_handle_hc_intr (dwc_otg_hcd_t *_dwc_otg_hcd)
430{
431    int i;
432    int retval = 0;
433    haint_data_t haint;
434
435    /* Clear appropriate bits in HCINTn to clear the interrupt bit in
436     * GINTSTS */
437
438    haint.d32 = dwc_otg_read_host_all_channels_intr(_dwc_otg_hcd->core_if);
439
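    /* HAINT has one pending-interrupt bit per host channel; dispatch each set
     * bit to the per-channel handler. */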
440    for (i=0; i<_dwc_otg_hcd->core_if->core_params->host_channels; i++) {
441        if (haint.b2.chint & (1 << i)) {
442            retval |= dwc_otg_hcd_handle_hc_n_intr (_dwc_otg_hcd, i);
443        }
444    }
445
446    return retval;
447}
448
449/* Macro used to clear one channel interrupt */
450#define clear_hc_int(_hc_regs_,_intr_) \
451do { \
452    hcint_data_t hcint_clear = {.d32 = 0}; \
453    hcint_clear.b._intr_ = 1; \
454    dwc_write_reg32(&((_hc_regs_)->hcint), hcint_clear.d32); \
455} while (0)
456
457/*
458 * Macro used to disable one channel interrupt. Channel interrupts are
459 * disabled when the channel is halted or released by the interrupt handler.
460 * There is no need to handle further interrupts of that type until the
461 * channel is re-assigned. In fact, subsequent handling may cause crashes
462 * because the channel structures are cleaned up when the channel is released.
463 */
464#define disable_hc_int(_hc_regs_,_intr_) \
465do { \
466    hcintmsk_data_t hcintmsk = {.d32 = 0}; \
467    hcintmsk.b._intr_ = 1; \
468    dwc_modify_reg32(&((_hc_regs_)->hcintmsk), hcintmsk.d32, 0); \
469} while (0)
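
/*
 * Both macros take the bare interrupt bit-field name as the _intr_ argument,
 * e.g. clear_hc_int(_hc_regs,nyet) or disable_hc_int(_hc_regs,xfercompl);
 * the name is substituted directly into the .b._intr_ member access.
 */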
470
471/**
472 * Gets the actual length of a transfer after the transfer halts. _halt_status
473 * holds the reason for the halt.
474 *
475 * For IN transfers where _halt_status is DWC_OTG_HC_XFER_COMPLETE,
476 * *_short_read is set to 1 upon return if less than the requested
477 * number of bytes were transferred. Otherwise, *_short_read is set to 0 upon
478 * return. _short_read may also be NULL on entry, in which case it remains
479 * unchanged.
480 */
481static uint32_t get_actual_xfer_length(dwc_hc_t *_hc,
482                       dwc_otg_hc_regs_t *_hc_regs,
483                       dwc_otg_qtd_t *_qtd,
484                       dwc_otg_halt_status_e _halt_status,
485                       int *_short_read)
486{
487    hctsiz_data_t hctsiz;
488    uint32_t length;
489
490    if (_short_read != NULL) {
491        *_short_read = 0;
492    }
493    hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
494
495    if (_halt_status == DWC_OTG_HC_XFER_COMPLETE) {
496        if (_hc->ep_is_in) {
497            length = _hc->xfer_len - hctsiz.b.xfersize;
498            if (_short_read != NULL) {
499                *_short_read = (hctsiz.b.xfersize != 0);
500            }
501        } else if (_hc->qh->do_split) {
502            length = _qtd->ssplit_out_xfer_count;
503        } else {
504            length = _hc->xfer_len;
505        }
506    } else {
507        /*
508         * Must use the hctsiz.pktcnt field to determine how much data
509         * has been transferred. This field reflects the number of
510         * packets that have been transferred via the USB. This is
511         * always an integral number of packets if the transfer was
512         * halted before its normal completion. (Can't use the
513         * hctsiz.xfersize field because that reflects the number of
514         * bytes transferred via the AHB, not the USB).
515         */
516        length = (_hc->start_pkt_count - hctsiz.b.pktcnt) * _hc->max_packet;
517    }
518
519    return length;
520}
521
522/**
523 * Updates the state of the URB after a Transfer Complete interrupt on the
524 * host channel. Updates the actual_length field of the URB based on the
525 * number of bytes transferred via the host channel. Sets the URB status
526 * if the data transfer is finished.
527 *
528 * @return 1 if the data transfer specified by the URB is completely finished,
529 * 0 otherwise.
530 */
531static int update_urb_state_xfer_comp(dwc_hc_t *_hc,
532                      dwc_otg_hc_regs_t * _hc_regs, struct urb *_urb,
533                      dwc_otg_qtd_t * _qtd, int *status)
534{
535    int xfer_done = 0;
536    int short_read = 0;
537
538    _urb->actual_length += get_actual_xfer_length(_hc, _hc_regs, _qtd,
539                              DWC_OTG_HC_XFER_COMPLETE,
540                              &short_read);
541
542    if (short_read || (_urb->actual_length == _urb->transfer_buffer_length)) {
543        xfer_done = 1;
544        if (short_read && (_urb->transfer_flags & URB_SHORT_NOT_OK)) {
545            *status = -EREMOTEIO;
546        } else {
547            *status = 0;
548        }
549    }
550
551#ifdef DEBUG
552    {
553        hctsiz_data_t hctsiz;
554        hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
555        DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
556                __func__, (_hc->ep_is_in ? "IN" : "OUT"), _hc->hc_num);
557        DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", _hc->xfer_len);
558        DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n", hctsiz.b.xfersize);
559        DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n",
560                _urb->transfer_buffer_length);
561        DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", _urb->actual_length);
562        DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n",
563                short_read, xfer_done);
564    }
565#endif
566
567    return xfer_done;
568}
569
570/*
571 * Save the starting data toggle for the next transfer. The data toggle is
572 * saved in the QH for non-control transfers and it's saved in the QTD for
573 * control transfers.
574 */
575static void save_data_toggle(dwc_hc_t *_hc,
576                 dwc_otg_hc_regs_t *_hc_regs,
577                 dwc_otg_qtd_t *_qtd)
578{
579    hctsiz_data_t hctsiz;
580    hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
581
582    if (_hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) {
583        dwc_otg_qh_t *qh = _hc->qh;
584        if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
585            qh->data_toggle = DWC_OTG_HC_PID_DATA0;
586        } else {
587            qh->data_toggle = DWC_OTG_HC_PID_DATA1;
588        }
589    } else {
590        if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
591            _qtd->data_toggle = DWC_OTG_HC_PID_DATA0;
592        } else {
593            _qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
594        }
595    }
596}
597
598/**
 * Removes the first QTD from the QH's list if free_qtd is 1; the QTD itself
 * is freed at the end of interrupt processing. For non-periodic
600 * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
601 * still linked to the QH, the QH is added to the end of the inactive
602 * non-periodic schedule. For periodic QHs, removes the QH from the periodic
603 * schedule if no more QTDs are linked to the QH.
604 */
605static void deactivate_qh(dwc_otg_hcd_t *_hcd,
606              dwc_otg_qh_t *_qh,
607              int free_qtd)
608{
609    int continue_split = 0;
610    dwc_otg_qtd_t *qtd;
611
612    DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, _hcd, _qh, free_qtd);
613
614    qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
615
616    if (qtd->complete_split) {
617        continue_split = 1;
618    }
619    else if ((qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID) ||
620         (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END))
621    {
622        continue_split = 1;
623    }
624
625    if (free_qtd) {
        /*
         * This was previously a call to dwc_otg_hcd_qtd_remove_and_free(qtd),
         * which frees the QTD immediately. However, the interrupt handling
         * code continues to access (and even write to) the QTD after this
         * point; with slub debugging enabled it was clear that freed memory
         * was being written. Only remove the QTD here; it is freed at the end
         * of interrupt processing for this channel.
         */
636        //dwc_otg_hcd_qtd_remove_and_free(qtd);
637        dwc_otg_hcd_qtd_remove(qtd);
638        
639        continue_split = 0;
640    }
641
642    _qh->channel = NULL;
643    _qh->qtd_in_process = NULL;
644    dwc_otg_hcd_qh_deactivate(_hcd, _qh, continue_split);
645}
646
647/**
648 * Updates the state of an Isochronous URB when the transfer is stopped for
649 * any reason. The fields of the current entry in the frame descriptor array
650 * are set based on the transfer state and the input _halt_status. Completes
651 * the Isochronous URB if all the URB frames have been completed.
652 *
653 * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be
654 * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE.
655 */
656static dwc_otg_halt_status_e
657update_isoc_urb_state(dwc_otg_hcd_t *_hcd,
658              dwc_hc_t *_hc,
659              dwc_otg_hc_regs_t *_hc_regs,
660              dwc_otg_qtd_t *_qtd,
661              dwc_otg_halt_status_e _halt_status)
662{
663    struct urb *urb = _qtd->urb;
664    dwc_otg_halt_status_e ret_val = _halt_status;
665    struct usb_iso_packet_descriptor *frame_desc;
666
667    frame_desc = &urb->iso_frame_desc[_qtd->isoc_frame_index];
668    switch (_halt_status) {
669    case DWC_OTG_HC_XFER_COMPLETE:
670        frame_desc->status = 0;
671        frame_desc->actual_length =
672            get_actual_xfer_length(_hc, _hc_regs, _qtd,
673                           _halt_status, NULL);
674        break;
675    case DWC_OTG_HC_XFER_FRAME_OVERRUN:
676        urb->error_count++;
677        if (_hc->ep_is_in) {
678            frame_desc->status = -ENOSR;
679        } else {
680            frame_desc->status = -ECOMM;
681        }
682        frame_desc->actual_length = 0;
683        break;
684    case DWC_OTG_HC_XFER_BABBLE_ERR:
685        urb->error_count++;
686        frame_desc->status = -EOVERFLOW;
687        /* Don't need to update actual_length in this case. */
688        break;
689    case DWC_OTG_HC_XFER_XACT_ERR:
690        urb->error_count++;
691        frame_desc->status = -EPROTO;
692        frame_desc->actual_length =
693            get_actual_xfer_length(_hc, _hc_regs, _qtd,
                           _halt_status, NULL);
        break;
    default:
696        DWC_ERROR("%s: Unhandled _halt_status (%d)\n", __func__,
697              _halt_status);
698        BUG();
699        break;
700    }
701
702    if (++_qtd->isoc_frame_index == urb->number_of_packets) {
703        /*
704         * urb->status is not used for isoc transfers.
705         * The individual frame_desc statuses are used instead.
706         */
707        dwc_otg_hcd_complete_urb(_hcd, urb, 0);
708        ret_val = DWC_OTG_HC_XFER_URB_COMPLETE;
709    } else {
710        ret_val = DWC_OTG_HC_XFER_COMPLETE;
711    }
712
713    return ret_val;
714}
715
716/**
717 * Releases a host channel for use by other transfers. Attempts to select and
718 * queue more transactions since at least one host channel is available.
719 *
720 * @param _hcd The HCD state structure.
721 * @param _hc The host channel to release.
722 * @param _qtd The QTD associated with the host channel. This QTD may be freed
723 * if the transfer is complete or an error has occurred.
724 * @param _halt_status Reason the channel is being released. This status
725 * determines the actions taken by this function.
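 * @param must_free Out parameter set to 1 when the caller should free the
 * QTD once interrupt processing for this channel has finished; it is never
 * cleared here (the caller pre-initializes it to 0).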
726 */
727static void release_channel(dwc_otg_hcd_t *_hcd,
728                dwc_hc_t *_hc,
729                dwc_otg_qtd_t *_qtd,
730                dwc_otg_halt_status_e _halt_status,
731                int *must_free)
732{
733    dwc_otg_transaction_type_e tr_type;
734    int free_qtd;
735    dwc_otg_qh_t * _qh;
736    int deact = 1;
737    int retry_delay = 1;
738    unsigned long flags;
739
740    DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n", __func__,
741              _hc->hc_num, _halt_status);
742
743    switch (_halt_status) {
744    case DWC_OTG_HC_XFER_NYET:
745    case DWC_OTG_HC_XFER_NAK:
746        if (_halt_status == DWC_OTG_HC_XFER_NYET) {
747            retry_delay = nyet_deferral_delay;
748        } else {
749            retry_delay = nak_deferral_delay;
750        }
751        free_qtd = 0;
752        if (deferral_on && _hc->do_split) {
753            _qh = _hc->qh;
754            if (_qh) {
                deact = dwc_otg_hcd_qh_deferr(_hcd, _qh, retry_delay);
            }
        }
        break;
759    case DWC_OTG_HC_XFER_URB_COMPLETE:
760        free_qtd = 1;
761        break;
762    case DWC_OTG_HC_XFER_AHB_ERR:
763    case DWC_OTG_HC_XFER_STALL:
764    case DWC_OTG_HC_XFER_BABBLE_ERR:
765        free_qtd = 1;
766        break;
767    case DWC_OTG_HC_XFER_XACT_ERR:
768        if (_qtd->error_count >= 3) {
769            DWC_DEBUGPL(DBG_HCDV, " Complete URB with transaction error\n");
770            free_qtd = 1;
771            //_qtd->urb->status = -EPROTO;
772            dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPROTO);
773        } else {
774            free_qtd = 0;
775        }
776        break;
777    case DWC_OTG_HC_XFER_URB_DEQUEUE:
778        /*
779         * The QTD has already been removed and the QH has been
780         * deactivated. Don't want to do anything except release the
781         * host channel and try to queue more transfers.
782         */
783        goto cleanup;
784    case DWC_OTG_HC_XFER_NO_HALT_STATUS:
785        DWC_ERROR("%s: No halt_status, channel %d\n", __func__, _hc->hc_num);
786        free_qtd = 0;
787        break;
788    default:
789        free_qtd = 0;
790        break;
791    }
792    if (free_qtd) {
793        /* Only change must_free to true (do not set to zero here -- it is
794         * pre-initialized to zero).
795         */
796        *must_free = 1;
797    }
    if (deact) {
        deactivate_qh(_hcd, _hc->qh, free_qtd);
    }
801 cleanup:
802    /*
803     * Release the host channel for use by other transfers. The cleanup
804     * function clears the channel interrupt enables and conditions, so
805     * there's no need to clear the Channel Halted interrupt separately.
806     */
807    dwc_otg_hc_cleanup(_hcd->core_if, _hc);
808    list_add_tail(&_hc->hc_list_entry, &_hcd->free_hc_list);
809
810    local_irq_save(flags);
811    _hcd->available_host_channels++;
812    local_irq_restore(flags);
    /*
     * Try to queue more transfers now that there's a free channel,
     * unless erratum_usb09_patched is set.
     */
    if (!erratum_usb09_patched) {
        tr_type = dwc_otg_hcd_select_transactions(_hcd);
        if (tr_type != DWC_OTG_TRANSACTION_NONE) {
            dwc_otg_hcd_queue_transactions(_hcd, tr_type);
        }
    }
821}
822
823/**
824 * Halts a host channel. If the channel cannot be halted immediately because
825 * the request queue is full, this function ensures that the FIFO empty
826 * interrupt for the appropriate queue is enabled so that the halt request can
827 * be queued when there is space in the request queue.
828 *
829 * This function may also be called in DMA mode. In that case, the channel is
830 * simply released since the core always halts the channel automatically in
831 * DMA mode.
832 */
833static void halt_channel(dwc_otg_hcd_t *_hcd,
834             dwc_hc_t *_hc,
835             dwc_otg_qtd_t *_qtd,
836             dwc_otg_halt_status_e _halt_status, int *must_free)
837{
838    if (_hcd->core_if->dma_enable) {
839        release_channel(_hcd, _hc, _qtd, _halt_status, must_free);
840        return;
841    }
842
843    /* Slave mode processing... */
844    dwc_otg_hc_halt(_hcd->core_if, _hc, _halt_status);
845
846    if (_hc->halt_on_queue) {
847        gintmsk_data_t gintmsk = {.d32 = 0};
848        dwc_otg_core_global_regs_t *global_regs;
849        global_regs = _hcd->core_if->core_global_regs;
850
851        if (_hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
852            _hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
853            /*
854             * Make sure the Non-periodic Tx FIFO empty interrupt
855             * is enabled so that the non-periodic schedule will
856             * be processed.
857             */
858            gintmsk.b.nptxfempty = 1;
859            dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32);
860        } else {
861            /*
862             * Move the QH from the periodic queued schedule to
863             * the periodic assigned schedule. This allows the
864             * halt to be queued when the periodic schedule is
865             * processed.
866             */
867            list_move(&_hc->qh->qh_list_entry,
868                  &_hcd->periodic_sched_assigned);
869
870            /*
871             * Make sure the Periodic Tx FIFO Empty interrupt is
872             * enabled so that the periodic schedule will be
873             * processed.
874             */
875            gintmsk.b.ptxfempty = 1;
876            dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32);
877        }
878    }
879}
880
881/**
882 * Performs common cleanup for non-periodic transfers after a Transfer
883 * Complete interrupt. This function should be called after any endpoint type
884 * specific handling is finished to release the host channel.
885 */
886static void complete_non_periodic_xfer(dwc_otg_hcd_t *_hcd,
887                       dwc_hc_t *_hc,
888                       dwc_otg_hc_regs_t *_hc_regs,
889                       dwc_otg_qtd_t *_qtd,
890                       dwc_otg_halt_status_e _halt_status, int *must_free)
891{
892    hcint_data_t hcint;
893
894    _qtd->error_count = 0;
895
896    hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
897    if (hcint.b.nyet) {
898        /*
899         * Got a NYET on the last transaction of the transfer. This
900         * means that the endpoint should be in the PING state at the
901         * beginning of the next transfer.
902         */
903        _hc->qh->ping_state = 1;
904        clear_hc_int(_hc_regs,nyet);
905    }
906
907    /*
908     * Always halt and release the host channel to make it available for
909     * more transfers. There may still be more phases for a control
910     * transfer or more data packets for a bulk transfer at this point,
911     * but the host channel is still halted. A channel will be reassigned
912     * to the transfer when the non-periodic schedule is processed after
913     * the channel is released. This allows transactions to be queued
914     * properly via dwc_otg_hcd_queue_transactions, which also enables the
915     * Tx FIFO Empty interrupt if necessary.
916     */
917    if (_hc->ep_is_in) {
918        /*
919         * IN transfers in Slave mode require an explicit disable to
920         * halt the channel. (In DMA mode, this call simply releases
921         * the channel.)
922         */
923        halt_channel(_hcd, _hc, _qtd, _halt_status, must_free);
924    } else {
925        /*
926         * The channel is automatically disabled by the core for OUT
927         * transfers in Slave mode.
928         */
929        release_channel(_hcd, _hc, _qtd, _halt_status, must_free);
930    }
931}
932
933/**
934 * Performs common cleanup for periodic transfers after a Transfer Complete
935 * interrupt. This function should be called after any endpoint type specific
936 * handling is finished to release the host channel.
937 */
938static void complete_periodic_xfer(dwc_otg_hcd_t *_hcd,
939                   dwc_hc_t *_hc,
940                   dwc_otg_hc_regs_t *_hc_regs,
941                   dwc_otg_qtd_t *_qtd,
942                   dwc_otg_halt_status_e _halt_status, int *must_free)
943{
944    hctsiz_data_t hctsiz;
945    _qtd->error_count = 0;
946        
947    hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
948    if (!_hc->ep_is_in || hctsiz.b.pktcnt == 0) {
949        /* Core halts channel in these cases. */
950        release_channel(_hcd, _hc, _qtd, _halt_status, must_free);
951    } else {
952        /* Flush any outstanding requests from the Tx queue. */
953        halt_channel(_hcd, _hc, _qtd, _halt_status, must_free);
954    }
955}
956
957/**
958 * Handles a host channel Transfer Complete interrupt. This handler may be
959 * called in either DMA mode or Slave mode.
960 */
961static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t *_hcd,
962                       dwc_hc_t *_hc,
963                       dwc_otg_hc_regs_t *_hc_regs,
964                       dwc_otg_qtd_t *_qtd, int *must_free)
965{
966    int urb_xfer_done;
967    dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE;
968    struct urb *urb = _qtd->urb;
969    int pipe_type = usb_pipetype(urb->pipe);
970    int status = -EINPROGRESS;
971
972    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
973            "Transfer Complete--\n", _hc->hc_num);
974
    /*
     * Handle xfer complete on CSPLIT.
     */
978    if (_hc->qh->do_split) {
979        _qtd->complete_split = 0;
980    }
981
982    /* Update the QTD and URB states. */
983    switch (pipe_type) {
984    case PIPE_CONTROL:
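        /*
         * Control transfers advance through SETUP -> (optional) DATA -> STATUS;
         * each completed stage updates _qtd->control_phase before the channel
         * is released so the next stage can be queued.
         */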
985        switch (_qtd->control_phase) {
986        case DWC_OTG_CONTROL_SETUP:
987            if (urb->transfer_buffer_length > 0) {
988                _qtd->control_phase = DWC_OTG_CONTROL_DATA;
989            } else {
990                _qtd->control_phase = DWC_OTG_CONTROL_STATUS;
991            }
992            DWC_DEBUGPL(DBG_HCDV, " Control setup transaction done\n");
993            halt_status = DWC_OTG_HC_XFER_COMPLETE;
994            break;
995        case DWC_OTG_CONTROL_DATA: {
            urb_xfer_done = update_urb_state_xfer_comp(_hc, _hc_regs, urb, _qtd, &status);
997            if (urb_xfer_done) {
998                _qtd->control_phase = DWC_OTG_CONTROL_STATUS;
999                DWC_DEBUGPL(DBG_HCDV, " Control data transfer done\n");
1000            } else {
1001                save_data_toggle(_hc, _hc_regs, _qtd);
1002            }
1003            halt_status = DWC_OTG_HC_XFER_COMPLETE;
1004            break;
1005        }
1006        case DWC_OTG_CONTROL_STATUS:
1007            DWC_DEBUGPL(DBG_HCDV, " Control transfer complete\n");
1008            if (status == -EINPROGRESS) {
1009                status = 0;
1010            }
1011            dwc_otg_hcd_complete_urb(_hcd, urb, status);
1012            halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
1013            break;
1014        }
1015
1016        complete_non_periodic_xfer(_hcd, _hc, _hc_regs, _qtd,
1017                         halt_status, must_free);
1018        break;
1019    case PIPE_BULK:
1020        DWC_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n");
1021        urb_xfer_done = update_urb_state_xfer_comp(_hc, _hc_regs, urb, _qtd, &status);
1022        if (urb_xfer_done) {
1023            dwc_otg_hcd_complete_urb(_hcd, urb, status);
1024            halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
1025        } else {
1026            halt_status = DWC_OTG_HC_XFER_COMPLETE;
1027        }
1028            
1029        save_data_toggle(_hc, _hc_regs, _qtd);
        complete_non_periodic_xfer(_hcd, _hc, _hc_regs, _qtd, halt_status, must_free);
1031        break;
1032    case PIPE_INTERRUPT:
1033        DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n");
1034        update_urb_state_xfer_comp(_hc, _hc_regs, urb, _qtd, &status);
1035
1036        /*
1037         * Interrupt URB is done on the first transfer complete
1038         * interrupt.
1039         */
1040        dwc_otg_hcd_complete_urb(_hcd, urb, status);
1041        save_data_toggle(_hc, _hc_regs, _qtd);
1042        complete_periodic_xfer(_hcd, _hc, _hc_regs, _qtd,
1043                    DWC_OTG_HC_XFER_URB_COMPLETE, must_free);
1044        break;
1045    case PIPE_ISOCHRONOUS:
1046        DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n");
1047        if (_qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL)
1048        {
1049            halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
1050                                DWC_OTG_HC_XFER_COMPLETE);
1051        }
1052        complete_periodic_xfer(_hcd, _hc, _hc_regs, _qtd, halt_status, must_free);
1053        break;
1054    }
1055
    disable_hc_int(_hc_regs,xfercompl);
1057
1058    return 1;
1059}
1060
1061/**
1062 * Handles a host channel STALL interrupt. This handler may be called in
1063 * either DMA mode or Slave mode.
1064 */
1065static int32_t handle_hc_stall_intr(dwc_otg_hcd_t *_hcd,
1066                    dwc_hc_t *_hc,
1067                    dwc_otg_hc_regs_t *_hc_regs,
1068                    dwc_otg_qtd_t *_qtd, int *must_free)
1069{
1070    struct urb *urb = _qtd->urb;
1071    int pipe_type = usb_pipetype(urb->pipe);
1072
1073    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1074            "STALL Received--\n", _hc->hc_num);
1075
1076    if (pipe_type == PIPE_CONTROL) {
1077        dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPIPE);
1078    }
1079
1080    if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) {
1081        dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPIPE);
1082        /*
1083         * USB protocol requires resetting the data toggle for bulk
1084         * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1085         * setup command is issued to the endpoint. Anticipate the
1086         * CLEAR_FEATURE command since a STALL has occurred and reset
1087         * the data toggle now.
1088         */
1089        _hc->qh->data_toggle = 0;
1090    }
1091
1092    halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_STALL, must_free);
1093    disable_hc_int(_hc_regs,stall);
1094
1095    return 1;
1096}
1097
1098/*
1099 * Updates the state of the URB when a transfer has been stopped due to an
1100 * abnormal condition before the transfer completes. Modifies the
1101 * actual_length field of the URB to reflect the number of bytes that have
1102 * actually been transferred via the host channel.
1103 */
1104static void update_urb_state_xfer_intr(dwc_hc_t *_hc,
1105                       dwc_otg_hc_regs_t *_hc_regs,
1106                       struct urb *_urb,
1107                       dwc_otg_qtd_t *_qtd,
1108                       dwc_otg_halt_status_e _halt_status)
1109{
1110    uint32_t bytes_transferred = get_actual_xfer_length(_hc, _hc_regs, _qtd,
1111                                _halt_status, NULL);
1112    _urb->actual_length += bytes_transferred;
1113
1114#ifdef DEBUG
1115    {
1116        hctsiz_data_t hctsiz;
1117        hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
1118        DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
1119                __func__, (_hc->ep_is_in ? "IN" : "OUT"), _hc->hc_num);
1120        DWC_DEBUGPL(DBG_HCDV, " _hc->start_pkt_count %d\n", _hc->start_pkt_count);
1121        DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
1122        DWC_DEBUGPL(DBG_HCDV, " _hc->max_packet %d\n", _hc->max_packet);
1123        DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d\n", bytes_transferred);
1124        DWC_DEBUGPL(DBG_HCDV, " _urb->actual_length %d\n", _urb->actual_length);
1125        DWC_DEBUGPL(DBG_HCDV, " _urb->transfer_buffer_length %d\n",
1126                _urb->transfer_buffer_length);
1127    }
1128#endif
1129}
1130
1131/**
1132 * Handles a host channel NAK interrupt. This handler may be called in either
1133 * DMA mode or Slave mode.
1134 */
1135static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *_hcd,
1136                  dwc_hc_t *_hc,
1137                  dwc_otg_hc_regs_t *_hc_regs,
1138                  dwc_otg_qtd_t *_qtd, int *must_free)
1139{
1140    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1141            "NAK Received--\n", _hc->hc_num);
1142
1143    /*
1144     * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1145     * interrupt. Re-start the SSPLIT transfer.
1146     */
1147    if (_hc->do_split) {
1148        if (_hc->complete_split) {
1149            _qtd->error_count = 0;
1150        }
1151        _qtd->complete_split = 0;
1152        halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free);
1153        goto handle_nak_done;
1154    }
1155
1156    switch (usb_pipetype(_qtd->urb->pipe)) {
1157    case PIPE_CONTROL:
1158    case PIPE_BULK:
1159        if (_hcd->core_if->dma_enable && _hc->ep_is_in) {
1160            /*
1161             * NAK interrupts are enabled on bulk/control IN
1162             * transfers in DMA mode for the sole purpose of
1163             * resetting the error count after a transaction error
1164             * occurs. The core will continue transferring data.
1165             */
1166            _qtd->error_count = 0;
1167            goto handle_nak_done;
1168        }
1169
1170        /*
1171         * NAK interrupts normally occur during OUT transfers in DMA
1172         * or Slave mode. For IN transfers, more requests will be
1173         * queued as request queue space is available.
1174         */
1175        _qtd->error_count = 0;
1176
1177        if (!_hc->qh->ping_state) {
1178            update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb,
1179                           _qtd, DWC_OTG_HC_XFER_NAK);
1180            save_data_toggle(_hc, _hc_regs, _qtd);
1181            if (_qtd->urb->dev->speed == USB_SPEED_HIGH) {
1182                _hc->qh->ping_state = 1;
1183            }
1184        }
1185
1186        /*
1187         * Halt the channel so the transfer can be re-started from
1188         * the appropriate point or the PING protocol will
1189         * start/continue.
1190         */
1191        halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free);
1192        break;
1193    case PIPE_INTERRUPT:
1194        _qtd->error_count = 0;
1195        halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free);
1196        break;
1197    case PIPE_ISOCHRONOUS:
1198        /* Should never get called for isochronous transfers. */
1199        BUG();
1200        break;
1201    }
1202
1203 handle_nak_done:
1204    disable_hc_int(_hc_regs,nak);
1205
1206    return 1;
1207}
1208
1209/**
1210 * Handles a host channel ACK interrupt. This interrupt is enabled when
1211 * performing the PING protocol in Slave mode, when errors occur during
1212 * either Slave mode or DMA mode, and during Start Split transactions.
1213 */
1214static int32_t handle_hc_ack_intr(dwc_otg_hcd_t *_hcd,
1215    dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
1216{
1217    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1218            "ACK Received--\n", _hc->hc_num);
1219
1220    if (_hc->do_split) {
1221        /*
1222         * Handle ACK on SSPLIT.
1223         * ACK should not occur in CSPLIT.
1224         */
1225        if ((!_hc->ep_is_in) && (_hc->data_pid_start != DWC_OTG_HC_PID_SETUP)) {
1226            _qtd->ssplit_out_xfer_count = _hc->xfer_len;
1227        }
1228        if (!(_hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !_hc->ep_is_in)) {
1229            /* Don't need complete for isochronous out transfers. */
1230            _qtd->complete_split = 1;
1231        }
1232
1233        /* ISOC OUT */
1234        if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && !_hc->ep_is_in) {
1235            switch (_hc->xact_pos) {
1236            case DWC_HCSPLIT_XACTPOS_ALL:
1237                break;
1238            case DWC_HCSPLIT_XACTPOS_END:
1239                _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
1240                _qtd->isoc_split_offset = 0;
1241                break;
1242            case DWC_HCSPLIT_XACTPOS_BEGIN:
1243            case DWC_HCSPLIT_XACTPOS_MID:
1244                /*
1245                 * For BEGIN or MID, calculate the length for
1246                 * the next microframe to determine the correct
1247                 * SSPLIT token, either MID or END.
1248                 */
1249                do {
1250                    struct usb_iso_packet_descriptor *frame_desc;
1251
1252                    frame_desc = &_qtd->urb->iso_frame_desc[_qtd->isoc_frame_index];
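                    /* A start-split carries at most 188 bytes of isochronous OUT data per microframe. */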
1253                    _qtd->isoc_split_offset += 188;
1254
1255                    if ((frame_desc->length - _qtd->isoc_split_offset) <= 188) {
1256                        _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_END;
1257                    }
1258                    else {
1259                        _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_MID;
1260                    }
1261                    
1262                } while(0);
1263                break;
1264            }
1265        } else {
1266            halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_ACK, must_free);
1267        }
1268    } else {
1269        _qtd->error_count = 0;
1270
1271        if (_hc->qh->ping_state) {
1272            _hc->qh->ping_state = 0;
1273            /*
1274             * Halt the channel so the transfer can be re-started
1275             * from the appropriate point. This only happens in
1276             * Slave mode. In DMA mode, the ping_state is cleared
1277             * when the transfer is started because the core
1278             * automatically executes the PING, then the transfer.
1279             */
1280            halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_ACK, must_free);
1281        }
1282    }
1283
1284    /*
1285     * If the ACK occurred when _not_ in the PING state, let the channel
1286     * continue transferring data after clearing the error count.
1287     */
1288
1289    disable_hc_int(_hc_regs,ack);
1290
1291    return 1;
1292}
1293
1294/**
1295 * Handles a host channel NYET interrupt. This interrupt should only occur on
1296 * Bulk and Control OUT endpoints and for complete split transactions. If a
1297 * NYET occurs at the same time as a Transfer Complete interrupt, it is
1298 * handled in the xfercomp interrupt handler, not here. This handler may be
1299 * called in either DMA mode or Slave mode.
1300 */
1301static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t *_hcd,
1302                   dwc_hc_t *_hc,
1303                   dwc_otg_hc_regs_t *_hc_regs,
1304                   dwc_otg_qtd_t *_qtd, int *must_free)
1305{
1306    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1307            "NYET Received--\n", _hc->hc_num);
1308
1309    /*
1310     * NYET on CSPLIT
1311     * re-do the CSPLIT immediately on non-periodic
1312     */
1313    if ((_hc->do_split) && (_hc->complete_split)) {
1314        if ((_hc->ep_type == DWC_OTG_EP_TYPE_INTR) ||
1315            (_hc->ep_type == DWC_OTG_EP_TYPE_ISOC)) {
1316            int frnum = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(_hcd));
1317
1318            if (dwc_full_frame_num(frnum) !=
1319                dwc_full_frame_num(_hc->qh->sched_frame)) {
1320                /*
1321                 * No longer in the same full speed frame.
1322                 * Treat this as a transaction error.
1323                 */
1324#if 0
1325                /** @todo Fix system performance so this can
1326                 * be treated as an error. Right now complete
1327                 * splits cannot be scheduled precisely enough
1328                 * due to other system activity, so this error
1329                 * occurs regularly in Slave mode.
1330                 */
1331                _qtd->error_count++;
1332#endif
1333                _qtd->complete_split = 0;
1334                halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
1335                /** @todo add support for isoc release */
1336                goto handle_nyet_done;
1337            }
1338        }
1339
1340        halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NYET, must_free);
1341        goto handle_nyet_done;
1342    }
1343
1344    _hc->qh->ping_state = 1;
1345    _qtd->error_count = 0;
1346
1347    update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb, _qtd,
1348                   DWC_OTG_HC_XFER_NYET);
1349    save_data_toggle(_hc, _hc_regs, _qtd);
1350
1351    /*
1352     * Halt the channel and re-start the transfer so the PING
1353     * protocol will start.
1354     */
1355    halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NYET, must_free);
1356
1357handle_nyet_done:
1358    disable_hc_int(_hc_regs,nyet);
1359    clear_hc_int(_hc_regs, nyet);
1360    return 1;
1361}
1362
1363/**
1364 * Handles a host channel babble interrupt. This handler may be called in
1365 * either DMA mode or Slave mode.
1366 */
1367static int32_t handle_hc_babble_intr(dwc_otg_hcd_t *_hcd,
1368    dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
1369{
1370    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1371            "Babble Error--\n", _hc->hc_num);
1372    if (_hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
1373        dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EOVERFLOW);
1374        halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_BABBLE_ERR, must_free);
1375    } else {
1376        dwc_otg_halt_status_e halt_status;
1377        halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
1378                            DWC_OTG_HC_XFER_BABBLE_ERR);
1379        halt_channel(_hcd, _hc, _qtd, halt_status, must_free);
1380    }
1381    disable_hc_int(_hc_regs,bblerr);
1382    return 1;
1383}
1384
1385/**
1386 * Handles a host channel AHB error interrupt. This handler is only called in
1387 * DMA mode.
1388 */
1389static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t *_hcd,
1390                     dwc_hc_t *_hc,
1391                     dwc_otg_hc_regs_t *_hc_regs,
1392                     dwc_otg_qtd_t *_qtd)
1393{
1394    hcchar_data_t hcchar;
1395    hcsplt_data_t hcsplt;
1396    hctsiz_data_t hctsiz;
1397    uint32_t hcdma;
1398    struct urb *urb = _qtd->urb;
1399
1400    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1401            "AHB Error--\n", _hc->hc_num);
1402
    hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
    hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt);
    hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
    hcdma = dwc_read_reg32(&_hc_regs->hcdma);

    DWC_ERROR("AHB ERROR, Channel %d\n", _hc->hc_num);
    DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
    DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
    DWC_ERROR(" Device address: %d\n", usb_pipedevice(urb->pipe));
    DWC_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
            (usb_pipein(urb->pipe) ? "IN" : "OUT"));
    DWC_ERROR(" Endpoint type: %s\n",
            ({char *pipetype;
            switch (usb_pipetype(urb->pipe)) {
            case PIPE_CONTROL: pipetype = "CONTROL"; break;
            case PIPE_BULK: pipetype = "BULK"; break;
            case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break;
            case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break;
            default: pipetype = "UNKNOWN"; break;
            }; pipetype;}));
    DWC_ERROR(" Speed: %s\n",
            ({char *speed;
            switch (urb->dev->speed) {
            case USB_SPEED_HIGH: speed = "HIGH"; break;
            case USB_SPEED_FULL: speed = "FULL"; break;
            case USB_SPEED_LOW: speed = "LOW"; break;
            default: speed = "UNKNOWN"; break;
            }; speed;}));
    DWC_ERROR(" Max packet size: %d\n",
            usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
    DWC_ERROR(" Data buffer length: %d\n", urb->transfer_buffer_length);
    DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n",
          urb->transfer_buffer, (void *)(u32)urb->transfer_dma);
    DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n",
          urb->setup_packet, (void *)(u32)urb->setup_dma);
    DWC_ERROR(" Interval: %d\n", urb->interval);

    dwc_otg_hcd_complete_urb(_hcd, urb, -EIO);

    /*
     * Force a channel halt. Don't call halt_channel because that won't
     * write to the HCCHARn register in DMA mode to force the halt.
     */
    dwc_otg_hc_halt(_hcd->core_if, _hc, DWC_OTG_HC_XFER_AHB_ERR);

    disable_hc_int(_hc_regs,ahberr);
    return 1;
}

/**
 * Handles a host channel transaction error interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 */
static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t *_hcd,
    dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
{
    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
            "Transaction Error--\n", _hc->hc_num);

    switch (usb_pipetype(_qtd->urb->pipe)) {
    case PIPE_CONTROL:
    case PIPE_BULK:
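        /*
         * Count the error against the QTD. The count is examined when the
         * resulting channel halt is processed; in this driver, three
         * consecutive transaction errors normally fail the URB with -EPROTO.
         */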
        _qtd->error_count++;
        if (!_hc->qh->ping_state) {
            update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb,
                           _qtd, DWC_OTG_HC_XFER_XACT_ERR);
            save_data_toggle(_hc, _hc_regs, _qtd);
            if (!_hc->ep_is_in && _qtd->urb->dev->speed == USB_SPEED_HIGH) {
                _hc->qh->ping_state = 1;
            }
        }

        /*
         * Halt the channel so the transfer can be re-started from
         * the appropriate point or the PING protocol will start.
         */
        halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
        break;
    case PIPE_INTERRUPT:
        _qtd->error_count++;
        if ((_hc->do_split) && (_hc->complete_split)) {
            _qtd->complete_split = 0;
        }
        halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
        break;
    case PIPE_ISOCHRONOUS:
        {
            dwc_otg_halt_status_e halt_status;
            halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
                                DWC_OTG_HC_XFER_XACT_ERR);
            halt_channel(_hcd, _hc, _qtd, halt_status, must_free);
        }
        break;
    }

    disable_hc_int(_hc_regs,xacterr);

    return 1;
}

/**
 * Handles a host channel frame overrun interrupt. This handler may be called
 * in either DMA mode or Slave mode.
 */
static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t *_hcd,
    dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
{
    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
            "Frame Overrun--\n", _hc->hc_num);

    switch (usb_pipetype(_qtd->urb->pipe)) {
    case PIPE_CONTROL:
    case PIPE_BULK:
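        /*
         * A frame overrun is only meaningful for periodic transfers, so
         * nothing is done here for control/bulk pipes.
         */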
        break;
    case PIPE_INTERRUPT:
        halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN, must_free);
        break;
    case PIPE_ISOCHRONOUS:
        {
            dwc_otg_halt_status_e halt_status;
            halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
                                DWC_OTG_HC_XFER_FRAME_OVERRUN);
            halt_channel(_hcd, _hc, _qtd, halt_status, must_free);
        }
        break;
    }

    disable_hc_int(_hc_regs,frmovrun);

    return 1;
}

/**
 * Handles a host channel data toggle error interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 */
static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t *_hcd,
    dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
{
    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
            "Data Toggle Error--\n", _hc->hc_num);

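    /*
     * On an IN transfer a data toggle error means the received PID did not
     * match the expected toggle, which usually indicates the device resent a
     * packet whose ACK was lost; the core is assumed to discard the duplicate
     * and continue, so the error counter is simply reset. A toggle error on
     * an OUT transfer is unexpected here and is only reported.
     */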
    if (_hc->ep_is_in) {
        _qtd->error_count = 0;
    } else {
        DWC_ERROR("Data Toggle Error on OUT transfer, "
              "channel %d\n", _hc->hc_num);
    }

    disable_hc_int(_hc_regs,datatglerr);

    return 1;
}

#ifdef DEBUG
/**
 * This function is for debug only. It checks that a valid halt status is set
 * and that HCCHARn.chdis is clear. If there's a problem, corrective action is
 * taken and a warning is issued.
 * @return 1 if halt status is ok, 0 otherwise.
 */
static inline int halt_status_ok(dwc_otg_hcd_t *_hcd,
    dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
{
    hcchar_data_t hcchar;
    hctsiz_data_t hctsiz;
    hcint_data_t hcint;
    hcintmsk_data_t hcintmsk;
    hcsplt_data_t hcsplt;

    if (_hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) {
        /*
         * This code is here only as a check. This condition should
         * never happen. Ignore the halt if it does occur.
         */
        hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
        hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
        hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
        hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcintmsk);
        hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt);
        DWC_WARN("%s: _hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, "
             "channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
             "hcint 0x%08x, hcintmsk 0x%08x, "
             "hcsplt 0x%08x, qtd->complete_split %d\n",
             __func__, _hc->hc_num, hcchar.d32, hctsiz.d32,
             hcint.d32, hcintmsk.d32,
             hcsplt.d32, _qtd->complete_split);

        DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n",
             __func__, _hc->hc_num);
        DWC_WARN("\n");
        clear_hc_int(_hc_regs,chhltd);
        return 0;
    }

    /*
     * This code is here only as a check. hcchar.chdis should
     * never be set when the halt interrupt occurs. Halt the
     * channel again if it does occur.
     */
    hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
    if (hcchar.b.chdis) {
        DWC_WARN("%s: hcchar.chdis set unexpectedly, "
             "hcchar 0x%08x, trying to halt again\n",
             __func__, hcchar.d32);
        clear_hc_int(_hc_regs,chhltd);
        _hc->halt_pending = 0;
        halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
        return 0;
    }

    return 1;
}
#endif

/**
 * Handles a host Channel Halted interrupt in DMA mode. This handler
 * determines the reason the channel halted and proceeds accordingly.
 */
static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *_hcd,
    dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
{
    hcint_data_t hcint;
    hcintmsk_data_t hcintmsk;

    if (_hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
        _hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
        /*
         * Just release the channel. A dequeue can happen on a
         * transfer timeout. In the case of an AHB Error, the channel
         * was forced to halt because there's no way to gracefully
         * recover.
         */
        release_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
        return;
    }

    /* Read the HCINTn register to determine the cause for the halt. */
    hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
    hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcintmsk);

    if (hcint.b.xfercomp) {
        /** @todo This is here because of a possible hardware bug. Spec
         * says that on SPLIT-ISOC OUT transfers in DMA mode a HALT
         * interrupt w/ACK bit set should occur, but I only see the
         * XFERCOMP bit, even with it masked out. This is a workaround
         * for that behavior. Should fix this when hardware is fixed.
         */
        if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && (!_hc->ep_is_in)) {
            handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
        }
        handle_hc_xfercomp_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
    } else if (hcint.b.stall) {
        handle_hc_stall_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
    } else if (hcint.b.xacterr) {
        /*
         * Must handle xacterr before nak or ack. Could get a xacterr
         * at the same time as either of these on a BULK/CONTROL OUT
         * that started with a PING. The xacterr takes precedence.
         */
        handle_hc_xacterr_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
    } else if (hcint.b.nyet) {
        /*
         * Must handle nyet before nak or ack. Could get a nyet at the
         * same time as either of those on a BULK/CONTROL OUT that
         * started with a PING. The nyet takes precedence.
         */
        handle_hc_nyet_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
    } else if (hcint.b.bblerr) {
        handle_hc_babble_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
    } else if (hcint.b.frmovrun) {
        handle_hc_frmovrun_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
    } else if (hcint.b.datatglerr) {
        handle_hc_datatglerr_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
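        /*
         * Reset the QH's saved toggle (0 here presumably selects DATA0) so
         * the transfer restarts from a known PID, then halt the channel so
         * it can be re-queued.
         */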
        _hc->qh->data_toggle = 0;
        halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
    } else if (hcint.b.nak && !hcintmsk.b.nak) {
        /*
         * If nak is not masked, it's because a non-split IN transfer
         * is in an error state. In that case, the nak is handled by
         * the nak interrupt handler, not here. Handle nak here for
         * BULK/CONTROL OUT transfers, which halt on a NAK to allow
         * rewinding the buffer pointer.
         */
        handle_hc_nak_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
    } else if (hcint.b.ack && !hcintmsk.b.ack) {
        /*
         * If ack is not masked, it's because a non-split IN transfer
         * is in an error state. In that case, the ack is handled by
         * the ack interrupt handler, not here. Handle ack here for
         * split transfers. Start splits halt on ACK.
         */
        handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
    } else {
        if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
            _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
            /*
             * A periodic transfer halted with no other channel
             * interrupts set. Assume it was halted by the core
             * because it could not be completed in its scheduled
             * (micro)frame.
             */
#ifdef DEBUG
            DWC_PRINT("%s: Halt channel %d (assume incomplete periodic transfer)\n",
                  __func__, _hc->hc_num);
#endif /* */
            halt_channel(_hcd, _hc, _qtd,
                     DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE, must_free);
        } else {
#ifdef DEBUG
            DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason "
                 "for halting is unknown, nyet %d, hcint 0x%08x, intsts 0x%08x\n",
                 __func__, _hc->hc_num, hcint.b.nyet, hcint.d32,
                 dwc_read_reg32(&_hcd->core_if->core_global_regs->gintsts));
#endif
            halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
        }
    }
}

/**
 * Handles a host channel Channel Halted interrupt.
 *
 * In slave mode, this handler is called only when the driver specifically
 * requests a halt. This occurs during handling other host channel interrupts
 * (e.g. nak, xacterr, stall, nyet, etc.).
 *
 * In DMA mode, this is the interrupt that occurs when the core has finished
 * processing a transfer on a channel. Other host channel interrupts (except
 * ahberr) are disabled in DMA mode.
 */
static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t *_hcd,
    dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
{
    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
            "Channel Halted--\n", _hc->hc_num);

    if (_hcd->core_if->dma_enable) {
        handle_hc_chhltd_intr_dma(_hcd, _hc, _hc_regs, _qtd, must_free);
    } else {
#ifdef DEBUG
        if (!halt_status_ok(_hcd, _hc, _hc_regs, _qtd, must_free)) {
            return 1;
        }
#endif /* */
        release_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
    }

    return 1;
}

/** Handles interrupt for a specific Host Channel */
int32_t dwc_otg_hcd_handle_hc_n_intr (dwc_otg_hcd_t *_dwc_otg_hcd, uint32_t _num)
{
    int must_free = 0;
    int retval = 0;
    hcint_data_t hcint;
    hcintmsk_data_t hcintmsk;
    dwc_hc_t *hc;
    dwc_otg_hc_regs_t *hc_regs;
    dwc_otg_qtd_t *qtd;

    DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", _num);

    hc = _dwc_otg_hcd->hc_ptr_array[_num];
    hc_regs = _dwc_otg_hcd->core_if->host_if->hc_regs[_num];
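    /*
     * The QTD at the head of the QH's list is the transfer currently
     * assigned to this channel; all of the per-interrupt handlers below
     * operate on it.
     */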
    qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);

    hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
    hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
    DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
            hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
    hcint.d32 = hcint.d32 & hcintmsk.d32;

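    /*
     * In Slave mode, act on chhltd only when it is the sole pending status
     * (0x2 corresponds to the chhltd bit alone in HCINTn); if other bits are
     * set, their handlers run first and request the halt themselves.
     */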
    if (!_dwc_otg_hcd->core_if->dma_enable) {
        if ((hcint.b.chhltd) && (hcint.d32 != 0x2)) {
            hcint.b.chhltd = 0;
        }
    }

    if (hcint.b.xfercomp) {
        retval |= handle_hc_xfercomp_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
        /*
         * If NYET occurred at same time as Xfer Complete, the NYET is
         * handled by the Xfer Complete interrupt handler. Don't want
         * to call the NYET interrupt handler in this case.
         */
        hcint.b.nyet = 0;
    }
    if (hcint.b.chhltd) {
        retval |= handle_hc_chhltd_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
    }
    if (hcint.b.ahberr) {
        retval |= handle_hc_ahberr_intr(_dwc_otg_hcd, hc, hc_regs, qtd);
    }
    if (hcint.b.stall) {
        retval |= handle_hc_stall_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
    }
    if (hcint.b.nak) {
        retval |= handle_hc_nak_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
    }
    if (hcint.b.ack) {
        retval |= handle_hc_ack_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
    }
    if (hcint.b.nyet) {
        retval |= handle_hc_nyet_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
    }
    if (hcint.b.xacterr) {
        retval |= handle_hc_xacterr_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
    }
    if (hcint.b.bblerr) {
        retval |= handle_hc_babble_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
    }
    if (hcint.b.frmovrun) {
        retval |= handle_hc_frmovrun_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
    }
    if (hcint.b.datatglerr) {
        retval |= handle_hc_datatglerr_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
    }

    /*
     * Logic to free the qtd here, at the end of the hc intr
     * processing, if the handling of this interrupt determined
     * that it needs to be freed.
     */
    if (must_free) {
        /* Free the qtd here now that we are done using it. */
        dwc_otg_hcd_qtd_free(qtd);
    }
    return retval;
}

#endif /* DWC_DEVICE_ONLY */