Root/target/linux/cns3xxx/files/drivers/usb/dwc/otg_hcd_intr.c

/* ==========================================================================
 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_intr.c $
 * $Revision: #70 $
 * $Date: 2008/10/16 $
 * $Change: 1117667 $
 *
 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
 * otherwise expressly agreed to in writing between Synopsys and you.
 *
 * The Software IS NOT an item of Licensed Software or Licensed Product under
 * any End User Software License Agreement or Agreement for Licensed Product
 * with Synopsys or any supplement thereto. You are permitted to use and
 * redistribute this Software in source and binary forms, with or without
 * modification, provided that redistributions of source code must retain this
 * notice. You may not view, use, disclose, copy or distribute this file or
 * any information contained herein except pursuant to this license grant from
 * Synopsys. If you do not agree with this notice, including the disclaimer
 * below, then you are not authorized to use the Software.
 *
 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 * ========================================================================== */
#ifndef DWC_DEVICE_ONLY

#include <linux/version.h>

#include "otg_driver.h"
#include "otg_hcd.h"
#include "otg_regs.h"

/** @file
 * This file contains the implementation of the HCD Interrupt handlers.
 */

/** This function handles interrupts for the HCD. */
int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd)
{
    int retval = 0;

    dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
    gintsts_data_t gintsts;
#ifdef DEBUG
    dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
#endif

    /* Check if HOST Mode */
    if (dwc_otg_is_host_mode(core_if)) {
        gintsts.d32 = dwc_otg_read_core_intr(core_if);
        if (!gintsts.d32) {
            return 0;
        }

#ifdef DEBUG
        /* Don't print debug message in the interrupt handler on SOF */
# ifndef DEBUG_SOF
        if (gintsts.d32 != DWC_SOF_INTR_MASK)
# endif
            DWC_DEBUGPL(DBG_HCD, "\n");
#endif

#ifdef DEBUG
# ifndef DEBUG_SOF
        if (gintsts.d32 != DWC_SOF_INTR_MASK)
# endif
            DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", gintsts.d32);
#endif
        if (gintsts.b.usbreset) {
            DWC_PRINT("Usb Reset In Host Mode\n");
        }
        if (gintsts.b.sofintr) {
            retval |= dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd);
        }
        if (gintsts.b.rxstsqlvl) {
            retval |= dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd);
        }
        if (gintsts.b.nptxfempty) {
            retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd);
        }
        if (gintsts.b.i2cintr) {
            /** @todo Implement i2cintr handler. */
        }
        if (gintsts.b.portintr) {
            retval |= dwc_otg_hcd_handle_port_intr(dwc_otg_hcd);
        }
        if (gintsts.b.hcintr) {
            retval |= dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd);
        }
        if (gintsts.b.ptxfempty) {
            retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd);
        }
#ifdef DEBUG
# ifndef DEBUG_SOF
        if (gintsts.d32 != DWC_SOF_INTR_MASK)
# endif
        {
            DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Finished Servicing Interrupts\n");
            DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n",
                    dwc_read_reg32(&global_regs->gintsts));
            DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n",
                    dwc_read_reg32(&global_regs->gintmsk));
        }
#endif

#ifdef DEBUG
# ifndef DEBUG_SOF
    if (gintsts.d32 != DWC_SOF_INTR_MASK)
# endif
        DWC_DEBUGPL(DBG_HCD, "\n");
#endif

    }
    S3C2410X_CLEAR_EINTPEND();

    return retval;
}

#ifdef DWC_TRACK_MISSED_SOFS
#warning Compiling code to track missed SOFs
#define FRAME_NUM_ARRAY_SIZE 1000
/**
 * This function is for debug only.
 */
static inline void track_missed_sofs(uint16_t curr_frame_number)
{
    static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE];
    static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
    static int frame_num_idx = 0;
    static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM;
    static int dumped_frame_num_array = 0;

    if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
        if (((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) != curr_frame_number) {
            frame_num_array[frame_num_idx] = curr_frame_number;
            last_frame_num_array[frame_num_idx++] = last_frame_num;
        }
    } else if (!dumped_frame_num_array) {
        int i;
        printk(KERN_EMERG USB_DWC "Frame Last Frame\n");
        printk(KERN_EMERG USB_DWC "----- ----------\n");
        for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
            printk(KERN_EMERG USB_DWC "0x%04x 0x%04x\n",
                   frame_num_array[i], last_frame_num_array[i]);
        }
        dumped_frame_num_array = 1;
    }
    last_frame_num = curr_frame_number;
}
#endif

/**
 * Handles the start-of-frame interrupt in host mode. Non-periodic
 * transactions may be queued to the DWC_otg controller for the current
 * (micro)frame. Periodic transactions may be queued to the controller for the
 * next (micro)frame.
 */
int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *hcd)
{
    hfnum_data_t hfnum;
    struct list_head *qh_entry;
    dwc_otg_qh_t *qh;
    dwc_otg_transaction_type_e tr_type;
    gintsts_data_t gintsts = {.d32 = 0};

    hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum);

#ifdef DEBUG_SOF
    DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
#endif
    hcd->frame_number = hfnum.b.frnum;

#ifdef DEBUG
    hcd->frrem_accum += hfnum.b.frrem;
    hcd->frrem_samples++;
#endif

#ifdef DWC_TRACK_MISSED_SOFS
    track_missed_sofs(hcd->frame_number);
#endif

    /* Determine whether any periodic QHs should be executed. */
    qh_entry = hcd->periodic_sched_inactive.next;
    while (qh_entry != &hcd->periodic_sched_inactive) {
        qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry);
        qh_entry = qh_entry->next;
        if (dwc_frame_num_le(qh->sched_frame, hcd->frame_number)) {
            /*
             * Move QH to the ready list to be executed next
             * (micro)frame.
             */
            list_move(&qh->qh_list_entry, &hcd->periodic_sched_ready);
        }
    }

    tr_type = dwc_otg_hcd_select_transactions(hcd);
    if (tr_type != DWC_OTG_TRANSACTION_NONE) {
        dwc_otg_hcd_queue_transactions(hcd, tr_type);
    }

    /* Clear interrupt */
    gintsts.b.sofintr = 1;
    dwc_write_reg32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32);

    return 1;
}

/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at
 * least one packet in the Rx FIFO. The packets are moved from the FIFO to
 * memory if the DWC_otg controller is operating in Slave mode. */
int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd)
{
    host_grxsts_data_t grxsts;
    dwc_hc_t *hc = NULL;

    DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");

    grxsts.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->core_global_regs->grxstsp);

    hc = dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum];

    /* Packet Status */
    DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum);
    DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt);
    DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid, hc->data_pid_start);
    DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts);

    switch (grxsts.b.pktsts) {
    case DWC_GRXSTS_PKTSTS_IN:
        /* Read the data into the host buffer. */
        if (grxsts.b.bcnt > 0) {
            dwc_otg_read_packet(dwc_otg_hcd->core_if,
                        hc->xfer_buff,
                        grxsts.b.bcnt);

            /* Update the HC fields for the next packet received. */
            hc->xfer_count += grxsts.b.bcnt;
            hc->xfer_buff += grxsts.b.bcnt;
        }

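        /*
         * Note: intentional fall-through. The packet status codes below
         * require no data handling here.
         */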
    case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
    case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
    case DWC_GRXSTS_PKTSTS_CH_HALTED:
        /* Handled in interrupt, just ignore data */
        break;
    default:
        DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n", grxsts.b.pktsts);
        break;
    }

    return 1;
}

/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
 * data packets may be written to the FIFO for OUT transfers. More requests
 * may be written to the non-periodic request queue for IN transfers. This
 * interrupt is enabled only in Slave mode. */
int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd)
{
    DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n");
    dwc_otg_hcd_queue_transactions(dwc_otg_hcd,
                       DWC_OTG_TRANSACTION_NON_PERIODIC);
    return 1;
}

/** This interrupt occurs when the periodic Tx FIFO is half-empty. More data
 * packets may be written to the FIFO for OUT transfers. More requests may be
 * written to the periodic request queue for IN transfers. This interrupt is
 * enabled only in Slave mode. */
int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd)
{
    DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n");
    dwc_otg_hcd_queue_transactions(dwc_otg_hcd,
                       DWC_OTG_TRANSACTION_PERIODIC);
    return 1;
}

/** There are multiple conditions that can cause a port interrupt. This function
 * determines which interrupt conditions have occurred and handles them
 * appropriately. */
int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd)
{
    int retval = 0;
    hprt0_data_t hprt0;
    hprt0_data_t hprt0_modify;

    hprt0.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0);
    hprt0_modify.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0);

    /* Clear appropriate bits in HPRT0 to clear the interrupt bit in
     * GINTSTS */

    hprt0_modify.b.prtena = 0;
    hprt0_modify.b.prtconndet = 0;
    hprt0_modify.b.prtenchng = 0;
    hprt0_modify.b.prtovrcurrchng = 0;
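    /*
     * The *det/*chng bits in HPRT0 are write-1-to-clear, and writing 1 to
     * prtena disables the port. They are zeroed in the hprt0_modify copy
     * here and set again below only for the conditions actually handled,
     * so the single write at the end of this function acknowledges exactly
     * those port interrupts.
     */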

    /* Port Connect Detected
     * Set flag and clear if detected */
    if (hprt0.b.prtconndet) {
        DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
                "Port Connect Detected--\n", hprt0.d32);
        dwc_otg_hcd->flags.b.port_connect_status_change = 1;
        dwc_otg_hcd->flags.b.port_connect_status = 1;
        hprt0_modify.b.prtconndet = 1;

        /* B-Device has connected, Delete the connection timer. */
        del_timer(&dwc_otg_hcd->conn_timer);

        /* The Hub driver asserts a reset when it sees port connect
         * status change flag */
        retval |= 1;
    }

    /* Port Enable Changed
     * Clear if detected - Set internal flag if disabled */
    if (hprt0.b.prtenchng) {
        DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
                "Port Enable Changed--\n", hprt0.d32);
        hprt0_modify.b.prtenchng = 1;
        if (hprt0.b.prtena == 1) {
            int do_reset = 0;
            dwc_otg_core_params_t *params = dwc_otg_hcd->core_if->core_params;
            dwc_otg_core_global_regs_t *global_regs = dwc_otg_hcd->core_if->core_global_regs;
            dwc_otg_host_if_t *host_if = dwc_otg_hcd->core_if->host_if;

            /* Check if we need to adjust the PHY clock speed for
             * low power and adjust it */
            if (params->host_support_fs_ls_low_power) {
                gusbcfg_data_t usbcfg;

                usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);

                if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED ||
                    hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED) {
                    /*
                     * Low power
                     */
                    hcfg_data_t hcfg;
                    if (usbcfg.b.phylpwrclksel == 0) {
                        /* Set PHY low power clock select for FS/LS devices */
                        usbcfg.b.phylpwrclksel = 1;
                        dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
                        do_reset = 1;
                    }

                    hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);

                    if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED &&
                        params->host_ls_low_power_phy_clk ==
                         DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
                        /* 6 MHZ */
                        DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 6 MHz (Low Power)\n");
                        if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) {
                            hcfg.b.fslspclksel = DWC_HCFG_6_MHZ;
                            dwc_write_reg32(&host_if->host_global_regs->hcfg,
                                    hcfg.d32);
                            do_reset = 1;
                        }
                    } else {
                        /* 48 MHZ */
                        DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 48 MHz ()\n");
                        if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) {
                            hcfg.b.fslspclksel = DWC_HCFG_48_MHZ;
                            dwc_write_reg32(&host_if->host_global_regs->hcfg,
                                    hcfg.d32);
                            do_reset = 1;
                        }
                    }
                } else {
                    /*
                     * Not low power
                     */
                    if (usbcfg.b.phylpwrclksel == 1) {
                        usbcfg.b.phylpwrclksel = 0;
                        dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
                        do_reset = 1;
                    }
                }

                if (do_reset) {
                    tasklet_schedule(dwc_otg_hcd->reset_tasklet);
                }
            }

            if (!do_reset) {
                /* Port has been enabled; set the reset change flag */
                dwc_otg_hcd->flags.b.port_reset_change = 1;
            }
        } else {
            dwc_otg_hcd->flags.b.port_enable_change = 1;
        }
        retval |= 1;
    }

    /** Overcurrent Change Interrupt */
    if (hprt0.b.prtovrcurrchng) {
        DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
                "Port Overcurrent Changed--\n", hprt0.d32);
        dwc_otg_hcd->flags.b.port_over_current_change = 1;
        hprt0_modify.b.prtovrcurrchng = 1;
        retval |= 1;
    }

    /* Clear Port Interrupts */
    dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);

    return retval;
}

/** This interrupt indicates that one or more host channels have a pending
 * interrupt. There are multiple conditions that can cause each host channel
 * interrupt. This function determines which conditions have occurred for each
 * host channel interrupt and handles them appropriately. */
int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd)
{
    int i;
    int retval = 0;
    haint_data_t haint;

    /* Clear appropriate bits in HCINTn to clear the interrupt bit in
     * GINTSTS */

    haint.d32 = dwc_otg_read_host_all_channels_intr(dwc_otg_hcd->core_if);

    for (i = 0; i < dwc_otg_hcd->core_if->core_params->host_channels; i++) {
        if (haint.b2.chint & (1 << i)) {
            retval |= dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd, i);
        }
    }

    return retval;
}

/* Macro used to clear one channel interrupt */
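/* The HCINTn register bits are write-1-to-clear, so writing the single set
 * bit below acknowledges just that interrupt without touching the others. */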
#define clear_hc_int(_hc_regs_, _intr_) \
do { \
    hcint_data_t hcint_clear = {.d32 = 0}; \
    hcint_clear.b._intr_ = 1; \
    dwc_write_reg32(&(_hc_regs_)->hcint, hcint_clear.d32); \
} while (0)

/*
 * Macro used to disable one channel interrupt. Channel interrupts are
 * disabled when the channel is halted or released by the interrupt handler.
 * There is no need to handle further interrupts of that type until the
 * channel is re-assigned. In fact, subsequent handling may cause crashes
 * because the channel structures are cleaned up when the channel is released.
 */
#define disable_hc_int(_hc_regs_, _intr_) \
do { \
    hcintmsk_data_t hcintmsk = {.d32 = 0}; \
    hcintmsk.b._intr_ = 1; \
    dwc_modify_reg32(&(_hc_regs_)->hcintmsk, hcintmsk.d32, 0); \
} while (0)

/**
 * Gets the actual length of a transfer after the transfer halts. _halt_status
 * holds the reason for the halt.
 *
 * For IN transfers where halt_status is DWC_OTG_HC_XFER_COMPLETE,
 * *short_read is set to 1 upon return if less than the requested
 * number of bytes were transferred. Otherwise, *short_read is set to 0 upon
 * return. short_read may also be NULL on entry, in which case it remains
 * unchanged.
 */
static uint32_t get_actual_xfer_length(dwc_hc_t *hc,
                       dwc_otg_hc_regs_t *hc_regs,
                       dwc_otg_qtd_t *qtd,
                       dwc_otg_halt_status_e halt_status,
                       int *short_read)
{
    hctsiz_data_t hctsiz;
    uint32_t length;

    if (short_read != NULL) {
        *short_read = 0;
    }
    hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);

    if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
        if (hc->ep_is_in) {
            length = hc->xfer_len - hctsiz.b.xfersize;
            if (short_read != NULL) {
                *short_read = (hctsiz.b.xfersize != 0);
            }
        } else if (hc->qh->do_split) {
            length = qtd->ssplit_out_xfer_count;
        } else {
            length = hc->xfer_len;
        }
    } else {
        /*
         * Must use the hctsiz.pktcnt field to determine how much data
         * has been transferred. This field reflects the number of
         * packets that have been transferred via the USB. This is
         * always an integral number of packets if the transfer was
         * halted before its normal completion. (Can't use the
         * hctsiz.xfersize field because that reflects the number of
         * bytes transferred via the AHB, not the USB).
         */
        length = (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet;
    }

    return length;
}

/**
 * Updates the state of the URB after a Transfer Complete interrupt on the
 * host channel. Updates the actual_length field of the URB based on the
 * number of bytes transferred via the host channel. Sets the URB status
 * if the data transfer is finished.
 *
 * @return 1 if the data transfer specified by the URB is completely finished,
 * 0 otherwise.
 */
static int update_urb_state_xfer_comp(dwc_hc_t *hc,
                      dwc_otg_hc_regs_t *hc_regs,
                      struct urb *urb,
                      dwc_otg_qtd_t *qtd)
{
    int xfer_done = 0;
    int short_read = 0;

    urb->actual_length += get_actual_xfer_length(hc, hc_regs, qtd,
                             DWC_OTG_HC_XFER_COMPLETE,
                             &short_read);

    if (short_read || urb->actual_length == urb->transfer_buffer_length) {
        xfer_done = 1;
        if (short_read && (urb->transfer_flags & URB_SHORT_NOT_OK)) {
            urb->status = -EREMOTEIO;
        } else {
            urb->status = 0;
        }
    }

#ifdef DEBUG
    {
        hctsiz_data_t hctsiz;
        hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
        DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
                __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num);
        DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", hc->xfer_len);
        DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n", hctsiz.b.xfersize);
        DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n",
                urb->transfer_buffer_length);
        DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", urb->actual_length);
        DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n",
                short_read, xfer_done);
    }
#endif

    return xfer_done;
}

/*
 * Save the starting data toggle for the next transfer. The data toggle is
 * saved in the QH for non-control transfers and it's saved in the QTD for
 * control transfers.
 */
static void save_data_toggle(dwc_hc_t *hc,
                 dwc_otg_hc_regs_t *hc_regs,
                 dwc_otg_qtd_t *qtd)
{
    hctsiz_data_t hctsiz;
    hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);

    if (hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) {
        dwc_otg_qh_t *qh = hc->qh;
        if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
            qh->data_toggle = DWC_OTG_HC_PID_DATA0;
        } else {
            qh->data_toggle = DWC_OTG_HC_PID_DATA1;
        }
    } else {
        if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
            qtd->data_toggle = DWC_OTG_HC_PID_DATA0;
        } else {
            qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
        }
    }
}

/**
 * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
 * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
 * still linked to the QH, the QH is added to the end of the inactive
 * non-periodic schedule. For periodic QHs, removes the QH from the periodic
 * schedule if no more QTDs are linked to the QH.
 */
static void deactivate_qh(dwc_otg_hcd_t *hcd,
              dwc_otg_qh_t *qh,
              int free_qtd)
{
    int continue_split = 0;
    dwc_otg_qtd_t *qtd;

    DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd);

    spin_lock(&hcd->lock);
    qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);

    if (qtd->complete_split) {
        continue_split = 1;
    } else if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID ||
           qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END) {
        continue_split = 1;
    }

    if (free_qtd) {
        dwc_otg_hcd_qtd_remove_and_free(hcd, qtd);
        continue_split = 0;
    }

    qh->channel = NULL;
    qh->qtd_in_process = NULL;
    spin_unlock(&hcd->lock);
    dwc_otg_hcd_qh_deactivate(hcd, qh, continue_split);
}

/**
 * Updates the state of an Isochronous URB when the transfer is stopped for
 * any reason. The fields of the current entry in the frame descriptor array
 * are set based on the transfer state and the input _halt_status. Completes
 * the Isochronous URB if all the URB frames have been completed.
 *
 * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be
 * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE.
 */
static dwc_otg_halt_status_e
update_isoc_urb_state(dwc_otg_hcd_t *hcd,
              dwc_hc_t *hc,
              dwc_otg_hc_regs_t *hc_regs,
              dwc_otg_qtd_t *qtd,
              dwc_otg_halt_status_e halt_status)
{
    struct urb *urb = qtd->urb;
    dwc_otg_halt_status_e ret_val = halt_status;
    struct usb_iso_packet_descriptor *frame_desc;

    frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index];
    switch (halt_status) {
    case DWC_OTG_HC_XFER_COMPLETE:
        frame_desc->status = 0;
        frame_desc->actual_length =
            get_actual_xfer_length(hc, hc_regs, qtd,
                           halt_status, NULL);
        break;
    case DWC_OTG_HC_XFER_FRAME_OVERRUN:
        urb->error_count++;
        if (hc->ep_is_in) {
            frame_desc->status = -ENOSR;
        } else {
            frame_desc->status = -ECOMM;
        }
        frame_desc->actual_length = 0;
        break;
    case DWC_OTG_HC_XFER_BABBLE_ERR:
        urb->error_count++;
        frame_desc->status = -EOVERFLOW;
        /* Don't need to update actual_length in this case. */
        break;
    case DWC_OTG_HC_XFER_XACT_ERR:
        urb->error_count++;
        frame_desc->status = -EPROTO;
        frame_desc->actual_length =
            get_actual_xfer_length(hc, hc_regs, qtd,
                           halt_status, NULL);
        break;
    default:
        DWC_ERROR("%s: Unhandled _halt_status (%d)\n", __func__,
              halt_status);
        BUG();
        break;
    }

    if (++qtd->isoc_frame_index == urb->number_of_packets) {
        /*
         * urb->status is not used for isoc transfers.
         * The individual frame_desc statuses are used instead.
         */
        dwc_otg_hcd_complete_urb(hcd, urb, 0);
        ret_val = DWC_OTG_HC_XFER_URB_COMPLETE;
    } else {
        ret_val = DWC_OTG_HC_XFER_COMPLETE;
    }

    return ret_val;
}

/**
 * Releases a host channel for use by other transfers. Attempts to select and
 * queue more transactions since at least one host channel is available.
 *
 * @param hcd The HCD state structure.
 * @param hc The host channel to release.
 * @param qtd The QTD associated with the host channel. This QTD may be freed
 * if the transfer is complete or an error has occurred.
 * @param halt_status Reason the channel is being released. This status
 * determines the actions taken by this function.
 */
static void release_channel(dwc_otg_hcd_t *hcd,
                dwc_hc_t *hc,
                dwc_otg_qtd_t *qtd,
                dwc_otg_halt_status_e halt_status)
{
    dwc_otg_transaction_type_e tr_type;
    int free_qtd;

    DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n",
            __func__, hc->hc_num, halt_status);

    switch (halt_status) {
    case DWC_OTG_HC_XFER_URB_COMPLETE:
        free_qtd = 1;
        break;
    case DWC_OTG_HC_XFER_AHB_ERR:
    case DWC_OTG_HC_XFER_STALL:
    case DWC_OTG_HC_XFER_BABBLE_ERR:
        free_qtd = 1;
        break;
    case DWC_OTG_HC_XFER_XACT_ERR:
        if (qtd->error_count >= 3) {
            DWC_DEBUGPL(DBG_HCDV, " Complete URB with transaction error\n");
            free_qtd = 1;
            qtd->urb->status = -EPROTO;
            dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EPROTO);
        } else {
            free_qtd = 0;
        }
        break;
    case DWC_OTG_HC_XFER_URB_DEQUEUE:
        /*
         * The QTD has already been removed and the QH has been
         * deactivated. Don't want to do anything except release the
         * host channel and try to queue more transfers.
         */
        goto cleanup;
    case DWC_OTG_HC_XFER_NO_HALT_STATUS:
        DWC_ERROR("%s: No halt_status, channel %d\n", __func__, hc->hc_num);
        free_qtd = 0;
        break;
    default:
        free_qtd = 0;
        break;
    }

    deactivate_qh(hcd, hc->qh, free_qtd);

 cleanup:
    /*
     * Release the host channel for use by other transfers. The cleanup
     * function clears the channel interrupt enables and conditions, so
     * there's no need to clear the Channel Halted interrupt separately.
     */
    dwc_otg_hc_cleanup(hcd->core_if, hc);
    list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list);

    switch (hc->ep_type) {
    case DWC_OTG_EP_TYPE_CONTROL:
    case DWC_OTG_EP_TYPE_BULK:
        hcd->non_periodic_channels--;
        break;

    default:
        /*
         * Don't release reservations for periodic channels here.
         * That's done when a periodic transfer is descheduled (i.e.
         * when the QH is removed from the periodic schedule).
         */
        break;
    }

    /* Try to queue more transfers now that there's a free channel. */
    tr_type = dwc_otg_hcd_select_transactions(hcd);
    if (tr_type != DWC_OTG_TRANSACTION_NONE) {
        dwc_otg_hcd_queue_transactions(hcd, tr_type);
    }
}

/**
 * Halts a host channel. If the channel cannot be halted immediately because
 * the request queue is full, this function ensures that the FIFO empty
 * interrupt for the appropriate queue is enabled so that the halt request can
 * be queued when there is space in the request queue.
 *
 * This function may also be called in DMA mode. In that case, the channel is
 * simply released since the core always halts the channel automatically in
 * DMA mode.
 */
static void halt_channel(dwc_otg_hcd_t *hcd,
             dwc_hc_t *hc,
             dwc_otg_qtd_t *qtd,
             dwc_otg_halt_status_e halt_status)
{
    if (hcd->core_if->dma_enable) {
        release_channel(hcd, hc, qtd, halt_status);
        return;
    }

    /* Slave mode processing... */
    dwc_otg_hc_halt(hcd->core_if, hc, halt_status);

    if (hc->halt_on_queue) {
        gintmsk_data_t gintmsk = {.d32 = 0};
        dwc_otg_core_global_regs_t *global_regs;
        global_regs = hcd->core_if->core_global_regs;

        if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
            hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
            /*
             * Make sure the Non-periodic Tx FIFO empty interrupt
             * is enabled so that the non-periodic schedule will
             * be processed.
             */
            gintmsk.b.nptxfempty = 1;
            dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32);
        } else {
            /*
             * Move the QH from the periodic queued schedule to
             * the periodic assigned schedule. This allows the
             * halt to be queued when the periodic schedule is
             * processed.
             */
            list_move(&hc->qh->qh_list_entry,
                  &hcd->periodic_sched_assigned);

            /*
             * Make sure the Periodic Tx FIFO Empty interrupt is
             * enabled so that the periodic schedule will be
             * processed.
             */
            gintmsk.b.ptxfempty = 1;
            dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32);
        }
    }
}

/**
 * Performs common cleanup for non-periodic transfers after a Transfer
 * Complete interrupt. This function should be called after any endpoint type
 * specific handling is finished to release the host channel.
 */
static void complete_non_periodic_xfer(dwc_otg_hcd_t *hcd,
                       dwc_hc_t *hc,
                       dwc_otg_hc_regs_t *hc_regs,
                       dwc_otg_qtd_t *qtd,
                       dwc_otg_halt_status_e halt_status)
{
    hcint_data_t hcint;

    qtd->error_count = 0;

    hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
    if (hcint.b.nyet) {
        /*
         * Got a NYET on the last transaction of the transfer. This
         * means that the endpoint should be in the PING state at the
         * beginning of the next transfer.
         */
        hc->qh->ping_state = 1;
        clear_hc_int(hc_regs, nyet);
    }

    /*
     * Always halt and release the host channel to make it available for
     * more transfers. There may still be more phases for a control
     * transfer or more data packets for a bulk transfer at this point,
     * but the host channel is still halted. A channel will be reassigned
     * to the transfer when the non-periodic schedule is processed after
     * the channel is released. This allows transactions to be queued
     * properly via dwc_otg_hcd_queue_transactions, which also enables the
     * Tx FIFO Empty interrupt if necessary.
     */
    if (hc->ep_is_in) {
        /*
         * IN transfers in Slave mode require an explicit disable to
         * halt the channel. (In DMA mode, this call simply releases
         * the channel.)
         */
        halt_channel(hcd, hc, qtd, halt_status);
    } else {
        /*
         * The channel is automatically disabled by the core for OUT
         * transfers in Slave mode.
         */
        release_channel(hcd, hc, qtd, halt_status);
    }
}

/**
 * Performs common cleanup for periodic transfers after a Transfer Complete
 * interrupt. This function should be called after any endpoint type specific
 * handling is finished to release the host channel.
 */
static void complete_periodic_xfer(dwc_otg_hcd_t *hcd,
                   dwc_hc_t *hc,
                   dwc_otg_hc_regs_t *hc_regs,
                   dwc_otg_qtd_t *qtd,
                   dwc_otg_halt_status_e halt_status)
{
    hctsiz_data_t hctsiz;
    qtd->error_count = 0;

    hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
    if (!hc->ep_is_in || hctsiz.b.pktcnt == 0) {
        /* Core halts channel in these cases. */
        release_channel(hcd, hc, qtd, halt_status);
    } else {
        /* Flush any outstanding requests from the Tx queue. */
        halt_channel(hcd, hc, qtd, halt_status);
    }
}

/**
 * Handles a host channel Transfer Complete interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 */
static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t *hcd,
                       dwc_hc_t *hc,
                       dwc_otg_hc_regs_t *hc_regs,
                       dwc_otg_qtd_t *qtd)
{
    int urb_xfer_done;
    dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE;
    struct urb *urb = qtd->urb;
    int pipe_type = usb_pipetype(urb->pipe);

    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
            "Transfer Complete--\n", hc->hc_num);

    /*
     * Handle xfer complete on CSPLIT.
     */
    if (hc->qh->do_split) {
        qtd->complete_split = 0;
    }

    /* Update the QTD and URB states. */
    switch (pipe_type) {
    case PIPE_CONTROL:
        switch (qtd->control_phase) {
        case DWC_OTG_CONTROL_SETUP:
            if (urb->transfer_buffer_length > 0) {
                qtd->control_phase = DWC_OTG_CONTROL_DATA;
            } else {
                qtd->control_phase = DWC_OTG_CONTROL_STATUS;
            }
            DWC_DEBUGPL(DBG_HCDV, " Control setup transaction done\n");
            halt_status = DWC_OTG_HC_XFER_COMPLETE;
            break;
        case DWC_OTG_CONTROL_DATA: {
            urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
            if (urb_xfer_done) {
                qtd->control_phase = DWC_OTG_CONTROL_STATUS;
                DWC_DEBUGPL(DBG_HCDV, " Control data transfer done\n");
            } else {
                save_data_toggle(hc, hc_regs, qtd);
            }
            halt_status = DWC_OTG_HC_XFER_COMPLETE;
            break;
        }
        case DWC_OTG_CONTROL_STATUS:
            DWC_DEBUGPL(DBG_HCDV, " Control transfer complete\n");
            if (urb->status == -EINPROGRESS) {
                urb->status = 0;
            }
            dwc_otg_hcd_complete_urb(hcd, urb, urb->status);
            halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
            break;
        }

        complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
        break;
    case PIPE_BULK:
        DWC_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n");
        urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
        if (urb_xfer_done) {
            dwc_otg_hcd_complete_urb(hcd, urb, urb->status);
            halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
        } else {
            halt_status = DWC_OTG_HC_XFER_COMPLETE;
        }

        save_data_toggle(hc, hc_regs, qtd);
        complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
        break;
    case PIPE_INTERRUPT:
        DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n");
        update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);

        /*
         * Interrupt URB is done on the first transfer complete
         * interrupt.
         */
        dwc_otg_hcd_complete_urb(hcd, urb, urb->status);
        save_data_toggle(hc, hc_regs, qtd);
        complete_periodic_xfer(hcd, hc, hc_regs, qtd,
                       DWC_OTG_HC_XFER_URB_COMPLETE);
        break;
    case PIPE_ISOCHRONOUS:
        DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n");
        if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) {
            halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
                                DWC_OTG_HC_XFER_COMPLETE);
        }
        complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
        break;
    }

    disable_hc_int(hc_regs, xfercompl);

    return 1;
}

/**
 * Handles a host channel STALL interrupt. This handler may be called in
 * either DMA mode or Slave mode.
 */
static int32_t handle_hc_stall_intr(dwc_otg_hcd_t *hcd,
                    dwc_hc_t *hc,
                    dwc_otg_hc_regs_t *hc_regs,
                    dwc_otg_qtd_t *qtd)
{
    struct urb *urb = qtd->urb;
    int pipe_type = usb_pipetype(urb->pipe);

    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
            "STALL Received--\n", hc->hc_num);

    if (pipe_type == PIPE_CONTROL) {
        dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE);
    }

    if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) {
        dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE);
        /*
         * USB protocol requires resetting the data toggle for bulk
         * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
         * setup command is issued to the endpoint. Anticipate the
         * CLEAR_FEATURE command since a STALL has occurred and reset
         * the data toggle now.
         */
        hc->qh->data_toggle = 0;
    }

    halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_STALL);

    disable_hc_int(hc_regs, stall);

    return 1;
}

/*
 * Updates the state of the URB when a transfer has been stopped due to an
 * abnormal condition before the transfer completes. Modifies the
 * actual_length field of the URB to reflect the number of bytes that have
 * actually been transferred via the host channel.
 */
static void update_urb_state_xfer_intr(dwc_hc_t *hc,
                       dwc_otg_hc_regs_t *hc_regs,
                       struct urb *urb,
                       dwc_otg_qtd_t *qtd,
                       dwc_otg_halt_status_e halt_status)
{
    uint32_t bytes_transferred = get_actual_xfer_length(hc, hc_regs, qtd,
                                halt_status, NULL);
    urb->actual_length += bytes_transferred;

#ifdef DEBUG
    {
        hctsiz_data_t hctsiz;
        hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
        DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
                __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num);
        DWC_DEBUGPL(DBG_HCDV, " hc->start_pkt_count %d\n", hc->start_pkt_count);
        DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
        DWC_DEBUGPL(DBG_HCDV, " hc->max_packet %d\n", hc->max_packet);
        DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d\n", bytes_transferred);
        DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", urb->actual_length);
        DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n",
                urb->transfer_buffer_length);
    }
#endif
}

/**
 * Handles a host channel NAK interrupt. This handler may be called in either
 * DMA mode or Slave mode.
 */
static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *hcd,
                  dwc_hc_t *hc,
                  dwc_otg_hc_regs_t *hc_regs,
                  dwc_otg_qtd_t *qtd)
{
    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
            "NAK Received--\n", hc->hc_num);

    /*
     * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
     * interrupt. Re-start the SSPLIT transfer.
     */
    if (hc->do_split) {
        if (hc->complete_split) {
            qtd->error_count = 0;
        }
        qtd->complete_split = 0;
        halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
        goto handle_nak_done;
    }

    switch (usb_pipetype(qtd->urb->pipe)) {
    case PIPE_CONTROL:
    case PIPE_BULK:
        if (hcd->core_if->dma_enable && hc->ep_is_in) {
            /*
             * NAK interrupts are enabled on bulk/control IN
             * transfers in DMA mode for the sole purpose of
             * resetting the error count after a transaction error
             * occurs. The core will continue transferring data.
             */
            qtd->error_count = 0;
            goto handle_nak_done;
        }

        /*
         * NAK interrupts normally occur during OUT transfers in DMA
         * or Slave mode. For IN transfers, more requests will be
         * queued as request queue space is available.
         */
        qtd->error_count = 0;

        if (!hc->qh->ping_state) {
            update_urb_state_xfer_intr(hc, hc_regs, qtd->urb,
                           qtd, DWC_OTG_HC_XFER_NAK);
            save_data_toggle(hc, hc_regs, qtd);
            if (qtd->urb->dev->speed == USB_SPEED_HIGH) {
                hc->qh->ping_state = 1;
            }
        }

        /*
         * Halt the channel so the transfer can be re-started from
         * the appropriate point or the PING protocol will
         * start/continue.
         */
        halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
        break;
    case PIPE_INTERRUPT:
        qtd->error_count = 0;
        halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
        break;
    case PIPE_ISOCHRONOUS:
        /* Should never get called for isochronous transfers. */
        BUG();
        break;
    }

 handle_nak_done:
    disable_hc_int(hc_regs, nak);

    return 1;
}

/**
 * Handles a host channel ACK interrupt. This interrupt is enabled when
 * performing the PING protocol in Slave mode, when errors occur during
 * either Slave mode or DMA mode, and during Start Split transactions.
 */
static int32_t handle_hc_ack_intr(dwc_otg_hcd_t *hcd,
                  dwc_hc_t *hc,
                  dwc_otg_hc_regs_t *hc_regs,
                  dwc_otg_qtd_t *qtd)
{
    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
            "ACK Received--\n", hc->hc_num);

    if (hc->do_split) {
        /*
         * Handle ACK on SSPLIT.
         * ACK should not occur in CSPLIT.
         */
        if (!hc->ep_is_in && hc->data_pid_start != DWC_OTG_HC_PID_SETUP) {
            qtd->ssplit_out_xfer_count = hc->xfer_len;
        }
        if (!(hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in)) {
            /* Don't need complete for isochronous out transfers. */
            qtd->complete_split = 1;
        }

        /* ISOC OUT */
        if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
            switch (hc->xact_pos) {
            case DWC_HCSPLIT_XACTPOS_ALL:
                break;
            case DWC_HCSPLIT_XACTPOS_END:
                qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
                qtd->isoc_split_offset = 0;
                break;
            case DWC_HCSPLIT_XACTPOS_BEGIN:
            case DWC_HCSPLIT_XACTPOS_MID:
                /*
                 * For BEGIN or MID, calculate the length for
                 * the next microframe to determine the correct
                 * SSPLIT token, either MID or END.
                 */
                {
                    struct usb_iso_packet_descriptor *frame_desc;

                    frame_desc = &qtd->urb->iso_frame_desc[qtd->isoc_frame_index];
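                    /*
                     * 188 bytes is the largest isochronous OUT
                     * payload that fits in one start-split per
                     * microframe, so advance the split offset in
                     * 188-byte steps.
                     */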
                    qtd->isoc_split_offset += 188;

                    if ((frame_desc->length - qtd->isoc_split_offset) <= 188) {
                        qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_END;
                    } else {
                        qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_MID;
                    }

                }
                break;
            }
        } else {
            halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK);
        }
    } else {
        qtd->error_count = 0;

        if (hc->qh->ping_state) {
            hc->qh->ping_state = 0;
            /*
             * Halt the channel so the transfer can be re-started
             * from the appropriate point. This only happens in
             * Slave mode. In DMA mode, the ping_state is cleared
             * when the transfer is started because the core
             * automatically executes the PING, then the transfer.
             */
            halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK);
        }
    }

    /*
     * If the ACK occurred when _not_ in the PING state, let the channel
     * continue transferring data after clearing the error count.
     */

    disable_hc_int(hc_regs, ack);

    return 1;
}

/**
 * Handles a host channel NYET interrupt. This interrupt should only occur on
 * Bulk and Control OUT endpoints and for complete split transactions. If a
 * NYET occurs at the same time as a Transfer Complete interrupt, it is
 * handled in the xfercomp interrupt handler, not here. This handler may be
 * called in either DMA mode or Slave mode.
 */
static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t *hcd,
                   dwc_hc_t *hc,
                   dwc_otg_hc_regs_t *hc_regs,
                   dwc_otg_qtd_t *qtd)
{
    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
            "NYET Received--\n", hc->hc_num);

    /*
     * NYET on CSPLIT
     * re-do the CSPLIT immediately on non-periodic
     */
    if (hc->do_split && hc->complete_split) {
        if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
            hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
            int frnum = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));

            if (dwc_full_frame_num(frnum) !=
                dwc_full_frame_num(hc->qh->sched_frame)) {
                /*
                 * No longer in the same full speed frame.
                 * Treat this as a transaction error.
                 */
#if 0
                /** @todo Fix system performance so this can
                 * be treated as an error. Right now complete
                 * splits cannot be scheduled precisely enough
                 * due to other system activity, so this error
                 * occurs regularly in Slave mode.
                 */
                qtd->error_count++;
#endif
                qtd->complete_split = 0;
                halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
                /** @todo add support for isoc release */
                goto handle_nyet_done;
            }
        }

        halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET);
        goto handle_nyet_done;
    }

    hc->qh->ping_state = 1;
    qtd->error_count = 0;

    update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd,
                   DWC_OTG_HC_XFER_NYET);
    save_data_toggle(hc, hc_regs, qtd);

    /*
     * Halt the channel and re-start the transfer so the PING
     * protocol will start.
     */
    halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET);

handle_nyet_done:
    disable_hc_int(hc_regs, nyet);
    return 1;
}

/**
 * Handles a host channel babble interrupt. This handler may be called in
 * either DMA mode or Slave mode.
 */
static int32_t handle_hc_babble_intr(dwc_otg_hcd_t *hcd,
                     dwc_hc_t *hc,
                     dwc_otg_hc_regs_t *hc_regs,
                     dwc_otg_qtd_t *qtd)
{
    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
            "Babble Error--\n", hc->hc_num);
    if (hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
        dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EOVERFLOW);
        halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_BABBLE_ERR);
    } else {
        dwc_otg_halt_status_e halt_status;
        halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
                            DWC_OTG_HC_XFER_BABBLE_ERR);
        halt_channel(hcd, hc, qtd, halt_status);
    }
    disable_hc_int(hc_regs, bblerr);
    return 1;
}

/**
 * Handles a host channel AHB error interrupt. This handler is only called in
 * DMA mode.
 */
static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t *hcd,
                     dwc_hc_t *hc,
                     dwc_otg_hc_regs_t *hc_regs,
                     dwc_otg_qtd_t *qtd)
{
    hcchar_data_t hcchar;
    hcsplt_data_t hcsplt;
    hctsiz_data_t hctsiz;
    uint32_t hcdma;
    struct urb *urb = qtd->urb;

    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
            "AHB Error--\n", hc->hc_num);

    hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
    hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
    hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
    hcdma = dwc_read_reg32(&hc_regs->hcdma);

    DWC_ERROR("AHB ERROR, Channel %d\n", hc->hc_num);
    DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
    DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
    DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n");
    DWC_ERROR(" Device address: %d\n", usb_pipedevice(urb->pipe));
    DWC_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
          (usb_pipein(urb->pipe) ? "IN" : "OUT"));
    DWC_ERROR(" Endpoint type: %s\n",
          ({char *pipetype;
            switch (usb_pipetype(urb->pipe)) {
            case PIPE_CONTROL: pipetype = "CONTROL"; break;
            case PIPE_BULK: pipetype = "BULK"; break;
            case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break;
            case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break;
            default: pipetype = "UNKNOWN"; break;
           }; pipetype;}));
    DWC_ERROR(" Speed: %s\n",
          ({char *speed;
            switch (urb->dev->speed) {
            case USB_SPEED_HIGH: speed = "HIGH"; break;
            case USB_SPEED_FULL: speed = "FULL"; break;
            case USB_SPEED_LOW: speed = "LOW"; break;
            default: speed = "UNKNOWN"; break;
           }; speed;}));
    DWC_ERROR(" Max packet size: %d\n",
          usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
    DWC_ERROR(" Data buffer length: %d\n", urb->transfer_buffer_length);
    DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n",
          urb->transfer_buffer, (void *)urb->transfer_dma);
    DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n",
          urb->setup_packet, (void *)urb->setup_dma);
    DWC_ERROR(" Interval: %d\n", urb->interval);

    dwc_otg_hcd_complete_urb(hcd, urb, -EIO);

    /*
     * Force a channel halt. Don't call halt_channel because that won't
     * write to the HCCHARn register in DMA mode to force the halt.
     */
    dwc_otg_hc_halt(hcd->core_if, hc, DWC_OTG_HC_XFER_AHB_ERR);

    disable_hc_int(hc_regs, ahberr);
    return 1;
}
1418
1419/**
1420 * Handles a host channel transaction error interrupt. This handler may be
1421 * called in either DMA mode or Slave mode.
1422 */
1423static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t *hcd,
1424                      dwc_hc_t *hc,
1425                      dwc_otg_hc_regs_t *hc_regs,
1426                      dwc_otg_qtd_t *qtd)
1427{
1428    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1429            "Transaction Error--\n", hc->hc_num);
1430
1431    switch (usb_pipetype(qtd->urb->pipe)) {
1432    case PIPE_CONTROL:
1433    case PIPE_BULK:
1434        qtd->error_count++;
1435        if (!hc->qh->ping_state) {
1436            update_urb_state_xfer_intr(hc, hc_regs, qtd->urb,
1437                           qtd, DWC_OTG_HC_XFER_XACT_ERR);
1438            save_data_toggle(hc, hc_regs, qtd);
1439            if (!hc->ep_is_in && qtd->urb->dev->speed == USB_SPEED_HIGH) {
1440                hc->qh->ping_state = 1;
1441            }
1442        }
1443
1444        /*
1445         * Halt the channel so the transfer can be re-started from
1446         * the appropriate point or the PING protocol will start.
1447         */
1448        halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
1449        break;
1450    case PIPE_INTERRUPT:
1451        qtd->error_count++;
1452        if (hc->do_split && hc->complete_split) {
1453            qtd->complete_split = 0;
1454        }
1455        halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
1456        break;
1457    case PIPE_ISOCHRONOUS:
1458        {
1459            dwc_otg_halt_status_e halt_status;
1460            halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1461                                DWC_OTG_HC_XFER_XACT_ERR);
1462
1463            halt_channel(hcd, hc, qtd, halt_status);
1464        }
1465        break;
1466    }
1467
1468    disable_hc_int(hc_regs, xacterr);
1469
1470    return 1;
1471}
1472
1473/**
1474 * Handles a host channel frame overrun interrupt. This handler may be called
1475 * in either DMA mode or Slave mode.
1476 */
1477static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t *hcd,
1478                       dwc_hc_t *hc,
1479                       dwc_otg_hc_regs_t *hc_regs,
1480                       dwc_otg_qtd_t *qtd)
1481{
1482    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1483            "Frame Overrun--\n", hc->hc_num);
1484
1485    switch (usb_pipetype(qtd->urb->pipe)) {
1486    case PIPE_CONTROL:
1487    case PIPE_BULK:
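        /* No recovery action is taken for non-periodic transfers. */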
1488        break;
1489    case PIPE_INTERRUPT:
1490        halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN);
1491        break;
1492    case PIPE_ISOCHRONOUS:
1493        {
1494            dwc_otg_halt_status_e halt_status;
1495            halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1496                                DWC_OTG_HC_XFER_FRAME_OVERRUN);
1497
1498            halt_channel(hcd, hc, qtd, halt_status);
1499        }
1500        break;
1501    }
1502
1503    disable_hc_int(hc_regs, frmovrun);
1504
1505    return 1;
1506}
1507
1508/**
1509 * Handles a host channel data toggle error interrupt. This handler may be
1510 * called in either DMA mode or Slave mode.
1511 */
1512static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t *hcd,
1513                     dwc_hc_t *hc,
1514                     dwc_otg_hc_regs_t *hc_regs,
1515                     dwc_otg_qtd_t *qtd)
1516{
1517    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1518            "Data Toggle Error--\n", hc->hc_num);
1519
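    /*
     * Clear the error count for IN transfers; a data toggle error on an
     * OUT transfer is only reported as an error.
     */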
1520    if (hc->ep_is_in) {
1521        qtd->error_count = 0;
1522    } else {
1523        DWC_ERROR("Data Toggle Error on OUT transfer, "
1524              "channel %d\n", hc->hc_num);
1525    }
1526
1527    disable_hc_int(hc_regs, datatglerr);
1528
1529    return 1;
1530}
1531
1532#ifdef DEBUG
1533/**
1534 * This function is for debug only. It checks that a valid halt status is set
1535 * and that HCCHARn.chdis is clear. If there's a problem, corrective action is
1536 * taken and a warning is issued.
1537 * @return 1 if halt status is ok, 0 otherwise.
1538 */
1539static inline int halt_status_ok(dwc_otg_hcd_t *hcd,
1540                 dwc_hc_t *hc,
1541                 dwc_otg_hc_regs_t *hc_regs,
1542                 dwc_otg_qtd_t *qtd)
1543{
1544    hcchar_data_t hcchar;
1545    hctsiz_data_t hctsiz;
1546    hcint_data_t hcint;
1547    hcintmsk_data_t hcintmsk;
1548    hcsplt_data_t hcsplt;
1549
1550    if (hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) {
1551        /*
1552         * This code is here only as a check. This condition should
1553         * never happen. Ignore the halt if it does occur.
1554         */
1555        hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1556        hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
1557        hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1558        hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
1559        hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
1560        DWC_WARN("%s: hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, "
1561             "channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
1562             "hcint 0x%08x, hcintmsk 0x%08x, "
1563             "hcsplt 0x%08x, qtd->complete_split %d\n",
1564             __func__, hc->hc_num, hcchar.d32, hctsiz.d32,
1565             hcint.d32, hcintmsk.d32,
1566             hcsplt.d32, qtd->complete_split);
1567
1568        DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n",
1569             __func__, hc->hc_num);
1570        DWC_WARN("\n");
1571        clear_hc_int(hc_regs, chhltd);
1572        return 0;
1573    }
1574
1575    /*
1576     * This code is here only as a check. hcchar.chdis should
1577     * never be set when the halt interrupt occurs. Halt the
1578     * channel again if it does occur.
1579     */
1580    hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1581    if (hcchar.b.chdis) {
1582        DWC_WARN("%s: hcchar.chdis set unexpectedly, "
1583             "hcchar 0x%08x, trying to halt again\n",
1584             __func__, hcchar.d32);
1585        clear_hc_int(hc_regs, chhltd);
1586        hc->halt_pending = 0;
1587        halt_channel(hcd, hc, qtd, hc->halt_status);
1588        return 0;
1589    }
1590
1591    return 1;
1592}
1593#endif
1594
1595/**
1596 * Handles a host Channel Halted interrupt in DMA mode. This handler
1597 * determines the reason the channel halted and proceeds accordingly.
1598 */
1599static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *hcd,
1600                      dwc_hc_t *hc,
1601                      dwc_otg_hc_regs_t *hc_regs,
1602                      dwc_otg_qtd_t *qtd)
1603{
1604    hcint_data_t hcint;
1605    hcintmsk_data_t hcintmsk;
1606    int out_nak_enh = 0;
1607
1608    /* For cores with the OUT NAK enhancement, the flow for high-
1609     * speed CONTROL/BULK OUT is handled a little differently.
1610     */
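    /*
     * SNPSID 0x4F54271A corresponds to core release 2.71a, which is
     * assumed here to be the first release with the OUT NAK enhancement.
     */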
1611    if (hcd->core_if->snpsid >= 0x4F54271A) {
1612        if (hc->speed == DWC_OTG_EP_SPEED_HIGH && !hc->ep_is_in &&
1613            (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
1614             hc->ep_type == DWC_OTG_EP_TYPE_BULK)) {
1615            DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement enabled\n");
1616            out_nak_enh = 1;
1617        } else {
1618            DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement disabled, not HS Ctrl/Bulk OUT EP\n");
1619        }
1620    } else {
1621        DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement disabled, no core support\n");
1622    }
1623
1624    if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
1625        hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
1626        /*
1627         * Just release the channel. A dequeue can happen on a
1628         * transfer timeout. In the case of an AHB Error, the channel
1629         * was forced to halt because there's no way to gracefully
1630         * recover.
1631         */
1632        release_channel(hcd, hc, qtd, hc->halt_status);
1633        return;
1634    }
1635
1636    /* Read the HCINTn register to determine the cause for the halt. */
1637    hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1638    hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
1639
1640    if (hcint.b.xfercomp) {
1641        /** @todo This is here because of a possible hardware bug. The spec
1642         * says that on SPLIT-ISOC OUT transfers in DMA mode, a HALT
1643         * interrupt with the ACK bit set should occur, but only the
1644         * XFERCOMP bit is seen, even when it is masked out. This is a
1645         * workaround for that behavior. Fix this when the hardware is fixed.
1646         */
1647        if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
1648            handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
1649        }
1650        handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd);
1651    } else if (hcint.b.stall) {
1652        handle_hc_stall_intr(hcd, hc, hc_regs, qtd);
1653    } else if (hcint.b.xacterr) {
1654        if (out_nak_enh) {
1655            if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) {
1656                printk(KERN_DEBUG "XactErr with NYET/NAK/ACK\n");
1657                qtd->error_count = 0;
1658            } else {
1659                printk(KERN_DEBUG "XactErr without NYET/NAK/ACK\n");
1660            }
1661        }
1662
1663        /*
1664         * Must handle xacterr before nak or ack. Could get an xacterr
1665         * at the same time as either of these on a BULK/CONTROL OUT
1666         * that started with a PING. The xacterr takes precedence.
1667         */
1668        handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
1669    } else if (!out_nak_enh) {
1670        if (hcint.b.nyet) {
1671            /*
1672             * Must handle nyet before nak or ack. Could get a nyet at the
1673             * same time as either of those on a BULK/CONTROL OUT that
1674             * started with a PING. The nyet takes precedence.
1675             */
1676            handle_hc_nyet_intr(hcd, hc, hc_regs, qtd);
1677        } else if (hcint.b.bblerr) {
1678            handle_hc_babble_intr(hcd, hc, hc_regs, qtd);
1679        } else if (hcint.b.frmovrun) {
1680            handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd);
1681        } else if (hcint.b.nak && !hcintmsk.b.nak) {
1682            /*
1683             * If nak is not masked, it's because a non-split IN transfer
1684             * is in an error state. In that case, the nak is handled by
1685             * the nak interrupt handler, not here. Handle nak here for
1686             * BULK/CONTROL OUT transfers, which halt on a NAK to allow
1687             * rewinding the buffer pointer.
1688             */
1689            handle_hc_nak_intr(hcd, hc, hc_regs, qtd);
1690        } else if (hcint.b.ack && !hcintmsk.b.ack) {
1691            /*
1692             * If ack is not masked, it's because a non-split IN transfer
1693             * is in an error state. In that case, the ack is handled by
1694             * the ack interrupt handler, not here. Handle ack here for
1695             * split transfers. Start splits halt on ACK.
1696             */
1697            handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
1698        } else {
1699            if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1700                hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1701                /*
1702                 * A periodic transfer halted with no other channel
1703                 * interrupts set. Assume it was halted by the core
1704                 * because it could not be completed in its scheduled
1705                 * (micro)frame.
1706                 */
1707#ifdef DEBUG
1708                DWC_PRINT("%s: Halt channel %d (assume incomplete periodic transfer)\n",
1709                      __func__, hc->hc_num);
1710#endif
1711                halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE);
1712            } else {
1713                DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason "
1714                      "for halting is unknown, hcint 0x%08x, intsts 0x%08x\n",
1715                      __func__, hc->hc_num, hcint.d32,
1716                      dwc_read_reg32(&hcd->core_if->core_global_regs->gintsts));
1717            }
1718        }
1719    } else {
1720        printk(KERN_DEBUG "NYET/NAK/ACK/other in non-error case, 0x%08x\n", hcint.d32);
1721    }
1722}
1723
1724/**
1725 * Handles a host channel Channel Halted interrupt.
1726 *
1727 * In slave mode, this handler is called only when the driver specifically
1728 * requests a halt. This occurs while handling other host channel interrupts
1729 * (e.g. nak, xacterr, stall, nyet).
1730 *
1731 * In DMA mode, this is the interrupt that occurs when the core has finished
1732 * processing a transfer on a channel. Other host channel interrupts (except
1733 * ahberr) are disabled in DMA mode.
1734 */
1735static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t *hcd,
1736                     dwc_hc_t *hc,
1737                     dwc_otg_hc_regs_t *hc_regs,
1738                     dwc_otg_qtd_t *qtd)
1739{
1740    DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1741            "Channel Halted--\n", hc->hc_num);
1742
1743    if (hcd->core_if->dma_enable) {
1744        handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd);
1745    } else {
1746#ifdef DEBUG
1747        if (!halt_status_ok(hcd, hc, hc_regs, qtd)) {
1748            return 1;
1749        }
1750#endif
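        /*
         * In Slave mode the halt was explicitly requested by the driver,
         * so the channel can simply be released with the saved halt status.
         */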
1751        release_channel(hcd, hc, qtd, hc->halt_status);
1752    }
1753
1754    return 1;
1755}
1756
1757/** Handles interrupt for a specific Host Channel */
1758int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num)
1759{
1760    int retval = 0;
1761    hcint_data_t hcint;
1762    hcintmsk_data_t hcintmsk;
1763    dwc_hc_t *hc;
1764    dwc_otg_hc_regs_t *hc_regs;
1765    dwc_otg_qtd_t *qtd;
1766
1767    DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num);
1768
1769    hc = dwc_otg_hcd->hc_ptr_array[num];
1770    hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[num];
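    /* The QTD at the head of the QH's list is the one in progress on this channel. */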
1771    qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
1772
1773    hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1774    hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
1775    DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
1776            hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
1777    hcint.d32 = hcint.d32 & hcintmsk.d32;
1778
1779    if (!dwc_otg_hcd->core_if->dma_enable) {
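        /*
         * In Slave mode, act on Channel Halted only when it is the sole
         * interrupt pending (0x2 == chhltd); otherwise clear it here and
         * let the handlers for the other pending interrupts request and
         * process the halt themselves.
         */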
1780        if (hcint.b.chhltd && hcint.d32 != 0x2) {
1781            hcint.b.chhltd = 0;
1782        }
1783    }
1784
1785    if (hcint.b.xfercomp) {
1786        retval |= handle_hc_xfercomp_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1787        /*
1788         * If NYET occurred at the same time as Xfer Complete, the NYET is
1789         * handled by the Xfer Complete interrupt handler. Don't want
1790         * to call the NYET interrupt handler in this case.
1791         */
1792        hcint.b.nyet = 0;
1793    }
1794    if (hcint.b.chhltd) {
1795        retval |= handle_hc_chhltd_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1796    }
1797    if (hcint.b.ahberr) {
1798        retval |= handle_hc_ahberr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1799    }
1800    if (hcint.b.stall) {
1801        retval |= handle_hc_stall_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1802    }
1803    if (hcint.b.nak) {
1804        retval |= handle_hc_nak_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1805    }
1806    if (hcint.b.ack) {
1807        retval |= handle_hc_ack_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1808    }
1809    if (hcint.b.nyet) {
1810        retval |= handle_hc_nyet_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1811    }
1812    if (hcint.b.xacterr) {
1813        retval |= handle_hc_xacterr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1814    }
1815    if (hcint.b.bblerr) {
1816        retval |= handle_hc_babble_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1817    }
1818    if (hcint.b.frmovrun) {
1819        retval |= handle_hc_frmovrun_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1820    }
1821    if (hcint.b.datatglerr) {
1822        retval |= handle_hc_datatglerr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1823    }
1824
1825    return retval;
1826}
1827
1828#endif /* DWC_DEVICE_ONLY */
1829
