1 | /* |
2 | * Driver for OHCI 1394 controllers |
3 | * |
4 | * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net> |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. |
10 | * |
11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software Foundation, |
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | */ |
20 | |
21 | #include <linux/bug.h> |
22 | #include <linux/compiler.h> |
23 | #include <linux/delay.h> |
24 | #include <linux/device.h> |
25 | #include <linux/dma-mapping.h> |
26 | #include <linux/firewire.h> |
27 | #include <linux/firewire-constants.h> |
28 | #include <linux/init.h> |
29 | #include <linux/interrupt.h> |
30 | #include <linux/io.h> |
31 | #include <linux/kernel.h> |
32 | #include <linux/list.h> |
33 | #include <linux/mm.h> |
34 | #include <linux/module.h> |
35 | #include <linux/moduleparam.h> |
36 | #include <linux/mutex.h> |
37 | #include <linux/pci.h> |
38 | #include <linux/pci_ids.h> |
39 | #include <linux/slab.h> |
40 | #include <linux/spinlock.h> |
41 | #include <linux/string.h> |
42 | #include <linux/time.h> |
43 | |
44 | #include <asm/byteorder.h> |
45 | #include <asm/page.h> |
46 | #include <asm/system.h> |
47 | |
48 | #ifdef CONFIG_PPC_PMAC |
49 | #include <asm/pmac_feature.h> |
50 | #endif |
51 | |
52 | #include "core.h" |
53 | #include "ohci.h" |
54 | |
55 | #define DESCRIPTOR_OUTPUT_MORE 0 |
56 | #define DESCRIPTOR_OUTPUT_LAST (1 << 12) |
57 | #define DESCRIPTOR_INPUT_MORE (2 << 12) |
58 | #define DESCRIPTOR_INPUT_LAST (3 << 12) |
59 | #define DESCRIPTOR_STATUS (1 << 11) |
60 | #define DESCRIPTOR_KEY_IMMEDIATE (2 << 8) |
61 | #define DESCRIPTOR_PING (1 << 7) |
62 | #define DESCRIPTOR_YY (1 << 6) |
63 | #define DESCRIPTOR_NO_IRQ (0 << 4) |
64 | #define DESCRIPTOR_IRQ_ERROR (1 << 4) |
65 | #define DESCRIPTOR_IRQ_ALWAYS (3 << 4) |
66 | #define DESCRIPTOR_BRANCH_ALWAYS (3 << 2) |
67 | #define DESCRIPTOR_WAIT (3 << 0) |
68 | |
69 | struct descriptor { |
70 | __le16 req_count; |
71 | __le16 control; |
72 | __le32 data_address; |
73 | __le32 branch_address; |
74 | __le16 res_count; |
75 | __le16 transfer_status; |
76 | } __attribute__((aligned(16))); |
77 | |
78 | #define CONTROL_SET(regs) (regs) |
79 | #define CONTROL_CLEAR(regs) ((regs) + 4) |
80 | #define COMMAND_PTR(regs) ((regs) + 12) |
81 | #define CONTEXT_MATCH(regs) ((regs) + 16) |
82 | |
83 | struct ar_buffer { |
84 | struct descriptor descriptor; |
85 | struct ar_buffer *next; |
86 | __le32 data[0]; |
87 | }; |
88 | |
89 | struct ar_context { |
90 | struct fw_ohci *ohci; |
91 | struct ar_buffer *current_buffer; |
92 | struct ar_buffer *last_buffer; |
93 | void *pointer; |
94 | u32 regs; |
95 | struct tasklet_struct tasklet; |
96 | }; |
97 | |
98 | struct context; |
99 | |
100 | typedef int (*descriptor_callback_t)(struct context *ctx, |
101 | struct descriptor *d, |
102 | struct descriptor *last); |
103 | |
104 | /* |
105 | * A buffer that contains a block of DMA-able coherent memory used for |
106 | * storing a portion of a DMA descriptor program. |
107 | */ |
108 | struct descriptor_buffer { |
109 | struct list_head list; |
110 | dma_addr_t buffer_bus; |
111 | size_t buffer_size; |
112 | size_t used; |
113 | struct descriptor buffer[0]; |
114 | }; |
115 | |
116 | struct context { |
117 | struct fw_ohci *ohci; |
118 | u32 regs; |
119 | int total_allocation; |
120 | |
121 | /* |
122 | * List of page-sized buffers for storing DMA descriptors. |
123 | * Head of list contains buffers in use and tail of list contains |
124 | * free buffers. |
125 | */ |
126 | struct list_head buffer_list; |
127 | |
128 | /* |
129 | * Pointer to a buffer inside buffer_list that contains the tail |
130 | * end of the current DMA program. |
131 | */ |
132 | struct descriptor_buffer *buffer_tail; |
133 | |
134 | /* |
135 | * The descriptor containing the branch address of the first |
136 | * descriptor that has not yet been filled by the device. |
137 | */ |
138 | struct descriptor *last; |
139 | |
140 | /* |
141 | * The last descriptor in the DMA program. It contains the branch |
142 | * address that must be updated upon appending a new descriptor. |
143 | */ |
144 | struct descriptor *prev; |
145 | |
146 | descriptor_callback_t callback; |
147 | |
148 | struct tasklet_struct tasklet; |
149 | }; |
150 | |
151 | #define IT_HEADER_SY(v) ((v) << 0) |
152 | #define IT_HEADER_TCODE(v) ((v) << 4) |
153 | #define IT_HEADER_CHANNEL(v) ((v) << 8) |
154 | #define IT_HEADER_TAG(v) ((v) << 14) |
155 | #define IT_HEADER_SPEED(v) ((v) << 16) |
156 | #define IT_HEADER_DATA_LENGTH(v) ((v) << 16) |
157 | |
158 | struct iso_context { |
159 | struct fw_iso_context base; |
160 | struct context context; |
161 | int excess_bytes; |
162 | void *header; |
163 | size_t header_length; |
164 | }; |
165 | |
166 | #define CONFIG_ROM_SIZE 1024 |
167 | |
168 | struct fw_ohci { |
169 | struct fw_card card; |
170 | |
171 | __iomem char *registers; |
172 | int node_id; |
173 | int generation; |
174 | int request_generation; /* for timestamping incoming requests */ |
175 | unsigned quirks; |
176 | unsigned int pri_req_max; |
177 | u32 bus_time; |
178 | bool is_root; |
179 | bool csr_state_setclear_abdicate; |
180 | |
181 | /* |
182 | * Spinlock for accessing fw_ohci data. Never call out of |
183 | * this driver with this lock held. |
184 | */ |
185 | spinlock_t lock; |
186 | |
187 | struct mutex phy_reg_mutex; |
188 | |
189 | struct ar_context ar_request_ctx; |
190 | struct ar_context ar_response_ctx; |
191 | struct context at_request_ctx; |
192 | struct context at_response_ctx; |
193 | |
194 | u32 it_context_mask; /* unoccupied IT contexts */ |
195 | struct iso_context *it_context_list; |
196 | u64 ir_context_channels; /* unoccupied channels */ |
197 | u32 ir_context_mask; /* unoccupied IR contexts */ |
198 | struct iso_context *ir_context_list; |
199 | u64 mc_channels; /* channels in use by the multichannel IR context */ |
200 | bool mc_allocated; |
201 | |
202 | __be32 *config_rom; |
203 | dma_addr_t config_rom_bus; |
204 | __be32 *next_config_rom; |
205 | dma_addr_t next_config_rom_bus; |
206 | __be32 next_header; |
207 | |
208 | __le32 *self_id_cpu; |
209 | dma_addr_t self_id_bus; |
210 | struct tasklet_struct bus_reset_tasklet; |
211 | |
212 | u32 self_id_buffer[512]; |
213 | }; |
214 | |
215 | static inline struct fw_ohci *fw_ohci(struct fw_card *card) |
216 | { |
217 | return container_of(card, struct fw_ohci, card); |
218 | } |
219 | |
220 | #define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000 |
221 | #define IR_CONTEXT_BUFFER_FILL 0x80000000 |
222 | #define IR_CONTEXT_ISOCH_HEADER 0x40000000 |
223 | #define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000 |
224 | #define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000 |
225 | #define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000 |
226 | |
227 | #define CONTEXT_RUN 0x8000 |
228 | #define CONTEXT_WAKE 0x1000 |
229 | #define CONTEXT_DEAD 0x0800 |
230 | #define CONTEXT_ACTIVE 0x0400 |
231 | |
232 | #define OHCI1394_MAX_AT_REQ_RETRIES 0xf |
233 | #define OHCI1394_MAX_AT_RESP_RETRIES 0x2 |
234 | #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 |
235 | |
236 | #define OHCI1394_REGISTER_SIZE 0x800 |
237 | #define OHCI_LOOP_COUNT 500 |
238 | #define OHCI1394_PCI_HCI_Control 0x40 |
239 | #define SELF_ID_BUF_SIZE 0x800 |
240 | #define OHCI_TCODE_PHY_PACKET 0x0e |
241 | #define OHCI_VERSION_1_1 0x010010 |
242 | |
243 | static char ohci_driver_name[] = KBUILD_MODNAME; |
244 | |
245 | #define PCI_DEVICE_ID_AGERE_FW643 0x5901 |
246 | #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 |
247 | #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 |
248 | |
249 | #define QUIRK_CYCLE_TIMER 1 |
250 | #define QUIRK_RESET_PACKET 2 |
251 | #define QUIRK_BE_HEADERS 4 |
252 | #define QUIRK_NO_1394A 8 |
253 | #define QUIRK_NO_MSI 16 |
254 | |
255 | /* In case of multiple matches in ohci_quirks[], only the first one is used. */ |
256 | static const struct { |
257 | unsigned short vendor, device, revision, flags; |
258 | } ohci_quirks[] = { |
259 | {PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID, |
260 | QUIRK_CYCLE_TIMER}, |
261 | |
262 | {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID, |
263 | QUIRK_BE_HEADERS}, |
264 | |
265 | {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, |
266 | QUIRK_NO_MSI}, |
267 | |
268 | {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, |
269 | QUIRK_NO_MSI}, |
270 | |
271 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, |
272 | QUIRK_CYCLE_TIMER}, |
273 | |
274 | {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, |
275 | QUIRK_CYCLE_TIMER}, |
276 | |
277 | {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, |
278 | QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A}, |
279 | |
280 | {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID, |
281 | QUIRK_RESET_PACKET}, |
282 | |
283 | {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, |
284 | QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, |
285 | }; |
286 | |
287 | /* This overrides anything that was found in ohci_quirks[]. */ |
288 | static int param_quirks; |
289 | module_param_named(quirks, param_quirks, int, 0644); |
290 | MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" |
291 | ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER) |
292 | ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) |
293 | ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) |
294 | ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) |
295 | ", disable MSI = " __stringify(QUIRK_NO_MSI) |
296 | ")"); |
297 | |
298 | #define OHCI_PARAM_DEBUG_AT_AR 1 |
299 | #define OHCI_PARAM_DEBUG_SELFIDS 2 |
300 | #define OHCI_PARAM_DEBUG_IRQS 4 |
301 | #define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ |
302 | |
303 | #ifdef CONFIG_FIREWIRE_OHCI_DEBUG |
304 | |
305 | static int param_debug; |
306 | module_param_named(debug, param_debug, int, 0644); |
307 | MODULE_PARM_DESC(debug, "Verbose logging (default = 0" |
308 | ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR) |
309 | ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS) |
310 | ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS) |
311 | ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS) |
312 | ", or a combination, or all = -1)"); |
313 | |
314 | static void log_irqs(u32 evt) |
315 | { |
316 | if (likely(!(param_debug & |
317 | (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS)))) |
318 | return; |
319 | |
320 | if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) && |
321 | !(evt & OHCI1394_busReset)) |
322 | return; |
323 | |
324 | fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, |
325 | evt & OHCI1394_selfIDComplete ? " selfID" : "", |
326 | evt & OHCI1394_RQPkt ? " AR_req" : "", |
327 | evt & OHCI1394_RSPkt ? " AR_resp" : "", |
328 | evt & OHCI1394_reqTxComplete ? " AT_req" : "", |
329 | evt & OHCI1394_respTxComplete ? " AT_resp" : "", |
330 | evt & OHCI1394_isochRx ? " IR" : "", |
331 | evt & OHCI1394_isochTx ? " IT" : "", |
332 | evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", |
333 | evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", |
334 | evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", |
335 | evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", |
336 | evt & OHCI1394_regAccessFail ? " regAccessFail" : "", |
337 | evt & OHCI1394_busReset ? " busReset" : "", |
338 | evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | |
339 | OHCI1394_RSPkt | OHCI1394_reqTxComplete | |
340 | OHCI1394_respTxComplete | OHCI1394_isochRx | |
341 | OHCI1394_isochTx | OHCI1394_postedWriteErr | |
342 | OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | |
343 | OHCI1394_cycleInconsistent | |
344 | OHCI1394_regAccessFail | OHCI1394_busReset) |
345 | ? " ?" : ""); |
346 | } |
347 | |
348 | static const char *speed[] = { |
349 | [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta", |
350 | }; |
351 | static const char *power[] = { |
352 | [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W", |
353 | [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W", |
354 | }; |
355 | static const char port[] = { '.', '-', 'p', 'c', }; |
356 | |
357 | static char _p(u32 *s, int shift) |
358 | { |
359 | return port[*s >> shift & 3]; |
360 | } |
361 | |
362 | static void log_selfids(int node_id, int generation, int self_id_count, u32 *s) |
363 | { |
364 | if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) |
365 | return; |
366 | |
367 | fw_notify("%d selfIDs, generation %d, local node ID %04x\n", |
368 | self_id_count, generation, node_id); |
369 | |
370 | for (; self_id_count--; ++s) |
371 | if ((*s & 1 << 23) == 0) |
372 | fw_notify("selfID 0: %08x, phy %d [%c%c%c] " |
373 | "%s gc=%d %s %s%s%s\n", |
374 | *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), |
375 | speed[*s >> 14 & 3], *s >> 16 & 63, |
376 | power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", |
377 | *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : ""); |
378 | else |
379 | fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", |
380 | *s, *s >> 24 & 63, |
381 | _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), |
382 | _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); |
383 | } |
384 | |
385 | static const char *evts[] = { |
386 | [0x00] = "evt_no_status", [0x01] = "-reserved-", |
387 | [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack", |
388 | [0x04] = "evt_underrun", [0x05] = "evt_overrun", |
389 | [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read", |
390 | [0x08] = "evt_data_write", [0x09] = "evt_bus_reset", |
391 | [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err", |
392 | [0x0c] = "-reserved-", [0x0d] = "-reserved-", |
393 | [0x0e] = "evt_unknown", [0x0f] = "evt_flushed", |
394 | [0x10] = "-reserved-", [0x11] = "ack_complete", |
395 | [0x12] = "ack_pending ", [0x13] = "-reserved-", |
396 | [0x14] = "ack_busy_X", [0x15] = "ack_busy_A", |
397 | [0x16] = "ack_busy_B", [0x17] = "-reserved-", |
398 | [0x18] = "-reserved-", [0x19] = "-reserved-", |
399 | [0x1a] = "-reserved-", [0x1b] = "ack_tardy", |
400 | [0x1c] = "-reserved-", [0x1d] = "ack_data_error", |
401 | [0x1e] = "ack_type_error", [0x1f] = "-reserved-", |
402 | [0x20] = "pending/cancelled", |
403 | }; |
404 | static const char *tcodes[] = { |
405 | [0x0] = "QW req", [0x1] = "BW req", |
406 | [0x2] = "W resp", [0x3] = "-reserved-", |
407 | [0x4] = "QR req", [0x5] = "BR req", |
408 | [0x6] = "QR resp", [0x7] = "BR resp", |
409 | [0x8] = "cycle start", [0x9] = "Lk req", |
410 | [0xa] = "async stream packet", [0xb] = "Lk resp", |
411 | [0xc] = "-reserved-", [0xd] = "-reserved-", |
412 | [0xe] = "link internal", [0xf] = "-reserved-", |
413 | }; |
414 | static const char *phys[] = { |
415 | [0x0] = "phy config packet", [0x1] = "link-on packet", |
416 | [0x2] = "self-id packet", [0x3] = "-reserved-", |
417 | }; |
418 | |
419 | static void log_ar_at_event(char dir, int speed, u32 *header, int evt) |
420 | { |
421 | int tcode = header[0] >> 4 & 0xf; |
422 | char specific[12]; |
423 | |
424 | if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR))) |
425 | return; |
426 | |
427 | if (unlikely(evt >= ARRAY_SIZE(evts))) |
428 | evt = 0x1f; |
429 | |
430 | if (evt == OHCI1394_evt_bus_reset) { |
431 | fw_notify("A%c evt_bus_reset, generation %d\n", |
432 | dir, (header[2] >> 16) & 0xff); |
433 | return; |
434 | } |
435 | |
436 | if (header[0] == ~header[1]) { |
437 | fw_notify("A%c %s, %s, %08x\n", |
438 | dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]); |
439 | return; |
440 | } |
441 | |
442 | switch (tcode) { |
443 | case 0x0: case 0x6: case 0x8: |
444 | snprintf(specific, sizeof(specific), " = %08x", |
445 | be32_to_cpu((__force __be32)header[3])); |
446 | break; |
447 | case 0x1: case 0x5: case 0x7: case 0x9: case 0xb: |
448 | snprintf(specific, sizeof(specific), " %x,%x", |
449 | header[3] >> 16, header[3] & 0xffff); |
450 | break; |
451 | default: |
452 | specific[0] = '\0'; |
453 | } |
454 | |
455 | switch (tcode) { |
456 | case 0xe: case 0xa: |
457 | fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]); |
458 | break; |
459 | case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: |
460 | fw_notify("A%c spd %x tl %02x, " |
461 | "%04x -> %04x, %s, " |
462 | "%s, %04x%08x%s\n", |
463 | dir, speed, header[0] >> 10 & 0x3f, |
464 | header[1] >> 16, header[0] >> 16, evts[evt], |
465 | tcodes[tcode], header[1] & 0xffff, header[2], specific); |
466 | break; |
467 | default: |
468 | fw_notify("A%c spd %x tl %02x, " |
469 | "%04x -> %04x, %s, " |
470 | "%s%s\n", |
471 | dir, speed, header[0] >> 10 & 0x3f, |
472 | header[1] >> 16, header[0] >> 16, evts[evt], |
473 | tcodes[tcode], specific); |
474 | } |
475 | } |
476 | |
477 | #else |
478 | |
479 | #define param_debug 0 |
480 | static inline void log_irqs(u32 evt) {} |
481 | static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {} |
482 | static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {} |
483 | |
484 | #endif /* CONFIG_FIREWIRE_OHCI_DEBUG */ |
485 | |
486 | static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) |
487 | { |
488 | writel(data, ohci->registers + offset); |
489 | } |
490 | |
491 | static inline u32 reg_read(const struct fw_ohci *ohci, int offset) |
492 | { |
493 | return readl(ohci->registers + offset); |
494 | } |
495 | |
496 | static inline void flush_writes(const struct fw_ohci *ohci) |
497 | { |
498 | /* Do a dummy read to flush writes. */ |
499 | reg_read(ohci, OHCI1394_Version); |
500 | } |
501 | |
502 | static int read_phy_reg(struct fw_ohci *ohci, int addr) |
503 | { |
504 | u32 val; |
505 | int i; |
506 | |
507 | reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); |
508 | for (i = 0; i < 3 + 100; i++) { |
509 | val = reg_read(ohci, OHCI1394_PhyControl); |
510 | if (val & OHCI1394_PhyControl_ReadDone) |
511 | return OHCI1394_PhyControl_ReadData(val); |
512 | |
513 | /* |
514 | * Try a few times without waiting. Sleeping is necessary |
515 | * only when the link/PHY interface is busy. |
516 | */ |
517 | if (i >= 3) |
518 | msleep(1); |
519 | } |
520 | fw_error("failed to read phy reg\n"); |
521 | |
522 | return -EBUSY; |
523 | } |
524 | |
525 | static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) |
526 | { |
527 | int i; |
528 | |
529 | reg_write(ohci, OHCI1394_PhyControl, |
530 | OHCI1394_PhyControl_Write(addr, val)); |
531 | for (i = 0; i < 3 + 100; i++) { |
532 | val = reg_read(ohci, OHCI1394_PhyControl); |
533 | if (!(val & OHCI1394_PhyControl_WritePending)) |
534 | return 0; |
535 | |
536 | if (i >= 3) |
537 | msleep(1); |
538 | } |
539 | fw_error("failed to write phy reg\n"); |
540 | |
541 | return -EBUSY; |
542 | } |
543 | |
544 | static int update_phy_reg(struct fw_ohci *ohci, int addr, |
545 | int clear_bits, int set_bits) |
546 | { |
547 | int ret = read_phy_reg(ohci, addr); |
548 | if (ret < 0) |
549 | return ret; |
550 | |
551 | /* |
552 | * The interrupt status bits are cleared by writing a one bit. |
553 | * Avoid clearing them unless explicitly requested in set_bits. |
554 | */ |
555 | if (addr == 5) |
556 | clear_bits |= PHY_INT_STATUS_BITS; |
557 | |
558 | return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits); |
559 | } |
560 | |
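| /* |
|  * Paged PHY registers are reached by first selecting the page through the |
|  * page-select field of PHY register 7, then reading the register as usual. |
|  */ |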
561 | static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr) |
562 | { |
563 | int ret; |
564 | |
565 | ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5); |
566 | if (ret < 0) |
567 | return ret; |
568 | |
569 | return read_phy_reg(ohci, addr); |
570 | } |
571 | |
572 | static int ohci_read_phy_reg(struct fw_card *card, int addr) |
573 | { |
574 | struct fw_ohci *ohci = fw_ohci(card); |
575 | int ret; |
576 | |
577 | mutex_lock(&ohci->phy_reg_mutex); |
578 | ret = read_phy_reg(ohci, addr); |
579 | mutex_unlock(&ohci->phy_reg_mutex); |
580 | |
581 | return ret; |
582 | } |
583 | |
584 | static int ohci_update_phy_reg(struct fw_card *card, int addr, |
585 | int clear_bits, int set_bits) |
586 | { |
587 | struct fw_ohci *ohci = fw_ohci(card); |
588 | int ret; |
589 | |
590 | mutex_lock(&ohci->phy_reg_mutex); |
591 | ret = update_phy_reg(ohci, addr, clear_bits, set_bits); |
592 | mutex_unlock(&ohci->phy_reg_mutex); |
593 | |
594 | return ret; |
595 | } |
596 | |
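| /* |
|  * Append a freshly allocated page to an AR context's buffer list: set up |
|  * its INPUT_MORE descriptor, chain it behind the current last buffer and |
|  * wake the context so the controller picks up the new branch address. |
|  */ |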
597 | static void ar_context_link_page(struct ar_context *ctx, |
598 | struct ar_buffer *ab, dma_addr_t ab_bus) |
599 | { |
600 | size_t offset; |
601 | |
602 | ab->next = NULL; |
603 | memset(&ab->descriptor, 0, sizeof(ab->descriptor)); |
604 | ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | |
605 | DESCRIPTOR_STATUS | |
606 | DESCRIPTOR_BRANCH_ALWAYS); |
607 | offset = offsetof(struct ar_buffer, data); |
608 | ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset); |
609 | ab->descriptor.data_address = cpu_to_le32(ab_bus + offset); |
610 | ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); |
611 | ab->descriptor.branch_address = 0; |
612 | |
613 | wmb(); /* finish init of new descriptors before branch_address update */ |
614 | ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); |
615 | ctx->last_buffer->next = ab; |
616 | ctx->last_buffer = ab; |
617 | |
618 | reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); |
619 | flush_writes(ctx->ohci); |
620 | } |
621 | |
622 | static int ar_context_add_page(struct ar_context *ctx) |
623 | { |
624 | struct device *dev = ctx->ohci->card.device; |
625 | struct ar_buffer *ab; |
626 | dma_addr_t uninitialized_var(ab_bus); |
627 | |
628 | ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC); |
629 | if (ab == NULL) |
630 | return -ENOMEM; |
631 | |
632 | ar_context_link_page(ctx, ab, ab_bus); |
633 | |
634 | return 0; |
635 | } |
636 | |
637 | static void ar_context_release(struct ar_context *ctx) |
638 | { |
639 | struct ar_buffer *ab, *ab_next; |
640 | size_t offset; |
641 | dma_addr_t ab_bus; |
642 | |
643 | for (ab = ctx->current_buffer; ab; ab = ab_next) { |
644 | ab_next = ab->next; |
645 | offset = offsetof(struct ar_buffer, data); |
646 | ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset; |
647 | dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE, |
648 | ab, ab_bus); |
649 | } |
650 | } |
651 | |
652 | #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) |
653 | #define cond_le32_to_cpu(v) \ |
654 | (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v)) |
655 | #else |
656 | #define cond_le32_to_cpu(v) le32_to_cpu(v) |
657 | #endif |
658 | |
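| /* |
|  * Parse one packet out of an AR buffer: rebuild the fw_packet header |
|  * according to its tcode, pick up the trailing status word (ack, speed, |
|  * timestamp) written by the controller, and dispatch the packet to the |
|  * request or response handler.  Returns a pointer just past the packet. |
|  */ |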
659 | static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) |
660 | { |
661 | struct fw_ohci *ohci = ctx->ohci; |
662 | struct fw_packet p; |
663 | u32 status, length, tcode; |
664 | int evt; |
665 | |
666 | p.header[0] = cond_le32_to_cpu(buffer[0]); |
667 | p.header[1] = cond_le32_to_cpu(buffer[1]); |
668 | p.header[2] = cond_le32_to_cpu(buffer[2]); |
669 | |
670 | tcode = (p.header[0] >> 4) & 0x0f; |
671 | switch (tcode) { |
672 | case TCODE_WRITE_QUADLET_REQUEST: |
673 | case TCODE_READ_QUADLET_RESPONSE: |
674 | p.header[3] = (__force __u32) buffer[3]; |
675 | p.header_length = 16; |
676 | p.payload_length = 0; |
677 | break; |
678 | |
679 | case TCODE_READ_BLOCK_REQUEST : |
680 | p.header[3] = cond_le32_to_cpu(buffer[3]); |
681 | p.header_length = 16; |
682 | p.payload_length = 0; |
683 | break; |
684 | |
685 | case TCODE_WRITE_BLOCK_REQUEST: |
686 | case TCODE_READ_BLOCK_RESPONSE: |
687 | case TCODE_LOCK_REQUEST: |
688 | case TCODE_LOCK_RESPONSE: |
689 | p.header[3] = cond_le32_to_cpu(buffer[3]); |
690 | p.header_length = 16; |
691 | p.payload_length = p.header[3] >> 16; |
692 | break; |
693 | |
694 | case TCODE_WRITE_RESPONSE: |
695 | case TCODE_READ_QUADLET_REQUEST: |
696 | case OHCI_TCODE_PHY_PACKET: |
697 | p.header_length = 12; |
698 | p.payload_length = 0; |
699 | break; |
700 | |
701 | default: |
702 | /* FIXME: Stop context, discard everything, and restart? */ |
703 | p.header_length = 0; |
704 | p.payload_length = 0; |
705 | } |
706 | |
707 | p.payload = (void *) buffer + p.header_length; |
708 | |
709 | /* FIXME: What to do about evt_* errors? */ |
710 | length = (p.header_length + p.payload_length + 3) / 4; |
711 | status = cond_le32_to_cpu(buffer[length]); |
712 | evt = (status >> 16) & 0x1f; |
713 | |
714 | p.ack = evt - 16; |
715 | p.speed = (status >> 21) & 0x7; |
716 | p.timestamp = status & 0xffff; |
717 | p.generation = ohci->request_generation; |
718 | |
719 | log_ar_at_event('R', p.speed, p.header, evt); |
720 | |
721 | /* |
722 | * Several controllers, notably from NEC and VIA, forget to |
723 | * write ack_complete status at PHY packet reception. |
724 | */ |
725 | if (evt == OHCI1394_evt_no_status && |
726 | (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4)) |
727 | p.ack = ACK_COMPLETE; |
728 | |
729 | /* |
730 | * The OHCI bus reset handler synthesizes a PHY packet with |
731 | * the new generation number when a bus reset happens (see |
732 | * section 8.4.2.3). This helps us determine when a request |
733 | * was received and make sure we send the response in the same |
734 | * generation. We only need this for requests; for responses |
735 | * we use the unique tlabel for finding the matching |
736 | * request. |
737 | * |
738 | * Alas some chips sometimes emit bus reset packets with a |
739 | * wrong generation. We set the correct generation for these |
740 | * at a slightly incorrect time (in bus_reset_tasklet). |
741 | */ |
742 | if (evt == OHCI1394_evt_bus_reset) { |
743 | if (!(ohci->quirks & QUIRK_RESET_PACKET)) |
744 | ohci->request_generation = (p.header[2] >> 16) & 0xff; |
745 | } else if (ctx == &ohci->ar_request_ctx) { |
746 | fw_core_handle_request(&ohci->card, &p); |
747 | } else { |
748 | fw_core_handle_response(&ohci->card, &p); |
749 | } |
750 | |
751 | return buffer + length + 1; |
752 | } |
753 | |
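| /* |
|  * Handle received packets in the current AR buffer.  Once a buffer's |
|  * descriptor has been completely filled (res_count == 0), a packet may be |
|  * split across the page boundary; the finished page is reused to |
|  * reassemble the split packet before it is linked back onto the list. |
|  */ |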
754 | static void ar_context_tasklet(unsigned long data) |
755 | { |
756 | struct ar_context *ctx = (struct ar_context *)data; |
757 | struct ar_buffer *ab; |
758 | struct descriptor *d; |
759 | void *buffer, *end; |
760 | __le16 res_count; |
761 | |
762 | ab = ctx->current_buffer; |
763 | d = &ab->descriptor; |
764 | |
765 | res_count = ACCESS_ONCE(d->res_count); |
766 | if (res_count == 0) { |
767 | size_t size, size2, rest, pktsize, size3, offset; |
768 | dma_addr_t start_bus; |
769 | void *start; |
770 | |
771 | /* |
772 | * This descriptor is finished and we may have a |
773 | * packet split across this and the next buffer. We |
774 | * reuse the page for reassembling the split packet. |
775 | */ |
776 | |
777 | offset = offsetof(struct ar_buffer, data); |
778 | start = ab; |
779 | start_bus = le32_to_cpu(ab->descriptor.data_address) - offset; |
780 | buffer = ab->data; |
781 | |
782 | ab = ab->next; |
783 | d = &ab->descriptor; |
784 | size = start + PAGE_SIZE - ctx->pointer; |
785 | /* valid buffer data in the next page */ |
786 | rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count); |
787 | /* what actually fits in this page */ |
788 | size2 = min(rest, (size_t)PAGE_SIZE - offset - size); |
789 | memmove(buffer, ctx->pointer, size); |
790 | memcpy(buffer + size, ab->data, size2); |
791 | |
792 | while (size > 0) { |
793 | void *next = handle_ar_packet(ctx, buffer); |
794 | pktsize = next - buffer; |
795 | if (pktsize >= size) { |
796 | /* |
797 | * We have handled all the data that was |
798 | * originally in this page, so we can now |
799 | * continue in the next page. |
800 | */ |
801 | buffer = next; |
802 | break; |
803 | } |
804 | /* move the next packet to the start of the buffer */ |
805 | memmove(buffer, next, size + size2 - pktsize); |
806 | size -= pktsize; |
807 | /* fill up this page again */ |
808 | size3 = min(rest - size2, |
809 | (size_t)PAGE_SIZE - offset - size - size2); |
810 | memcpy(buffer + size + size2, |
811 | (void *) ab->data + size2, size3); |
812 | size2 += size3; |
813 | } |
814 | |
815 | if (rest > 0) { |
816 | /* handle the packets that are fully in the next page */ |
817 | buffer = (void *) ab->data + |
818 | (buffer - (start + offset + size)); |
819 | end = (void *) ab->data + rest; |
820 | |
821 | while (buffer < end) |
822 | buffer = handle_ar_packet(ctx, buffer); |
823 | |
824 | ctx->current_buffer = ab; |
825 | ctx->pointer = end; |
826 | |
827 | ar_context_link_page(ctx, start, start_bus); |
828 | } else { |
829 | ctx->pointer = start + PAGE_SIZE; |
830 | } |
831 | } else { |
832 | buffer = ctx->pointer; |
833 | ctx->pointer = end = |
834 | (void *) ab + PAGE_SIZE - le16_to_cpu(res_count); |
835 | |
836 | while (buffer < end) |
837 | buffer = handle_ar_packet(ctx, buffer); |
838 | } |
839 | } |
840 | |
841 | static int ar_context_init(struct ar_context *ctx, |
842 | struct fw_ohci *ohci, u32 regs) |
843 | { |
844 | struct ar_buffer ab; |
845 | |
846 | ctx->regs = regs; |
847 | ctx->ohci = ohci; |
848 | ctx->last_buffer = &ab; |
849 | tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); |
850 | |
851 | ar_context_add_page(ctx); |
852 | ar_context_add_page(ctx); |
853 | ctx->current_buffer = ab.next; |
854 | ctx->pointer = ctx->current_buffer->data; |
855 | |
856 | return 0; |
857 | } |
858 | |
859 | static void ar_context_run(struct ar_context *ctx) |
860 | { |
861 | struct ar_buffer *ab = ctx->current_buffer; |
862 | dma_addr_t ab_bus; |
863 | size_t offset; |
864 | |
865 | offset = offsetof(struct ar_buffer, data); |
866 | ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset; |
867 | |
868 | reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1); |
869 | reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); |
870 | flush_writes(ctx->ohci); |
871 | } |
872 | |
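| /* |
|  * Find the descriptor within a block of z descriptors that holds the |
|  * block's branch address: for a two-descriptor block whose first |
|  * descriptor branches always or carries an immediate key it is the first |
|  * descriptor, otherwise it is the last one. |
|  */ |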
873 | static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) |
874 | { |
875 | int b, key; |
876 | |
877 | b = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2; |
878 | key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8; |
879 | |
880 | /* figure out which descriptor the branch address goes in */ |
881 | if (z == 2 && (b == 3 || key == 2)) |
882 | return d; |
883 | else |
884 | return d + z - 1; |
885 | } |
886 | |
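| /* |
|  * Walk the DMA program from ctx->last, following branch addresses and |
|  * invoking the context's callback for each completed descriptor block. |
|  * Descriptor buffers that have been fully consumed are moved back to the |
|  * free part of buffer_list. |
|  */ |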
887 | static void context_tasklet(unsigned long data) |
888 | { |
889 | struct context *ctx = (struct context *) data; |
890 | struct descriptor *d, *last; |
891 | u32 address; |
892 | int z; |
893 | struct descriptor_buffer *desc; |
894 | |
895 | desc = list_entry(ctx->buffer_list.next, |
896 | struct descriptor_buffer, list); |
897 | last = ctx->last; |
898 | while (last->branch_address != 0) { |
899 | struct descriptor_buffer *old_desc = desc; |
900 | address = le32_to_cpu(last->branch_address); |
901 | z = address & 0xf; |
902 | address &= ~0xf; |
903 | |
904 | /* If the branch address points to a buffer outside of the |
905 | * current buffer, advance to the next buffer. */ |
906 | if (address < desc->buffer_bus || |
907 | address >= desc->buffer_bus + desc->used) |
908 | desc = list_entry(desc->list.next, |
909 | struct descriptor_buffer, list); |
910 | d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d); |
911 | last = find_branch_descriptor(d, z); |
912 | |
913 | if (!ctx->callback(ctx, d, last)) |
914 | break; |
915 | |
916 | if (old_desc != desc) { |
917 | /* If we've advanced to the next buffer, move the |
918 | * previous buffer to the free list. */ |
919 | unsigned long flags; |
920 | old_desc->used = 0; |
921 | spin_lock_irqsave(&ctx->ohci->lock, flags); |
922 | list_move_tail(&old_desc->list, &ctx->buffer_list); |
923 | spin_unlock_irqrestore(&ctx->ohci->lock, flags); |
924 | } |
925 | ctx->last = last; |
926 | } |
927 | } |
928 | |
929 | /* |
930 | * Allocate a new buffer and add it to the list of free buffers for this |
931 | * context. Must be called with ohci->lock held. |
932 | */ |
933 | static int context_add_buffer(struct context *ctx) |
934 | { |
935 | struct descriptor_buffer *desc; |
936 | dma_addr_t uninitialized_var(bus_addr); |
937 | int offset; |
938 | |
939 | /* |
940 | * 16MB of descriptors should be far more than enough for any DMA |
941 | * program. This will catch run-away userspace or DoS attacks. |
942 | */ |
943 | if (ctx->total_allocation >= 16*1024*1024) |
944 | return -ENOMEM; |
945 | |
946 | desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, |
947 | &bus_addr, GFP_ATOMIC); |
948 | if (!desc) |
949 | return -ENOMEM; |
950 | |
951 | offset = (void *)&desc->buffer - (void *)desc; |
952 | desc->buffer_size = PAGE_SIZE - offset; |
953 | desc->buffer_bus = bus_addr + offset; |
954 | desc->used = 0; |
955 | |
956 | list_add_tail(&desc->list, &ctx->buffer_list); |
957 | ctx->total_allocation += PAGE_SIZE; |
958 | |
959 | return 0; |
960 | } |
961 | |
962 | static int context_init(struct context *ctx, struct fw_ohci *ohci, |
963 | u32 regs, descriptor_callback_t callback) |
964 | { |
965 | ctx->ohci = ohci; |
966 | ctx->regs = regs; |
967 | ctx->total_allocation = 0; |
968 | |
969 | INIT_LIST_HEAD(&ctx->buffer_list); |
970 | if (context_add_buffer(ctx) < 0) |
971 | return -ENOMEM; |
972 | |
973 | ctx->buffer_tail = list_entry(ctx->buffer_list.next, |
974 | struct descriptor_buffer, list); |
975 | |
976 | tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); |
977 | ctx->callback = callback; |
978 | |
979 | /* |
980 | * We put a dummy descriptor in the buffer that has a NULL |
981 | * branch address and looks like it's been sent. That way we |
982 | * have a descriptor to append DMA programs to. |
983 | */ |
984 | memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer)); |
985 | ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST); |
986 | ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011); |
987 | ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); |
988 | ctx->last = ctx->buffer_tail->buffer; |
989 | ctx->prev = ctx->buffer_tail->buffer; |
990 | |
991 | return 0; |
992 | } |
993 | |
994 | static void context_release(struct context *ctx) |
995 | { |
996 | struct fw_card *card = &ctx->ohci->card; |
997 | struct descriptor_buffer *desc, *tmp; |
998 | |
999 | list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) |
1000 | dma_free_coherent(card->device, PAGE_SIZE, desc, |
1001 | desc->buffer_bus - |
1002 | ((void *)&desc->buffer - (void *)desc)); |
1003 | } |
1004 | |
1005 | /* Must be called with ohci->lock held */ |
1006 | static struct descriptor *context_get_descriptors(struct context *ctx, |
1007 | int z, dma_addr_t *d_bus) |
1008 | { |
1009 | struct descriptor *d = NULL; |
1010 | struct descriptor_buffer *desc = ctx->buffer_tail; |
1011 | |
1012 | if (z * sizeof(*d) > desc->buffer_size) |
1013 | return NULL; |
1014 | |
1015 | if (z * sizeof(*d) > desc->buffer_size - desc->used) { |
1016 | /* No room for the descriptor in this buffer, so advance to the |
1017 | * next one. */ |
1018 | |
1019 | if (desc->list.next == &ctx->buffer_list) { |
1020 | /* If there is no free buffer next in the list, |
1021 | * allocate one. */ |
1022 | if (context_add_buffer(ctx) < 0) |
1023 | return NULL; |
1024 | } |
1025 | desc = list_entry(desc->list.next, |
1026 | struct descriptor_buffer, list); |
1027 | ctx->buffer_tail = desc; |
1028 | } |
1029 | |
1030 | d = desc->buffer + desc->used / sizeof(*d); |
1031 | memset(d, 0, z * sizeof(*d)); |
1032 | *d_bus = desc->buffer_bus + desc->used; |
1033 | |
1034 | return d; |
1035 | } |
1036 | |
1037 | static void context_run(struct context *ctx, u32 extra) |
1038 | { |
1039 | struct fw_ohci *ohci = ctx->ohci; |
1040 | |
1041 | reg_write(ohci, COMMAND_PTR(ctx->regs), |
1042 | le32_to_cpu(ctx->last->branch_address)); |
1043 | reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); |
1044 | reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); |
1045 | flush_writes(ohci); |
1046 | } |
1047 | |
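| /* |
|  * Append a block of z descriptors (plus 'extra' trailing descriptors that |
|  * only occupy buffer space) to the DMA program by pointing the previous |
|  * block's branch_address at it, then wake the context. |
|  */ |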
1048 | static void context_append(struct context *ctx, |
1049 | struct descriptor *d, int z, int extra) |
1050 | { |
1051 | dma_addr_t d_bus; |
1052 | struct descriptor_buffer *desc = ctx->buffer_tail; |
1053 | |
1054 | d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); |
1055 | |
1056 | desc->used += (z + extra) * sizeof(*d); |
1057 | |
1058 | wmb(); /* finish init of new descriptors before branch_address update */ |
1059 | ctx->prev->branch_address = cpu_to_le32(d_bus | z); |
1060 | ctx->prev = find_branch_descriptor(d, z); |
1061 | |
1062 | reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); |
1063 | flush_writes(ctx->ohci); |
1064 | } |
1065 | |
1066 | static void context_stop(struct context *ctx) |
1067 | { |
1068 | u32 reg; |
1069 | int i; |
1070 | |
1071 | reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); |
1072 | flush_writes(ctx->ohci); |
1073 | |
1074 | for (i = 0; i < 10; i++) { |
1075 | reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); |
1076 | if ((reg & CONTEXT_ACTIVE) == 0) |
1077 | return; |
1078 | |
1079 | mdelay(1); |
1080 | } |
1081 | fw_error("Error: DMA context still active (0x%08x)\n", reg); |
1082 | } |
1083 | |
1084 | struct driver_data { |
1085 | struct fw_packet *packet; |
1086 | }; |
1087 | |
1088 | /* |
1089 | * This function appends a packet to the DMA queue for transmission. |
1090 | * Must always be called with the ohci->lock held to ensure proper |
1091 | * generation handling and locking around packet queue manipulation. |
1092 | */ |
1093 | static int at_context_queue_packet(struct context *ctx, |
1094 | struct fw_packet *packet) |
1095 | { |
1096 | struct fw_ohci *ohci = ctx->ohci; |
1097 | dma_addr_t d_bus, uninitialized_var(payload_bus); |
1098 | struct driver_data *driver_data; |
1099 | struct descriptor *d, *last; |
1100 | __le32 *header; |
1101 | int z, tcode; |
1102 | u32 reg; |
1103 | |
1104 | d = context_get_descriptors(ctx, 4, &d_bus); |
1105 | if (d == NULL) { |
1106 | packet->ack = RCODE_SEND_ERROR; |
1107 | return -1; |
1108 | } |
1109 | |
1110 | d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); |
1111 | d[0].res_count = cpu_to_le16(packet->timestamp); |
1112 | |
1113 | /* |
1114 | * The DMA format for asynchronous link packets is different |
1115 | * from the IEEE 1394 layout, so shift the fields around |
1116 | * accordingly. If header_length is 8, it's a PHY packet, to |
1117 | * which we need to prepend an extra quadlet. |
1118 | */ |
1119 | |
1120 | header = (__le32 *) &d[1]; |
1121 | switch (packet->header_length) { |
1122 | case 16: |
1123 | case 12: |
1124 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | |
1125 | (packet->speed << 16)); |
1126 | header[1] = cpu_to_le32((packet->header[1] & 0xffff) | |
1127 | (packet->header[0] & 0xffff0000)); |
1128 | header[2] = cpu_to_le32(packet->header[2]); |
1129 | |
1130 | tcode = (packet->header[0] >> 4) & 0x0f; |
1131 | if (TCODE_IS_BLOCK_PACKET(tcode)) |
1132 | header[3] = cpu_to_le32(packet->header[3]); |
1133 | else |
1134 | header[3] = (__force __le32) packet->header[3]; |
1135 | |
1136 | d[0].req_count = cpu_to_le16(packet->header_length); |
1137 | break; |
1138 | |
1139 | case 8: |
1140 | header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | |
1141 | (packet->speed << 16)); |
1142 | header[1] = cpu_to_le32(packet->header[0]); |
1143 | header[2] = cpu_to_le32(packet->header[1]); |
1144 | d[0].req_count = cpu_to_le16(12); |
1145 | |
1146 | if (is_ping_packet(packet->header)) |
1147 | d[0].control |= cpu_to_le16(DESCRIPTOR_PING); |
1148 | break; |
1149 | |
1150 | case 4: |
1151 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | |
1152 | (packet->speed << 16)); |
1153 | header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); |
1154 | d[0].req_count = cpu_to_le16(8); |
1155 | break; |
1156 | |
1157 | default: |
1158 | /* BUG(); */ |
1159 | packet->ack = RCODE_SEND_ERROR; |
1160 | return -1; |
1161 | } |
1162 | |
1163 | driver_data = (struct driver_data *) &d[3]; |
1164 | driver_data->packet = packet; |
1165 | packet->driver_data = driver_data; |
1166 | |
1167 | if (packet->payload_length > 0) { |
1168 | payload_bus = |
1169 | dma_map_single(ohci->card.device, packet->payload, |
1170 | packet->payload_length, DMA_TO_DEVICE); |
1171 | if (dma_mapping_error(ohci->card.device, payload_bus)) { |
1172 | packet->ack = RCODE_SEND_ERROR; |
1173 | return -1; |
1174 | } |
1175 | packet->payload_bus = payload_bus; |
1176 | packet->payload_mapped = true; |
1177 | |
1178 | d[2].req_count = cpu_to_le16(packet->payload_length); |
1179 | d[2].data_address = cpu_to_le32(payload_bus); |
1180 | last = &d[2]; |
1181 | z = 3; |
1182 | } else { |
1183 | last = &d[0]; |
1184 | z = 2; |
1185 | } |
1186 | |
1187 | last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | |
1188 | DESCRIPTOR_IRQ_ALWAYS | |
1189 | DESCRIPTOR_BRANCH_ALWAYS); |
1190 | |
1191 | /* |
1192 | * If the controller and packet generations don't match, we need to |
1193 | * bail out and try again. If IntEvent.busReset is set, the AT context |
1194 | * is halted, so appending to the context and trying to run it is |
1195 | * futile. Most controllers do the right thing and just flush the AT |
1196 | * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but |
1197 | * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind |
1198 | * up stalling out. So we just bail out in software and try again |
1199 | * later, and everyone is happy. |
1200 | * FIXME: Document how the locking works. |
1201 | */ |
1202 | if (ohci->generation != packet->generation || |
1203 | reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) { |
1204 | if (packet->payload_mapped) |
1205 | dma_unmap_single(ohci->card.device, payload_bus, |
1206 | packet->payload_length, DMA_TO_DEVICE); |
1207 | packet->ack = RCODE_GENERATION; |
1208 | return -1; |
1209 | } |
1210 | |
1211 | context_append(ctx, d, z, 4 - z); |
1212 | |
1213 | /* If the context isn't already running, start it up. */ |
1214 | reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); |
1215 | if ((reg & CONTEXT_RUN) == 0) |
1216 | context_run(ctx, 0); |
1217 | |
1218 | return 0; |
1219 | } |
1220 | |
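| /* |
|  * Descriptor callback for the AT contexts: unmap the payload, translate |
|  * the event/ack code from the transfer status into packet->ack, and |
|  * complete the packet. |
|  */ |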
1221 | static int handle_at_packet(struct context *context, |
1222 | struct descriptor *d, |
1223 | struct descriptor *last) |
1224 | { |
1225 | struct driver_data *driver_data; |
1226 | struct fw_packet *packet; |
1227 | struct fw_ohci *ohci = context->ohci; |
1228 | int evt; |
1229 | |
1230 | if (last->transfer_status == 0) |
1231 | /* This descriptor isn't done yet, stop iteration. */ |
1232 | return 0; |
1233 | |
1234 | driver_data = (struct driver_data *) &d[3]; |
1235 | packet = driver_data->packet; |
1236 | if (packet == NULL) |
1237 | /* This packet was cancelled, just continue. */ |
1238 | return 1; |
1239 | |
1240 | if (packet->payload_mapped) |
1241 | dma_unmap_single(ohci->card.device, packet->payload_bus, |
1242 | packet->payload_length, DMA_TO_DEVICE); |
1243 | |
1244 | evt = le16_to_cpu(last->transfer_status) & 0x1f; |
1245 | packet->timestamp = le16_to_cpu(last->res_count); |
1246 | |
1247 | log_ar_at_event('T', packet->speed, packet->header, evt); |
1248 | |
1249 | switch (evt) { |
1250 | case OHCI1394_evt_timeout: |
1251 | /* Async response transmit timed out. */ |
1252 | packet->ack = RCODE_CANCELLED; |
1253 | break; |
1254 | |
1255 | case OHCI1394_evt_flushed: |
1256 | /* |
1257 | * The packet was flushed should give same error as |
1258 | * when we try to use a stale generation count. |
1259 | */ |
1260 | packet->ack = RCODE_GENERATION; |
1261 | break; |
1262 | |
1263 | case OHCI1394_evt_missing_ack: |
1264 | /* |
1265 | * Using a valid (current) generation count, but the |
1266 | * node is not on the bus or not sending acks. |
1267 | */ |
1268 | packet->ack = RCODE_NO_ACK; |
1269 | break; |
1270 | |
1271 | case ACK_COMPLETE + 0x10: |
1272 | case ACK_PENDING + 0x10: |
1273 | case ACK_BUSY_X + 0x10: |
1274 | case ACK_BUSY_A + 0x10: |
1275 | case ACK_BUSY_B + 0x10: |
1276 | case ACK_DATA_ERROR + 0x10: |
1277 | case ACK_TYPE_ERROR + 0x10: |
1278 | packet->ack = evt - 0x10; |
1279 | break; |
1280 | |
1281 | default: |
1282 | packet->ack = RCODE_SEND_ERROR; |
1283 | break; |
1284 | } |
1285 | |
1286 | packet->callback(packet, &ohci->card, packet->ack); |
1287 | |
1288 | return 1; |
1289 | } |
1290 | |
1291 | #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) |
1292 | #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) |
1293 | #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) |
1294 | #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) |
1295 | #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) |
1296 | |
1297 | static void handle_local_rom(struct fw_ohci *ohci, |
1298 | struct fw_packet *packet, u32 csr) |
1299 | { |
1300 | struct fw_packet response; |
1301 | int tcode, length, i; |
1302 | |
1303 | tcode = HEADER_GET_TCODE(packet->header[0]); |
1304 | if (TCODE_IS_BLOCK_PACKET(tcode)) |
1305 | length = HEADER_GET_DATA_LENGTH(packet->header[3]); |
1306 | else |
1307 | length = 4; |
1308 | |
1309 | i = csr - CSR_CONFIG_ROM; |
1310 | if (i + length > CONFIG_ROM_SIZE) { |
1311 | fw_fill_response(&response, packet->header, |
1312 | RCODE_ADDRESS_ERROR, NULL, 0); |
1313 | } else if (!TCODE_IS_READ_REQUEST(tcode)) { |
1314 | fw_fill_response(&response, packet->header, |
1315 | RCODE_TYPE_ERROR, NULL, 0); |
1316 | } else { |
1317 | fw_fill_response(&response, packet->header, RCODE_COMPLETE, |
1318 | (void *) ohci->config_rom + i, length); |
1319 | } |
1320 | |
1321 | fw_core_handle_response(&ohci->card, &response); |
1322 | } |
1323 | |
1324 | static void handle_local_lock(struct fw_ohci *ohci, |
1325 | struct fw_packet *packet, u32 csr) |
1326 | { |
1327 | struct fw_packet response; |
1328 | int tcode, length, ext_tcode, sel, try; |
1329 | __be32 *payload, lock_old; |
1330 | u32 lock_arg, lock_data; |
1331 | |
1332 | tcode = HEADER_GET_TCODE(packet->header[0]); |
1333 | length = HEADER_GET_DATA_LENGTH(packet->header[3]); |
1334 | payload = packet->payload; |
1335 | ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); |
1336 | |
1337 | if (tcode == TCODE_LOCK_REQUEST && |
1338 | ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) { |
1339 | lock_arg = be32_to_cpu(payload[0]); |
1340 | lock_data = be32_to_cpu(payload[1]); |
1341 | } else if (tcode == TCODE_READ_QUADLET_REQUEST) { |
1342 | lock_arg = 0; |
1343 | lock_data = 0; |
1344 | } else { |
1345 | fw_fill_response(&response, packet->header, |
1346 | RCODE_TYPE_ERROR, NULL, 0); |
1347 | goto out; |
1348 | } |
1349 | |
1350 | sel = (csr - CSR_BUS_MANAGER_ID) / 4; |
1351 | reg_write(ohci, OHCI1394_CSRData, lock_data); |
1352 | reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); |
1353 | reg_write(ohci, OHCI1394_CSRControl, sel); |
1354 | |
1355 | for (try = 0; try < 20; try++) |
1356 | if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) { |
1357 | lock_old = cpu_to_be32(reg_read(ohci, |
1358 | OHCI1394_CSRData)); |
1359 | fw_fill_response(&response, packet->header, |
1360 | RCODE_COMPLETE, |
1361 | &lock_old, sizeof(lock_old)); |
1362 | goto out; |
1363 | } |
1364 | |
1365 | fw_error("swap not done (CSR lock timeout)\n"); |
1366 | fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); |
1367 | |
1368 | out: |
1369 | fw_core_handle_response(&ohci->card, &response); |
1370 | } |
1371 | |
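| /* |
|  * Requests addressed to the local node are answered here without going |
|  * out on the wire: config ROM reads are served from ohci->config_rom, |
|  * bus manager CSR locks use the controller's compare-swap unit, and |
|  * everything else is passed on to the core. |
|  */ |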
1372 | static void handle_local_request(struct context *ctx, struct fw_packet *packet) |
1373 | { |
1374 | u64 offset, csr; |
1375 | |
1376 | if (ctx == &ctx->ohci->at_request_ctx) { |
1377 | packet->ack = ACK_PENDING; |
1378 | packet->callback(packet, &ctx->ohci->card, packet->ack); |
1379 | } |
1380 | |
1381 | offset = |
1382 | ((unsigned long long) |
1383 | HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | |
1384 | packet->header[2]; |
1385 | csr = offset - CSR_REGISTER_BASE; |
1386 | |
1387 | /* Handle config rom reads. */ |
1388 | if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END) |
1389 | handle_local_rom(ctx->ohci, packet, csr); |
1390 | else switch (csr) { |
1391 | case CSR_BUS_MANAGER_ID: |
1392 | case CSR_BANDWIDTH_AVAILABLE: |
1393 | case CSR_CHANNELS_AVAILABLE_HI: |
1394 | case CSR_CHANNELS_AVAILABLE_LO: |
1395 | handle_local_lock(ctx->ohci, packet, csr); |
1396 | break; |
1397 | default: |
1398 | if (ctx == &ctx->ohci->at_request_ctx) |
1399 | fw_core_handle_request(&ctx->ohci->card, packet); |
1400 | else |
1401 | fw_core_handle_response(&ctx->ohci->card, packet); |
1402 | break; |
1403 | } |
1404 | |
1405 | if (ctx == &ctx->ohci->at_response_ctx) { |
1406 | packet->ack = ACK_COMPLETE; |
1407 | packet->callback(packet, &ctx->ohci->card, packet->ack); |
1408 | } |
1409 | } |
1410 | |
1411 | static void at_context_transmit(struct context *ctx, struct fw_packet *packet) |
1412 | { |
1413 | unsigned long flags; |
1414 | int ret; |
1415 | |
1416 | spin_lock_irqsave(&ctx->ohci->lock, flags); |
1417 | |
1418 | if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && |
1419 | ctx->ohci->generation == packet->generation) { |
1420 | spin_unlock_irqrestore(&ctx->ohci->lock, flags); |
1421 | handle_local_request(ctx, packet); |
1422 | return; |
1423 | } |
1424 | |
1425 | ret = at_context_queue_packet(ctx, packet); |
1426 | spin_unlock_irqrestore(&ctx->ohci->lock, flags); |
1427 | |
1428 | if (ret < 0) |
1429 | packet->callback(packet, &ctx->ohci->card, packet->ack); |
1430 | |
1431 | } |
1432 | |
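| /* |
|  * Convert a raw cycle timer value (cycleSeconds:cycleCount:cycleOffset, |
|  * 7:13:12 bits) into a flat count of 24.576 MHz ticks; there are 3072 |
|  * ticks per cycle and 8000 cycles per second. |
|  */ |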
1433 | static u32 cycle_timer_ticks(u32 cycle_timer) |
1434 | { |
1435 | u32 ticks; |
1436 | |
1437 | ticks = cycle_timer & 0xfff; |
1438 | ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); |
1439 | ticks += (3072 * 8000) * (cycle_timer >> 25); |
1440 | |
1441 | return ticks; |
1442 | } |
1443 | |
1444 | /* |
1445 | * Some controllers exhibit one or more of the following bugs when updating the |
1446 | * iso cycle timer register: |
1447 | * - When the lowest six bits are wrapping around to zero, a read that happens |
1448 | * at the same time will return garbage in the lowest ten bits. |
1449 | * - When the cycleOffset field wraps around to zero, the cycleCount field is |
1450 | * not incremented for about 60 ns. |
1451 | * - Occasionally, the entire register reads zero. |
1452 | * |
1453 | * To catch these, we read the register three times and ensure that the |
1454 | * difference between each two consecutive reads is approximately the same, i.e. |
1455 | * less than twice the other. Furthermore, any negative difference indicates an |
1456 | * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to |
1457 | * execute, so we have enough precision to compute the ratio of the differences.) |
1458 | */ |
1459 | static u32 get_cycle_time(struct fw_ohci *ohci) |
1460 | { |
1461 | u32 c0, c1, c2; |
1462 | u32 t0, t1, t2; |
1463 | s32 diff01, diff12; |
1464 | int i; |
1465 | |
1466 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); |
1467 | |
1468 | if (ohci->quirks & QUIRK_CYCLE_TIMER) { |
1469 | i = 0; |
1470 | c1 = c2; |
1471 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); |
1472 | do { |
1473 | c0 = c1; |
1474 | c1 = c2; |
1475 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); |
1476 | t0 = cycle_timer_ticks(c0); |
1477 | t1 = cycle_timer_ticks(c1); |
1478 | t2 = cycle_timer_ticks(c2); |
1479 | diff01 = t1 - t0; |
1480 | diff12 = t2 - t1; |
1481 | } while ((diff01 <= 0 || diff12 <= 0 || |
1482 | diff01 / diff12 >= 2 || diff12 / diff01 >= 2) |
1483 | && i++ < 20); |
1484 | } |
1485 | |
1486 | return c2; |
1487 | } |
1488 | |
1489 | /* |
1490 | * This function has to be called at least every 64 seconds. The bus_time |
1491 | * field stores not only the upper 25 bits of the BUS_TIME register but also |
1492 | * the most significant bit of the cycle timer in bit 6 so that we can detect |
1493 | * changes in this bit. |
1494 | */ |
1495 | static u32 update_bus_time(struct fw_ohci *ohci) |
1496 | { |
1497 | u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; |
1498 | |
1499 | if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) |
1500 | ohci->bus_time += 0x40; |
1501 | |
1502 | return ohci->bus_time | cycle_time_seconds; |
1503 | } |
1504 | |
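| /* |
|  * Scheduled from the interrupt handler on selfIDComplete: read the new |
|  * node ID, copy and validate the self IDs (guarding against a new bus |
|  * reset racing with the read-out), swap in a pending config ROM, and hand |
|  * the topology information to the core. |
|  */ |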
1505 | static void bus_reset_tasklet(unsigned long data) |
1506 | { |
1507 | struct fw_ohci *ohci = (struct fw_ohci *)data; |
1508 | int self_id_count, i, j, reg; |
1509 | int generation, new_generation; |
1510 | unsigned long flags; |
1511 | void *free_rom = NULL; |
1512 | dma_addr_t free_rom_bus = 0; |
1513 | bool is_new_root; |
1514 | |
1515 | reg = reg_read(ohci, OHCI1394_NodeID); |
1516 | if (!(reg & OHCI1394_NodeID_idValid)) { |
1517 | fw_notify("node ID not valid, new bus reset in progress\n"); |
1518 | return; |
1519 | } |
1520 | if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { |
1521 | fw_notify("malconfigured bus\n"); |
1522 | return; |
1523 | } |
1524 | ohci->node_id = reg & (OHCI1394_NodeID_busNumber | |
1525 | OHCI1394_NodeID_nodeNumber); |
1526 | |
1527 | is_new_root = (reg & OHCI1394_NodeID_root) != 0; |
1528 | if (!(ohci->is_root && is_new_root)) |
1529 | reg_write(ohci, OHCI1394_LinkControlSet, |
1530 | OHCI1394_LinkControl_cycleMaster); |
1531 | ohci->is_root = is_new_root; |
1532 | |
1533 | reg = reg_read(ohci, OHCI1394_SelfIDCount); |
1534 | if (reg & OHCI1394_SelfIDCount_selfIDError) { |
1535 | fw_notify("inconsistent self IDs\n"); |
1536 | return; |
1537 | } |
1538 | /* |
1539 | * The count in the SelfIDCount register is the number of |
1540 | * bytes in the self ID receive buffer. Since we also receive |
1541 | * the inverted quadlets and a header quadlet, we shift one |
1542 | * bit extra to get the actual number of self IDs. |
1543 | */ |
1544 | self_id_count = (reg >> 3) & 0xff; |
1545 | if (self_id_count == 0 || self_id_count > 252) { |
1546 | fw_notify("inconsistent self IDs\n"); |
1547 | return; |
1548 | } |
1549 | generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; |
1550 | rmb(); |
1551 | |
1552 | for (i = 1, j = 0; j < self_id_count; i += 2, j++) { |
1553 | if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) { |
1554 | fw_notify("inconsistent self IDs\n"); |
1555 | return; |
1556 | } |
1557 | ohci->self_id_buffer[j] = |
1558 | cond_le32_to_cpu(ohci->self_id_cpu[i]); |
1559 | } |
1560 | rmb(); |
1561 | |
1562 | /* |
1563 | * Check the consistency of the self IDs we just read. The |
1564 | * problem we face is that a new bus reset can start while we |
1565 | * read out the self IDs from the DMA buffer. If this happens, |
1566 | * the DMA buffer will be overwritten with new self IDs and we |
1567 | * will read out inconsistent data. The OHCI specification |
1568 | * (section 11.2) recommends a technique similar to |
1569 | * linux/seqlock.h, where we remember the generation of the |
1570 | * self IDs in the buffer before reading them out and compare |
1571 | * it to the current generation after reading them out. If |
1572 | * the two generations match we know we have a consistent set |
1573 | * of self IDs. |
1574 | */ |
1575 | |
1576 | new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; |
1577 | if (new_generation != generation) { |
1578 | fw_notify("recursive bus reset detected, " |
1579 | "discarding self ids\n"); |
1580 | return; |
1581 | } |
1582 | |
1583 | /* FIXME: Document how the locking works. */ |
1584 | spin_lock_irqsave(&ohci->lock, flags); |
1585 | |
1586 | ohci->generation = generation; |
1587 | context_stop(&ohci->at_request_ctx); |
1588 | context_stop(&ohci->at_response_ctx); |
1589 | reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); |
1590 | |
1591 | if (ohci->quirks & QUIRK_RESET_PACKET) |
1592 | ohci->request_generation = generation; |
1593 | |
1594 | /* |
1595 | * This next bit is unrelated to the AT context stuff but we |
1596 | * have to do it under the spinlock also. If a new config rom |
1597 | * was set up before this reset, the old one is now no longer |
1598 | * in use and we can free it. Update the config rom pointers |
1599 | * to point to the current config rom and clear the |
1600 | * next_config_rom pointer so a new update can take place. |
1601 | */ |
1602 | |
1603 | if (ohci->next_config_rom != NULL) { |
1604 | if (ohci->next_config_rom != ohci->config_rom) { |
1605 | free_rom = ohci->config_rom; |
1606 | free_rom_bus = ohci->config_rom_bus; |
1607 | } |
1608 | ohci->config_rom = ohci->next_config_rom; |
1609 | ohci->config_rom_bus = ohci->next_config_rom_bus; |
1610 | ohci->next_config_rom = NULL; |
1611 | |
1612 | /* |
1613 | * Restore config_rom image and manually update |
1614 | * config_rom registers. Writing the header quadlet |
1615 | * will indicate that the config rom is ready, so we |
1616 | * do that last. |
1617 | */ |
1618 | reg_write(ohci, OHCI1394_BusOptions, |
1619 | be32_to_cpu(ohci->config_rom[2])); |
1620 | ohci->config_rom[0] = ohci->next_header; |
1621 | reg_write(ohci, OHCI1394_ConfigROMhdr, |
1622 | be32_to_cpu(ohci->next_header)); |
1623 | } |
1624 | |
1625 | #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA |
1626 | reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); |
1627 | reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); |
1628 | #endif |
1629 | |
1630 | spin_unlock_irqrestore(&ohci->lock, flags); |
1631 | |
1632 | if (free_rom) |
1633 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
1634 | free_rom, free_rom_bus); |
1635 | |
1636 | log_selfids(ohci->node_id, generation, |
1637 | self_id_count, ohci->self_id_buffer); |
1638 | |
1639 | fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, |
1640 | self_id_count, ohci->self_id_buffer, |
1641 | ohci->csr_state_setclear_abdicate); |
1642 | ohci->csr_state_setclear_abdicate = false; |
1643 | } |
1644 | |
1645 | static irqreturn_t irq_handler(int irq, void *data) |
1646 | { |
1647 | struct fw_ohci *ohci = data; |
1648 | u32 event, iso_event; |
1649 | int i; |
1650 | |
1651 | event = reg_read(ohci, OHCI1394_IntEventClear); |
1652 | |
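 | /* |
 | * Note added for clarity: event == 0 means the interrupt was not ours |
 | * (shared line); an all-ones read (!~event) is what a removed or |
 | * powered-down card typically returns, so treat that as spurious too. |
 | */ |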
1653 | if (!event || !~event) |
1654 | return IRQ_NONE; |
1655 | |
1656 | /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */ |
1657 | reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset); |
1658 | log_irqs(event); |
1659 | |
1660 | if (event & OHCI1394_selfIDComplete) |
1661 | tasklet_schedule(&ohci->bus_reset_tasklet); |
1662 | |
1663 | if (event & OHCI1394_RQPkt) |
1664 | tasklet_schedule(&ohci->ar_request_ctx.tasklet); |
1665 | |
1666 | if (event & OHCI1394_RSPkt) |
1667 | tasklet_schedule(&ohci->ar_response_ctx.tasklet); |
1668 | |
1669 | if (event & OHCI1394_reqTxComplete) |
1670 | tasklet_schedule(&ohci->at_request_ctx.tasklet); |
1671 | |
1672 | if (event & OHCI1394_respTxComplete) |
1673 | tasklet_schedule(&ohci->at_response_ctx.tasklet); |
1674 | |
1675 | iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear); |
1676 | reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); |
1677 | |
1678 | while (iso_event) { |
1679 | i = ffs(iso_event) - 1; |
1680 | tasklet_schedule(&ohci->ir_context_list[i].context.tasklet); |
1681 | iso_event &= ~(1 << i); |
1682 | } |
1683 | |
1684 | iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear); |
1685 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); |
1686 | |
1687 | while (iso_event) { |
1688 | i = ffs(iso_event) - 1; |
1689 | tasklet_schedule(&ohci->it_context_list[i].context.tasklet); |
1690 | iso_event &= ~(1 << i); |
1691 | } |
1692 | |
1693 | if (unlikely(event & OHCI1394_regAccessFail)) |
1694 | fw_error("Register access failure - " |
1695 | "please notify linux1394-devel@lists.sf.net\n"); |
1696 | |
1697 | if (unlikely(event & OHCI1394_postedWriteErr)) |
1698 | fw_error("PCI posted write error\n"); |
1699 | |
1700 | if (unlikely(event & OHCI1394_cycleTooLong)) { |
1701 | if (printk_ratelimit()) |
1702 | fw_notify("isochronous cycle too long\n"); |
1703 | reg_write(ohci, OHCI1394_LinkControlSet, |
1704 | OHCI1394_LinkControl_cycleMaster); |
1705 | } |
1706 | |
1707 | if (unlikely(event & OHCI1394_cycleInconsistent)) { |
1708 | /* |
1709 | * We need to clear this event bit in order to make |
1710 | * cycleMatch isochronous I/O work. In theory we should |
1711 | * stop active cycleMatch iso contexts now and restart |
1712 | * them at least two cycles later. (FIXME?) |
1713 | */ |
1714 | if (printk_ratelimit()) |
1715 | fw_notify("isochronous cycle inconsistent\n"); |
1716 | } |
1717 | |
1718 | if (event & OHCI1394_cycle64Seconds) { |
1719 | spin_lock(&ohci->lock); |
1720 | update_bus_time(ohci); |
1721 | spin_unlock(&ohci->lock); |
1722 | } |
1723 | |
1724 | return IRQ_HANDLED; |
1725 | } |
1726 | |
1727 | static int software_reset(struct fw_ohci *ohci) |
1728 | { |
1729 | int i; |
1730 | |
1731 | reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); |
1732 | |
1733 | for (i = 0; i < OHCI_LOOP_COUNT; i++) { |
1734 | if ((reg_read(ohci, OHCI1394_HCControlSet) & |
1735 | OHCI1394_HCControl_softReset) == 0) |
1736 | return 0; |
1737 | msleep(1); |
1738 | } |
1739 | |
1740 | return -EBUSY; |
1741 | } |
1742 | |
1743 | static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length) |
1744 | { |
1745 | size_t size = length * 4; |
1746 | |
1747 | memcpy(dest, src, size); |
1748 | if (size < CONFIG_ROM_SIZE) |
1749 | memset(&dest[length], 0, CONFIG_ROM_SIZE - size); |
1750 | } |
1751 | |
1752 | static int configure_1394a_enhancements(struct fw_ohci *ohci) |
1753 | { |
1754 | bool enable_1394a; |
1755 | int ret, clear, set, offset; |
1756 | |
1757 | /* Check if the driver should configure link and PHY. */ |
1758 | if (!(reg_read(ohci, OHCI1394_HCControlSet) & |
1759 | OHCI1394_HCControl_programPhyEnable)) |
1760 | return 0; |
1761 | |
1762 | /* Paranoia: check whether the PHY supports 1394a, too. */ |
1763 | enable_1394a = false; |
1764 | ret = read_phy_reg(ohci, 2); |
1765 | if (ret < 0) |
1766 | return ret; |
1767 | if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) { |
1768 | ret = read_paged_phy_reg(ohci, 1, 8); |
1769 | if (ret < 0) |
1770 | return ret; |
1771 | if (ret >= 1) |
1772 | enable_1394a = true; |
1773 | } |
1774 | |
1775 | if (ohci->quirks & QUIRK_NO_1394A) |
1776 | enable_1394a = false; |
1777 | |
1778 | /* Configure PHY and link consistently. */ |
1779 | if (enable_1394a) { |
1780 | clear = 0; |
1781 | set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; |
1782 | } else { |
1783 | clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; |
1784 | set = 0; |
1785 | } |
1786 | ret = update_phy_reg(ohci, 5, clear, set); |
1787 | if (ret < 0) |
1788 | return ret; |
1789 | |
1790 | if (enable_1394a) |
1791 | offset = OHCI1394_HCControlSet; |
1792 | else |
1793 | offset = OHCI1394_HCControlClear; |
1794 | reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable); |
1795 | |
1796 | /* Clean up: configuration has been taken care of. */ |
1797 | reg_write(ohci, OHCI1394_HCControlClear, |
1798 | OHCI1394_HCControl_programPhyEnable); |
1799 | |
1800 | return 0; |
1801 | } |
1802 | |
1803 | static int ohci_enable(struct fw_card *card, |
1804 | const __be32 *config_rom, size_t length) |
1805 | { |
1806 | struct fw_ohci *ohci = fw_ohci(card); |
1807 | struct pci_dev *dev = to_pci_dev(card->device); |
1808 | u32 lps, seconds, version, irqs; |
1809 | int i, ret; |
1810 | |
1811 | if (software_reset(ohci)) { |
1812 | fw_error("Failed to reset ohci card.\n"); |
1813 | return -EBUSY; |
1814 | } |
1815 | |
1816 | /* |
1817 | * Now enable LPS, which we need in order to start accessing |
1818 | * most of the registers. In fact, on some cards (ALI M5251), |
1819 | * accessing registers in the SClk domain without LPS enabled |
1820 | * will lock up the machine. Wait 50msec to make sure we have |
1821 | * full link enabled. However, with some cards (well, at least |
1822 | * a JMicron PCIe card), we have to try again sometimes. |
1823 | */ |
1824 | reg_write(ohci, OHCI1394_HCControlSet, |
1825 | OHCI1394_HCControl_LPS | |
1826 | OHCI1394_HCControl_postedWriteEnable); |
1827 | flush_writes(ohci); |
1828 | |
1829 | for (lps = 0, i = 0; !lps && i < 3; i++) { |
1830 | msleep(50); |
1831 | lps = reg_read(ohci, OHCI1394_HCControlSet) & |
1832 | OHCI1394_HCControl_LPS; |
1833 | } |
1834 | |
1835 | if (!lps) { |
1836 | fw_error("Failed to set Link Power Status\n"); |
1837 | return -EIO; |
1838 | } |
1839 | |
1840 | reg_write(ohci, OHCI1394_HCControlClear, |
1841 | OHCI1394_HCControl_noByteSwapData); |
1842 | |
1843 | reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); |
1844 | reg_write(ohci, OHCI1394_LinkControlSet, |
1845 | OHCI1394_LinkControl_rcvSelfID | |
1846 | OHCI1394_LinkControl_rcvPhyPkt | |
1847 | OHCI1394_LinkControl_cycleTimerEnable | |
1848 | OHCI1394_LinkControl_cycleMaster); |
1849 | |
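 | /* |
 | * Note added for clarity (field layout per OHCI 1.1, to be verified): |
 | * ATRetries packs maxATReqRetries in bits 0-3, maxATRespRetries in |
 | * bits 4-7 and maxPhysRespRetries in bits 8-11, matching the shifts |
 | * below; the 200 written at bit 16 presumably seeds the cycle limit |
 | * used for dual-phase retries. |
 | */ |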
1850 | reg_write(ohci, OHCI1394_ATRetries, |
1851 | OHCI1394_MAX_AT_REQ_RETRIES | |
1852 | (OHCI1394_MAX_AT_RESP_RETRIES << 4) | |
1853 | (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | |
1854 | (200 << 16)); |
1855 | |
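 | /* |
 | * Note added for clarity: the cycle timer keeps 7 bits of seconds in |
 | * bits 31-25 (hence the shift by 25), the cycle count in bits 24-12 |
 | * and the offset in bits 11-0. bus_time extends the seconds count in |
 | * software and is advanced from the cycle64Seconds interrupt. |
 | */ |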
1856 | seconds = lower_32_bits(get_seconds()); |
1857 | reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25); |
1858 | ohci->bus_time = seconds & ~0x3f; |
1859 | |
1860 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; |
1861 | if (version >= OHCI_VERSION_1_1) { |
1862 | reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, |
1863 | 0xfffffffe); |
1864 | card->broadcast_channel_auto_allocated = true; |
1865 | } |
1866 | |
1867 | /* Get implemented bits of the priority arbitration request counter. */ |
1868 | reg_write(ohci, OHCI1394_FairnessControl, 0x3f); |
1869 | ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; |
1870 | reg_write(ohci, OHCI1394_FairnessControl, 0); |
1871 | card->priority_budget_implemented = ohci->pri_req_max != 0; |
1872 | |
1873 | ar_context_run(&ohci->ar_request_ctx); |
1874 | ar_context_run(&ohci->ar_response_ctx); |
1875 | |
1876 | reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); |
1877 | reg_write(ohci, OHCI1394_IntEventClear, ~0); |
1878 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); |
1879 | |
1880 | ret = configure_1394a_enhancements(ohci); |
1881 | if (ret < 0) |
1882 | return ret; |
1883 | |
1884 | /* Activate link_on bit and contender bit in our self ID packets. */ |
1885 | ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER); |
1886 | if (ret < 0) |
1887 | return ret; |
1888 | |
1889 | /* |
1890 | * When the link is not yet enabled, the atomic config rom |
1891 | * update mechanism described below in ohci_set_config_rom() |
1892 | * is not active. We have to update ConfigRomHeader and |
1893 | * BusOptions manually, and the write to ConfigROMmap takes |
1894 | * effect immediately. We tie this to the enabling of the |
1895 | * link, so we have a valid config rom before enabling - the |
1896 | * OHCI requires that ConfigROMhdr and BusOptions have valid |
1897 | * values before enabling. |
1898 | * |
1899 | * However, when the ConfigROMmap is written, some controllers |
1900 | * always read back quadlets 0 and 2 from the config rom to |
1901 | * the ConfigRomHeader and BusOptions registers on bus reset. |
1902 | * They shouldn't do that in this initial case where the link |
1903 | * isn't enabled. This means we have to use the same |
1904 | * workaround here, setting the bus header to 0 and then writing |
1905 | * the right values in the bus reset tasklet. |
1906 | */ |
1907 | |
1908 | if (config_rom) { |
1909 | ohci->next_config_rom = |
1910 | dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
1911 | &ohci->next_config_rom_bus, |
1912 | GFP_KERNEL); |
1913 | if (ohci->next_config_rom == NULL) |
1914 | return -ENOMEM; |
1915 | |
1916 | copy_config_rom(ohci->next_config_rom, config_rom, length); |
1917 | } else { |
1918 | /* |
1919 | * In the suspend case, config_rom is NULL, which |
1920 | * means that we just reuse the old config rom. |
1921 | */ |
1922 | ohci->next_config_rom = ohci->config_rom; |
1923 | ohci->next_config_rom_bus = ohci->config_rom_bus; |
1924 | } |
1925 | |
1926 | ohci->next_header = ohci->next_config_rom[0]; |
1927 | ohci->next_config_rom[0] = 0; |
1928 | reg_write(ohci, OHCI1394_ConfigROMhdr, 0); |
1929 | reg_write(ohci, OHCI1394_BusOptions, |
1930 | be32_to_cpu(ohci->next_config_rom[2])); |
1931 | reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); |
1932 | |
1933 | reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); |
1934 | |
1935 | if (!(ohci->quirks & QUIRK_NO_MSI)) |
1936 | pci_enable_msi(dev); |
1937 | if (request_irq(dev->irq, irq_handler, |
1938 | pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, |
1939 | ohci_driver_name, ohci)) { |
1940 | fw_error("Failed to allocate interrupt %d.\n", dev->irq); |
1941 | pci_disable_msi(dev); |
1942 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
1943 | ohci->config_rom, ohci->config_rom_bus); |
1944 | return -EIO; |
1945 | } |
1946 | |
1947 | irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | |
1948 | OHCI1394_RQPkt | OHCI1394_RSPkt | |
1949 | OHCI1394_isochTx | OHCI1394_isochRx | |
1950 | OHCI1394_postedWriteErr | |
1951 | OHCI1394_selfIDComplete | |
1952 | OHCI1394_regAccessFail | |
1953 | OHCI1394_cycle64Seconds | |
1954 | OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong | |
1955 | OHCI1394_masterIntEnable; |
1956 | if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) |
1957 | irqs |= OHCI1394_busReset; |
1958 | reg_write(ohci, OHCI1394_IntMaskSet, irqs); |
1959 | |
1960 | reg_write(ohci, OHCI1394_HCControlSet, |
1961 | OHCI1394_HCControl_linkEnable | |
1962 | OHCI1394_HCControl_BIBimageValid); |
1963 | flush_writes(ohci); |
1964 | |
1965 | /* We are ready to go, reset bus to finish initialization. */ |
1966 | fw_schedule_bus_reset(&ohci->card, false, true); |
1967 | |
1968 | return 0; |
1969 | } |
1970 | |
1971 | static int ohci_set_config_rom(struct fw_card *card, |
1972 | const __be32 *config_rom, size_t length) |
1973 | { |
1974 | struct fw_ohci *ohci; |
1975 | unsigned long flags; |
1976 | int ret = -EBUSY; |
1977 | __be32 *next_config_rom; |
1978 | dma_addr_t uninitialized_var(next_config_rom_bus); |
1979 | |
1980 | ohci = fw_ohci(card); |
1981 | |
1982 | /* |
1983 | * When the OHCI controller is enabled, the config rom update |
1984 | * mechanism is a bit tricky, but easy enough to use. See |
1985 | * section 5.5.6 in the OHCI specification. |
1986 | * |
1987 | * The OHCI controller caches the new config rom address in a |
1988 | * shadow register (ConfigROMmapNext) and needs a bus reset |
1989 | * for the changes to take place. When the bus reset is |
1990 | * detected, the controller loads the new values for the |
1991 | * ConfigRomHeader and BusOptions registers from the specified |
1992 | * config rom and loads ConfigROMmap from the ConfigROMmapNext |
1993 | * shadow register. All automatically and atomically. |
1994 | * |
1995 | * Now, there's a twist to this story. The automatic load of |
1996 | * ConfigRomHeader and BusOptions doesn't honor the |
1997 | * noByteSwapData bit, so with a be32 config rom, the |
1998 | * controller will load be32 values in to these registers |
1999 | * controller will load be32 values into these registers |
2000 | * during the atomic update, even on little endian |
2001 | * header quadlet; 0 is endian agnostic and means that the |
2002 | * config rom isn't ready yet. In the bus reset tasklet we |
2003 | * then set up the real values for the two registers. |
2004 | * |
2005 | * We use ohci->lock to avoid racing with the code that sets |
2006 | * ohci->next_config_rom to NULL (see bus_reset_tasklet). |
2007 | */ |
2008 | |
2009 | next_config_rom = |
2010 | dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
2011 | &next_config_rom_bus, GFP_KERNEL); |
2012 | if (next_config_rom == NULL) |
2013 | return -ENOMEM; |
2014 | |
2015 | spin_lock_irqsave(&ohci->lock, flags); |
2016 | |
2017 | if (ohci->next_config_rom == NULL) { |
2018 | ohci->next_config_rom = next_config_rom; |
2019 | ohci->next_config_rom_bus = next_config_rom_bus; |
2020 | |
2021 | copy_config_rom(ohci->next_config_rom, config_rom, length); |
2022 | |
2023 | ohci->next_header = config_rom[0]; |
2024 | ohci->next_config_rom[0] = 0; |
2025 | |
2026 | reg_write(ohci, OHCI1394_ConfigROMmap, |
2027 | ohci->next_config_rom_bus); |
2028 | ret = 0; |
2029 | } |
2030 | |
2031 | spin_unlock_irqrestore(&ohci->lock, flags); |
2032 | |
2033 | /* |
2034 | * Now initiate a bus reset to have the changes take |
2035 | * effect. We clean up the old config rom memory and DMA |
2036 | * mappings in the bus reset tasklet, since the OHCI |
2037 | * controller could need to access it before the bus reset |
2038 | * takes effect. |
2039 | */ |
2040 | if (ret == 0) |
2041 | fw_schedule_bus_reset(&ohci->card, true, true); |
2042 | else |
2043 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
2044 | next_config_rom, next_config_rom_bus); |
2045 | |
2046 | return ret; |
2047 | } |
2048 | |
2049 | static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) |
2050 | { |
2051 | struct fw_ohci *ohci = fw_ohci(card); |
2052 | |
2053 | at_context_transmit(&ohci->at_request_ctx, packet); |
2054 | } |
2055 | |
2056 | static void ohci_send_response(struct fw_card *card, struct fw_packet *packet) |
2057 | { |
2058 | struct fw_ohci *ohci = fw_ohci(card); |
2059 | |
2060 | at_context_transmit(&ohci->at_response_ctx, packet); |
2061 | } |
2062 | |
2063 | static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) |
2064 | { |
2065 | struct fw_ohci *ohci = fw_ohci(card); |
2066 | struct context *ctx = &ohci->at_request_ctx; |
2067 | struct driver_data *driver_data = packet->driver_data; |
2068 | int ret = -ENOENT; |
2069 | |
2070 | tasklet_disable(&ctx->tasklet); |
2071 | |
2072 | if (packet->ack != 0) |
2073 | goto out; |
2074 | |
2075 | if (packet->payload_mapped) |
2076 | dma_unmap_single(ohci->card.device, packet->payload_bus, |
2077 | packet->payload_length, DMA_TO_DEVICE); |
2078 | |
2079 | log_ar_at_event('T', packet->speed, packet->header, 0x20); |
2080 | driver_data->packet = NULL; |
2081 | packet->ack = RCODE_CANCELLED; |
2082 | packet->callback(packet, &ohci->card, packet->ack); |
2083 | ret = 0; |
2084 | out: |
2085 | tasklet_enable(&ctx->tasklet); |
2086 | |
2087 | return ret; |
2088 | } |
2089 | |
2090 | static int ohci_enable_phys_dma(struct fw_card *card, |
2091 | int node_id, int generation) |
2092 | { |
2093 | #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA |
2094 | return 0; |
2095 | #else |
2096 | struct fw_ohci *ohci = fw_ohci(card); |
2097 | unsigned long flags; |
2098 | int n, ret = 0; |
2099 | |
2100 | /* |
2101 | * FIXME: Make sure this bitmask is cleared when we clear the busReset |
2102 | * interrupt bit. Clear physReqResourceAllBuses on bus reset. |
2103 | */ |
2104 | |
2105 | spin_lock_irqsave(&ohci->lock, flags); |
2106 | |
2107 | if (ohci->generation != generation) { |
2108 | ret = -ESTALE; |
2109 | goto out; |
2110 | } |
2111 | |
2112 | /* |
2113 | * Note, if the node ID contains a non-local bus ID, physical DMA is |
2114 | * enabled for _all_ nodes on remote buses. |
2115 | */ |
2116 | |
2117 | n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63; |
2118 | if (n < 32) |
2119 | reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n); |
2120 | else |
2121 | reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); |
2122 | |
2123 | flush_writes(ohci); |
2124 | out: |
2125 | spin_unlock_irqrestore(&ohci->lock, flags); |
2126 | |
2127 | return ret; |
2128 | #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ |
2129 | } |
2130 | |
2131 | static u32 ohci_read_csr(struct fw_card *card, int csr_offset) |
2132 | { |
2133 | struct fw_ohci *ohci = fw_ohci(card); |
2134 | unsigned long flags; |
2135 | u32 value; |
2136 | |
2137 | switch (csr_offset) { |
2138 | case CSR_STATE_CLEAR: |
2139 | case CSR_STATE_SET: |
2140 | if (ohci->is_root && |
2141 | (reg_read(ohci, OHCI1394_LinkControlSet) & |
2142 | OHCI1394_LinkControl_cycleMaster)) |
2143 | value = CSR_STATE_BIT_CMSTR; |
2144 | else |
2145 | value = 0; |
2146 | if (ohci->csr_state_setclear_abdicate) |
2147 | value |= CSR_STATE_BIT_ABDICATE; |
2148 | |
2149 | return value; |
2150 | |
2151 | case CSR_NODE_IDS: |
2152 | return reg_read(ohci, OHCI1394_NodeID) << 16; |
2153 | |
2154 | case CSR_CYCLE_TIME: |
2155 | return get_cycle_time(ohci); |
2156 | |
2157 | case CSR_BUS_TIME: |
2158 | /* |
2159 | * We might be called just after the cycle timer has wrapped |
2160 | * around but just before the cycle64Seconds handler, so we |
2161 | * better check here, too, if the bus time needs to be updated. |
2162 | */ |
2163 | spin_lock_irqsave(&ohci->lock, flags); |
2164 | value = update_bus_time(ohci); |
2165 | spin_unlock_irqrestore(&ohci->lock, flags); |
2166 | return value; |
2167 | |
2168 | case CSR_BUSY_TIMEOUT: |
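 | /* |
 | * Note added for clarity: ATRetries keeps three 4-bit retry counts in |
 | * bits 0-11 plus the cycle limit from bit 16. The read below shifts it |
 | * down by 4 so one retry nibble lands in bits 0-3 and the cycle limit |
 | * in bits 12 and up; the corresponding write case mirrors the CSR |
 | * retry_limit nibble into all three retry fields. |
 | */ |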
2169 | value = reg_read(ohci, OHCI1394_ATRetries); |
2170 | return (value >> 4) & 0x0ffff00f; |
2171 | |
2172 | case CSR_PRIORITY_BUDGET: |
2173 | return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) | |
2174 | (ohci->pri_req_max << 8); |
2175 | |
2176 | default: |
2177 | WARN_ON(1); |
2178 | return 0; |
2179 | } |
2180 | } |
2181 | |
2182 | static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) |
2183 | { |
2184 | struct fw_ohci *ohci = fw_ohci(card); |
2185 | unsigned long flags; |
2186 | |
2187 | switch (csr_offset) { |
2188 | case CSR_STATE_CLEAR: |
2189 | if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { |
2190 | reg_write(ohci, OHCI1394_LinkControlClear, |
2191 | OHCI1394_LinkControl_cycleMaster); |
2192 | flush_writes(ohci); |
2193 | } |
2194 | if (value & CSR_STATE_BIT_ABDICATE) |
2195 | ohci->csr_state_setclear_abdicate = false; |
2196 | break; |
2197 | |
2198 | case CSR_STATE_SET: |
2199 | if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { |
2200 | reg_write(ohci, OHCI1394_LinkControlSet, |
2201 | OHCI1394_LinkControl_cycleMaster); |
2202 | flush_writes(ohci); |
2203 | } |
2204 | if (value & CSR_STATE_BIT_ABDICATE) |
2205 | ohci->csr_state_setclear_abdicate = true; |
2206 | break; |
2207 | |
2208 | case CSR_NODE_IDS: |
2209 | reg_write(ohci, OHCI1394_NodeID, value >> 16); |
2210 | flush_writes(ohci); |
2211 | break; |
2212 | |
2213 | case CSR_CYCLE_TIME: |
2214 | reg_write(ohci, OHCI1394_IsochronousCycleTimer, value); |
2215 | reg_write(ohci, OHCI1394_IntEventSet, |
2216 | OHCI1394_cycleInconsistent); |
2217 | flush_writes(ohci); |
2218 | break; |
2219 | |
2220 | case CSR_BUS_TIME: |
2221 | spin_lock_irqsave(&ohci->lock, flags); |
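 | /* |
 | * Note added for clarity: bits 0-6 of BUS_TIME shadow the hardware |
 | * cycle timer's seconds field, so they are kept; only the software- |
 | * extended upper bits are taken from the value being written. |
 | */ |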
2222 | ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f); |
2223 | spin_unlock_irqrestore(&ohci->lock, flags); |
2224 | break; |
2225 | |
2226 | case CSR_BUSY_TIMEOUT: |
2227 | value = (value & 0xf) | ((value & 0xf) << 4) | |
2228 | ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); |
2229 | reg_write(ohci, OHCI1394_ATRetries, value); |
2230 | flush_writes(ohci); |
2231 | break; |
2232 | |
2233 | case CSR_PRIORITY_BUDGET: |
2234 | reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f); |
2235 | flush_writes(ohci); |
2236 | break; |
2237 | |
2238 | default: |
2239 | WARN_ON(1); |
2240 | break; |
2241 | } |
2242 | } |
2243 | |
2244 | static void copy_iso_headers(struct iso_context *ctx, void *p) |
2245 | { |
2246 | int i = ctx->header_length; |
2247 | |
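 | /* |
 | * Note added for clarity: ctx->header is a single page (allocated in |
 | * ohci_allocate_iso_context), so headers that would overflow it are |
 | * silently dropped rather than written past the end of the page. |
 | */ |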
2248 | if (i + ctx->base.header_size > PAGE_SIZE) |
2249 | return; |
2250 | |
2251 | /* |
2252 | * The iso header is byteswapped to little endian by |
2253 | * the controller, but the remaining header quadlets |
2254 | * are big endian. We want to present all the headers |
2255 | * as big endian, so we have to swap the first quadlet. |
2256 | */ |
2257 | if (ctx->base.header_size > 0) |
2258 | *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); |
2259 | if (ctx->base.header_size > 4) |
2260 | *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p); |
2261 | if (ctx->base.header_size > 8) |
2262 | memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8); |
2263 | ctx->header_length += ctx->base.header_size; |
2264 | } |
2265 | |
2266 | static int handle_ir_packet_per_buffer(struct context *context, |
2267 | struct descriptor *d, |
2268 | struct descriptor *last) |
2269 | { |
2270 | struct iso_context *ctx = |
2271 | container_of(context, struct iso_context, context); |
2272 | struct descriptor *pd; |
2273 | __le32 *ir_header; |
2274 | void *p; |
2275 | |
2276 | for (pd = d; pd <= last; pd++) |
2277 | if (pd->transfer_status) |
2278 | break; |
2279 | if (pd > last) |
2280 | /* Descriptor(s) not done yet, stop iteration */ |
2281 | return 0; |
2282 | |
2283 | p = last + 1; |
2284 | copy_iso_headers(ctx, p); |
2285 | |
2286 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { |
2287 | ir_header = (__le32 *) p; |
2288 | ctx->base.callback.sc(&ctx->base, |
2289 | le32_to_cpu(ir_header[0]) & 0xffff, |
2290 | ctx->header_length, ctx->header, |
2291 | ctx->base.callback_data); |
2292 | ctx->header_length = 0; |
2293 | } |
2294 | |
2295 | return 1; |
2296 | } |
2297 | |
2298 | /* d == last because each descriptor block is only a single descriptor. */ |
2299 | static int handle_ir_buffer_fill(struct context *context, |
2300 | struct descriptor *d, |
2301 | struct descriptor *last) |
2302 | { |
2303 | struct iso_context *ctx = |
2304 | container_of(context, struct iso_context, context); |
2305 | |
2306 | if (!last->transfer_status) |
2307 | /* Descriptor(s) not done yet, stop iteration */ |
2308 | return 0; |
2309 | |
2310 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) |
2311 | ctx->base.callback.mc(&ctx->base, |
2312 | le32_to_cpu(last->data_address) + |
2313 | le16_to_cpu(last->req_count) - |
2314 | le16_to_cpu(last->res_count), |
2315 | ctx->base.callback_data); |
2316 | |
2317 | return 1; |
2318 | } |
2319 | |
2320 | static int handle_it_packet(struct context *context, |
2321 | struct descriptor *d, |
2322 | struct descriptor *last) |
2323 | { |
2324 | struct iso_context *ctx = |
2325 | container_of(context, struct iso_context, context); |
2326 | int i; |
2327 | struct descriptor *pd; |
2328 | |
2329 | for (pd = d; pd <= last; pd++) |
2330 | if (pd->transfer_status) |
2331 | break; |
2332 | if (pd > last) |
2333 | /* Descriptor(s) not done yet, stop iteration */ |
2334 | return 0; |
2335 | |
2336 | i = ctx->header_length; |
2337 | if (i + 4 < PAGE_SIZE) { |
2338 | /* Present this value as big-endian to match the receive code */ |
2339 | *(__be32 *)(ctx->header + i) = cpu_to_be32( |
2340 | ((u32)le16_to_cpu(pd->transfer_status) << 16) | |
2341 | le16_to_cpu(pd->res_count)); |
2342 | ctx->header_length += 4; |
2343 | } |
2344 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { |
2345 | ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count), |
2346 | ctx->header_length, ctx->header, |
2347 | ctx->base.callback_data); |
2348 | ctx->header_length = 0; |
2349 | } |
2350 | return 1; |
2351 | } |
2352 | |
2353 | static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels) |
2354 | { |
2355 | u32 hi = channels >> 32, lo = channels; |
2356 | |
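 | /* |
 | * Note added for clarity: clear every channel bit that is not in the |
 | * requested set before setting the requested ones, so this sequence |
 | * never enables a channel outside of 'channels'. |
 | */ |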
2357 | reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi); |
2358 | reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo); |
2359 | reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi); |
2360 | reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo); |
2361 | mmiowb(); |
2362 | ohci->mc_channels = channels; |
2363 | } |
2364 | |
2365 | static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, |
2366 | int type, int channel, size_t header_size) |
2367 | { |
2368 | struct fw_ohci *ohci = fw_ohci(card); |
2369 | struct iso_context *uninitialized_var(ctx); |
2370 | descriptor_callback_t uninitialized_var(callback); |
2371 | u64 *uninitialized_var(channels); |
2372 | u32 *uninitialized_var(mask), uninitialized_var(regs); |
2373 | unsigned long flags; |
2374 | int index, ret = -EBUSY; |
2375 | |
2376 | spin_lock_irqsave(&ohci->lock, flags); |
2377 | |
2378 | switch (type) { |
2379 | case FW_ISO_CONTEXT_TRANSMIT: |
2380 | mask = &ohci->it_context_mask; |
2381 | callback = handle_it_packet; |
2382 | index = ffs(*mask) - 1; |
2383 | if (index >= 0) { |
2384 | *mask &= ~(1 << index); |
2385 | regs = OHCI1394_IsoXmitContextBase(index); |
2386 | ctx = &ohci->it_context_list[index]; |
2387 | } |
2388 | break; |
2389 | |
2390 | case FW_ISO_CONTEXT_RECEIVE: |
2391 | channels = &ohci->ir_context_channels; |
2392 | mask = &ohci->ir_context_mask; |
2393 | callback = handle_ir_packet_per_buffer; |
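 | /* |
 | * Note added for clarity: hand out a context only if the requested |
 | * channel is still unclaimed (its bit is set in ir_context_channels) |
 | * and a free IR context exists in the mask. |
 | */ |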
2394 | index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; |
2395 | if (index >= 0) { |
2396 | *channels &= ~(1ULL << channel); |
2397 | *mask &= ~(1 << index); |
2398 | regs = OHCI1394_IsoRcvContextBase(index); |
2399 | ctx = &ohci->ir_context_list[index]; |
2400 | } |
2401 | break; |
2402 | |
2403 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2404 | mask = &ohci->ir_context_mask; |
2405 | callback = handle_ir_buffer_fill; |
2406 | index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; |
2407 | if (index >= 0) { |
2408 | ohci->mc_allocated = true; |
2409 | *mask &= ~(1 << index); |
2410 | regs = OHCI1394_IsoRcvContextBase(index); |
2411 | ctx = &ohci->ir_context_list[index]; |
2412 | } |
2413 | break; |
2414 | |
2415 | default: |
2416 | index = -1; |
2417 | ret = -ENOSYS; |
2418 | } |
2419 | |
2420 | spin_unlock_irqrestore(&ohci->lock, flags); |
2421 | |
2422 | if (index < 0) |
2423 | return ERR_PTR(ret); |
2424 | |
2425 | memset(ctx, 0, sizeof(*ctx)); |
2426 | ctx->header_length = 0; |
2427 | ctx->header = (void *) __get_free_page(GFP_KERNEL); |
2428 | if (ctx->header == NULL) { |
2429 | ret = -ENOMEM; |
2430 | goto out; |
2431 | } |
2432 | ret = context_init(&ctx->context, ohci, regs, callback); |
2433 | if (ret < 0) |
2434 | goto out_with_header; |
2435 | |
2436 | if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) |
2437 | set_multichannel_mask(ohci, 0); |
2438 | |
2439 | return &ctx->base; |
2440 | |
2441 | out_with_header: |
2442 | free_page((unsigned long)ctx->header); |
2443 | out: |
2444 | spin_lock_irqsave(&ohci->lock, flags); |
2445 | |
2446 | switch (type) { |
2447 | case FW_ISO_CONTEXT_RECEIVE: |
2448 | *channels |= 1ULL << channel; |
2449 | break; |
2450 | |
2451 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2452 | ohci->mc_allocated = false; |
2453 | break; |
2454 | } |
2455 | *mask |= 1 << index; |
2456 | |
2457 | spin_unlock_irqrestore(&ohci->lock, flags); |
2458 | |
2459 | return ERR_PTR(ret); |
2460 | } |
2461 | |
2462 | static int ohci_start_iso(struct fw_iso_context *base, |
2463 | s32 cycle, u32 sync, u32 tags) |
2464 | { |
2465 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2466 | struct fw_ohci *ohci = ctx->context.ohci; |
2467 | u32 control = IR_CONTEXT_ISOCH_HEADER, match; |
2468 | int index; |
2469 | |
2470 | switch (ctx->base.type) { |
2471 | case FW_ISO_CONTEXT_TRANSMIT: |
2472 | index = ctx - ohci->it_context_list; |
2473 | match = 0; |
2474 | if (cycle >= 0) |
2475 | match = IT_CONTEXT_CYCLE_MATCH_ENABLE | |
2476 | (cycle & 0x7fff) << 16; |
2477 | |
2478 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); |
2479 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); |
2480 | context_run(&ctx->context, match); |
2481 | break; |
2482 | |
2483 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2484 | control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; |
2485 | /* fall through */ |
2486 | case FW_ISO_CONTEXT_RECEIVE: |
2487 | index = ctx - ohci->ir_context_list; |
2488 | match = (tags << 28) | (sync << 8) | ctx->base.channel; |
2489 | if (cycle >= 0) { |
2490 | match |= (cycle & 0x07fff) << 12; |
2491 | control |= IR_CONTEXT_CYCLE_MATCH_ENABLE; |
2492 | } |
2493 | |
2494 | reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index); |
2495 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); |
2496 | reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); |
2497 | context_run(&ctx->context, control); |
2498 | break; |
2499 | } |
2500 | |
2501 | return 0; |
2502 | } |
2503 | |
2504 | static int ohci_stop_iso(struct fw_iso_context *base) |
2505 | { |
2506 | struct fw_ohci *ohci = fw_ohci(base->card); |
2507 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2508 | int index; |
2509 | |
2510 | switch (ctx->base.type) { |
2511 | case FW_ISO_CONTEXT_TRANSMIT: |
2512 | index = ctx - ohci->it_context_list; |
2513 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); |
2514 | break; |
2515 | |
2516 | case FW_ISO_CONTEXT_RECEIVE: |
2517 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2518 | index = ctx - ohci->ir_context_list; |
2519 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); |
2520 | break; |
2521 | } |
2522 | flush_writes(ohci); |
2523 | context_stop(&ctx->context); |
2524 | |
2525 | return 0; |
2526 | } |
2527 | |
2528 | static void ohci_free_iso_context(struct fw_iso_context *base) |
2529 | { |
2530 | struct fw_ohci *ohci = fw_ohci(base->card); |
2531 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2532 | unsigned long flags; |
2533 | int index; |
2534 | |
2535 | ohci_stop_iso(base); |
2536 | context_release(&ctx->context); |
2537 | free_page((unsigned long)ctx->header); |
2538 | |
2539 | spin_lock_irqsave(&ohci->lock, flags); |
2540 | |
2541 | switch (base->type) { |
2542 | case FW_ISO_CONTEXT_TRANSMIT: |
2543 | index = ctx - ohci->it_context_list; |
2544 | ohci->it_context_mask |= 1 << index; |
2545 | break; |
2546 | |
2547 | case FW_ISO_CONTEXT_RECEIVE: |
2548 | index = ctx - ohci->ir_context_list; |
2549 | ohci->ir_context_mask |= 1 << index; |
2550 | ohci->ir_context_channels |= 1ULL << base->channel; |
2551 | break; |
2552 | |
2553 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2554 | index = ctx - ohci->ir_context_list; |
2555 | ohci->ir_context_mask |= 1 << index; |
2556 | ohci->ir_context_channels |= ohci->mc_channels; |
2557 | ohci->mc_channels = 0; |
2558 | ohci->mc_allocated = false; |
2559 | break; |
2560 | } |
2561 | |
2562 | spin_unlock_irqrestore(&ohci->lock, flags); |
2563 | } |
2564 | |
2565 | static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) |
2566 | { |
2567 | struct fw_ohci *ohci = fw_ohci(base->card); |
2568 | unsigned long flags; |
2569 | int ret; |
2570 | |
2571 | switch (base->type) { |
2572 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2573 | |
2574 | spin_lock_irqsave(&ohci->lock, flags); |
2575 | |
2576 | /* Don't allow multichannel to grab other contexts' channels. */ |
2577 | if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { |
2578 | *channels = ohci->ir_context_channels; |
2579 | ret = -EBUSY; |
2580 | } else { |
2581 | set_multichannel_mask(ohci, *channels); |
2582 | ret = 0; |
2583 | } |
2584 | |
2585 | spin_unlock_irqrestore(&ohci->lock, flags); |
2586 | |
2587 | break; |
2588 | default: |
2589 | ret = -EINVAL; |
2590 | } |
2591 | |
2592 | return ret; |
2593 | } |
2594 | |
2595 | static int queue_iso_transmit(struct iso_context *ctx, |
2596 | struct fw_iso_packet *packet, |
2597 | struct fw_iso_buffer *buffer, |
2598 | unsigned long payload) |
2599 | { |
2600 | struct descriptor *d, *last, *pd; |
2601 | struct fw_iso_packet *p; |
2602 | __le32 *header; |
2603 | dma_addr_t d_bus, page_bus; |
2604 | u32 z, header_z, payload_z, irq; |
2605 | u32 payload_index, payload_end_index, next_page_index; |
2606 | int page, end_page, i, length, offset; |
2607 | |
2608 | p = packet; |
2609 | payload_index = payload; |
2610 | |
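 | /* |
 | * Note added for clarity: z counts the 16-byte descriptor slots this |
 | * packet needs - two for the immediate iso header (the descriptor plus |
 | * 8 bytes of header data stored in the following slot) unless this is |
 | * a skip packet, one more if extra header quadlets are supplied, and |
 | * one per payload page touched. |
 | */ |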
2611 | if (p->skip) |
2612 | z = 1; |
2613 | else |
2614 | z = 2; |
2615 | if (p->header_length > 0) |
2616 | z++; |
2617 | |
2618 | /* Determine the index of the first page past the end of the payload. */ |
2619 | end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT; |
2620 | if (p->payload_length > 0) |
2621 | payload_z = end_page - (payload_index >> PAGE_SHIFT); |
2622 | else |
2623 | payload_z = 0; |
2624 | |
2625 | z += payload_z; |
2626 | |
2627 | /* Get header size in number of descriptors. */ |
2628 | header_z = DIV_ROUND_UP(p->header_length, sizeof(*d)); |
2629 | |
2630 | d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); |
2631 | if (d == NULL) |
2632 | return -ENOMEM; |
2633 | |
2634 | if (!p->skip) { |
2635 | d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); |
2636 | d[0].req_count = cpu_to_le16(8); |
2637 | /* |
2638 | * Link the skip address to this descriptor itself. This causes |
2639 | * a context to skip a cycle whenever lost cycles or FIFO |
2640 | * overruns occur, without dropping the data. The application |
2641 | * should then decide whether this is an error condition or not. |
2642 | * FIXME: Make the context's cycle-lost behaviour configurable? |
2643 | */ |
2644 | d[0].branch_address = cpu_to_le32(d_bus | z); |
2645 | |
2646 | header = (__le32 *) &d[1]; |
2647 | header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | |
2648 | IT_HEADER_TAG(p->tag) | |
2649 | IT_HEADER_TCODE(TCODE_STREAM_DATA) | |
2650 | IT_HEADER_CHANNEL(ctx->base.channel) | |
2651 | IT_HEADER_SPEED(ctx->base.speed)); |
2652 | header[1] = |
2653 | cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length + |
2654 | p->payload_length)); |
2655 | } |
2656 | |
2657 | if (p->header_length > 0) { |
2658 | d[2].req_count = cpu_to_le16(p->header_length); |
2659 | d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d)); |
2660 | memcpy(&d[z], p->header, p->header_length); |
2661 | } |
2662 | |
2663 | pd = d + z - payload_z; |
2664 | payload_end_index = payload_index + p->payload_length; |
2665 | for (i = 0; i < payload_z; i++) { |
2666 | page = payload_index >> PAGE_SHIFT; |
2667 | offset = payload_index & ~PAGE_MASK; |
2668 | next_page_index = (page + 1) << PAGE_SHIFT; |
2669 | length = |
2670 | min(next_page_index, payload_end_index) - payload_index; |
2671 | pd[i].req_count = cpu_to_le16(length); |
2672 | |
2673 | page_bus = page_private(buffer->pages[page]); |
2674 | pd[i].data_address = cpu_to_le32(page_bus + offset); |
2675 | |
2676 | payload_index += length; |
2677 | } |
2678 | |
2679 | if (p->interrupt) |
2680 | irq = DESCRIPTOR_IRQ_ALWAYS; |
2681 | else |
2682 | irq = DESCRIPTOR_NO_IRQ; |
2683 | |
2684 | last = z == 2 ? d : d + z - 1; |
2685 | last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | |
2686 | DESCRIPTOR_STATUS | |
2687 | DESCRIPTOR_BRANCH_ALWAYS | |
2688 | irq); |
2689 | |
2690 | context_append(&ctx->context, d, z, header_z); |
2691 | |
2692 | return 0; |
2693 | } |
2694 | |
2695 | static int queue_iso_packet_per_buffer(struct iso_context *ctx, |
2696 | struct fw_iso_packet *packet, |
2697 | struct fw_iso_buffer *buffer, |
2698 | unsigned long payload) |
2699 | { |
2700 | struct descriptor *d, *pd; |
2701 | dma_addr_t d_bus, page_bus; |
2702 | u32 z, header_z, rest; |
2703 | int i, j, length; |
2704 | int page, offset, packet_count, header_size, payload_per_buffer; |
2705 | |
2706 | /* |
2707 | * The OHCI controller puts the isochronous header and trailer in the |
2708 | * buffer, so we need at least 8 bytes. |
2709 | */ |
2710 | packet_count = packet->header_length / ctx->base.header_size; |
2711 | header_size = max(ctx->base.header_size, (size_t)8); |
2712 | |
2713 | /* Get header size in number of descriptors. */ |
2714 | header_z = DIV_ROUND_UP(header_size, sizeof(*d)); |
2715 | page = payload >> PAGE_SHIFT; |
2716 | offset = payload & ~PAGE_MASK; |
2717 | payload_per_buffer = packet->payload_length / packet_count; |
2718 | |
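 | /* |
 | * Note added for clarity: each packet gets its own descriptor block - |
 | * the first INPUT_MORE receives the iso header/trailer into the spare |
 | * slots right after the block (its data_address points back into it), |
 | * the following descriptors cover the payload pages, and the last one |
 | * is rewritten below as INPUT_LAST with a branch. |
 | */ |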
2719 | for (i = 0; i < packet_count; i++) { |
2720 | /* d points to the header descriptor */ |
2721 | z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1; |
2722 | d = context_get_descriptors(&ctx->context, |
2723 | z + header_z, &d_bus); |
2724 | if (d == NULL) |
2725 | return -ENOMEM; |
2726 | |
2727 | d->control = cpu_to_le16(DESCRIPTOR_STATUS | |
2728 | DESCRIPTOR_INPUT_MORE); |
2729 | if (packet->skip && i == 0) |
2730 | d->control |= cpu_to_le16(DESCRIPTOR_WAIT); |
2731 | d->req_count = cpu_to_le16(header_size); |
2732 | d->res_count = d->req_count; |
2733 | d->transfer_status = 0; |
2734 | d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); |
2735 | |
2736 | rest = payload_per_buffer; |
2737 | pd = d; |
2738 | for (j = 1; j < z; j++) { |
2739 | pd++; |
2740 | pd->control = cpu_to_le16(DESCRIPTOR_STATUS | |
2741 | DESCRIPTOR_INPUT_MORE); |
2742 | |
2743 | if (offset + rest < PAGE_SIZE) |
2744 | length = rest; |
2745 | else |
2746 | length = PAGE_SIZE - offset; |
2747 | pd->req_count = cpu_to_le16(length); |
2748 | pd->res_count = pd->req_count; |
2749 | pd->transfer_status = 0; |
2750 | |
2751 | page_bus = page_private(buffer->pages[page]); |
2752 | pd->data_address = cpu_to_le32(page_bus + offset); |
2753 | |
2754 | offset = (offset + length) & ~PAGE_MASK; |
2755 | rest -= length; |
2756 | if (offset == 0) |
2757 | page++; |
2758 | } |
2759 | pd->control = cpu_to_le16(DESCRIPTOR_STATUS | |
2760 | DESCRIPTOR_INPUT_LAST | |
2761 | DESCRIPTOR_BRANCH_ALWAYS); |
2762 | if (packet->interrupt && i == packet_count - 1) |
2763 | pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); |
2764 | |
2765 | context_append(&ctx->context, d, z, header_z); |
2766 | } |
2767 | |
2768 | return 0; |
2769 | } |
2770 | |
2771 | static int queue_iso_buffer_fill(struct iso_context *ctx, |
2772 | struct fw_iso_packet *packet, |
2773 | struct fw_iso_buffer *buffer, |
2774 | unsigned long payload) |
2775 | { |
2776 | struct descriptor *d; |
2777 | dma_addr_t d_bus, page_bus; |
2778 | int page, offset, rest, z, i, length; |
2779 | |
2780 | page = payload >> PAGE_SHIFT; |
2781 | offset = payload & ~PAGE_MASK; |
2782 | rest = packet->payload_length; |
2783 | |
2784 | /* We need one descriptor for each page in the buffer. */ |
2785 | z = DIV_ROUND_UP(offset + rest, PAGE_SIZE); |
2786 | |
2787 | if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) |
2788 | return -EFAULT; |
2789 | |
2790 | for (i = 0; i < z; i++) { |
2791 | d = context_get_descriptors(&ctx->context, 1, &d_bus); |
2792 | if (d == NULL) |
2793 | return -ENOMEM; |
2794 | |
2795 | d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | |
2796 | DESCRIPTOR_BRANCH_ALWAYS); |
2797 | if (packet->skip && i == 0) |
2798 | d->control |= cpu_to_le16(DESCRIPTOR_WAIT); |
2799 | if (packet->interrupt && i == z - 1) |
2800 | d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); |
2801 | |
2802 | if (offset + rest < PAGE_SIZE) |
2803 | length = rest; |
2804 | else |
2805 | length = PAGE_SIZE - offset; |
2806 | d->req_count = cpu_to_le16(length); |
2807 | d->res_count = d->req_count; |
2808 | d->transfer_status = 0; |
2809 | |
2810 | page_bus = page_private(buffer->pages[page]); |
2811 | d->data_address = cpu_to_le32(page_bus + offset); |
2812 | |
2813 | rest -= length; |
2814 | offset = 0; |
2815 | page++; |
2816 | |
2817 | context_append(&ctx->context, d, 1, 0); |
2818 | } |
2819 | |
2820 | return 0; |
2821 | } |
2822 | |
2823 | static int ohci_queue_iso(struct fw_iso_context *base, |
2824 | struct fw_iso_packet *packet, |
2825 | struct fw_iso_buffer *buffer, |
2826 | unsigned long payload) |
2827 | { |
2828 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2829 | unsigned long flags; |
2830 | int ret = -ENOSYS; |
2831 | |
2832 | spin_lock_irqsave(&ctx->context.ohci->lock, flags); |
2833 | switch (base->type) { |
2834 | case FW_ISO_CONTEXT_TRANSMIT: |
2835 | ret = queue_iso_transmit(ctx, packet, buffer, payload); |
2836 | break; |
2837 | case FW_ISO_CONTEXT_RECEIVE: |
2838 | ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); |
2839 | break; |
2840 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2841 | ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); |
2842 | break; |
2843 | } |
2844 | spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); |
2845 | |
2846 | return ret; |
2847 | } |
2848 | |
2849 | static const struct fw_card_driver ohci_driver = { |
2850 | .enable = ohci_enable, |
2851 | .read_phy_reg = ohci_read_phy_reg, |
2852 | .update_phy_reg = ohci_update_phy_reg, |
2853 | .set_config_rom = ohci_set_config_rom, |
2854 | .send_request = ohci_send_request, |
2855 | .send_response = ohci_send_response, |
2856 | .cancel_packet = ohci_cancel_packet, |
2857 | .enable_phys_dma = ohci_enable_phys_dma, |
2858 | .read_csr = ohci_read_csr, |
2859 | .write_csr = ohci_write_csr, |
2860 | |
2861 | .allocate_iso_context = ohci_allocate_iso_context, |
2862 | .free_iso_context = ohci_free_iso_context, |
2863 | .set_iso_channels = ohci_set_iso_channels, |
2864 | .queue_iso = ohci_queue_iso, |
2865 | .start_iso = ohci_start_iso, |
2866 | .stop_iso = ohci_stop_iso, |
2867 | }; |
2868 | |
2869 | #ifdef CONFIG_PPC_PMAC |
2870 | static void pmac_ohci_on(struct pci_dev *dev) |
2871 | { |
2872 | if (machine_is(powermac)) { |
2873 | struct device_node *ofn = pci_device_to_OF_node(dev); |
2874 | |
2875 | if (ofn) { |
2876 | pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1); |
2877 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); |
2878 | } |
2879 | } |
2880 | } |
2881 | |
2882 | static void pmac_ohci_off(struct pci_dev *dev) |
2883 | { |
2884 | if (machine_is(powermac)) { |
2885 | struct device_node *ofn = pci_device_to_OF_node(dev); |
2886 | |
2887 | if (ofn) { |
2888 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); |
2889 | pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0); |
2890 | } |
2891 | } |
2892 | } |
2893 | #else |
2894 | static inline void pmac_ohci_on(struct pci_dev *dev) {} |
2895 | static inline void pmac_ohci_off(struct pci_dev *dev) {} |
2896 | #endif /* CONFIG_PPC_PMAC */ |
2897 | |
2898 | static int __devinit pci_probe(struct pci_dev *dev, |
2899 | const struct pci_device_id *ent) |
2900 | { |
2901 | struct fw_ohci *ohci; |
2902 | u32 bus_options, max_receive, link_speed, version; |
2903 | u64 guid; |
2904 | int i, err, n_ir, n_it; |
2905 | size_t size; |
2906 | |
2907 | ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); |
2908 | if (ohci == NULL) { |
2909 | err = -ENOMEM; |
2910 | goto fail; |
2911 | } |
2912 | |
2913 | fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); |
2914 | |
2915 | pmac_ohci_on(dev); |
2916 | |
2917 | err = pci_enable_device(dev); |
2918 | if (err) { |
2919 | fw_error("Failed to enable OHCI hardware\n"); |
2920 | goto fail_free; |
2921 | } |
2922 | |
2923 | pci_set_master(dev); |
2924 | pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); |
2925 | pci_set_drvdata(dev, ohci); |
2926 | |
2927 | spin_lock_init(&ohci->lock); |
2928 | mutex_init(&ohci->phy_reg_mutex); |
2929 | |
2930 | tasklet_init(&ohci->bus_reset_tasklet, |
2931 | bus_reset_tasklet, (unsigned long)ohci); |
2932 | |
2933 | err = pci_request_region(dev, 0, ohci_driver_name); |
2934 | if (err) { |
2935 | fw_error("MMIO resource unavailable\n"); |
2936 | goto fail_disable; |
2937 | } |
2938 | |
2939 | ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); |
2940 | if (ohci->registers == NULL) { |
2941 | fw_error("Failed to remap registers\n"); |
2942 | err = -ENXIO; |
2943 | goto fail_iomem; |
2944 | } |
2945 | |
2946 | for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) |
2947 | if ((ohci_quirks[i].vendor == dev->vendor) && |
2948 | (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID || |
2949 | ohci_quirks[i].device == dev->device) && |
2950 | (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID || |
2951 | ohci_quirks[i].revision >= dev->revision)) { |
2952 | ohci->quirks = ohci_quirks[i].flags; |
2953 | break; |
2954 | } |
2955 | if (param_quirks) |
2956 | ohci->quirks = param_quirks; |
2957 | |
2958 | ar_context_init(&ohci->ar_request_ctx, ohci, |
2959 | OHCI1394_AsReqRcvContextControlSet); |
2960 | |
2961 | ar_context_init(&ohci->ar_response_ctx, ohci, |
2962 | OHCI1394_AsRspRcvContextControlSet); |
2963 | |
2964 | context_init(&ohci->at_request_ctx, ohci, |
2965 | OHCI1394_AsReqTrContextControlSet, handle_at_packet); |
2966 | |
2967 | context_init(&ohci->at_response_ctx, ohci, |
2968 | OHCI1394_AsRspTrContextControlSet, handle_at_packet); |
2969 | |
2970 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); |
2971 | ohci->ir_context_channels = ~0ULL; |
2972 | ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); |
2973 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); |
2974 | n_ir = hweight32(ohci->ir_context_mask); |
2975 | size = sizeof(struct iso_context) * n_ir; |
2976 | ohci->ir_context_list = kzalloc(size, GFP_KERNEL); |
2977 | |
2978 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); |
2979 | ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); |
2980 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); |
2981 | n_it = hweight32(ohci->it_context_mask); |
2982 | size = sizeof(struct iso_context) * n_it; |
2983 | ohci->it_context_list = kzalloc(size, GFP_KERNEL); |
2984 | |
2985 | if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { |
2986 | err = -ENOMEM; |
2987 | goto fail_contexts; |
2988 | } |
2989 | |
2990 | /* self-id dma buffer allocation */ |
2991 | ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device, |
2992 | SELF_ID_BUF_SIZE, |
2993 | &ohci->self_id_bus, |
2994 | GFP_KERNEL); |
2995 | if (ohci->self_id_cpu == NULL) { |
2996 | err = -ENOMEM; |
2997 | goto fail_contexts; |
2998 | } |
2999 | |
3000 | bus_options = reg_read(ohci, OHCI1394_BusOptions); |
3001 | max_receive = (bus_options >> 12) & 0xf; |
3002 | link_speed = bus_options & 0x7; |
3003 | guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | |
3004 | reg_read(ohci, OHCI1394_GUIDLo); |
3005 | |
3006 | err = fw_card_add(&ohci->card, max_receive, link_speed, guid); |
3007 | if (err) |
3008 | goto fail_self_id; |
3009 | |
3010 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; |
3011 | fw_notify("Added fw-ohci device %s, OHCI v%x.%x, " |
3012 | "%d IR + %d IT contexts, quirks 0x%x\n", |
3013 | dev_name(&dev->dev), version >> 16, version & 0xff, |
3014 | n_ir, n_it, ohci->quirks); |
3015 | |
3016 | return 0; |
3017 | |
3018 | fail_self_id: |
3019 | dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE, |
3020 | ohci->self_id_cpu, ohci->self_id_bus); |
3021 | fail_contexts: |
3022 | kfree(ohci->ir_context_list); |
3023 | kfree(ohci->it_context_list); |
3024 | context_release(&ohci->at_response_ctx); |
3025 | context_release(&ohci->at_request_ctx); |
3026 | ar_context_release(&ohci->ar_response_ctx); |
3027 | ar_context_release(&ohci->ar_request_ctx); |
3028 | pci_iounmap(dev, ohci->registers); |
3029 | fail_iomem: |
3030 | pci_release_region(dev, 0); |
3031 | fail_disable: |
3032 | pci_disable_device(dev); |
3033 | fail_free: |
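 | /* |
 | * Editorial assumption: this relies on 'card' being the first member |
 | * of struct fw_ohci, so that freeing &ohci->card releases the whole |
 | * kzalloc'ed structure. |
 | */ |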
3034 | kfree(&ohci->card); |
3035 | pmac_ohci_off(dev); |
3036 | fail: |
3037 | if (err == -ENOMEM) |
3038 | fw_error("Out of memory\n"); |
3039 | |
3040 | return err; |
3041 | } |
3042 | |
3043 | static void pci_remove(struct pci_dev *dev) |
3044 | { |
3045 | struct fw_ohci *ohci; |
3046 | |
3047 | ohci = pci_get_drvdata(dev); |
3048 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); |
3049 | flush_writes(ohci); |
3050 | fw_core_remove_card(&ohci->card); |
3051 | |
3052 | /* |
3053 | * FIXME: Fail all pending packets here, now that the upper |
3054 | * layers can't queue any more. |
3055 | */ |
3056 | |
3057 | software_reset(ohci); |
3058 | free_irq(dev->irq, ohci); |
3059 | |
3060 | if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom) |
3061 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
3062 | ohci->next_config_rom, ohci->next_config_rom_bus); |
3063 | if (ohci->config_rom) |
3064 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
3065 | ohci->config_rom, ohci->config_rom_bus); |
3066 | dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE, |
3067 | ohci->self_id_cpu, ohci->self_id_bus); |
3068 | ar_context_release(&ohci->ar_request_ctx); |
3069 | ar_context_release(&ohci->ar_response_ctx); |
3070 | context_release(&ohci->at_request_ctx); |
3071 | context_release(&ohci->at_response_ctx); |
3072 | kfree(ohci->it_context_list); |
3073 | kfree(ohci->ir_context_list); |
3074 | pci_disable_msi(dev); |
3075 | pci_iounmap(dev, ohci->registers); |
3076 | pci_release_region(dev, 0); |
3077 | pci_disable_device(dev); |
3078 | kfree(&ohci->card); |
3079 | pmac_ohci_off(dev); |
3080 | |
3081 | fw_notify("Removed fw-ohci device.\n"); |
3082 | } |
3083 | |
3084 | #ifdef CONFIG_PM |
3085 | static int pci_suspend(struct pci_dev *dev, pm_message_t state) |
3086 | { |
3087 | struct fw_ohci *ohci = pci_get_drvdata(dev); |
3088 | int err; |
3089 | |
3090 | software_reset(ohci); |
3091 | free_irq(dev->irq, ohci); |
3092 | pci_disable_msi(dev); |
3093 | err = pci_save_state(dev); |
3094 | if (err) { |
3095 | fw_error("pci_save_state failed\n"); |
3096 | return err; |
3097 | } |
3098 | err = pci_set_power_state(dev, pci_choose_state(dev, state)); |
3099 | if (err) |
3100 | fw_error("pci_set_power_state failed with %d\n", err); |
3101 | pmac_ohci_off(dev); |
3102 | |
3103 | return 0; |
3104 | } |
3105 | |
3106 | static int pci_resume(struct pci_dev *dev) |
3107 | { |
3108 | struct fw_ohci *ohci = pci_get_drvdata(dev); |
3109 | int err; |
3110 | |
3111 | pmac_ohci_on(dev); |
3112 | pci_set_power_state(dev, PCI_D0); |
3113 | pci_restore_state(dev); |
3114 | err = pci_enable_device(dev); |
3115 | if (err) { |
3116 | fw_error("pci_enable_device failed\n"); |
3117 | return err; |
3118 | } |
3119 | |
3120 | return ohci_enable(&ohci->card, NULL, 0); |
3121 | } |
3122 | #endif |
3123 | |
3124 | static const struct pci_device_id pci_table[] = { |
3125 | { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, |
3126 | { } |
3127 | }; |
3128 | |
3129 | MODULE_DEVICE_TABLE(pci, pci_table); |
3130 | |
3131 | static struct pci_driver fw_ohci_pci_driver = { |
3132 | .name = ohci_driver_name, |
3133 | .id_table = pci_table, |
3134 | .probe = pci_probe, |
3135 | .remove = pci_remove, |
3136 | #ifdef CONFIG_PM |
3137 | .resume = pci_resume, |
3138 | .suspend = pci_suspend, |
3139 | #endif |
3140 | }; |
3141 | |
3142 | MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); |
3143 | MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); |
3144 | MODULE_LICENSE("GPL"); |
3145 | |
3146 | /* Provide a module alias so root-on-sbp2 initrds don't break. */ |
3147 | #ifndef CONFIG_IEEE1394_OHCI1394_MODULE |
3148 | MODULE_ALIAS("ohci1394"); |
3149 | #endif |
3150 | |
3151 | static int __init fw_ohci_init(void) |
3152 | { |
3153 | return pci_register_driver(&fw_ohci_pci_driver); |
3154 | } |
3155 | |
3156 | static void __exit fw_ohci_cleanup(void) |
3157 | { |
3158 | pci_unregister_driver(&fw_ohci_pci_driver); |
3159 | } |
3160 | |
3161 | module_init(fw_ohci_init); |
3162 | module_exit(fw_ohci_cleanup); |
3163 |