/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

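/*
 * Even-numbered physical channels live in one register (e.g. ACTIVE) and
 * odd-numbered channels in its sibling (e.g. ACTIVO); each channel has a
 * 2-bit field, so channels 2n and 2n+1 share the same bit position.
 */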
/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan) (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY 100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts before giving up on trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE (1 << 31)
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0

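/* Used to size the register backup array for the larger of the v4a/v4b sets */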
#define MAX(a, b) (((a) < (b)) ? (b) : (a))

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP = 0,
	D40_DMA_RUN = 1,
	D40_DMA_SUSPEND_REQ = 2,
	D40_DMA_SUSPENDED = 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */
enum d40_events {
	D40_DEACTIVATE_EVENTLINE = 0,
	D40_ACTIVATE_EVENTLINE = 1,
	D40_SUSPEND_REQ_EVENTLINE = 2,
	D40_ROUND_EVENTLINE = 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * Since 9540 and 8540 have the same HW revision,
 * use v4a for 9540 or earlier
 * use v4b for 8540 or later
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 */
static u32 d40_backup_regs_v4a[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
	D40_DREG_CPSEG1,
	D40_DREG_CPSEG2,
	D40_DREG_CPSEG3,
	D40_DREG_CPSEG4,
	D40_DREG_CPSEG5,
	D40_DREG_CPCEG1,
	D40_DREG_CPCEG2,
	D40_DREG_CPCEG3,
	D40_DREG_CPCEG4,
	D40_DREG_CPCEG5,
	D40_DREG_CRSEG1,
	D40_DREG_CRSEG2,
	D40_DREG_CRSEG3,
	D40_DREG_CRSEG4,
	D40_DREG_CRSEG5,
	D40_DREG_CRCEG1,
	D40_DREG_CRCEG2,
	D40_DREG_CRCEG3,
	D40_DREG_CRCEG4,
	D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

static struct d40_interrupt_lookup il_v4a[] = {
	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
	{D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
	{D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
	{D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
	{D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};

static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void *base;
	int size;
	dma_addr_t dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir lli_log;

	struct d40_lli_pool lli_pool;
	int lli_len;
	int lli_current;
	int lcla_alloc;

	struct dma_async_tx_descriptor txd;
	struct list_head node;

	bool is_in_client_list;
	bool cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: map of which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void *base;
	dma_addr_t dma_addr;
	void *base_unaligned;
	int pages;
	spinlock_t lock;
	struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 * @use_soft_lli: True if the channel's linked lists are managed by SW.
 */
struct d40_phy_res {
	spinlock_t lock;
	bool reserved;
	int num;
	u32 allocated_src;
	u32 allocated_dst;
	bool use_soft_lli;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @done: Completed jobs
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t lock;
	int log_num;
	int pending_tx;
	bool busy;
	struct d40_phy_res *phy_chan;
	struct dma_chan chan;
	struct tasklet_struct tasklet;
	struct list_head client;
	struct list_head pending_queue;
	struct list_head active;
	struct list_head done;
	struct list_head queue;
	struct list_head prepare_queue;
	struct stedma40_chan_cfg dma_cfg;
	bool configured;
	struct d40_base *base;
	/* Default register configurations */
	u32 src_def_cfg;
	u32 dst_def_cfg;
	struct d40_def_lcsp log_def;
	struct d40_log_lli_full *lcpa;
	/* Runtime reconfiguration */
	dma_addr_t runtime_addr;
	enum dma_transfer_direction runtime_direction;
};

/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
	u32 *backup;
	u32 backup_size;
	u32 realtime_en;
	u32 realtime_clear;
	u32 high_prio_en;
	u32 high_prio_clear;
	u32 interrupt_en;
	u32 interrupt_clear;
	struct d40_interrupt_lookup *il;
	u32 il_size;
	struct d40_reg_val *init_reg;
	u32 init_reg_size;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_parms: DMA parameters.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @initialized: true if the dma has been initialized
 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 * DMA controller
 */
struct d40_base {
	spinlock_t interrupt_lock;
	spinlock_t execmd_lock;
	struct device *dev;
	void __iomem *virtbase;
	u8 rev:4;
	struct clk *clk;
	phys_addr_t phy_start;
	resource_size_t phy_size;
	int irq;
	int num_phy_chans;
	int num_log_chans;
	struct device_dma_parameters dma_parms;
	struct dma_device dma_both;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct d40_chan *phy_chans;
	struct d40_chan *log_chans;
	struct d40_chan **lookup_log_chans;
	struct d40_chan **lookup_phy_chans;
	struct stedma40_platform_data *plat_data;
	struct regulator *lcpa_regulator;
	/* Physical half channels */
	struct d40_phy_res *phy_res;
	struct d40_lcla_pool lcla_pool;
	void *lcpa_base;
	dma_addr_t phy_lcpa;
	resource_size_t lcpa_size;
	struct kmem_cache *desc_slab;
	u32 reg_val_backup[BACKUP_REGS_SZ];
	u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)];
	u32 *reg_val_backup_chan;
	u16 gcc_pwr_off_mask;
	bool initialized;
	struct d40_gen_dmac gen_dmac;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...) \
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...) \
	d40_err(chan2dev(d40c), format, ## arg)

static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
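		/*
		 * Over-allocate by one alignment unit so the src/dst
		 * arrays can be aligned with PTR_ALIGN() below.
		 */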
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	/*
	 * Allocate both src and dst at the same time, therefore the half
	 * starts at 1, since 0 can't be used (zero is used as the end marker).
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (!d40c->base->lcla_pool.alloc_map[idx]) {
			d40c->base->lcla_pool.alloc_map[idx] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
			d40c->base->lcla_pool.alloc_map[idx] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

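	/*
	 * First try to recycle an already acked descriptor from the client
	 * list; fall back to allocating a fresh one from the slab cache.
	 */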
	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
		}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->done);
}

static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we didn't
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		/*
		 * If the channel is expected to use only soft_lli don't
		 * allocate a lcla. This is to avoid a HW issue that exists
		 * in some controllers during a peripheral to memory transfer
		 * that uses linked lists.
		 */
		if (!(chan->phy_chan->use_soft_lli &&
		      chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
			curr_lcla = d40_lcla_alloc_one(chan, desc);

		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need to
	 * link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
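		/*
		 * Each physical channel owns 1024 bytes of LCLA space: 64
		 * link slots of one src plus one dst d40_log_lli (8 bytes
		 * each), hence the offset computation below.
		 */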
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla fails...
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		/*
		 * Cache maintenance is not needed if lcla is
		 * mapped in esram
		 */
		if (!use_esram_lcla) {
			dma_sync_single_range_for_device(chan->base->dev,
						pool->dma_addr, lcla_offset,
						2 * sizeof(struct d40_log_lli),
						DMA_TO_DEVICE);
		}
		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->pending_queue))
		return NULL;

	d = list_first_entry(&d40c->pending_queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
	if (list_empty(&d40c->done))
		return NULL;

	return list_first_entry(&d40c->done, struct d40_desc, node);
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}

/*
 * The DMA only supports transmitting packets up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

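	/*
	 * If rounding seg_max up to the wider data width pushed it past the
	 * HW segment limit, step back one element of the wider width.
	 */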
	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}

#ifdef CONFIG_PM
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
			 u32 *regaddr, int num, bool save)
{
	int i;

	for (i = 0; i < num; i++) {
		void __iomem *addr = baseaddr + regaddr[i];

		if (save)
			backup[i] = readl_relaxed(addr);
		else
			writel_relaxed(backup[i], addr);
	}
}

static void d40_save_restore_registers(struct d40_base *base, bool save)
{
	int i;

	/* Save/Restore channel specific registers */
	for (i = 0; i < base->num_phy_chans; i++) {
		void __iomem *addr;
		int idx;

		if (base->phy_res[i].reserved)
			continue;

		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
		idx = i * ARRAY_SIZE(d40_backup_regs_chan);

		dma40_backup(addr, &base->reg_val_backup_chan[idx],
			     d40_backup_regs_chan,
			     ARRAY_SIZE(d40_backup_regs_chan),
			     save);
	}

	/* Save/Restore global registers */
	dma40_backup(base->virtbase, base->reg_val_backup,
		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
		     save);

	/* Save/Restore registers only existing on dma40 v3 and later */
	if (base->gen_dmac.backup)
		dma40_backup(base->virtbase, base->reg_val_backup_v4,
			     base->gen_dmac.backup,
			     base->gen_dmac.backup_size,
			     save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
}
#endif

static int __d40_execute_command_phy(struct d40_chan *d40c,
				     enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	if (command == D40_DMA_STOP) {
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

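	/*
	 * Each channel has a 2-bit command field in the shared ACTIVE/ACTIVO
	 * register; preserve the other channels' fields while writing ours.
	 */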
	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {
		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to suspend the chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num,
				 status);
			dump_stack();
			ret = -EBUSY;
		}
	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

	/* Release completed descriptors */
	while ((d40d = d40_first_done(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	/* Release descriptors in prepare queue */
	if (!list_empty(&d40c->prepare_queue))
		list_for_each_entry_safe(d40d, _d,
					 &d40c->prepare_queue, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
}

static void __d40_config_set_event(struct d40_chan *d40c,
				   enum d40_events event_type, u32 event,
				   int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;
	u32 status;

	switch (event_type) {

	case D40_DEACTIVATE_EVENTLINE:

		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		break;

	case D40_SUSPEND_REQ_EVENTLINE:
		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
			  D40_EVENTLINE_POS(event);

		if (status == D40_DEACTIVATE_EVENTLINE ||
		    status == D40_SUSPEND_REQ_EVENTLINE)
			break;

		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
				  D40_EVENTLINE_POS(event);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DEACTIVATE_EVENTLINE)
				break;
		}

		if (tries == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to stop the event_line chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num, status);
		}
		break;

	case D40_ACTIVATE_EVENTLINE:
	/*
	 * The hardware sometimes doesn't register the enable when src and dst
	 * event lines are active on the same logical channel. Retry to ensure
	 * it does. Usually only one retry is sufficient.
	 */
		tries = 100;
		while (--tries) {
			writel((D40_ACTIVATE_EVENTLINE <<
				D40_EVENTLINE_POS(event)) |
			       ~D40_EVENTLINE_MASK(event), addr);

			if (readl(addr) & D40_EVENTLINE_MASK(event))
				break;
		}

		if (tries != 99)
			dev_dbg(chan2dev(d40c),
				"[%s] workaround enable S%cLNK (%d tries)\n",
				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
				100 - tries);

		WARN_ON(!tries);
		break;

	case D40_ROUND_EVENTLINE:
		BUG();
		break;
	}
}

static void d40_config_set_event(struct d40_chan *d40c,
				 enum d40_events event_type)
{
	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SSLNK);
	}

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SDLNK);
	}
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				 D40_CHAN_POS(d40c->phy_chan->num);

		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	if (chan_is_logical(d40c))
		return __d40_execute_command_log(d40c, command);
	else
		return __d40_execute_command_phy(d40c, command);
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

		/* Clear LNK which will be used by d40_chan_has_events() */
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
	}
}

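/*
 * Residue is the remaining element count read back from the channel (lcsp2
 * for logical channels, SDELT for physical ones) scaled to bytes by the
 * destination data width.
 */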
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}

static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	pm_runtime_get_sync(d40c->base->dev);
	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	pm_runtime_get_sync(d40c->base->dev);

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&d40c->lock, flags);
	cookie = dma_cookie_assign(tx);
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		if (!d40c->busy) {
			d40c->busy = true;
			pm_runtime_get_sync(d40c->base->dev);
		}

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload it,
		 * but only when the list has completed. We need to check for
		 * done because the interrupt will hit for every link, and
		 * not just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL)
			d40c->busy = false;
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);

		d40_desc_remove(d40d);
		d40_desc_done(d40c, d40d);
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first entry from the done list */
	d40d = d40_first_done(d40c);
	if (d40d == NULL) {
		/* Check if we have reached here for cyclic job */
		d40d = d40_first_active_get(d40c);
		if (d40d == NULL || !d40d->cyclic)
			goto err;
	}

	if (!d40d->cyclic)
		dma_cookie_complete(&d40d->txd);

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	int i;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;
	u32 regs[base->gen_dmac.il_size];
	struct d40_interrupt_lookup *il = base->gen_dmac.il;
	u32 il_size = base->gen_dmac.il_size;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < il_size; i++)
		regs[i] = readl(base->virtbase + il[i].src);

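	/*
	 * Scan the concatenated status registers as one long bitmap; each
	 * set bit identifies a channel with a pending interrupt.
	 */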
	for (;;) {
		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * il_size, chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * il_size)
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];

		if (!d40c) {
			/*
			 * No error because this can happen if something else
			 * in the system is using the channel.
			 */
			continue;
		}

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

1712 | static int d40_validate_conf(struct d40_chan *d40c, |
1713 | struct stedma40_chan_cfg *conf) |
1714 | { |
1715 | int res = 0; |
1716 | u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); |
1717 | u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); |
1718 | bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; |
1719 | |
1720 | if (!conf->dir) { |
1721 | chan_err(d40c, "Invalid direction.\n"); |
1722 | res = -EINVAL; |
1723 | } |
1724 | |
1725 | if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY && |
1726 | d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && |
1727 | d40c->runtime_addr == 0) { |
1728 | |
1729 | chan_err(d40c, "Invalid TX channel address (%d)\n", |
1730 | conf->dst_dev_type); |
1731 | res = -EINVAL; |
1732 | } |
1733 | |
1734 | if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && |
1735 | d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && |
1736 | d40c->runtime_addr == 0) { |
1737 | chan_err(d40c, "Invalid RX channel address (%d)\n", |
1738 | conf->src_dev_type); |
1739 | res = -EINVAL; |
1740 | } |
1741 | |
1742 | if (conf->dir == STEDMA40_MEM_TO_PERIPH && |
1743 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { |
1744 | chan_err(d40c, "Invalid dst\n"); |
1745 | res = -EINVAL; |
1746 | } |
1747 | |
1748 | if (conf->dir == STEDMA40_PERIPH_TO_MEM && |
1749 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { |
1750 | chan_err(d40c, "Invalid src\n"); |
1751 | res = -EINVAL; |
1752 | } |
1753 | |
1754 | if (src_event_group == STEDMA40_DEV_SRC_MEMORY && |
1755 | dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { |
1756 | chan_err(d40c, "No event line\n"); |
1757 | res = -EINVAL; |
1758 | } |
1759 | |
1760 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && |
1761 | (src_event_group != dst_event_group)) { |
1762 | chan_err(d40c, "Invalid event group\n"); |
1763 | res = -EINVAL; |
1764 | } |
1765 | |
1766 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) { |
1767 | /* |
* The DMAC hardware supports it, but it is not implemented in this
* driver. Support will be added if any DMA client requires it.
1770 | */ |
1771 | chan_err(d40c, "periph to periph not supported\n"); |
1772 | res = -EINVAL; |
1773 | } |
1774 | |
1775 | if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * |
1776 | (1 << conf->src_info.data_width) != |
1777 | d40_psize_2_burst_size(is_log, conf->dst_info.psize) * |
1778 | (1 << conf->dst_info.data_width)) { |
1779 | /* |
1780 | * The DMAC hardware only supports |
1781 | * src (burst x width) == dst (burst x width) |
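*
* For example, a 16-byte access may be an 8-beat burst of 16-bit
* halfwords on one side and a 4-beat burst of 32-bit words on the
* other; the byte products must be equal.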
1782 | */ |
1783 | |
1784 | chan_err(d40c, "src (burst x width) != dst (burst x width)\n"); |
1785 | res = -EINVAL; |
1786 | } |
1787 | |
1788 | return res; |
1789 | } |
1790 | |
1791 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, |
1792 | bool is_src, int log_event_line, bool is_log, |
1793 | bool *first_user) |
1794 | { |
1795 | unsigned long flags; |
1796 | spin_lock_irqsave(&phy->lock, flags); |
1797 | |
1798 | *first_user = ((phy->allocated_src | phy->allocated_dst) |
1799 | == D40_ALLOC_FREE); |
1800 | |
1801 | if (!is_log) { |
1802 | /* Physical interrupts are masked per physical full channel */ |
1803 | if (phy->allocated_src == D40_ALLOC_FREE && |
1804 | phy->allocated_dst == D40_ALLOC_FREE) { |
1805 | phy->allocated_dst = D40_ALLOC_PHY; |
1806 | phy->allocated_src = D40_ALLOC_PHY; |
1807 | goto found; |
1808 | } else |
1809 | goto not_found; |
1810 | } |
1811 | |
1812 | /* Logical channel */ |
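/*
 * Each event line claims one bit of the per-direction allocation word,
 * so several logical channels can share a physical channel as long as
 * their event lines differ.
 */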
1813 | if (is_src) { |
1814 | if (phy->allocated_src == D40_ALLOC_PHY) |
1815 | goto not_found; |
1816 | |
1817 | if (phy->allocated_src == D40_ALLOC_FREE) |
1818 | phy->allocated_src = D40_ALLOC_LOG_FREE; |
1819 | |
1820 | if (!(phy->allocated_src & (1 << log_event_line))) { |
1821 | phy->allocated_src |= 1 << log_event_line; |
1822 | goto found; |
1823 | } else |
1824 | goto not_found; |
1825 | } else { |
1826 | if (phy->allocated_dst == D40_ALLOC_PHY) |
1827 | goto not_found; |
1828 | |
1829 | if (phy->allocated_dst == D40_ALLOC_FREE) |
1830 | phy->allocated_dst = D40_ALLOC_LOG_FREE; |
1831 | |
1832 | if (!(phy->allocated_dst & (1 << log_event_line))) { |
1833 | phy->allocated_dst |= 1 << log_event_line; |
1834 | goto found; |
1835 | } else |
1836 | goto not_found; |
1837 | } |
1838 | |
1839 | not_found: |
1840 | spin_unlock_irqrestore(&phy->lock, flags); |
1841 | return false; |
1842 | found: |
1843 | spin_unlock_irqrestore(&phy->lock, flags); |
1844 | return true; |
1845 | } |
1846 | |
1847 | static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, |
1848 | int log_event_line) |
1849 | { |
1850 | unsigned long flags; |
1851 | bool is_free = false; |
1852 | |
1853 | spin_lock_irqsave(&phy->lock, flags); |
1854 | if (!log_event_line) { |
1855 | phy->allocated_dst = D40_ALLOC_FREE; |
1856 | phy->allocated_src = D40_ALLOC_FREE; |
1857 | is_free = true; |
1858 | goto out; |
1859 | } |
1860 | |
1861 | /* Logical channel */ |
1862 | if (is_src) { |
1863 | phy->allocated_src &= ~(1 << log_event_line); |
1864 | if (phy->allocated_src == D40_ALLOC_LOG_FREE) |
1865 | phy->allocated_src = D40_ALLOC_FREE; |
1866 | } else { |
1867 | phy->allocated_dst &= ~(1 << log_event_line); |
1868 | if (phy->allocated_dst == D40_ALLOC_LOG_FREE) |
1869 | phy->allocated_dst = D40_ALLOC_FREE; |
1870 | } |
1871 | |
1872 | is_free = ((phy->allocated_src | phy->allocated_dst) == |
1873 | D40_ALLOC_FREE); |
1874 | |
1875 | out: |
1876 | spin_unlock_irqrestore(&phy->lock, flags); |
1877 | |
1878 | return is_free; |
1879 | } |
1880 | |
1881 | static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) |
1882 | { |
1883 | int dev_type; |
1884 | int event_group; |
1885 | int event_line; |
1886 | struct d40_phy_res *phys; |
1887 | int i; |
1888 | int j; |
1889 | int log_num; |
1890 | int num_phy_chans; |
1891 | bool is_src; |
1892 | bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; |
1893 | |
1894 | phys = d40c->base->phy_res; |
1895 | num_phy_chans = d40c->base->num_phy_chans; |
1896 | |
1897 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
1898 | dev_type = d40c->dma_cfg.src_dev_type; |
1899 | log_num = 2 * dev_type; |
1900 | is_src = true; |
1901 | } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
1902 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
1903 | /* dst event lines are used for logical memcpy */ |
1904 | dev_type = d40c->dma_cfg.dst_dev_type; |
1905 | log_num = 2 * dev_type + 1; |
1906 | is_src = false; |
1907 | } else |
1908 | return -EINVAL; |
1909 | |
1910 | event_group = D40_TYPE_TO_GROUP(dev_type); |
1911 | event_line = D40_TYPE_TO_EVENT(dev_type); |
1912 | |
1913 | if (!is_log) { |
1914 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
1915 | /* Find physical half channel */ |
1916 | if (d40c->dma_cfg.use_fixed_channel) { |
1917 | i = d40c->dma_cfg.phy_channel; |
1918 | if (d40_alloc_mask_set(&phys[i], is_src, |
1919 | 0, is_log, |
1920 | first_phy_user)) |
1921 | goto found_phy; |
1922 | } else { |
1923 | for (i = 0; i < num_phy_chans; i++) { |
1924 | if (d40_alloc_mask_set(&phys[i], is_src, |
1925 | 0, is_log, |
1926 | first_phy_user)) |
1927 | goto found_phy; |
1928 | } |
1929 | } |
1930 | } else |
1931 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
1932 | int phy_num = j + event_group * 2; |
1933 | for (i = phy_num; i < phy_num + 2; i++) { |
1934 | if (d40_alloc_mask_set(&phys[i], |
1935 | is_src, |
1936 | 0, |
1937 | is_log, |
1938 | first_phy_user)) |
1939 | goto found_phy; |
1940 | } |
1941 | } |
1942 | return -EINVAL; |
1943 | found_phy: |
1944 | d40c->phy_chan = &phys[i]; |
1945 | d40c->log_num = D40_PHY_CHAN; |
1946 | goto out; |
1947 | } |
1948 | if (dev_type == -1) |
1949 | return -EINVAL; |
1950 | |
1951 | /* Find logical channel */ |
1952 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
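/* Every block of 8 physical channels holds one pair per event group. */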
1953 | int phy_num = j + event_group * 2; |
1954 | |
1955 | if (d40c->dma_cfg.use_fixed_channel) { |
1956 | i = d40c->dma_cfg.phy_channel; |
1957 | |
1958 | if ((i != phy_num) && (i != phy_num + 1)) { |
1959 | dev_err(chan2dev(d40c), |
1960 | "invalid fixed phy channel %d\n", i); |
1961 | return -EINVAL; |
1962 | } |
1963 | |
1964 | if (d40_alloc_mask_set(&phys[i], is_src, event_line, |
1965 | is_log, first_phy_user)) |
1966 | goto found_log; |
1967 | |
1968 | dev_err(chan2dev(d40c), |
1969 | "could not allocate fixed phy channel %d\n", i); |
1970 | return -EINVAL; |
1971 | } |
1972 | |
1973 | /* |
* Spread logical channels across all available physical channels
* rather than packing every logical channel onto the first available
* physical channel.
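* Source users scan the channel pair upwards while destination users
* scan it downwards, so the two directions tend to land on different
* physical channels.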
1977 | */ |
1978 | if (is_src) { |
1979 | for (i = phy_num; i < phy_num + 2; i++) { |
1980 | if (d40_alloc_mask_set(&phys[i], is_src, |
1981 | event_line, is_log, |
1982 | first_phy_user)) |
1983 | goto found_log; |
1984 | } |
1985 | } else { |
1986 | for (i = phy_num + 1; i >= phy_num; i--) { |
1987 | if (d40_alloc_mask_set(&phys[i], is_src, |
1988 | event_line, is_log, |
1989 | first_phy_user)) |
1990 | goto found_log; |
1991 | } |
1992 | } |
1993 | } |
1994 | return -EINVAL; |
1995 | |
1996 | found_log: |
1997 | d40c->phy_chan = &phys[i]; |
1998 | d40c->log_num = log_num; |
1999 | out: |
2000 | |
2001 | if (is_log) |
2002 | d40c->base->lookup_log_chans[d40c->log_num] = d40c; |
2003 | else |
2004 | d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; |
2005 | |
2006 | return 0; |
2007 | |
2008 | } |
2009 | |
2010 | static int d40_config_memcpy(struct d40_chan *d40c) |
2011 | { |
2012 | dma_cap_mask_t cap = d40c->chan.device->cap_mask; |
2013 | |
2014 | if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { |
2015 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; |
2016 | d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; |
2017 | d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> |
2018 | memcpy[d40c->chan.chan_id]; |
2019 | |
2020 | } else if (dma_has_cap(DMA_MEMCPY, cap) && |
2021 | dma_has_cap(DMA_SLAVE, cap)) { |
2022 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; |
2023 | } else { |
2024 | chan_err(d40c, "No memcpy\n"); |
2025 | return -EINVAL; |
2026 | } |
2027 | |
2028 | return 0; |
2029 | } |
2030 | |
2031 | static int d40_free_dma(struct d40_chan *d40c) |
2032 | { |
2034 | int res = 0; |
2035 | u32 event; |
2036 | struct d40_phy_res *phy = d40c->phy_chan; |
2037 | bool is_src; |
2038 | |
2039 | /* Terminate all queued and active transfers */ |
2040 | d40_term_all(d40c); |
2041 | |
2042 | if (phy == NULL) { |
2043 | chan_err(d40c, "phy == null\n"); |
2044 | return -EINVAL; |
2045 | } |
2046 | |
2047 | if (phy->allocated_src == D40_ALLOC_FREE && |
2048 | phy->allocated_dst == D40_ALLOC_FREE) { |
2049 | chan_err(d40c, "channel already free\n"); |
2050 | return -EINVAL; |
2051 | } |
2052 | |
2053 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
2054 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
2055 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
2056 | is_src = false; |
2057 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
2058 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
2059 | is_src = true; |
2060 | } else { |
2061 | chan_err(d40c, "Unknown direction\n"); |
2062 | return -EINVAL; |
2063 | } |
2064 | |
2065 | pm_runtime_get_sync(d40c->base->dev); |
2066 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); |
2067 | if (res) { |
2068 | chan_err(d40c, "stop failed\n"); |
2069 | goto out; |
2070 | } |
2071 | |
2072 | d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0); |
2073 | |
2074 | if (chan_is_logical(d40c)) |
2075 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; |
2076 | else |
2077 | d40c->base->lookup_phy_chans[phy->num] = NULL; |
2078 | |
2079 | if (d40c->busy) { |
2080 | pm_runtime_mark_last_busy(d40c->base->dev); |
2081 | pm_runtime_put_autosuspend(d40c->base->dev); |
2082 | } |
2083 | |
2084 | d40c->busy = false; |
2085 | d40c->phy_chan = NULL; |
2086 | d40c->configured = false; |
2087 | out: |
2088 | |
2089 | pm_runtime_mark_last_busy(d40c->base->dev); |
2090 | pm_runtime_put_autosuspend(d40c->base->dev); |
2091 | return res; |
2092 | } |
2093 | |
2094 | static bool d40_is_paused(struct d40_chan *d40c) |
2095 | { |
2096 | void __iomem *chanbase = chan_base(d40c); |
2097 | bool is_paused = false; |
2098 | unsigned long flags; |
2099 | void __iomem *active_reg; |
2100 | u32 status; |
2101 | u32 event; |
2102 | |
2103 | spin_lock_irqsave(&d40c->lock, flags); |
2104 | |
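/*
 * Physical channels expose their state in the 2-bit fields of the
 * ACTIVE/ACTIVO registers; logical channels are inspected through the
 * event line state in the SSLNK/SDLNK link registers instead.
 */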
2105 | if (chan_is_physical(d40c)) { |
2106 | if (d40c->phy_chan->num % 2 == 0) |
2107 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; |
2108 | else |
2109 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; |
2110 | |
2111 | status = (readl(active_reg) & |
2112 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> |
2113 | D40_CHAN_POS(d40c->phy_chan->num); |
2114 | if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) |
2115 | is_paused = true; |
2116 | |
2117 | goto _exit; |
2118 | } |
2119 | |
2120 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
2121 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
2122 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
2123 | status = readl(chanbase + D40_CHAN_REG_SDLNK); |
2124 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
2125 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
2126 | status = readl(chanbase + D40_CHAN_REG_SSLNK); |
2127 | } else { |
2128 | chan_err(d40c, "Unknown direction\n"); |
2129 | goto _exit; |
2130 | } |
2131 | |
2132 | status = (status & D40_EVENTLINE_MASK(event)) >> |
2133 | D40_EVENTLINE_POS(event); |
2134 | |
2135 | if (status != D40_DMA_RUN) |
2136 | is_paused = true; |
2137 | _exit: |
2138 | spin_unlock_irqrestore(&d40c->lock, flags); |
2139 | return is_paused; |
2141 | } |
2142 | |
2143 | static u32 stedma40_residue(struct dma_chan *chan) |
2144 | { |
2145 | struct d40_chan *d40c = |
2146 | container_of(chan, struct d40_chan, chan); |
2147 | u32 bytes_left; |
2148 | unsigned long flags; |
2149 | |
2150 | spin_lock_irqsave(&d40c->lock, flags); |
2151 | bytes_left = d40_residue(d40c); |
2152 | spin_unlock_irqrestore(&d40c->lock, flags); |
2153 | |
2154 | return bytes_left; |
2155 | } |
2156 | |
2157 | static int |
2158 | d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, |
2159 | struct scatterlist *sg_src, struct scatterlist *sg_dst, |
2160 | unsigned int sg_len, dma_addr_t src_dev_addr, |
2161 | dma_addr_t dst_dev_addr) |
2162 | { |
2163 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
2164 | struct stedma40_half_channel_info *src_info = &cfg->src_info; |
2165 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; |
2166 | int ret; |
2167 | |
2168 | ret = d40_log_sg_to_lli(sg_src, sg_len, |
2169 | src_dev_addr, |
2170 | desc->lli_log.src, |
2171 | chan->log_def.lcsp1, |
2172 | src_info->data_width, |
2173 | dst_info->data_width); |
2174 | |
2175 | ret = d40_log_sg_to_lli(sg_dst, sg_len, |
2176 | dst_dev_addr, |
2177 | desc->lli_log.dst, |
2178 | chan->log_def.lcsp3, |
2179 | dst_info->data_width, |
2180 | src_info->data_width); |
2181 | |
2182 | return ret < 0 ? ret : 0; |
2183 | } |
2184 | |
2185 | static int |
2186 | d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, |
2187 | struct scatterlist *sg_src, struct scatterlist *sg_dst, |
2188 | unsigned int sg_len, dma_addr_t src_dev_addr, |
2189 | dma_addr_t dst_dev_addr) |
2190 | { |
2191 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
2192 | struct stedma40_half_channel_info *src_info = &cfg->src_info; |
2193 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; |
2194 | unsigned long flags = 0; |
2195 | int ret; |
2196 | |
2197 | if (desc->cyclic) |
2198 | flags |= LLI_CYCLIC | LLI_TERM_INT; |
2199 | |
2200 | ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, |
2201 | desc->lli_phy.src, |
2202 | virt_to_phys(desc->lli_phy.src), |
2203 | chan->src_def_cfg, |
2204 | src_info, dst_info, flags); |
2205 | |
2206 | ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, |
2207 | desc->lli_phy.dst, |
2208 | virt_to_phys(desc->lli_phy.dst), |
2209 | chan->dst_def_cfg, |
2210 | dst_info, src_info, flags); |
2211 | |
2212 | dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, |
2213 | desc->lli_pool.size, DMA_TO_DEVICE); |
2214 | |
2215 | return ret < 0 ? ret : 0; |
2216 | } |
2217 | |
2218 | static struct d40_desc * |
2219 | d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, |
2220 | unsigned int sg_len, unsigned long dma_flags) |
2221 | { |
2222 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
2223 | struct d40_desc *desc; |
2224 | int ret; |
2225 | |
2226 | desc = d40_desc_get(chan); |
2227 | if (!desc) |
2228 | return NULL; |
2229 | |
2230 | desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, |
2231 | cfg->dst_info.data_width); |
2232 | if (desc->lli_len < 0) { |
2233 | chan_err(chan, "Unaligned size\n"); |
2234 | goto err; |
2235 | } |
2236 | |
2237 | ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); |
2238 | if (ret < 0) { |
2239 | chan_err(chan, "Could not allocate lli\n"); |
2240 | goto err; |
2241 | } |
2242 | |
2243 | desc->lli_current = 0; |
2244 | desc->txd.flags = dma_flags; |
2245 | desc->txd.tx_submit = d40_tx_submit; |
2246 | |
2247 | dma_async_tx_descriptor_init(&desc->txd, &chan->chan); |
2248 | |
2249 | return desc; |
2250 | |
2251 | err: |
2252 | d40_desc_free(chan, desc); |
2253 | return NULL; |
2254 | } |
2255 | |
2256 | static dma_addr_t |
2257 | d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) |
2258 | { |
2259 | struct stedma40_platform_data *plat = chan->base->plat_data; |
2260 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
2261 | dma_addr_t addr = 0; |
2262 | |
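/* An address set via runtime configuration overrides platform data. */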
2263 | if (chan->runtime_addr) |
2264 | return chan->runtime_addr; |
2265 | |
2266 | if (direction == DMA_DEV_TO_MEM) |
2267 | addr = plat->dev_rx[cfg->src_dev_type]; |
2268 | else if (direction == DMA_MEM_TO_DEV) |
2269 | addr = plat->dev_tx[cfg->dst_dev_type]; |
2270 | |
2271 | return addr; |
2272 | } |
2273 | |
2274 | static struct dma_async_tx_descriptor * |
2275 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, |
2276 | struct scatterlist *sg_dst, unsigned int sg_len, |
2277 | enum dma_transfer_direction direction, unsigned long dma_flags) |
2278 | { |
2279 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); |
2280 | dma_addr_t src_dev_addr = 0; |
2281 | dma_addr_t dst_dev_addr = 0; |
2282 | struct d40_desc *desc; |
2283 | unsigned long flags; |
2284 | int ret; |
2285 | |
2286 | if (!chan->phy_chan) { |
2287 | chan_err(chan, "Cannot prepare unallocated channel\n"); |
2288 | return NULL; |
2289 | } |
2290 | |
2291 | spin_lock_irqsave(&chan->lock, flags); |
2292 | |
2293 | desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); |
2294 | if (desc == NULL) |
2295 | goto err; |
2296 | |
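/* A scatterlist chained back onto its own head marks a cyclic job. */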
2297 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) |
2298 | desc->cyclic = true; |
2299 | |
2300 | if (direction != DMA_TRANS_NONE) { |
2301 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); |
2302 | |
2303 | if (direction == DMA_DEV_TO_MEM) |
2304 | src_dev_addr = dev_addr; |
2305 | else if (direction == DMA_MEM_TO_DEV) |
2306 | dst_dev_addr = dev_addr; |
2307 | } |
2308 | |
2309 | if (chan_is_logical(chan)) |
2310 | ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, |
2311 | sg_len, src_dev_addr, dst_dev_addr); |
2312 | else |
2313 | ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, |
2314 | sg_len, src_dev_addr, dst_dev_addr); |
2315 | |
2316 | if (ret) { |
2317 | chan_err(chan, "Failed to prepare %s sg job: %d\n", |
2318 | chan_is_logical(chan) ? "log" : "phy", ret); |
2319 | goto err; |
2320 | } |
2321 | |
2322 | /* |
* Add the descriptor to the prepare queue so that it can be freed
* later in terminate_all.
2325 | */ |
2326 | list_add_tail(&desc->node, &chan->prepare_queue); |
2327 | |
2328 | spin_unlock_irqrestore(&chan->lock, flags); |
2329 | |
2330 | return &desc->txd; |
2331 | |
2332 | err: |
2333 | if (desc) |
2334 | d40_desc_free(chan, desc); |
2335 | spin_unlock_irqrestore(&chan->lock, flags); |
2336 | return NULL; |
2337 | } |
2338 | |
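/*
 * stedma40_filter - channel filter for use with dma_request_channel().
 *
 * A minimal usage sketch, with illustrative values only (the cfg fields
 * depend on the client device):
 *
 *	struct stedma40_chan_cfg cfg = { .dir = STEDMA40_PERIPH_TO_MEM };
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *
 * Passing NULL as the filter parameter configures the channel for memcpy.
 */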
2339 | bool stedma40_filter(struct dma_chan *chan, void *data) |
2340 | { |
2341 | struct stedma40_chan_cfg *info = data; |
2342 | struct d40_chan *d40c = |
2343 | container_of(chan, struct d40_chan, chan); |
2344 | int err; |
2345 | |
2346 | if (data) { |
2347 | err = d40_validate_conf(d40c, info); |
2348 | if (!err) |
2349 | d40c->dma_cfg = *info; |
2350 | } else |
2351 | err = d40_config_memcpy(d40c); |
2352 | |
2353 | if (!err) |
2354 | d40c->configured = true; |
2355 | |
2356 | return err == 0; |
2357 | } |
2358 | EXPORT_SYMBOL(stedma40_filter); |
2359 | |
2360 | static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) |
2361 | { |
2362 | bool realtime = d40c->dma_cfg.realtime; |
2363 | bool highprio = d40c->dma_cfg.high_priority; |
2364 | u32 rtreg; |
2365 | u32 event = D40_TYPE_TO_EVENT(dev_type); |
2366 | u32 group = D40_TYPE_TO_GROUP(dev_type); |
2367 | u32 bit = 1 << event; |
2368 | u32 prioreg; |
2369 | struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; |
2370 | |
2371 | rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear; |
2372 | /* |
2373 | * Due to a hardware bug, in some cases a logical channel triggered by |
2374 | * a high priority destination event line can generate extra packet |
2375 | * transactions. |
2376 | * |
2377 | * The workaround is to not set the high priority level for the |
2378 | * destination event lines that trigger logical channels. |
2379 | */ |
2380 | if (!src && chan_is_logical(d40c)) |
2381 | highprio = false; |
2382 | |
2383 | prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear; |
2384 | |
2385 | /* Destination event lines are stored in the upper halfword */ |
2386 | if (!src) |
2387 | bit <<= 16; |
2388 | |
2389 | writel(bit, d40c->base->virtbase + prioreg + group * 4); |
2390 | writel(bit, d40c->base->virtbase + rtreg + group * 4); |
2391 | } |
2392 | |
2393 | static void d40_set_prio_realtime(struct d40_chan *d40c) |
2394 | { |
2395 | if (d40c->base->rev < 3) |
2396 | return; |
2397 | |
2398 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || |
2399 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) |
2400 | __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); |
2401 | |
2402 | if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || |
2403 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) |
2404 | __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); |
2405 | } |
2406 | |
2407 | /* DMA ENGINE functions */ |
2408 | static int d40_alloc_chan_resources(struct dma_chan *chan) |
2409 | { |
2410 | int err; |
2411 | unsigned long flags; |
2412 | struct d40_chan *d40c = |
2413 | container_of(chan, struct d40_chan, chan); |
2414 | bool is_free_phy; |
2415 | spin_lock_irqsave(&d40c->lock, flags); |
2416 | |
2417 | dma_cookie_init(chan); |
2418 | |
2419 | /* If no dma configuration is set use default configuration (memcpy) */ |
2420 | if (!d40c->configured) { |
2421 | err = d40_config_memcpy(d40c); |
2422 | if (err) { |
2423 | chan_err(d40c, "Failed to configure memcpy channel\n"); |
2424 | goto fail; |
2425 | } |
2426 | } |
2427 | |
2428 | err = d40_allocate_channel(d40c, &is_free_phy); |
2429 | if (err) { |
2430 | chan_err(d40c, "Failed to allocate channel\n"); |
2431 | d40c->configured = false; |
2432 | goto fail; |
2433 | } |
2434 | |
2435 | pm_runtime_get_sync(d40c->base->dev); |
2436 | /* Fill in basic CFG register values */ |
2437 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, |
2438 | &d40c->dst_def_cfg, chan_is_logical(d40c)); |
2439 | |
2440 | d40_set_prio_realtime(d40c); |
2441 | |
2442 | if (chan_is_logical(d40c)) { |
2443 | d40_log_cfg(&d40c->dma_cfg, |
2444 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); |
2445 | |
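/*
 * Each device type owns a slot in the logical channel parameter area;
 * the destination half sits at a fixed offset from the source half.
 */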
2446 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) |
2447 | d40c->lcpa = d40c->base->lcpa_base + |
2448 | d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; |
2449 | else |
2450 | d40c->lcpa = d40c->base->lcpa_base + |
2451 | d40c->dma_cfg.dst_dev_type * |
2452 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; |
2453 | } |
2454 | |
2455 | dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", |
2456 | chan_is_logical(d40c) ? "logical" : "physical", |
2457 | d40c->phy_chan->num, |
2458 | d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); |
2459 | |
2460 | |
2461 | /* |
2462 | * Only write channel configuration to the DMA if the physical |
2463 | * resource is free. In case of multiple logical channels |
2464 | * on the same physical resource, only the first write is necessary. |
2465 | */ |
2466 | if (is_free_phy) |
2467 | d40_config_write(d40c); |
2468 | fail: |
2469 | pm_runtime_mark_last_busy(d40c->base->dev); |
2470 | pm_runtime_put_autosuspend(d40c->base->dev); |
2471 | spin_unlock_irqrestore(&d40c->lock, flags); |
2472 | return err; |
2473 | } |
2474 | |
2475 | static void d40_free_chan_resources(struct dma_chan *chan) |
2476 | { |
2477 | struct d40_chan *d40c = |
2478 | container_of(chan, struct d40_chan, chan); |
2479 | int err; |
2480 | unsigned long flags; |
2481 | |
2482 | if (d40c->phy_chan == NULL) { |
2483 | chan_err(d40c, "Cannot free unallocated channel\n"); |
2484 | return; |
2485 | } |
2486 | |
2487 | spin_lock_irqsave(&d40c->lock, flags); |
2488 | |
2489 | err = d40_free_dma(d40c); |
2490 | |
2491 | if (err) |
2492 | chan_err(d40c, "Failed to free channel\n"); |
2493 | spin_unlock_irqrestore(&d40c->lock, flags); |
2494 | } |
2495 | |
2496 | static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, |
2497 | dma_addr_t dst, |
2498 | dma_addr_t src, |
2499 | size_t size, |
2500 | unsigned long dma_flags) |
2501 | { |
2502 | struct scatterlist dst_sg; |
2503 | struct scatterlist src_sg; |
2504 | |
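/* Wrap the buffers in one-entry scatterlists and reuse the sg path. */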
2505 | sg_init_table(&dst_sg, 1); |
2506 | sg_init_table(&src_sg, 1); |
2507 | |
2508 | sg_dma_address(&dst_sg) = dst; |
2509 | sg_dma_address(&src_sg) = src; |
2510 | |
2511 | sg_dma_len(&dst_sg) = size; |
2512 | sg_dma_len(&src_sg) = size; |
2513 | |
return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_TRANS_NONE, dma_flags);
2515 | } |
2516 | |
2517 | static struct dma_async_tx_descriptor * |
2518 | d40_prep_memcpy_sg(struct dma_chan *chan, |
2519 | struct scatterlist *dst_sg, unsigned int dst_nents, |
2520 | struct scatterlist *src_sg, unsigned int src_nents, |
2521 | unsigned long dma_flags) |
2522 | { |
2523 | if (dst_nents != src_nents) |
2524 | return NULL; |
2525 | |
return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_TRANS_NONE, dma_flags);
2527 | } |
2528 | |
2529 | static struct dma_async_tx_descriptor * |
2530 | d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
2531 | unsigned int sg_len, enum dma_transfer_direction direction, |
2532 | unsigned long dma_flags, void *context) |
2533 | { |
2534 | if (!is_slave_direction(direction)) |
2535 | return NULL; |
2536 | |
2537 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); |
2538 | } |
2539 | |
2540 | static struct dma_async_tx_descriptor * |
2541 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
2542 | size_t buf_len, size_t period_len, |
2543 | enum dma_transfer_direction direction, unsigned long flags, |
2544 | void *context) |
2545 | { |
2546 | unsigned int periods = buf_len / period_len; |
2547 | struct dma_async_tx_descriptor *txd; |
2548 | struct scatterlist *sg; |
2549 | int i; |
2550 | |
sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
if (!sg)
	return NULL;
2552 | for (i = 0; i < periods; i++) { |
2553 | sg_dma_address(&sg[i]) = dma_addr; |
2554 | sg_dma_len(&sg[i]) = period_len; |
2555 | dma_addr += period_len; |
2556 | } |
2557 | |
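/*
 * Chain the extra entry back onto the head of the list; d40_prep_sg()
 * detects the loop and marks the job as cyclic.
 */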
2558 | sg[periods].offset = 0; |
2559 | sg_dma_len(&sg[periods]) = 0; |
2560 | sg[periods].page_link = |
2561 | ((unsigned long)sg | 0x01) & ~0x02; |
2562 | |
2563 | txd = d40_prep_sg(chan, sg, sg, periods, direction, |
2564 | DMA_PREP_INTERRUPT); |
2565 | |
2566 | kfree(sg); |
2567 | |
2568 | return txd; |
2569 | } |
2570 | |
2571 | static enum dma_status d40_tx_status(struct dma_chan *chan, |
2572 | dma_cookie_t cookie, |
2573 | struct dma_tx_state *txstate) |
2574 | { |
2575 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2576 | enum dma_status ret; |
2577 | |
2578 | if (d40c->phy_chan == NULL) { |
2579 | chan_err(d40c, "Cannot read status of unallocated channel\n"); |
2580 | return -EINVAL; |
2581 | } |
2582 | |
2583 | ret = dma_cookie_status(chan, cookie, txstate); |
2584 | if (ret != DMA_SUCCESS) |
2585 | dma_set_residue(txstate, stedma40_residue(chan)); |
2586 | |
2587 | if (d40_is_paused(d40c)) |
2588 | ret = DMA_PAUSED; |
2589 | |
2590 | return ret; |
2591 | } |
2592 | |
2593 | static void d40_issue_pending(struct dma_chan *chan) |
2594 | { |
2595 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2596 | unsigned long flags; |
2597 | |
2598 | if (d40c->phy_chan == NULL) { |
2599 | chan_err(d40c, "Channel is not allocated!\n"); |
2600 | return; |
2601 | } |
2602 | |
2603 | spin_lock_irqsave(&d40c->lock, flags); |
2604 | |
2605 | list_splice_tail_init(&d40c->pending_queue, &d40c->queue); |
2606 | |
2607 | /* Busy means that queued jobs are already being processed */ |
2608 | if (!d40c->busy) |
2609 | (void) d40_queue_start(d40c); |
2610 | |
2611 | spin_unlock_irqrestore(&d40c->lock, flags); |
2612 | } |
2613 | |
2614 | static void d40_terminate_all(struct dma_chan *chan) |
2615 | { |
2616 | unsigned long flags; |
2617 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2618 | int ret; |
2619 | |
2620 | spin_lock_irqsave(&d40c->lock, flags); |
2621 | |
2622 | pm_runtime_get_sync(d40c->base->dev); |
2623 | ret = d40_channel_execute_command(d40c, D40_DMA_STOP); |
2624 | if (ret) |
2625 | chan_err(d40c, "Failed to stop channel\n"); |
2626 | |
2627 | d40_term_all(d40c); |
2628 | pm_runtime_mark_last_busy(d40c->base->dev); |
2629 | pm_runtime_put_autosuspend(d40c->base->dev); |
2630 | if (d40c->busy) { |
2631 | pm_runtime_mark_last_busy(d40c->base->dev); |
2632 | pm_runtime_put_autosuspend(d40c->base->dev); |
2633 | } |
2634 | d40c->busy = false; |
2635 | |
2636 | spin_unlock_irqrestore(&d40c->lock, flags); |
2637 | } |
2638 | |
2639 | static int |
2640 | dma40_config_to_halfchannel(struct d40_chan *d40c, |
2641 | struct stedma40_half_channel_info *info, |
2642 | enum dma_slave_buswidth width, |
2643 | u32 maxburst) |
2644 | { |
2645 | enum stedma40_periph_data_width addr_width; |
2646 | int psize; |
2647 | |
2648 | switch (width) { |
2649 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
2650 | addr_width = STEDMA40_BYTE_WIDTH; |
2651 | break; |
2652 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
2653 | addr_width = STEDMA40_HALFWORD_WIDTH; |
2654 | break; |
2655 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
2656 | addr_width = STEDMA40_WORD_WIDTH; |
2657 | break; |
2658 | case DMA_SLAVE_BUSWIDTH_8_BYTES: |
2659 | addr_width = STEDMA40_DOUBLEWORD_WIDTH; |
2660 | break; |
2661 | default: |
2662 | dev_err(d40c->base->dev, |
2663 | "illegal peripheral address width " |
2664 | "requested (%d)\n", |
2665 | width); |
2666 | return -EINVAL; |
2667 | } |
2668 | |
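/* Round the requested maxburst down to the nearest supported size. */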
2669 | if (chan_is_logical(d40c)) { |
2670 | if (maxburst >= 16) |
2671 | psize = STEDMA40_PSIZE_LOG_16; |
2672 | else if (maxburst >= 8) |
2673 | psize = STEDMA40_PSIZE_LOG_8; |
2674 | else if (maxburst >= 4) |
2675 | psize = STEDMA40_PSIZE_LOG_4; |
2676 | else |
2677 | psize = STEDMA40_PSIZE_LOG_1; |
2678 | } else { |
2679 | if (maxburst >= 16) |
2680 | psize = STEDMA40_PSIZE_PHY_16; |
2681 | else if (maxburst >= 8) |
2682 | psize = STEDMA40_PSIZE_PHY_8; |
2683 | else if (maxburst >= 4) |
2684 | psize = STEDMA40_PSIZE_PHY_4; |
2685 | else |
2686 | psize = STEDMA40_PSIZE_PHY_1; |
2687 | } |
2688 | |
2689 | info->data_width = addr_width; |
2690 | info->psize = psize; |
2691 | info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; |
2692 | |
2693 | return 0; |
2694 | } |
2695 | |
2696 | /* Runtime reconfiguration extension */ |
2697 | static int d40_set_runtime_config(struct dma_chan *chan, |
2698 | struct dma_slave_config *config) |
2699 | { |
2700 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2701 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; |
2702 | enum dma_slave_buswidth src_addr_width, dst_addr_width; |
2703 | dma_addr_t config_addr; |
2704 | u32 src_maxburst, dst_maxburst; |
2705 | int ret; |
2706 | |
2707 | src_addr_width = config->src_addr_width; |
2708 | src_maxburst = config->src_maxburst; |
2709 | dst_addr_width = config->dst_addr_width; |
2710 | dst_maxburst = config->dst_maxburst; |
2711 | |
2712 | if (config->direction == DMA_DEV_TO_MEM) { |
2713 | dma_addr_t dev_addr_rx = |
2714 | d40c->base->plat_data->dev_rx[cfg->src_dev_type]; |
2715 | |
2716 | config_addr = config->src_addr; |
2717 | if (dev_addr_rx) |
2718 | dev_dbg(d40c->base->dev, |
2719 | "channel has a pre-wired RX address %08x " |
2720 | "overriding with %08x\n", |
2721 | dev_addr_rx, config_addr); |
2722 | if (cfg->dir != STEDMA40_PERIPH_TO_MEM) |
2723 | dev_dbg(d40c->base->dev, |
2724 | "channel was not configured for peripheral " |
2725 | "to memory transfer (%d) overriding\n", |
2726 | cfg->dir); |
2727 | cfg->dir = STEDMA40_PERIPH_TO_MEM; |
2728 | |
2729 | /* Configure the memory side */ |
2730 | if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) |
2731 | dst_addr_width = src_addr_width; |
2732 | if (dst_maxburst == 0) |
2733 | dst_maxburst = src_maxburst; |
2734 | |
2735 | } else if (config->direction == DMA_MEM_TO_DEV) { |
2736 | dma_addr_t dev_addr_tx = |
2737 | d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; |
2738 | |
2739 | config_addr = config->dst_addr; |
2740 | if (dev_addr_tx) |
2741 | dev_dbg(d40c->base->dev, |
2742 | "channel has a pre-wired TX address %08x " |
2743 | "overriding with %08x\n", |
2744 | dev_addr_tx, config_addr); |
2745 | if (cfg->dir != STEDMA40_MEM_TO_PERIPH) |
2746 | dev_dbg(d40c->base->dev, |
2747 | "channel was not configured for memory " |
2748 | "to peripheral transfer (%d) overriding\n", |
2749 | cfg->dir); |
2750 | cfg->dir = STEDMA40_MEM_TO_PERIPH; |
2751 | |
2752 | /* Configure the memory side */ |
2753 | if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) |
2754 | src_addr_width = dst_addr_width; |
2755 | if (src_maxburst == 0) |
2756 | src_maxburst = dst_maxburst; |
2757 | } else { |
2758 | dev_err(d40c->base->dev, |
2759 | "unrecognized channel direction %d\n", |
2760 | config->direction); |
2761 | return -EINVAL; |
2762 | } |
2763 | |
2764 | if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { |
2765 | dev_err(d40c->base->dev, |
2766 | "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", |
2767 | src_maxburst, |
2768 | src_addr_width, |
2769 | dst_maxburst, |
2770 | dst_addr_width); |
2771 | return -EINVAL; |
2772 | } |
2773 | |
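/*
 * The hardware bursts at most 16 elements; clamp the larger side and
 * rescale the other so the byte count per burst stays equal.
 */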
2774 | if (src_maxburst > 16) { |
2775 | src_maxburst = 16; |
2776 | dst_maxburst = src_maxburst * src_addr_width / dst_addr_width; |
2777 | } else if (dst_maxburst > 16) { |
2778 | dst_maxburst = 16; |
2779 | src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; |
2780 | } |
2781 | |
2782 | ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, |
2783 | src_addr_width, |
2784 | src_maxburst); |
2785 | if (ret) |
2786 | return ret; |
2787 | |
2788 | ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, |
2789 | dst_addr_width, |
2790 | dst_maxburst); |
2791 | if (ret) |
2792 | return ret; |
2793 | |
2794 | /* Fill in register values */ |
2795 | if (chan_is_logical(d40c)) |
2796 | d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); |
2797 | else |
2798 | d40_phy_cfg(cfg, &d40c->src_def_cfg, |
2799 | &d40c->dst_def_cfg, false); |
2800 | |
2801 | /* These settings will take precedence later */ |
2802 | d40c->runtime_addr = config_addr; |
2803 | d40c->runtime_direction = config->direction; |
2804 | dev_dbg(d40c->base->dev, |
2805 | "configured channel %s for %s, data width %d/%d, " |
2806 | "maxburst %d/%d elements, LE, no flow control\n", |
2807 | dma_chan_name(chan), |
2808 | (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", |
2809 | src_addr_width, dst_addr_width, |
2810 | src_maxburst, dst_maxburst); |
2811 | |
2812 | return 0; |
2813 | } |
2814 | |
2815 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
2816 | unsigned long arg) |
2817 | { |
2818 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2819 | |
2820 | if (d40c->phy_chan == NULL) { |
2821 | chan_err(d40c, "Channel is not allocated!\n"); |
2822 | return -EINVAL; |
2823 | } |
2824 | |
2825 | switch (cmd) { |
2826 | case DMA_TERMINATE_ALL: |
2827 | d40_terminate_all(chan); |
2828 | return 0; |
2829 | case DMA_PAUSE: |
2830 | return d40_pause(d40c); |
2831 | case DMA_RESUME: |
2832 | return d40_resume(d40c); |
2833 | case DMA_SLAVE_CONFIG: |
2834 | return d40_set_runtime_config(chan, |
2835 | (struct dma_slave_config *) arg); |
2836 | default: |
2837 | break; |
2838 | } |
2839 | |
2840 | /* Other commands are unimplemented */ |
2841 | return -ENXIO; |
2842 | } |
2843 | |
2844 | /* Initialization functions */ |
2845 | |
2846 | static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, |
2847 | struct d40_chan *chans, int offset, |
2848 | int num_chans) |
2849 | { |
2850 | int i = 0; |
2851 | struct d40_chan *d40c; |
2852 | |
2853 | INIT_LIST_HEAD(&dma->channels); |
2854 | |
2855 | for (i = offset; i < offset + num_chans; i++) { |
2856 | d40c = &chans[i]; |
2857 | d40c->base = base; |
2858 | d40c->chan.device = dma; |
2859 | |
2860 | spin_lock_init(&d40c->lock); |
2861 | |
2862 | d40c->log_num = D40_PHY_CHAN; |
2863 | |
2864 | INIT_LIST_HEAD(&d40c->done); |
2865 | INIT_LIST_HEAD(&d40c->active); |
2866 | INIT_LIST_HEAD(&d40c->queue); |
2867 | INIT_LIST_HEAD(&d40c->pending_queue); |
2868 | INIT_LIST_HEAD(&d40c->client); |
2869 | INIT_LIST_HEAD(&d40c->prepare_queue); |
2870 | |
2871 | tasklet_init(&d40c->tasklet, dma_tasklet, |
2872 | (unsigned long) d40c); |
2873 | |
2874 | list_add_tail(&d40c->chan.device_node, |
2875 | &dma->channels); |
2876 | } |
2877 | } |
2878 | |
2879 | static void d40_ops_init(struct d40_base *base, struct dma_device *dev) |
2880 | { |
2881 | if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) |
2882 | dev->device_prep_slave_sg = d40_prep_slave_sg; |
2883 | |
2884 | if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { |
2885 | dev->device_prep_dma_memcpy = d40_prep_memcpy; |
2886 | |
2887 | /* |
* This controller can only access addresses at even
* 32-bit boundaries, i.e. 2^2 alignment.
2890 | */ |
2891 | dev->copy_align = 2; |
2892 | } |
2893 | |
2894 | if (dma_has_cap(DMA_SG, dev->cap_mask)) |
2895 | dev->device_prep_dma_sg = d40_prep_memcpy_sg; |
2896 | |
2897 | if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) |
2898 | dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; |
2899 | |
2900 | dev->device_alloc_chan_resources = d40_alloc_chan_resources; |
2901 | dev->device_free_chan_resources = d40_free_chan_resources; |
2902 | dev->device_issue_pending = d40_issue_pending; |
2903 | dev->device_tx_status = d40_tx_status; |
2904 | dev->device_control = d40_control; |
2905 | dev->dev = base->dev; |
2906 | } |
2907 | |
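/*
 * Three dma_device instances are registered: a slave-capable one and a
 * memcpy-only one, both backed by logical channels, plus one backed by
 * the reserved physical channels that is capable of both.
 */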
2908 | static int __init d40_dmaengine_init(struct d40_base *base, |
2909 | int num_reserved_chans) |
2910 | { |
int err;
2912 | |
2913 | d40_chan_init(base, &base->dma_slave, base->log_chans, |
2914 | 0, base->num_log_chans); |
2915 | |
2916 | dma_cap_zero(base->dma_slave.cap_mask); |
2917 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); |
2918 | dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); |
2919 | |
2920 | d40_ops_init(base, &base->dma_slave); |
2921 | |
2922 | err = dma_async_device_register(&base->dma_slave); |
2923 | |
2924 | if (err) { |
2925 | d40_err(base->dev, "Failed to register slave channels\n"); |
2926 | goto failure1; |
2927 | } |
2928 | |
2929 | d40_chan_init(base, &base->dma_memcpy, base->log_chans, |
2930 | base->num_log_chans, base->plat_data->memcpy_len); |
2931 | |
2932 | dma_cap_zero(base->dma_memcpy.cap_mask); |
2933 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); |
2934 | dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); |
2935 | |
2936 | d40_ops_init(base, &base->dma_memcpy); |
2937 | |
2938 | err = dma_async_device_register(&base->dma_memcpy); |
2939 | |
2940 | if (err) { |
2941 | d40_err(base->dev, |
2942 | "Failed to regsiter memcpy only channels\n"); |
2943 | goto failure2; |
2944 | } |
2945 | |
2946 | d40_chan_init(base, &base->dma_both, base->phy_chans, |
2947 | 0, num_reserved_chans); |
2948 | |
2949 | dma_cap_zero(base->dma_both.cap_mask); |
2950 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); |
2951 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); |
2952 | dma_cap_set(DMA_SG, base->dma_both.cap_mask); |
dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2954 | |
2955 | d40_ops_init(base, &base->dma_both); |
2956 | err = dma_async_device_register(&base->dma_both); |
2957 | |
2958 | if (err) { |
2959 | d40_err(base->dev, |
2960 | "Failed to register logical and physical capable channels\n"); |
2961 | goto failure3; |
2962 | } |
2963 | return 0; |
2964 | failure3: |
2965 | dma_async_device_unregister(&base->dma_memcpy); |
2966 | failure2: |
2967 | dma_async_device_unregister(&base->dma_slave); |
2968 | failure1: |
2969 | return err; |
2970 | } |
2971 | |
2972 | /* Suspend resume functionality */ |
2973 | #ifdef CONFIG_PM |
2974 | static int dma40_pm_suspend(struct device *dev) |
2975 | { |
2976 | struct platform_device *pdev = to_platform_device(dev); |
2977 | struct d40_base *base = platform_get_drvdata(pdev); |
2978 | int ret = 0; |
2979 | |
2980 | if (base->lcpa_regulator) |
2981 | ret = regulator_disable(base->lcpa_regulator); |
2982 | return ret; |
2983 | } |
2984 | |
2985 | static int dma40_runtime_suspend(struct device *dev) |
2986 | { |
2987 | struct platform_device *pdev = to_platform_device(dev); |
2988 | struct d40_base *base = platform_get_drvdata(pdev); |
2989 | |
2990 | d40_save_restore_registers(base, true); |
2991 | |
2992 | /* Don't disable/enable clocks for v1 due to HW bugs */ |
2993 | if (base->rev != 1) |
2994 | writel_relaxed(base->gcc_pwr_off_mask, |
2995 | base->virtbase + D40_DREG_GCC); |
2996 | |
2997 | return 0; |
2998 | } |
2999 | |
3000 | static int dma40_runtime_resume(struct device *dev) |
3001 | { |
3002 | struct platform_device *pdev = to_platform_device(dev); |
3003 | struct d40_base *base = platform_get_drvdata(pdev); |
3004 | |
3005 | if (base->initialized) |
3006 | d40_save_restore_registers(base, false); |
3007 | |
3008 | writel_relaxed(D40_DREG_GCC_ENABLE_ALL, |
3009 | base->virtbase + D40_DREG_GCC); |
3010 | return 0; |
3011 | } |
3012 | |
3013 | static int dma40_resume(struct device *dev) |
3014 | { |
3015 | struct platform_device *pdev = to_platform_device(dev); |
3016 | struct d40_base *base = platform_get_drvdata(pdev); |
3017 | int ret = 0; |
3018 | |
3019 | if (base->lcpa_regulator) |
3020 | ret = regulator_enable(base->lcpa_regulator); |
3021 | |
3022 | return ret; |
3023 | } |
3024 | |
3025 | static const struct dev_pm_ops dma40_pm_ops = { |
3026 | .suspend = dma40_pm_suspend, |
3027 | .runtime_suspend = dma40_runtime_suspend, |
3028 | .runtime_resume = dma40_runtime_resume, |
3029 | .resume = dma40_resume, |
3030 | }; |
3031 | #define DMA40_PM_OPS (&dma40_pm_ops) |
3032 | #else |
3033 | #define DMA40_PM_OPS NULL |
3034 | #endif |
3035 | |
3036 | /* Initialization functions. */ |
3037 | |
3038 | static int __init d40_phy_res_init(struct d40_base *base) |
3039 | { |
3040 | int i; |
3041 | int num_phy_chans_avail = 0; |
3042 | u32 val[2]; |
3043 | int odd_even_bit = -2; |
3044 | int gcc = D40_DREG_GCC_ENA; |
3045 | |
3046 | val[0] = readl(base->virtbase + D40_DREG_PRSME); |
3047 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); |
3048 | |
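/*
 * Each physical channel has a 2-bit field: even-numbered channels in
 * PRSME, odd-numbered ones in PRSMO. A field value of 1 marks the
 * channel as secure-only.
 */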
3049 | for (i = 0; i < base->num_phy_chans; i++) { |
3050 | base->phy_res[i].num = i; |
3051 | odd_even_bit += 2 * ((i % 2) == 0); |
3052 | if (((val[i % 2] >> odd_even_bit) & 3) == 1) { |
3053 | /* Mark security only channels as occupied */ |
3054 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; |
3055 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; |
3056 | base->phy_res[i].reserved = true; |
3057 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), |
3058 | D40_DREG_GCC_SRC); |
3059 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), |
3060 | D40_DREG_GCC_DST); |
3061 | |
3062 | |
3063 | } else { |
3064 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; |
3065 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; |
3066 | base->phy_res[i].reserved = false; |
3067 | num_phy_chans_avail++; |
3068 | } |
3069 | spin_lock_init(&base->phy_res[i].lock); |
3070 | } |
3071 | |
3072 | /* Mark disabled channels as occupied */ |
3073 | for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { |
3074 | int chan = base->plat_data->disabled_channels[i]; |
3075 | |
3076 | base->phy_res[chan].allocated_src = D40_ALLOC_PHY; |
3077 | base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; |
3078 | base->phy_res[chan].reserved = true; |
3079 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), |
3080 | D40_DREG_GCC_SRC); |
3081 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), |
3082 | D40_DREG_GCC_DST); |
3083 | num_phy_chans_avail--; |
3084 | } |
3085 | |
3086 | /* Mark soft_lli channels */ |
3087 | for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) { |
3088 | int chan = base->plat_data->soft_lli_chans[i]; |
3089 | |
3090 | base->phy_res[chan].use_soft_lli = true; |
3091 | } |
3092 | |
3093 | dev_info(base->dev, "%d of %d physical DMA channels available\n", |
3094 | num_phy_chans_avail, base->num_phy_chans); |
3095 | |
3096 | /* Verify settings extended vs standard */ |
3097 | val[0] = readl(base->virtbase + D40_DREG_PRTYP); |
3098 | |
3099 | for (i = 0; i < base->num_phy_chans; i++) { |
3100 | |
3101 | if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && |
3102 | (val[0] & 0x3) != 1) |
3103 | dev_info(base->dev, |
3104 | "[%s] INFO: channel %d is misconfigured (%d)\n", |
3105 | __func__, i, val[0] & 0x3); |
3106 | |
3107 | val[0] = val[0] >> 2; |
3108 | } |
3109 | |
3110 | /* |
* To keep things simple, enable all clocks initially.
* The clocks will be managed later, after channel allocation.
* The clocks for the event lines on which reserved channels exist
3114 | * are not managed here. |
3115 | */ |
3116 | writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); |
3117 | base->gcc_pwr_off_mask = gcc; |
3118 | |
3119 | return num_phy_chans_avail; |
3120 | } |
3121 | |
3122 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) |
3123 | { |
3124 | struct stedma40_platform_data *plat_data; |
3125 | struct clk *clk = NULL; |
3126 | void __iomem *virtbase = NULL; |
3127 | struct resource *res = NULL; |
3128 | struct d40_base *base = NULL; |
3129 | int num_log_chans = 0; |
3130 | int num_phy_chans; |
3131 | int clk_ret = -EINVAL; |
3132 | int i; |
3133 | u32 pid; |
3134 | u32 cid; |
3135 | u8 rev; |
3136 | |
3137 | clk = clk_get(&pdev->dev, NULL); |
3138 | if (IS_ERR(clk)) { |
3139 | d40_err(&pdev->dev, "No matching clock found\n"); |
3140 | goto failure; |
3141 | } |
3142 | |
3143 | clk_ret = clk_prepare_enable(clk); |
3144 | if (clk_ret) { |
3145 | d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); |
3146 | goto failure; |
3147 | } |
3148 | |
3149 | /* Get IO for DMAC base address */ |
3150 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); |
3151 | if (!res) |
3152 | goto failure; |
3153 | |
3154 | if (request_mem_region(res->start, resource_size(res), |
3155 | D40_NAME " I/O base") == NULL) |
3156 | goto failure; |
3157 | |
3158 | virtbase = ioremap(res->start, resource_size(res)); |
3159 | if (!virtbase) |
3160 | goto failure; |
3161 | |
3162 | /* This is just a regular AMBA PrimeCell ID actually */ |
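/*
 * The ID bytes live at the top of the I/O region, one byte in the low
 * bits of each of four consecutive 32-bit words.
 */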
3163 | for (pid = 0, i = 0; i < 4; i++) |
3164 | pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) |
3165 | & 255) << (i * 8); |
3166 | for (cid = 0, i = 0; i < 4; i++) |
3167 | cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) |
3168 | & 255) << (i * 8); |
3169 | |
3170 | if (cid != AMBA_CID) { |
3171 | d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); |
3172 | goto failure; |
3173 | } |
3174 | if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { |
3175 | d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", |
3176 | AMBA_MANF_BITS(pid), |
3177 | AMBA_VENDOR_ST); |
3178 | goto failure; |
3179 | } |
3180 | /* |
3181 | * HW revision: |
3182 | * DB8500ed has revision 0 |
3183 | * ? has revision 1 |
3184 | * DB8500v1 has revision 2 |
3185 | * DB8500v2 has revision 3 |
3186 | * AP9540v1 has revision 4 |
3187 | * DB8540v1 has revision 4 |
3188 | */ |
3189 | rev = AMBA_REV_BITS(pid); |
3190 | |
3191 | plat_data = pdev->dev.platform_data; |
3192 | |
3193 | /* The number of physical channels on this HW */ |
3194 | if (plat_data->num_of_phy_chans) |
3195 | num_phy_chans = plat_data->num_of_phy_chans; |
3196 | else |
3197 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; |
3198 | |
3199 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n", |
3200 | rev, res->start, num_phy_chans); |
3201 | |
3202 | if (rev < 2) { |
3203 | d40_err(&pdev->dev, "hardware revision: %d is not supported", |
3204 | rev); |
3205 | goto failure; |
3206 | } |
3207 | |
3208 | /* Count the number of logical channels in use */ |
3209 | for (i = 0; i < plat_data->dev_len; i++) |
3210 | if (plat_data->dev_rx[i] != 0) |
3211 | num_log_chans++; |
3212 | |
3213 | for (i = 0; i < plat_data->dev_len; i++) |
3214 | if (plat_data->dev_tx[i] != 0) |
3215 | num_log_chans++; |
3216 | |
3217 | base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + |
3218 | (num_phy_chans + num_log_chans + plat_data->memcpy_len) * |
3219 | sizeof(struct d40_chan), GFP_KERNEL); |
3220 | |
3221 | if (base == NULL) { |
3222 | d40_err(&pdev->dev, "Out of memory\n"); |
3223 | goto failure; |
3224 | } |
3225 | |
3226 | base->rev = rev; |
3227 | base->clk = clk; |
3228 | base->num_phy_chans = num_phy_chans; |
3229 | base->num_log_chans = num_log_chans; |
3230 | base->phy_start = res->start; |
3231 | base->phy_size = resource_size(res); |
3232 | base->virtbase = virtbase; |
3233 | base->plat_data = plat_data; |
3234 | base->dev = &pdev->dev; |
3235 | base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); |
3236 | base->log_chans = &base->phy_chans[num_phy_chans]; |
3237 | |
3238 | if (base->plat_data->num_of_phy_chans == 14) { |
3239 | base->gen_dmac.backup = d40_backup_regs_v4b; |
3240 | base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B; |
3241 | base->gen_dmac.interrupt_en = D40_DREG_CPCMIS; |
3242 | base->gen_dmac.interrupt_clear = D40_DREG_CPCICR; |
3243 | base->gen_dmac.realtime_en = D40_DREG_CRSEG1; |
3244 | base->gen_dmac.realtime_clear = D40_DREG_CRCEG1; |
3245 | base->gen_dmac.high_prio_en = D40_DREG_CPSEG1; |
3246 | base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1; |
3247 | base->gen_dmac.il = il_v4b; |
3248 | base->gen_dmac.il_size = ARRAY_SIZE(il_v4b); |
3249 | base->gen_dmac.init_reg = dma_init_reg_v4b; |
3250 | base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b); |
3251 | } else { |
3252 | if (base->rev >= 3) { |
3253 | base->gen_dmac.backup = d40_backup_regs_v4a; |
3254 | base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A; |
3255 | } |
3256 | base->gen_dmac.interrupt_en = D40_DREG_PCMIS; |
3257 | base->gen_dmac.interrupt_clear = D40_DREG_PCICR; |
3258 | base->gen_dmac.realtime_en = D40_DREG_RSEG1; |
3259 | base->gen_dmac.realtime_clear = D40_DREG_RCEG1; |
3260 | base->gen_dmac.high_prio_en = D40_DREG_PSEG1; |
3261 | base->gen_dmac.high_prio_clear = D40_DREG_PCEG1; |
3262 | base->gen_dmac.il = il_v4a; |
3263 | base->gen_dmac.il_size = ARRAY_SIZE(il_v4a); |
3264 | base->gen_dmac.init_reg = dma_init_reg_v4a; |
3265 | base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a); |
3266 | } |
3267 | |
3268 | base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), |
3269 | GFP_KERNEL); |
3270 | if (!base->phy_res) |
3271 | goto failure; |
3272 | |
3273 | base->lookup_phy_chans = kzalloc(num_phy_chans * |
3274 | sizeof(struct d40_chan *), |
3275 | GFP_KERNEL); |
3276 | if (!base->lookup_phy_chans) |
3277 | goto failure; |
3278 | |
3279 | if (num_log_chans + plat_data->memcpy_len) { |
3280 | /* |
* The maximum number of logical channels equals the number of event
* lines for all src and dst devices combined.
3283 | */ |
3284 | base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * |
3285 | sizeof(struct d40_chan *), |
3286 | GFP_KERNEL); |
3287 | if (!base->lookup_log_chans) |
3288 | goto failure; |
3289 | } |
3290 | |
3291 | base->reg_val_backup_chan = kmalloc(base->num_phy_chans * |
3292 | sizeof(d40_backup_regs_chan), |
3293 | GFP_KERNEL); |
3294 | if (!base->reg_val_backup_chan) |
3295 | goto failure; |
3296 | |
3297 | base->lcla_pool.alloc_map = |
3298 | kzalloc(num_phy_chans * sizeof(struct d40_desc *) |
3299 | * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); |
3300 | if (!base->lcla_pool.alloc_map) |
3301 | goto failure; |
3302 | |
3303 | base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), |
3304 | 0, SLAB_HWCACHE_ALIGN, |
3305 | NULL); |
3306 | if (base->desc_slab == NULL) |
3307 | goto failure; |
3308 | |
3309 | return base; |
3310 | |
3311 | failure: |
3312 | if (!clk_ret) |
3313 | clk_disable_unprepare(clk); |
3314 | if (!IS_ERR(clk)) |
3315 | clk_put(clk); |
if (virtbase)
	iounmap(virtbase);
if (res)
	release_mem_region(res->start,
			   resource_size(res));
3323 | |
3324 | if (base) { |
3325 | kfree(base->lcla_pool.alloc_map); |
3326 | kfree(base->reg_val_backup_chan); |
3327 | kfree(base->lookup_log_chans); |
3328 | kfree(base->lookup_phy_chans); |
3329 | kfree(base->phy_res); |
3330 | kfree(base); |
3331 | } |
3332 | |
3333 | return NULL; |
3334 | } |
3335 | |
3336 | static void __init d40_hw_init(struct d40_base *base) |
3337 | { |
3339 | int i; |
3340 | u32 prmseo[2] = {0, 0}; |
3341 | u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; |
3342 | u32 pcmis = 0; |
3343 | u32 pcicr = 0; |
3344 | struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg; |
3345 | u32 reg_size = base->gen_dmac.init_reg_size; |
3346 | |
3347 | for (i = 0; i < reg_size; i++) |
3348 | writel(dma_init_reg[i].val, |
3349 | base->virtbase + dma_init_reg[i].reg); |
3350 | |
3351 | /* Configure all our dma channels to default settings */ |
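/*
 * Channels are walked from the highest number down while the 2-bit
 * fields are shifted up, so the last iteration ends up in the lowest
 * field of each register.
 */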
3352 | for (i = 0; i < base->num_phy_chans; i++) { |
3353 | |
3354 | activeo[i % 2] = activeo[i % 2] << 2; |
3355 | |
3356 | if (base->phy_res[base->num_phy_chans - i - 1].allocated_src |
3357 | == D40_ALLOC_PHY) { |
3358 | activeo[i % 2] |= 3; |
3359 | continue; |
3360 | } |
3361 | |
3362 | /* Enable interrupt # */ |
3363 | pcmis = (pcmis << 1) | 1; |
3364 | |
3365 | /* Clear interrupt # */ |
3366 | pcicr = (pcicr << 1) | 1; |
3367 | |
3368 | /* Set channel to physical mode */ |
3369 | prmseo[i % 2] = prmseo[i % 2] << 2; |
3370 | prmseo[i % 2] |= 1; |
3371 | |
3372 | } |
3373 | |
3374 | writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); |
3375 | writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); |
3376 | writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); |
3377 | writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); |
3378 | |
3379 | /* Write which interrupt to enable */ |
3380 | writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en); |
3381 | |
3382 | /* Write which interrupt to clear */ |
3383 | writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear); |
3384 | |
3385 | /* These are __initdata and cannot be accessed after init */ |
3386 | base->gen_dmac.init_reg = NULL; |
3387 | base->gen_dmac.init_reg_size = 0; |
3388 | } |
3389 | |
3390 | static int __init d40_lcla_allocate(struct d40_base *base) |
3391 | { |
3392 | struct d40_lcla_pool *pool = &base->lcla_pool; |
3393 | unsigned long *page_list; |
3394 | int i, j; |
3395 | int ret = 0; |
3396 | |
3397 | /* |
* This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
* To fulfill this hardware requirement without wasting 256 KB, we
* allocate pages until we get an aligned one.
3401 | */ |
3402 | page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, |
3403 | GFP_KERNEL); |
3404 | |
3405 | if (!page_list) { |
3406 | ret = -ENOMEM; |
3407 | goto failure; |
3408 | } |
3409 | |
/* Calculate how many pages are required */
3411 | base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; |

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {
			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	/* Free all the attempts that did not meet the alignment */
	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success at finding the
		 * correct alignment, fall back to allocating one big
		 * buffer, over-sized by LCLA_ALIGNMENT bytes so that
		 * PTR_ALIGN() can always round up to an aligned address
		 * within it.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	/* The pool is written by the CPU and fetched by the DMAC */
	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}

static int __init d40_probe(struct platform_device *pdev)
{
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);
	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for the logical channel parameter address (LCPA) */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev,
			"Failed to request LCPA region 0x%x-0x%x\n",
			res->start, res->end);
		goto failure;
	}

	/*
	 * The LCPA lives in ESRAM. If the boot code has already
	 * programmed an LCPA base address, keep it and warn on a
	 * mismatch; otherwise program our own.
	 */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else {
		writel(res->start, base->virtbase + D40_DREG_LCPA);
	}

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}
	/* If the LCLA is located in ESRAM we don't need to allocate it */
	if (base->plat_data->use_esram_lcla) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "lcla_esram");
		if (!res) {
			ret = -ENOENT;
			d40_err(&pdev->dev,
				"No \"lcla_esram\" memory resource\n");
			goto failure;
		}
		base->lcla_pool.base = ioremap(res->start,
					       resource_size(res));
		if (!base->lcla_pool.base) {
			ret = -ENOMEM;
			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
			goto failure;
		}
		writel(res->start, base->virtbase + D40_DREG_LCLA);
	} else {
		ret = d40_lcla_allocate(base);
		if (ret) {
			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
			goto failure;
		}
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);
	if (base->irq < 0) {
		ret = base->irq;
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "Failed to request IRQ %d\n", base->irq);
		goto failure;
	}
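
	/*
	 * Enable runtime PM with autosuspend: the controller may be
	 * powered down DMA40_AUTOSUSPEND_DELAY ms after its last user
	 * drops the runtime PM reference, and is resumed on demand.
	 * pm_runtime_irq_safe() marks the callbacks as callable from
	 * interrupt context.
	 */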
	pm_runtime_irq_safe(base->dev);
	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(base->dev);
	pm_runtime_enable(base->dev);
	pm_runtime_resume(base->dev);

	if (base->plat_data->use_esram_lcla) {
		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
		if (IS_ERR(base->lcpa_regulator)) {
			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
			ret = PTR_ERR(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto failure;
		}

		ret = regulator_enable(base->lcpa_regulator);
		if (ret) {
			d40_err(&pdev->dev,
				"Failed to enable lcpa_regulator\n");
			regulator_put(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto failure;
		}
	}

	base->initialized = true;

	ret = d40_dmaengine_init(base, num_reserved_chans);
	if (ret)
		goto failure;

	base->dev->dma_parms = &base->dma_parms;
	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
	if (ret) {
		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
		goto failure;
	}

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	/* Unwind in the reverse order of the setup above */
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
			iounmap(base->lcla_pool.base);
			base->lcla_pool.base = NULL;
		}

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable_unprepare(base->clk);
			clk_put(base->clk);
		}

		if (base->lcpa_regulator) {
			regulator_disable(base->lcpa_regulator);
			regulator_put(base->lcpa_regulator);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
		.pm = DMA40_PM_OPS,
	},
};
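
/*
 * Register at subsys_initcall time so that the DMA engine is up before
 * the client drivers that depend on it are probed.
 */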
static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);