/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 * Dan Williams <dan.j.williams@intel.com>
 *
 * with architecture considerations by:
 * Neil Brown <neilb@suse.de>
 * Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>

#ifdef CONFIG_DMA_ENGINE
static int __init async_tx_init(void)
{
	async_dmaengine_get();

	printk(KERN_INFO "async_tx: api initialized (async)\n");

	return 0;
}

static void __exit async_tx_exit(void)
{
	async_dmaengine_put();
}

module_init(async_tx_init);
module_exit(async_tx_exit);

/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *	the transaction execute synchronously
 * @submit: transaction dependency and submission modifiers
 * @tx_type: transaction type
 */
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
			enum dma_transaction_type tx_type)
{
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	/* see if we can keep the chain on one channel */
	if (depend_tx &&
	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
		return depend_tx->chan;
	return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#endif
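
/*
 * Typical use (a sketch modeled on the async_memcpy() path, not code from
 * this file): callers look up a capable channel and fall back to a
 * synchronous implementation when none is found.
 *
 *	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
 *						      &dest, 1, &src, 1, len);
 *	struct dma_device *device = chan ? chan->device : NULL;
 *
 *	if (device) {
 *		... prep and submit a DMA descriptor ...
 *	} else {
 *		... perform the operation synchronously ...
 *	}
 */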
/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = depend_tx->chan;
	struct dma_device *device = chan->device;
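	/* ~0 is a sentinel, not a descriptor: it is cleared to NULL below
	 * once tx is chained to depend_tx directly, and otherwise replaced
	 * by the interrupt descriptor used to bridge the two channels
	 */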
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	txd_lock(depend_tx);
	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
		txd_chain(depend_tx, tx);
		intr_tx = NULL;
	}
	txd_unlock(depend_tx);

	/* attached dependency, flush the parent channel */
	if (!intr_tx) {
		device->device_issue_pending(chan);
		return;
	}

	/* see if we can schedule an interrupt,
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		/* safe to chain outside the lock since we know we are
		 * not submitted yet
		 */
		txd_chain(intr_tx, tx);

		/* check if we need to append */
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			txd_chain(depend_tx, intr_tx);
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		txd_unlock(depend_tx);

		if (intr_tx) {
			txd_clear_parent(intr_tx);
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
		device->device_issue_pending(chan);
	} else {
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}
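
/*
 * The three possible outcomes above, as descriptor chains (a sketch):
 *
 *	same channel:      depend_tx -> tx	(chained under txd_lock)
 *	channel switch:    depend_tx -> intr_tx -> tx, where intr_tx is a
 *			   DMA_INTERRUPT descriptor on depend_tx->chan whose
 *			   completion triggers submission of tx on tx->chan
 *	no DMA_INTERRUPT:  wait synchronously for depend_tx, then submit tx
 */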
/**
 * submit_disposition - flags for routing an incoming operation
 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
 *
 * while holding depend_tx->lock we must avoid submitting new operations
 * to prevent a circular locking dependency with drivers that already
 * hold a channel lock when calling async_tx_run_dependencies.
 */
enum submit_disposition {
	ASYNC_TX_SUBMITTED,
	ASYNC_TX_CHANNEL_SWITCH,
	ASYNC_TX_DIRECT_SUBMIT,
};
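
/*
 * Illustration of the inversion being avoided (a sketch, not code from
 * this file): a driver that calls async_tx_run_dependencies() with its
 * channel lock held takes txd_lock() under that lock; if we submitted
 * from under txd_lock(depend_tx) instead of deferring via the
 * dispositions above, tx_submit() could take the channel lock in the
 * opposite order:
 *
 *	driver completion path		async_tx_submit()
 *	----------------------		-----------------
 *	lock(chan)			txd_lock(depend_tx)
 *	txd_lock(depend_tx)		tx->tx_submit(tx) -> lock(chan)
 */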
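/**
 * async_tx_submit - set the dependency and callback fields of a descriptor
 *	and submit it for execution
 * @chan: channel selected to run @tx
 * @tx: descriptor returned by the driver's prep routine
 * @submit: submission and completion modifiers
 */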
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
		struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	tx->callback = submit->cb_fn;
	tx->callback_param = submit->cb_param;

	if (depend_tx) {
		enum submit_disposition s;

		/* sanity check the dependency chain:
		 * 1/ if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 * 2/ dependencies are 1:1, i.e. two transactions cannot
		 * depend on the same parent
		 */
		BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
		       txd_parent(tx));

		/* the lock prevents async_tx_run_dependencies from missing
		 * the setting of ->next when ->parent != NULL
		 */
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			/* we have a parent, so we cannot submit directly:
			 * if we are staying on the same channel: append
			 * else: channel switch
			 */
			if (depend_tx->chan == chan) {
				txd_chain(depend_tx, tx);
				s = ASYNC_TX_SUBMITTED;
			} else
				s = ASYNC_TX_CHANNEL_SWITCH;
		} else {
			/* we do not have a parent, so we may be able to submit
			 * directly if we are staying on the same channel
			 */
			if (depend_tx->chan == chan)
				s = ASYNC_TX_DIRECT_SUBMIT;
			else
				s = ASYNC_TX_CHANNEL_SWITCH;
		}
		txd_unlock(depend_tx);

		switch (s) {
		case ASYNC_TX_SUBMITTED:
			break;
		case ASYNC_TX_CHANNEL_SWITCH:
			async_tx_channel_switch(depend_tx, tx);
			break;
		case ASYNC_TX_DIRECT_SUBMIT:
			txd_clear_parent(tx);
			tx->tx_submit(tx);
			break;
		}
	} else {
		txd_clear_parent(tx);
		tx->tx_submit(tx);
	}

	if (submit->flags & ASYNC_TX_ACK)
		async_tx_ack(tx);

	if (depend_tx)
		async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
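
/*
 * Typical call sequence (a sketch of how the async_* operations drive
 * async_tx_submit(); modeled on the async_memcpy() path, with dma_dest,
 * dma_src, and dma_prep_flags as placeholder values):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
 *					    dma_prep_flags);
 *	if (tx)
 *		async_tx_submit(chan, tx, submit);
 */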

/**
 * async_trigger_callback - schedules the callback function to be run
 * @submit: submission and completion parameters
 *
 * honored flags: ASYNC_TX_ACK
 *
 * The callback is run after any dependent operations have completed.
 */
struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *tx;
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	if (depend_tx) {
		chan = depend_tx->chan;
		device = chan->device;

		/* see if we can schedule an interrupt,
		 * otherwise poll for completion
		 */
		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
			device = NULL;

		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
	} else
		tx = NULL;

	if (tx) {
		pr_debug("%s: (async)\n", __func__);

		async_tx_submit(chan, tx, submit);
	} else {
		pr_debug("%s: (sync)\n", __func__);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		async_tx_sync_epilog(submit);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
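
/*
 * Example (a sketch; done_fn and done_arg are hypothetical caller-supplied
 * completion parameters): run a callback once a previously submitted chain
 * headed by tx completes.
 *
 *	struct async_submit_ctl submit;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, tx, done_fn, done_arg, NULL);
 *	async_trigger_callback(&submit);
 */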

/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx: transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
	if (*tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(*tx));
		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
			panic("DMA_ERROR waiting for transaction\n");
		async_tx_ack(*tx);
		*tx = NULL;
	}
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
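
/*
 * Example: async_trigger_callback() above falls back to its synchronous
 * path with
 *
 *	async_tx_quiesce(&submit->depend_tx);
 *
 * after which the dependency has completed, been acked, and been cleared.
 */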

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");