/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>

void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->dma_32bit_pfn = pfn_32bit;
}

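/*
 * Example (illustrative sketch, not from the original file): an IOMMU
 * driver would typically embed an iova_domain in its own state and
 * initialize it once at setup time. "my_domain" and the pfn limit
 * expression are hypothetical:
 *
 *	static struct iova_domain my_domain;
 *
 *	init_iova_domain(&my_domain, IOVA_PFN(DMA_BIT_MASK(32)));
 */

/*
 * cached32_node remembers the most recent allocation made against the
 * dma_32bit_pfn limit, so that the next 32-bit request can resume the
 * backwards tree walk just below it instead of starting again from the
 * highest-addressed node.
 */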
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);
}

/* Computes the padding size required to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}

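/*
 * Worked example (illustrative): for a size-aligned request of 8 pfns
 * with limit_pfn == 0x4006, order = 3 and pad_size = 0x4007 % 8 = 7.
 * The allocator below then starts the range at
 * limit_pfn - (size + pad_size) + 1 = 0x3ff8, which is 8-aligned.
 */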
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new iova into the domain rbtree, holding the writer lock */
	{
		struct rb_node **entry, *parent = NULL;

		/* If we have 'prev', it's a valid place to start the
		   insertion. Otherwise, start from the root. */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add new node and rebalance tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: size of page frames to allocate
 * @limit_pfn: max limit address
 * @size_aligned: set if a size-aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching downward from limit_pfn. If the size_aligned flag is set then
 * the allocated address iova->pfn_lo will be naturally aligned on
 * roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size to
	 * the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}

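/*
 * Example (illustrative sketch): a mapping path would convert a byte
 * length to page frames and allocate below a chosen pfn limit. Apart
 * from alloc_iova() itself, the names here are hypothetical:
 *
 *	unsigned long nrpages = aligned_size >> PAGE_SHIFT;
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&my_domain, nrpages,
 *			  my_domain.dma_32bit_pfn, true);
 *	if (!iova)
 *		return NULL;
 */
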
/**
 * find_iova - finds an iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can
			 * possibly reference a particular iova and hence no
			 * conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}

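/*
 * Example (illustrative): an unmap path can recover the iova node
 * covering a DMA handle; "dma_addr" and IOVA_PFN() are hypothetical:
 *
 *	struct iova *iova = find_iova(&my_domain, IOVA_PFN(dma_addr));
 *
 *	if (iova) {
 *		... tear down the page-table entries, then ...
 *		__free_iova(&my_domain, iova);
 *	}
 */
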
/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that was allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);
	if (iova)
		__free_iova(iovad, iova);
}

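/*
 * Example (illustrative): free_iova() is the lookup-and-release
 * convenience; a caller that already holds the iova from find_iova()
 * should call __free_iova() directly to avoid a second tree walk:
 *
 *	free_iova(&my_domain, IOVA_PFN(dma_addr));
 */
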
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);
		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

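/*
 * Example (illustrative): a domain set up with init_iova_domain() is
 * torn down once its last mapping has been released:
 *
 *	put_iova_domain(&my_domain);
 */
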
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);
	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not handed out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;
		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node,
	 * or we need to insert the remaining non-overlapping address
	 * range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}

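/*
 * Example (illustrative sketch): carving out an address window that the
 * hardware claims, e.g. the IOAPIC range, so alloc_iova() never hands
 * it out. The addresses and IOVA_PFN() are hypothetical:
 *
 *	reserve_iova(&my_domain, IOVA_PFN(0xfee00000),
 *		     IOVA_PFN(0xfeefffff));
 */
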
/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: source domain from which to copy
 * @to: destination domain to which to copy
 * This function copies the reserved iovas from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;
		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
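
/*
 * Example (illustrative): when a device is moved to a freshly created
 * domain, the reserved windows must be mirrored before any allocation
 * happens; the domain names are hypothetical:
 *
 *	init_iova_domain(&new_domain, IOVA_PFN(DMA_BIT_MASK(32)));
 *	copy_reserved_iova(&old_domain, &new_domain);
 */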