/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>

#include "rds.h"
#include "rdma.h"
#include "ib.h"


/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
        struct rds_ib_device    *device;
        struct rds_ib_mr_pool   *pool;
        struct ib_fmr           *fmr;
        struct list_head        list;           /* entry on one of the pool's lists */
        unsigned int            remap_count;    /* times this MR has been remapped */

        struct scatterlist      *sg;            /* pinned pages backing the mapping */
        unsigned int            sg_len;         /* number of sg entries / pinned pages */
        u64                     *dma;
        int                     sg_dma_len;     /* DMA-mapped entries; 0 when unmapped */
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
        struct mutex            flush_lock;     /* serialize fmr invalidate */
        struct work_struct      flush_worker;   /* flush worker */

        spinlock_t              list_lock;      /* protect variables below */
        atomic_t                item_count;     /* total # of MRs */
        atomic_t                dirty_count;    /* # of dirty MRs */
        struct list_head        drop_list;      /* MRs that have reached their max_maps limit */
        struct list_head        free_list;      /* unused MRs */
        struct list_head        clean_list;     /* unused & unmapped MRs */
        atomic_t                free_pinned;    /* memory pinned by free MRs */
        unsigned long           max_items;
        unsigned long           max_items_soft;
        unsigned long           max_free_pinned;
        struct ib_fmr_attr      fmr_attr;
};

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

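/*
 * Return the rds_ib_device that has @ipaddr bound to it, or NULL if no
 * device matches. Walks the global rds_ib_devices list, taking each
 * device's spinlock while scanning its ipaddr_list.
 */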
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_ipaddr *i_ipaddr;

        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                spin_lock_irq(&rds_ibdev->spinlock);
                list_for_each_entry(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                        if (i_ipaddr->ipaddr == ipaddr) {
                                spin_unlock_irq(&rds_ibdev->spinlock);
                                return rds_ibdev;
                        }
                }
                spin_unlock_irq(&rds_ibdev->spinlock);
        }

        return NULL;
}

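/* Bind @ipaddr to @rds_ibdev by adding it to the device's ipaddr_list. */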
static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;

        i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
        if (!i_ipaddr)
                return -ENOMEM;

        i_ipaddr->ipaddr = ipaddr;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        return 0;
}

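/* Drop @ipaddr from @rds_ibdev's ipaddr_list, if it is present. */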
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr, *next;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_for_each_entry_safe(i_ipaddr, next, &rds_ibdev->ipaddr_list, list) {
                if (i_ipaddr->ipaddr == ipaddr) {
                        list_del(&i_ipaddr->list);
                        kfree(i_ipaddr);
                        break;
                }
        }
        spin_unlock_irq(&rds_ibdev->spinlock);
}

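/*
 * Rebind @ipaddr to @rds_ibdev: drop any binding it has on another
 * device, then add it here, so that rds_ib_get_device() lookups find
 * the right device again.
 */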
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev_old;

        rds_ibdev_old = rds_ib_get_device(ipaddr);
        if (rds_ibdev_old)
                rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);

        return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* conn was previously on the nodev_conns_list */
        spin_lock_irq(&ib_nodev_conns_lock);
        BUG_ON(list_empty(&ib_nodev_conns));
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);

        /* irqs are already disabled by the lock above; a nested
         * spin_unlock_irq here would re-enable them too early. */
        spin_lock(&rds_ibdev->spinlock);
        list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
        spin_unlock(&rds_ibdev->spinlock);
        spin_unlock_irq(&ib_nodev_conns_lock);

        ic->rds_ibdev = rds_ibdev;
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* place conn on nodev_conns_list */
        spin_lock(&ib_nodev_conns_lock);

        spin_lock_irq(&rds_ibdev->spinlock);
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);
        spin_unlock_irq(&rds_ibdev->spinlock);

        list_add_tail(&ic->ib_node, &ib_nodev_conns);

        spin_unlock(&ib_nodev_conns_lock);

        ic->rds_ibdev = NULL;
}

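/* Destroy every connection on @list, including any passive counterpart. */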
void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(list_lock);
        list_splice(list, &tmp_list);
        INIT_LIST_HEAD(list);
        spin_unlock_irq(list_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) {
                if (ic->conn->c_passive)
                        rds_conn_destroy(ic->conn->c_passive);
                rds_conn_destroy(ic->conn);
        }
}

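/*
 * Allocate and initialize a device's FMR pool; the pool limits are
 * derived from the device's max_fmrs.
 */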
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->drop_list);
        INIT_LIST_HEAD(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        spin_lock_init(&pool->list_lock);
        INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

        pool->fmr_attr.max_pages = fmr_message_size;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
        pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

        /* We never allow more than max_items MRs to be allocated.
         * Once we exceed max_items_soft, we start freeing
         * items more aggressively.
         * Make sure that max_items > max_items_soft > max_items / 2
         */
        pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
        pool->max_items = rds_ibdev->max_fmrs;

        return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

        iinfo->rdma_mr_max = pool->max_items;
        iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
        flush_workqueue(rds_wq);
        rds_ib_flush_mr_pool(pool, 1);
        BUG_ON(atomic_read(&pool->item_count));
        BUG_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}

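/* Grab an unused, already unmapped MR off the clean list, if one exists. */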
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->list_lock, flags);
        if (!list_empty(&pool->clean_list)) {
                ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
                list_del_init(&ibmr->list);
        }
        spin_unlock_irqrestore(&pool->list_lock, flags);

        return ibmr;
}

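/*
 * Get an MR for a new mapping: reuse a clean one if possible, otherwise
 * allocate a fresh FMR, flushing the pool and retrying a couple of
 * times if the pool is at its item limit before failing with -EAGAIN.
 */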
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
        struct rds_ib_mr *ibmr = NULL;
        int err = 0, iter = 0;

        while (1) {
                ibmr = rds_ib_reuse_fmr(pool);
                if (ibmr)
                        return ibmr;

                /* No clean MRs - now we have the choice of either
                 * allocating a fresh MR up to the limit imposed by the
                 * driver, or flushing any dirty unused MRs.
                 * We try to avoid stalling in the send path if possible,
                 * so we allocate as long as we're allowed to.
                 *
                 * We're fussy about enforcing the FMR limit, though. If the driver
                 * tells us we can't use more than N fmrs, we shouldn't start
                 * arguing with it */
                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* We do have some empty MRs. Flush them out. */
                rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
                rds_ib_flush_mr_pool(pool, 0);
        }

        ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
        if (!ibmr) {
                err = -ENOMEM;
                goto out_no_cigar;
        }

        ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
                        (IB_ACCESS_LOCAL_WRITE |
                         IB_ACCESS_REMOTE_READ |
                         IB_ACCESS_REMOTE_WRITE),
                        &pool->fmr_attr);
        if (IS_ERR(ibmr->fmr)) {
                err = PTR_ERR(ibmr->fmr);
                ibmr->fmr = NULL;
                printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
                goto out_no_cigar;
        }

        rds_ib_stats_inc(s_ib_rdma_mr_alloc);
        return ibmr;

out_no_cigar:
        if (ibmr) {
                if (ibmr->fmr)
                        ib_dealloc_fmr(ibmr->fmr);
                kfree(ibmr);
        }
        atomic_dec(&pool->item_count);
        return ERR_PTR(err);
}

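/*
 * DMA-map the scatterlist and hand the resulting page list to
 * ib_map_phys_fmr(). Only the first entry may start, and only the last
 * entry may end, off a page boundary; anything else cannot be expressed
 * as an FMR page list and is rejected with -EINVAL.
 */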
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
                        struct scatterlist *sg, unsigned int nents)
{
        struct ib_device *dev = rds_ibdev->dev;
        struct scatterlist *scat = sg;
        u64 io_addr = 0;
        u64 *dma_pages;
        u32 len;
        int page_cnt, sg_dma_len;
        int i, j;
        int ret;

        sg_dma_len = ib_dma_map_sg(dev, sg, nents,
                                   DMA_BIDIRECTIONAL);
        if (unlikely(!sg_dma_len)) {
                printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
                return -EBUSY;
        }

        len = 0;
        page_cnt = 0;

        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                if (dma_addr & ~PAGE_MASK) {
                        if (i > 0) {
                                /* don't leak the DMA mapping on failure */
                                ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                                return -EINVAL;
                        } else
                                ++page_cnt;
                }
                if ((dma_addr + dma_len) & ~PAGE_MASK) {
                        if (i < sg_dma_len - 1) {
                                ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                                return -EINVAL;
                        } else
                                ++page_cnt;
                }

                len += dma_len;
        }

        page_cnt += len >> PAGE_SHIFT;
        if (page_cnt > fmr_message_size) {
                ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                return -EINVAL;
        }

        dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC);
        if (!dma_pages) {
                ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                return -ENOMEM;
        }

        page_cnt = 0;
        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                for (j = 0; j < dma_len; j += PAGE_SIZE)
                        dma_pages[page_cnt++] =
                                (dma_addr & PAGE_MASK) + j;
        }

        ret = ib_map_phys_fmr(ibmr->fmr,
                        dma_pages, page_cnt, io_addr);
        if (ret) {
                ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                goto out;
        }

        /* Success - we successfully remapped the MR, so we can
         * safely tear down the old mapping. */
        rds_ib_teardown_mr(ibmr);

        ibmr->sg = scat;
        ibmr->sg_len = nents;
        ibmr->sg_dma_len = sg_dma_len;
        ibmr->remap_count++;

        rds_ib_stats_inc(s_ib_rdma_mr_used);
        ret = 0;

out:
        kfree(dma_pages);

        return ret;
}

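/*
 * Sync the MR's pages for the CPU or the device; the underlying
 * mapping is bidirectional, so both cases sync the whole list.
 */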
void rds_ib_sync_mr(void *trans_private, int direction)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

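/*
 * Unmap the MR's DMA mapping and unpin (dirty and release) its pages.
 * Pool accounting is left to rds_ib_teardown_mr().
 */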
static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        struct rds_ib_device *rds_ibdev = ibmr->device;

        if (ibmr->sg_dma_len) {
                ib_dma_unmap_sg(rds_ibdev->dev,
                                ibmr->sg, ibmr->sg_len,
                                DMA_BIDIRECTIONAL);
                ibmr->sg_dma_len = 0;
        }

        /* Release the s/g list */
        if (ibmr->sg_len) {
                unsigned int i;

                for (i = 0; i < ibmr->sg_len; ++i) {
                        struct page *page = sg_page(&ibmr->sg[i]);

                        /* FIXME we need a way to tell a r/w MR
                         * from a r/o MR */
                        set_page_dirty(page);
                        put_page(page);
                }
                kfree(ibmr->sg);

                ibmr->sg = NULL;
                ibmr->sg_len = 0;
        }
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        unsigned int pinned = ibmr->sg_len;

        __rds_ib_teardown_mr(ibmr);
        if (pinned) {
                struct rds_ib_device *rds_ibdev = ibmr->device;
                struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

                atomic_sub(pinned, &pool->free_pinned);
        }
}

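/*
 * How many MRs a flush should try to free outright: all of them when
 * free_all is set, none otherwise (the flush then only unmaps).
 */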
static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
{
        struct rds_ib_mr *ibmr, *next;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);
        unsigned long unpinned = 0;
        unsigned long flags;
        unsigned int nfreed = 0, ncleaned = 0, free_goal;
        int ret = 0;

        rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

        mutex_lock(&pool->flush_lock);

        spin_lock_irqsave(&pool->list_lock, flags);
        /* Get the list of all MRs to be dropped. Ordering matters -
         * we want to put drop_list ahead of free_list. */
        list_splice_init(&pool->free_list, &unmap_list);
        list_splice_init(&pool->drop_list, &unmap_list);
        if (free_all)
                list_splice_init(&pool->clean_list, &unmap_list);
        spin_unlock_irqrestore(&pool->list_lock, flags);

        free_goal = rds_ib_flush_goal(pool, free_all);

        if (list_empty(&unmap_list))
                goto out;

        /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
        list_for_each_entry(ibmr, &unmap_list, list)
                list_add(&ibmr->fmr->list, &fmr_list);
        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

        /* Now we can destroy the DMA mapping and unpin any pages */
        list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
                unpinned += ibmr->sg_len;
                __rds_ib_teardown_mr(ibmr);
                if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
                        rds_ib_stats_inc(s_ib_rdma_mr_free);
                        list_del(&ibmr->list);
                        ib_dealloc_fmr(ibmr->fmr);
                        kfree(ibmr);
                        nfreed++;
                }
                ncleaned++;
        }

        spin_lock_irqsave(&pool->list_lock, flags);
        list_splice(&unmap_list, &pool->clean_list);
        spin_unlock_irqrestore(&pool->list_lock, flags);

        atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(ncleaned, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

out:
        mutex_unlock(&pool->flush_lock);
        return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);

        rds_ib_flush_mr_pool(pool, 0);
}

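/*
 * Return an MR to the pool: it goes on the drop list once it has used
 * up its remaps, otherwise on the free list, and a flush is kicked off
 * when too much memory is pinned or too many MRs are dirty.
 */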
void rds_ib_free_mr(void *trans_private, int invalidate)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
        unsigned long flags;

        rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

        /* Return it to the pool's free list */
        spin_lock_irqsave(&pool->list_lock, flags);
        if (ibmr->remap_count >= pool->fmr_attr.max_maps)
                list_add(&ibmr->list, &pool->drop_list);
        else
                list_add(&ibmr->list, &pool->free_list);

        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);
        spin_unlock_irqrestore(&pool->list_lock, flags);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned
         || atomic_read(&pool->dirty_count) >= pool->max_items / 10)
                queue_work(rds_wq, &pool->flush_worker);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_ib_flush_mr_pool(pool, 0);
                } else {
                        /* We get here if the user created an MR marked
                         * as use_once and invalidate at the same time. */
                        queue_work(rds_wq, &pool->flush_worker);
                }
        }
}

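/* Unmap the unused MRs in every device's pool. */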
void rds_ib_flush_mrs(void)
{
        struct rds_ib_device *rds_ibdev;

        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

                if (pool)
                        rds_ib_flush_mr_pool(pool, 0);
        }
}

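/*
 * Register the pages in @sg and return the new MR, which RDS stores as
 * mr->r_trans_private; on success *key_ret holds the rkey to hand to
 * the remote peer.
 */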
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                struct rds_sock *rs, u32 *key_ret)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
        int ret;

        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
        if (!rds_ibdev) {
                ret = -ENODEV;
                goto out;
        }

        if (!rds_ibdev->mr_pool) {
                ret = -ENODEV;
                goto out;
        }

        ibmr = rds_ib_alloc_fmr(rds_ibdev);
        if (IS_ERR(ibmr))
                return ibmr;

        ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
        if (ret == 0)
                *key_ret = ibmr->fmr->rkey;
        else
                printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

        ibmr->device = rds_ibdev;

out:
        if (ret) {
                if (ibmr)
                        rds_ib_free_mr(ibmr, 0);
                ibmr = ERR_PTR(ret);
        }
        return ibmr;
}