/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>

#include "rds.h"
#include "rdma.h"
#include "iw.h"


/*
 * This is stored as mr->r_trans_private.
 */
struct rds_iw_mr {
        struct rds_iw_device    *device;
        struct rds_iw_mr_pool   *pool;
        struct rdma_cm_id       *cm_id;

        struct ib_mr            *mr;
        struct ib_fast_reg_page_list *page_list;

        struct rds_iw_mapping   mapping;
        unsigned char           remap_count;
};

/*
 * Our own little MR pool
 */
struct rds_iw_mr_pool {
        struct rds_iw_device    *device;        /* back ptr to the device that owns us */

        struct mutex            flush_lock;     /* serialize fmr invalidate */
        struct work_struct      flush_worker;   /* flush worker */

        spinlock_t              list_lock;      /* protect variables below */
        atomic_t                item_count;     /* total # of MRs */
        atomic_t                dirty_count;    /* # of dirty MRs */
        struct list_head        dirty_list;     /* dirty mappings */
        struct list_head        clean_list;     /* unused & unmapped MRs */
        atomic_t                free_pinned;    /* memory pinned by free MRs */
        unsigned long           max_message_size; /* in pages */
        unsigned long           max_items;
        unsigned long           max_items_soft;
        unsigned long           max_free_pinned;
        int                     max_pages;
};

static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
static void rds_iw_mr_pool_flush_worker(struct work_struct *work);
static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
                              struct rds_iw_mr *ibmr,
                              struct scatterlist *sg, unsigned int nents);
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
                                              struct list_head *unmap_list,
                                              struct list_head *kill_list);
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);

static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
{
        struct rds_iw_device *iwdev;
        struct rds_iw_cm_id *i_cm_id;

        *rds_iwdev = NULL;
        *cm_id = NULL;

        list_for_each_entry(iwdev, &rds_iw_devices, list) {
                spin_lock_irq(&iwdev->spinlock);
                list_for_each_entry(i_cm_id, &iwdev->cm_id_list, list) {
                        struct sockaddr_in *src_addr, *dst_addr;

                        src_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.src_addr;
                        dst_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.dst_addr;

                        rdsdebug("local ipaddr = %x port %d, "
                                 "remote ipaddr = %x port %d"
                                 "..looking for %x port %d, "
                                 "remote ipaddr = %x port %d\n",
                                 src_addr->sin_addr.s_addr,
                                 src_addr->sin_port,
                                 dst_addr->sin_addr.s_addr,
                                 dst_addr->sin_port,
                                 rs->rs_bound_addr,
                                 rs->rs_bound_port,
                                 rs->rs_conn_addr,
                                 rs->rs_conn_port);
#ifdef WORKING_TUPLE_DETECTION
                        if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
                            src_addr->sin_port == rs->rs_bound_port &&
                            dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
                            dst_addr->sin_port == rs->rs_conn_port) {
#else
                        /* FIXME - needs to compare the local and remote
                         * ipaddr/port tuple, but the ipaddr is the only
                         * available information in the rds_sock (as the rest
                         * are zeroed). It doesn't appear to be properly
                         * populated during connection setup...
                         */
                        if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
#endif
                                spin_unlock_irq(&iwdev->spinlock);
                                *rds_iwdev = iwdev;
                                *cm_id = i_cm_id->cm_id;
                                return 0;
                        }
                }
                spin_unlock_irq(&iwdev->spinlock);
        }

        return 1;
}

static int rds_iw_add_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
        struct rds_iw_cm_id *i_cm_id;

        i_cm_id = kmalloc(sizeof *i_cm_id, GFP_KERNEL);
        if (!i_cm_id)
                return -ENOMEM;

        i_cm_id->cm_id = cm_id;

        spin_lock_irq(&rds_iwdev->spinlock);
        list_add_tail(&i_cm_id->list, &rds_iwdev->cm_id_list);
        spin_unlock_irq(&rds_iwdev->spinlock);

        return 0;
}

void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
        struct rds_iw_cm_id *i_cm_id;

        spin_lock_irq(&rds_iwdev->spinlock);
        list_for_each_entry(i_cm_id, &rds_iwdev->cm_id_list, list) {
                if (i_cm_id->cm_id == cm_id) {
                        list_del(&i_cm_id->list);
                        kfree(i_cm_id);
                        break;
                }
        }
        spin_unlock_irq(&rds_iwdev->spinlock);
}


int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
        struct sockaddr_in *src_addr, *dst_addr;
        struct rds_iw_device *rds_iwdev_old;
        struct rds_sock rs;
        struct rdma_cm_id *pcm_id;
        int rc;

        src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
        dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;

        rs.rs_bound_addr = src_addr->sin_addr.s_addr;
        rs.rs_bound_port = src_addr->sin_port;
        rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
        rs.rs_conn_port = dst_addr->sin_port;

        rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
        if (rc)
                rds_iw_remove_cm_id(rds_iwdev, cm_id);

        return rds_iw_add_cm_id(rds_iwdev, cm_id);
}

void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
        struct rds_iw_connection *ic = conn->c_transport_data;

        /* conn was previously on the nodev_conns_list */
        spin_lock_irq(&iw_nodev_conns_lock);
        BUG_ON(list_empty(&iw_nodev_conns));
        BUG_ON(list_empty(&ic->iw_node));
        list_del(&ic->iw_node);

        spin_lock_irq(&rds_iwdev->spinlock);
        list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
        spin_unlock_irq(&rds_iwdev->spinlock);
        spin_unlock_irq(&iw_nodev_conns_lock);

        ic->rds_iwdev = rds_iwdev;
}

void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
        struct rds_iw_connection *ic = conn->c_transport_data;

        /* place conn on nodev_conns_list */
        spin_lock(&iw_nodev_conns_lock);

        spin_lock_irq(&rds_iwdev->spinlock);
        BUG_ON(list_empty(&ic->iw_node));
        list_del(&ic->iw_node);
        spin_unlock_irq(&rds_iwdev->spinlock);

        list_add_tail(&ic->iw_node, &iw_nodev_conns);

        spin_unlock(&iw_nodev_conns_lock);

        rds_iw_remove_cm_id(ic->rds_iwdev, ic->i_cm_id);
        ic->rds_iwdev = NULL;
}

void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
        struct rds_iw_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(list_lock);
        list_splice(list, &tmp_list);
        INIT_LIST_HEAD(list);
        spin_unlock_irq(list_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node)
                rds_conn_destroy(ic->conn);
}

static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
                                   struct scatterlist *list, unsigned int sg_len)
{
        sg->list = list;
        sg->len = sg_len;
        sg->dma_len = 0;
        sg->dma_npages = 0;
        sg->bytes = 0;
}

static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
                                   struct rds_iw_scatterlist *sg)
{
        struct ib_device *dev = rds_iwdev->dev;
        u64 *dma_pages = NULL;
        int i, j, ret;

        WARN_ON(sg->dma_len);

        sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
        if (unlikely(!sg->dma_len)) {
                printk(KERN_WARNING "RDS/IW: dma_map_sg failed!\n");
                return ERR_PTR(-EBUSY);
        }

        sg->bytes = 0;
        sg->dma_npages = 0;

        ret = -EINVAL;
        for (i = 0; i < sg->dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
                u64 end_addr;

                sg->bytes += dma_len;

                /* Only the first element may start at a sub-page offset,
                 * and only the last may end at one; everything in between
                 * must be page aligned. */
                end_addr = dma_addr + dma_len;
                if (dma_addr & ~PAGE_MASK) {
                        if (i > 0)
                                goto out_unmap;
                        dma_addr &= PAGE_MASK;
                }
                if (end_addr & ~PAGE_MASK) {
                        if (i < sg->dma_len - 1)
                                goto out_unmap;
                        end_addr = (end_addr + ~PAGE_MASK) & PAGE_MASK;
                }

                sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT;
        }

        /* Now gather the dma addrs into one list */
        if (sg->dma_npages > fastreg_message_size)
                goto out_unmap;

        dma_pages = kmalloc(sizeof(u64) * sg->dma_npages, GFP_ATOMIC);
        if (!dma_pages) {
                ret = -ENOMEM;
                goto out_unmap;
        }

        for (i = j = 0; i < sg->dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
                u64 end_addr;

                end_addr = dma_addr + dma_len;
                dma_addr &= PAGE_MASK;
                for (; dma_addr < end_addr; dma_addr += PAGE_SIZE)
                        dma_pages[j++] = dma_addr;
                BUG_ON(j > sg->dma_npages);
        }

        return dma_pages;

out_unmap:
        ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
        sg->dma_len = 0;
        kfree(dma_pages);
        return ERR_PTR(ret);
}
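
/*
 * Worked example (an illustrative sketch, assuming PAGE_SIZE == 4096):
 * a mapped sg list of two elements, an 8KB element that starts and ends
 * on page boundaries followed by a page-aligned trailing 2KB element,
 * passes the alignment checks above and yields sg->bytes = 10240 and
 * sg->dma_npages = 3, the trailing element being rounded up to a full
 * page before it is flattened into the dma_pages array.
 */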


struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *rds_iwdev)
{
        struct rds_iw_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool) {
                printk(KERN_WARNING "RDS/IW: rds_iw_create_mr_pool alloc error\n");
                return ERR_PTR(-ENOMEM);
        }

        pool->device = rds_iwdev;
        INIT_LIST_HEAD(&pool->dirty_list);
        INIT_LIST_HEAD(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        spin_lock_init(&pool->list_lock);
        INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);

        pool->max_message_size = fastreg_message_size;
        pool->max_items = fastreg_pool_size;
        pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
        pool->max_pages = fastreg_message_size;

        /* We never allow more than max_items MRs to be allocated.
         * When we exceed max_items_soft, we start freeing items
         * more aggressively.
         * Make sure that max_items > max_items_soft > max_items / 2
         */
        pool->max_items_soft = pool->max_items * 3 / 4;

        return pool;
}
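
/*
 * Sizing sketch (illustrative numbers only; the real values come from
 * the fastreg_message_size and fastreg_pool_size globals set elsewhere
 * in the iWARP transport). With fastreg_message_size = 256 and
 * fastreg_pool_size = 2048:
 *
 *      max_free_pinned = 2048 * 256 / 4 = 131072 pages pinned by free MRs
 *      max_items_soft  = 2048 * 3 / 4   = 1536 MRs before aggressive freeing
 */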

void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

        iinfo->rdma_mr_max = pool->max_items;
        iinfo->rdma_mr_size = pool->max_pages;
}

void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool)
{
        flush_workqueue(rds_wq);
        rds_iw_flush_mr_pool(pool, 1);
        BUG_ON(atomic_read(&pool->item_count));
        BUG_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}

static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
{
        struct rds_iw_mr *ibmr = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->list_lock, flags);
        if (!list_empty(&pool->clean_list)) {
                ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
                list_del_init(&ibmr->mapping.m_list);
        }
        spin_unlock_irqrestore(&pool->list_lock, flags);

        return ibmr;
}

static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
{
        struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
        struct rds_iw_mr *ibmr = NULL;
        int err = 0, iter = 0;

        while (1) {
                ibmr = rds_iw_reuse_fmr(pool);
                if (ibmr)
                        return ibmr;

                /* No clean MRs - now we have the choice of either
                 * allocating a fresh MR up to the limit imposed by the
                 * driver, or flushing any dirty unused MRs.
                 * We try to avoid stalling in the send path if possible,
                 * so we allocate as long as we're allowed to.
                 *
                 * We're fussy with enforcing the FMR limit, though. If the driver
                 * tells us we can't use more than N fmrs, we shouldn't start
                 * arguing with it */
                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        rds_iw_stats_inc(s_iw_rdma_mr_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* We do have some empty MRs. Flush them out. */
                rds_iw_stats_inc(s_iw_rdma_mr_pool_wait);
                rds_iw_flush_mr_pool(pool, 0);
        }

        ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
        if (!ibmr) {
                err = -ENOMEM;
                goto out_no_cigar;
        }

        spin_lock_init(&ibmr->mapping.m_lock);
        INIT_LIST_HEAD(&ibmr->mapping.m_list);
        ibmr->mapping.m_mr = ibmr;

        err = rds_iw_init_fastreg(pool, ibmr);
        if (err)
                goto out_no_cigar;

        rds_iw_stats_inc(s_iw_rdma_mr_alloc);
        return ibmr;

out_no_cigar:
        if (ibmr) {
                rds_iw_destroy_fastreg(pool, ibmr);
                kfree(ibmr);
        }
        atomic_dec(&pool->item_count);
        return ERR_PTR(err);
}
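
/*
 * Allocation policy at a glance (a summary sketch of the loop above):
 * try to recycle a clean MR first, allocate a fresh one while item_count
 * stays within max_items, otherwise flush the dirty pool and retry;
 * after two unsuccessful flush attempts give up with -EAGAIN so the
 * send path is not stalled indefinitely.
 */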

void rds_iw_sync_mr(void *trans_private, int direction)
{
        struct rds_iw_mr *ibmr = trans_private;
        struct rds_iw_device *rds_iwdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
                        ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
                        ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

static inline unsigned int rds_iw_flush_goal(struct rds_iw_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
{
        struct rds_iw_mr *ibmr, *next;
        LIST_HEAD(unmap_list);
        LIST_HEAD(kill_list);
        unsigned long flags;
        unsigned int nfreed = 0, ncleaned = 0, free_goal;
        int ret = 0;

        rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);

        mutex_lock(&pool->flush_lock);

        spin_lock_irqsave(&pool->list_lock, flags);
        /* Get the list of all mappings to be destroyed */
        list_splice_init(&pool->dirty_list, &unmap_list);
        if (free_all)
                list_splice_init(&pool->clean_list, &kill_list);
        spin_unlock_irqrestore(&pool->list_lock, flags);

        free_goal = rds_iw_flush_goal(pool, free_all);

        /* Batched invalidate of dirty MRs.
         * For FMR based MRs, the mappings on the unmap list are
         * actually members of an ibmr (ibmr->mapping). They either
         * migrate to the kill_list, or have been cleaned and should be
         * moved to the clean_list.
         * For fastregs, they will be dynamically allocated, and
         * will be destroyed by the unmap function.
         */
        if (!list_empty(&unmap_list)) {
                ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list);
                /* If we've been asked to destroy all MRs, move those
                 * that were simply cleaned to the kill list */
                if (free_all)
                        list_splice_init(&unmap_list, &kill_list);
        }

        /* Destroy any MRs that are past their best before date */
        list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
                rds_iw_stats_inc(s_iw_rdma_mr_free);
                list_del(&ibmr->mapping.m_list);
                rds_iw_destroy_fastreg(pool, ibmr);
                kfree(ibmr);
                nfreed++;
        }

        /* Anything that remains are laundered ibmrs, which we can add
         * back to the clean list. */
        if (!list_empty(&unmap_list)) {
                spin_lock_irqsave(&pool->list_lock, flags);
                list_splice(&unmap_list, &pool->clean_list);
                spin_unlock_irqrestore(&pool->list_lock, flags);
        }

        atomic_sub(ncleaned, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

        mutex_unlock(&pool->flush_lock);
        return ret;
}
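
/*
 * Flush flow at a glance (a sketch of the function above): mappings are
 * spliced
 *
 *      dirty_list -> unmap_list -> clean_list
 *
 * with entries that cannot be reused (and, when free_all is set,
 * everything) diverted to kill_list, where the underlying MR is
 * deregistered and freed.
 */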

static void rds_iw_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker);

        rds_iw_flush_mr_pool(pool, 0);
}

void rds_iw_free_mr(void *trans_private, int invalidate)
{
        struct rds_iw_mr *ibmr = trans_private;
        struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;

        rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);
        if (!pool)
                return;

        /* Return it to the pool's free list */
        rds_iw_free_fastreg(pool, ibmr);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 10)
                queue_work(rds_wq, &pool->flush_worker);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_iw_flush_mr_pool(pool, 0);
                } else {
                        /* We get here if the user created an MR marked
                         * as use_once and invalidate at the same time. */
                        queue_work(rds_wq, &pool->flush_worker);
                }
        }
}

void rds_iw_flush_mrs(void)
{
        struct rds_iw_device *rds_iwdev;

        list_for_each_entry(rds_iwdev, &rds_iw_devices, list) {
                struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

                if (pool)
                        rds_iw_flush_mr_pool(pool, 0);
        }
}

void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret)
{
        struct rds_iw_device *rds_iwdev;
        struct rds_iw_mr *ibmr = NULL;
        struct rdma_cm_id *cm_id;
        int ret;

        ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
        if (ret || !cm_id) {
                ret = -ENODEV;
                goto out;
        }

        if (!rds_iwdev->mr_pool) {
                ret = -ENODEV;
                goto out;
        }

        ibmr = rds_iw_alloc_mr(rds_iwdev);
        if (IS_ERR(ibmr))
                return ibmr;

        ibmr->cm_id = cm_id;
        ibmr->device = rds_iwdev;

        ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
        if (ret == 0)
                *key_ret = ibmr->mr->rkey;
        else
                printk(KERN_WARNING "RDS/IW: failed to map mr (errno=%d)\n", ret);

out:
        if (ret) {
                if (ibmr)
                        rds_iw_free_mr(ibmr, 0);
                ibmr = ERR_PTR(ret);
        }
        return ibmr;
}
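
/*
 * rds_iw_get_mr(), rds_iw_sync_mr(), rds_iw_free_mr() and
 * rds_iw_flush_mrs() are the memory-registration entry points the core
 * RDS code reaches through the transport ops. The wiring is expected to
 * look roughly like the sketch below (it lives in iw.c, not in this
 * file):
 *
 *      .get_mr         = rds_iw_get_mr,
 *      .sync_mr        = rds_iw_sync_mr,
 *      .free_mr        = rds_iw_free_mr,
 *      .flush_mrs      = rds_iw_flush_mrs,
 */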

/*
 * iWARP fastreg handling
 *
 * The life cycle of a fastreg registration is a bit different from
 * FMRs.
 * The idea behind fastreg is to have one MR, to which we bind different
 * mappings over time. To avoid stalling on the expensive map and invalidate
 * operations, these operations are pipelined on the same send queue on
 * which we want to send the message containing the r_key.
 *
 * This creates a bit of a problem for us, as we do not have the destination
 * IP in GET_MR, so the connection must be set up prior to the GET_MR call
 * for RDMA to be set up correctly. If a fastreg request is present,
 * rds_iw_xmit will try to queue a LOCAL_INV (if needed) and a FAST_REG_MR
 * work request before queuing the SEND. When completions for these arrive,
 * they are dispatched and the MR gets a bit set showing that RDMA can be
 * performed.
 *
 * There is another interesting aspect that's related to invalidation.
 * The application can request that a mapping is invalidated in FREE_MR.
 * The expectation there is that this invalidation step includes ALL
 * PREVIOUSLY FREED MRs.
 */
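
/*
 * Illustrative ordering on the send queue (a sketch of the pipeline the
 * comment above describes, not literal code from this file):
 *
 *      IB_WR_LOCAL_INV         invalidate the previously advertised r_key
 *      IB_WR_FAST_REG_MR       bind the new page list, yielding a fresh r_key
 *      IB_WR_SEND              the RDS message that carries that r_key
 *
 * rds_iw_rdma_fastreg_inv() and rds_iw_rdma_build_fastreg() below post
 * the first two work requests; the SEND is queued by rds_iw_xmit.
 */
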
static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
                               struct rds_iw_mr *ibmr)
{
        struct rds_iw_device *rds_iwdev = pool->device;
        struct ib_fast_reg_page_list *page_list = NULL;
        struct ib_mr *mr;
        int err;

        mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
        if (IS_ERR(mr)) {
                err = PTR_ERR(mr);

                printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed (err=%d)\n", err);
                return err;
        }

        /* FIXME - this is overkill, but mapping->m_sg.dma_len/mapping->m_sg.dma_npages
         * is not filled in.
         */
        page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size);
        if (IS_ERR(page_list)) {
                err = PTR_ERR(page_list);

                printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed (err=%d)\n", err);
                ib_dereg_mr(mr);
                return err;
        }

        ibmr->page_list = page_list;
        ibmr->mr = mr;
        return 0;
}

static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
{
        struct rds_iw_mr *ibmr = mapping->m_mr;
        struct ib_send_wr f_wr, *failed_wr;
        int ret;

        /*
         * Perform a WR for the fast_reg_mr. Each individual page
         * in the sg list is added to the fast reg page list and placed
         * inside the fast_reg_mr WR. The key used is a rolling 8-bit
         * counter, which should guarantee uniqueness.
         */
        ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
        mapping->m_rkey = ibmr->mr->rkey;

        memset(&f_wr, 0, sizeof(f_wr));
        f_wr.wr_id = RDS_IW_FAST_REG_WR_ID;
        f_wr.opcode = IB_WR_FAST_REG_MR;
        f_wr.wr.fast_reg.length = mapping->m_sg.bytes;
        f_wr.wr.fast_reg.rkey = mapping->m_rkey;
        f_wr.wr.fast_reg.page_list = ibmr->page_list;
        f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;
        f_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
        f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
                                IB_ACCESS_REMOTE_READ |
                                IB_ACCESS_REMOTE_WRITE;
        f_wr.wr.fast_reg.iova_start = 0;
        f_wr.send_flags = IB_SEND_SIGNALED;

        failed_wr = &f_wr;
        ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
        BUG_ON(failed_wr != &f_wr);
        if (ret && printk_ratelimit())
                printk(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
                        __func__, __LINE__, ret);
        return ret;
}

static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
{
        struct ib_send_wr s_wr, *failed_wr;
        int ret = 0;

        if (!ibmr->cm_id->qp || !ibmr->mr)
                goto out;

        memset(&s_wr, 0, sizeof(s_wr));
        s_wr.wr_id = RDS_IW_LOCAL_INV_WR_ID;
        s_wr.opcode = IB_WR_LOCAL_INV;
        s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
        s_wr.send_flags = IB_SEND_SIGNALED;

        failed_wr = &s_wr;
        ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
        if (ret && printk_ratelimit()) {
                printk(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
                        __func__, __LINE__, ret);
                goto out;
        }
out:
        return ret;
}

static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
                              struct rds_iw_mr *ibmr,
                              struct scatterlist *sg,
                              unsigned int sg_len)
{
        struct rds_iw_device *rds_iwdev = pool->device;
        struct rds_iw_mapping *mapping = &ibmr->mapping;
        u64 *dma_pages;
        int i, ret = 0;

        rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);

        dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
        if (IS_ERR(dma_pages)) {
                ret = PTR_ERR(dma_pages);
                dma_pages = NULL;
                goto out;
        }

        if (mapping->m_sg.dma_len > pool->max_message_size) {
                ret = -EMSGSIZE;
                goto out;
        }

        for (i = 0; i < mapping->m_sg.dma_npages; ++i)
                ibmr->page_list->page_list[i] = dma_pages[i];

        ret = rds_iw_rdma_build_fastreg(mapping);
        if (ret)
                goto out;

        rds_iw_stats_inc(s_iw_rdma_mr_used);

out:
        kfree(dma_pages);

        return ret;
}

/*
 * "Free" a fastreg MR.
 */
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
                                struct rds_iw_mr *ibmr)
{
        unsigned long flags;
        int ret;

        if (!ibmr->mapping.m_sg.dma_len)
                return;

        ret = rds_iw_rdma_fastreg_inv(ibmr);
        if (ret)
                return;

        /* Try to post the LOCAL_INV WR to the queue. */
        spin_lock_irqsave(&pool->list_lock, flags);

        list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
        atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);

        spin_unlock_irqrestore(&pool->list_lock, flags);
}
827 | |
828 | static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, |
829 | struct list_head *unmap_list, |
830 | struct list_head *kill_list) |
831 | { |
832 | struct rds_iw_mapping *mapping, *next; |
833 | unsigned int ncleaned = 0; |
834 | LIST_HEAD(laundered); |
835 | |
836 | /* Batched invalidation of fastreg MRs. |
837 | * Why do we do it this way, even though we could pipeline unmap |
838 | * and remap? The reason is the application semantics - when the |
839 | * application requests an invalidation of MRs, it expects all |
840 | * previously released R_Keys to become invalid. |
841 | * |
842 | * If we implement MR reuse naively, we risk memory corruption |
843 | * (this has actually been observed). So the default behavior |
844 | * requires that a MR goes through an explicit unmap operation before |
845 | * we can reuse it again. |
846 | * |
847 | * We could probably improve on this a little, by allowing immediate |
848 | * reuse of a MR on the same socket (eg you could add small |
849 | * cache of unused MRs to strct rds_socket - GET_MR could grab one |
850 | * of these without requiring an explicit invalidate). |
851 | */ |
852 | while (!list_empty(unmap_list)) { |
853 | unsigned long flags; |
854 | |
855 | spin_lock_irqsave(&pool->list_lock, flags); |
856 | list_for_each_entry_safe(mapping, next, unmap_list, m_list) { |
857 | list_move(&mapping->m_list, &laundered); |
858 | ncleaned++; |
859 | } |
860 | spin_unlock_irqrestore(&pool->list_lock, flags); |
861 | } |
862 | |
863 | /* Move all laundered mappings back to the unmap list. |
864 | * We do not kill any WRs right now - it doesn't seem the |
865 | * fastreg API has a max_remap limit. */ |
866 | list_splice_init(&laundered, unmap_list); |
867 | |
868 | return ncleaned; |
869 | } |
870 | |
871 | static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, |
872 | struct rds_iw_mr *ibmr) |
873 | { |
874 | if (ibmr->page_list) |
875 | ib_free_fast_reg_page_list(ibmr->page_list); |
876 | if (ibmr->mr) |
877 | ib_dereg_mr(ibmr->mr); |
878 | } |
879 |