/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>

#include "rds.h"
#include "rdma.h"
#include "ib.h"

static void rds_ib_send_rdma_complete(struct rds_message *rm,
				      int wc_status)
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	rds_rdma_send_complete(rm, notify_status);
}

static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rds_rdma_op *op)
{
	if (op->r_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->r_sg, op->r_nents,
				op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->r_mapped = 0;
	}
}

static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
				 struct rds_ib_send_work *send,
				 int wc_status)
{
	struct rds_message *rm = send->s_rm;

	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

	ib_dma_unmap_sg(ic->i_cm_id->device,
			rm->m_sg, rm->m_nents,
			DMA_TO_DEVICE);

	if (rm->m_rdma_op != NULL) {
		rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);

		/* If the user asked for a completion notification on this
		 * message, we can implement three different semantics:
		 * 1. Notify when we receive the ACK on the RDS message
		 *    that was queued with the RDMA. This provides reliable
		 *    notification of RDMA status at the expense of a one-way
		 *    packet delay.
		 * 2. Notify when the IB stack gives us the completion event for
		 *    the RDMA operation.
		 * 3. Notify when the IB stack gives us the completion event for
		 *    the accompanying RDS messages.
		 * Here, we implement approach #3. To implement approach #2,
		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
		 * don't call rds_rdma_send_complete at all, and fall back to the notify
		 * handling in the ACK processing code.
		 *
		 * Note: There's no need to explicitly sync any RDMA buffers using
		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
		 * operation itself unmapped the RDMA buffers, which takes care
		 * of syncing.
		 */
		rds_ib_send_rdma_complete(rm, wc_status);

		if (rm->m_rdma_op->r_write)
			rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
		else
			rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
	}

	/* If anyone waited for this message to get flushed out, wake
	 * them up now */
	rds_message_unmapped(rm);

	rds_message_put(rm);
	send->s_rm = NULL;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_rm = NULL;
		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.num_sge = 1;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.send_flags = 0;
		send->s_wr.ex.imm_data = 0;

		sge = rds_ib_data_sge(ic, send->s_sge);
		sge->lkey = ic->i_mr->lkey;

		sge = rds_ib_header_sge(ic, send->s_sge);
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;
	}
}
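
/*
 * A minimal sketch, not taken from this file: the header SGEs above
 * assume ic->i_send_hdrs / ic->i_send_hdrs_dma are a DMA-coherent array
 * of struct rds_header, one per ring slot, presumably set up at
 * connection time along the lines of:
 *
 *	ic->i_send_hdrs = ib_dma_alloc_coherent(ic->i_cm_id->device,
 *			ic->i_send_ring.w_nr * sizeof(struct rds_header),
 *			&ic->i_send_hdrs_dma, GFP_KERNEL);
 *
 * which is what lets sge->addr be computed by simple arithmetic on
 * i_send_hdrs_dma.
 */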

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
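		/* 0xdead is the sentinel that the send completion handler
		 * stamps into retired work requests; there is nothing left
		 * to unmap for those. */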
		if (send->s_wr.opcode == 0xdead)
			continue;
		if (send->s_rm)
			rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
		if (send->s_op)
			rds_ib_send_unmap_rdma(ic, send->s_op);
	}
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int ret;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_ib_stats_inc(s_ib_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_tx_cq_event);

		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);
			rds_ib_ack_send_complete(ic);
			continue;
		}

		oldest = rds_ib_ring_oldest(&ic->i_send_ring);

		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];

			/* In the error case, wc.opcode sometimes contains garbage */
			switch (send->s_wr.opcode) {
			case IB_WR_SEND:
				if (send->s_rm)
					rds_ib_send_unmap_rm(ic, send, wc.status);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				/* Nothing to be done - the SG list will be unmapped
				 * when the SEND completes. */
				break;
			default:
				if (printk_ratelimit())
					printk(KERN_NOTICE
					       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
					       __func__, send->s_wr.opcode);
				break;
			}

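			/* Mark this slot retired; rds_ib_send_clear_ring()
			 * skips entries stamped with this sentinel. */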
			send->s_wr.opcode = 0xdead;
			send->s_wr.num_sge = 1;
			if (send->s_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);

			/* If an RDMA operation produced an error, signal this right
			 * away. If we don't, the subsequent SEND that goes with this
			 * RDMA will be canceled with ERR_WFLUSH, and the application
			 * will never learn that the RDMA failed. */
			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
				struct rds_message *rm;

				rm = rds_send_get_message(conn, send->s_op);
				if (rm) {
					if (rm->m_rdma_op)
						rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
					rds_ib_send_rdma_complete(rm, wc.status);
					rds_message_put(rm);
				}
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_ib_ring_free(&ic->i_send_ring, completed);

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_ib_conn_error(conn,
				"send completion on %pI4 "
				"had status %u, disconnecting and reconnecting\n",
				&conn->c_faddr, wc.status);
		}
	}
}
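
/*
 * A minimal sketch, under assumptions: the rds_ib_ring_completed() helper
 * used above lives elsewhere (presumably ib_ring.c); the count it returns
 * is presumably the inclusive modular distance from the oldest outstanding
 * slot to the slot named by wc.wr_id, i.e. something along the lines of:
 *
 *	completed = (wr_id - oldest + ring->w_nr) % ring->w_nr + 1;
 *
 * With w_nr == 256, oldest == 254 and wr_id == 1, that retires
 * (1 - 254 + 256) % 256 + 1 == 4 entries: slots 254, 255, 0 and 1.
 */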

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in an RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * grabs c_send_lock to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants
	 * the posted count advertised regardless of whether any
	 * send credits are available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}
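
/*
 * A minimal sketch, under assumptions: the IB_GET_ and IB_SET_ credit
 * helpers used above are defined elsewhere (presumably ib.h) and pack
 * both counters into the single atomic_t, e.g. send credits in the low
 * 16 bits and posted credits in the high 16 bits:
 *
 *	#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_SET_POST_CREDITS(v)	((v) << 16)
 *	#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_GET_POST_CREDITS(v)	((v) >> 16)
 *
 * A layout like this is what lets the atomic_cmpxchg above update both
 * counters in one shot.
 */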

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

399 | |
400 | /* Decide whether to send an update to the peer now. |
401 | * If we would send a credit update for every single buffer we |
402 | * post, we would end up with an ACK storm (ACK arrives, |
403 | * consumes buffer, we refill the ring, send ACK to remote |
404 | * advertising the newly posted buffer... ad inf) |
405 | * |
406 | * Performance pretty much depends on how often we send |
407 | * credit updates - too frequent updates mean lots of ACKs. |
408 | * Too infrequent updates, and the peer will run out of |
409 | * credits and has to throttle. |
410 | * For the time being, 16 seems to be a good compromise. |
411 | */ |
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

static inline void
rds_ib_xmit_populate_wr(struct rds_ib_connection *ic,
			struct rds_ib_send_work *send, unsigned int pos,
			unsigned long buffer, unsigned int length,
			int send_flags)
{
	struct ib_sge *sge;

	WARN_ON(pos != send - ic->i_sends);

	send->s_wr.send_flags = send_flags;
	send->s_wr.opcode = IB_WR_SEND;
	send->s_wr.num_sge = 2;
	send->s_wr.next = NULL;
	send->s_queued = jiffies;
	send->s_op = NULL;

	if (length != 0) {
		sge = rds_ib_data_sge(ic, send->s_sge);
		sge->addr = buffer;
		sge->length = length;
		sge->lkey = ic->i_mr->lkey;

		sge = rds_ib_header_sge(ic, send->s_sge);
	} else {
		/* We're sending a packet with no payload. There is only
		 * one SGE */
		send->s_wr.num_sge = 1;
		sge = &send->s_sge[0];
	}

	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_mr->lkey;
}
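
/*
 * A minimal sketch, under assumptions: the rds_ib_data_sge() and
 * rds_ib_header_sge() helpers used above are defined elsewhere
 * (presumably ib.h). Given the wire layout described in rds_ib_xmit()
 * below - header transmitted after the payload - they are presumably
 * just index selectors into s_sge:
 *
 *	static inline struct ib_sge *
 *	rds_ib_data_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
 *	{
 *		return &sge[0];
 *	}
 *
 *	static inline struct ib_sge *
 *	rds_ib_header_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
 *	{
 *		return &sge[1];
 *	}
 *
 * The zero-payload case above then reuses s_sge[0] for the header,
 * since num_sge == 1 means only the first SGE is read.
 */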

/*
 * This can be called multiple times for a given message. The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests. We translate the scatterlist into a series
 * of work requests that fragment the message. These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection. This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
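/*
 * A minimal sketch of the fragmentation arithmetic below, assuming the
 * ceil() helper comes from rds.h and is the usual round-up division:
 *
 *	#define ceil(x, y)	(((x) + (y) - 1) / (y))
 *
 * so, for example, a 9000 byte message with RDS_FRAG_SIZE == 4096 asks
 * the ring for ceil(9000, 4096) == 3 work requests.
 */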
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int sent;
	int ret;
	int flow_controlled = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	credit_alloc = work_alloc;
	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled++;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (ic->i_rm == NULL) {
		/*
		printk(KERN_NOTICE "rds_ib_xmit prep msg dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(rm->m_inc.i_hdr.h_dport),
				rm->m_inc.i_hdr.h_flags,
				be32_to_cpu(rm->m_inc.i_hdr.h_len));
		*/
		if (rm->m_nents) {
			rm->m_count = ib_dma_map_sg(dev,
					rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
			if (rm->m_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->m_count = 0;
		}

		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
		rds_message_addref(rm);
		ic->i_rm = rm;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has an RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->m_rdma_op) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
		adv_credits += posted;
		BUG_ON(adv_credits > 255);
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &rm->m_sg[sg];
	sent = 0;
	i = 0;

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
		send_flags = IB_SEND_FENCE;

	/*
	 * We could be copying the header into the unused tail of the page.
	 * That would need to be changed in the future when those pages might
	 * be mapped userspace pages or page cache pages. So instead we always
	 * use a second sge and our long-lived ring of mapped headers. We send
	 * the header after the data so that the data payload can be aligned on
	 * the receiver.
	 */

	/* handle a 0-len message */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
		rds_ib_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
		goto add_header;
	}

	/* if there's data reference it with a chain of work reqs */
	for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
		unsigned int len;

		send = &ic->i_sends[pos];

		len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
		rds_ib_xmit_populate_wr(ic, send, pos,
				ib_sg_dma_address(dev, scat) + off, len,
				send_flags);

		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time
		 * on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		ic->i_unsignaled_bytes -= len;
		if (ic->i_unsignaled_bytes <= 0) {
			ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		sent += len;
		off += len;
		if (off == ib_sg_dma_len(dev, scat)) {
			scat++;
			off = 0;
		}

add_header:
		/* Tack on the header after the data. The header SGE should already
		 * have been set up to point to the right header buffer. */
		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		if (0) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(hdr->h_dport),
				hdr->h_flags,
				be32_to_cpu(hdr->h_len));
		}
		if (adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
	}

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->m_sg[rm->m_count]) {
		prev->s_rm = ic->i_rm;
		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		ic->i_rm = NULL;
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		if (prev->s_rm) {
			ic->i_rm = prev->s_rm;
			prev->s_rm = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->r_remote_addr;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	/* map the message the first time we see it */
	if (!op->r_mapped) {
		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
					op->r_sg, op->r_nents, (op->r_write) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
		if (op->r_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->r_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->r_count, rds_ibdev->max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->r_sg[0];
	sent = 0;
	num_sge = op->r_count;

	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags = IB_SEND_SIGNALED;
		}

		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->r_key;
		send->s_op = op;

		if (num_sge > rds_ibdev->max_sge) {
			send->s_wr.num_sge = rds_ibdev->max_sge;
			num_sge -= rds_ibdev->max_sge;
		} else {
			send->s_wr.num_sge = num_sge;
		}

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				 ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_mr->lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* if we finished the message then send completion owns it */
	if (scat == &op->r_sg[op->r_count])
		prev->s_wr.send_flags = IB_SEND_SIGNALED;

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_wr);
	}

out:
	return ret;
}
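
/*
 * A worked example of the WR/SGE split above, under assumed typical
 * values: an op with op->r_count == 20 mapped entries on a device with
 * max_sge == 8 allocates ceil(20, 8) == 3 work requests, which the loop
 * fills with num_sge of 8, 8 and 4 respectively, all chained through
 * s_wr.next and posted with a single ib_post_send().
 */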

void rds_ib_xmit_complete(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}