/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "rds.h"
#include "ib.h"

/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
        conn->c_version = version;
}

/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        if (rds_ib_sysctl_flow_control && credits != 0) {
                /* We're doing flow control */
                ic->i_flowctl = 1;
                rds_ib_send_add_credits(conn, credits);
        } else {
                ic->i_flowctl = 0;
        }
}

/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
        int ret;

        attr->min_rnr_timer = IB_RNR_TIMER_000_32;
        ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
        if (ret)
                printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
        const struct rds_ib_connect_private *dp = NULL;
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_device *rds_ibdev;
        struct ib_qp_attr qp_attr;
        int err;

        if (event->param.conn.private_data_len >= sizeof(*dp)) {
                dp = event->param.conn.private_data;

                /* make sure it isn't empty data */
                if (dp->dp_protocol_major) {
                        rds_ib_set_protocol(conn,
                                RDS_PROTOCOL(dp->dp_protocol_major,
                                             dp->dp_protocol_minor));
                        rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
                }
        }

        printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
               &conn->c_faddr,
               RDS_PROTOCOL_MAJOR(conn->c_version),
               RDS_PROTOCOL_MINOR(conn->c_version),
               ic->i_flowctl ? ", flow control" : "");

        /*
         * Init rings and fill recv. This needs to wait until protocol
         * negotiation is complete, since the ring layout is different
         * from 3.0 to 3.1.
         */
        rds_ib_send_init_ring(ic);
        rds_ib_recv_init_ring(ic);
        /* Post receive buffers - as a side effect, this will update
         * the posted credit count. */
        rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);

        /* Tune RNR behavior */
        rds_ib_tune_rnr(ic, &qp_attr);

        qp_attr.qp_state = IB_QPS_RTS;
        err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
        if (err)
                printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

        /* update ib_device with this local ipaddr & conn */
        rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
        err = rds_ib_update_ipaddr(rds_ibdev, conn->c_laddr);
        if (err)
                printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n", err);
        rds_ib_add_conn(rds_ibdev, conn);

        /* If the peer gave us the last packet it saw, process this as if
         * we had received a regular ACK. */
        if (dp && dp->dp_ack_seq)
                rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

        rds_connect_complete(conn);
}

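/*
 * Fill in the rdma_conn_param and, if a private data struct is given,
 * the RDS private data we exchange during connection setup: our
 * addresses, the protocol version we offer, a piggybacked ACK sequence
 * and any receive credits we can advertise.
 */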
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
                                      struct rdma_conn_param *conn_param,
                                      struct rds_ib_connect_private *dp,
                                      u32 protocol_version)
{
        memset(conn_param, 0, sizeof(struct rdma_conn_param));
        /* XXX tune these? */
        conn_param->responder_resources = 1;
        conn_param->initiator_depth = 1;
        conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
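        /* 7 is the "smallest infinite number" - see rds_ib_tune_rnr() above */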
        conn_param->rnr_retry_count = 7;

        if (dp) {
                struct rds_ib_connection *ic = conn->c_transport_data;

                memset(dp, 0, sizeof(*dp));
                dp->dp_saddr = conn->c_laddr;
                dp->dp_daddr = conn->c_faddr;
                dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
                dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
                dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
                dp->dp_ack_seq = rds_ib_piggyb_ack(ic);

                /* Advertise flow control */
                if (ic->i_flowctl) {
                        unsigned int credits;

                        credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
                        dp->dp_credit = cpu_to_be32(credits);
                        atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
                }

                conn_param->private_data = dp;
                conn_param->private_data_len = sizeof(*dp);
        }
}

static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
        rdsdebug("event %u data %p\n", event->event, data);
}

static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
        struct rds_connection *conn = data;
        struct rds_ib_connection *ic = conn->c_transport_data;

        rdsdebug("conn %p ic %p event %u\n", conn, ic, event->event);

        switch (event->event) {
        case IB_EVENT_COMM_EST:
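                /* The first packet arrived before the CM's RTU did;
                 * let the CM know the connection is established. */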
                rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
                break;
        default:
                rds_ib_conn_error(conn, "RDS/IB: Fatal QP Event %u "
                        "- connection %pI4->%pI4, reconnecting\n",
                        event->event, &conn->c_laddr, &conn->c_faddr);
                break;
        }
}

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_device *dev = ic->i_cm_id->device;
        struct ib_qp_init_attr attr;
        struct rds_ib_device *rds_ibdev;
        int ret;

        /* rds_ib_add_one creates a rds_ib_device object per IB device,
         * and allocates a protection domain, memory range and FMR pool
         * for each.  If that fails for any reason, it will not register
         * the rds_ibdev at all.
         */
        rds_ibdev = ib_get_client_data(dev, &rds_ib_client);
        if (rds_ibdev == NULL) {
                if (printk_ratelimit())
                        printk(KERN_NOTICE "RDS/IB: No client_data for device %s\n",
                               dev->name);
                return -EOPNOTSUPP;
        }

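        /* The + 1 leaves room for the single ack message; shrink the
         * rings if the HCA can't post that many work requests. */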
        if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
                rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
        if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
                rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

        /* Protection domain and memory range */
        ic->i_pd = rds_ibdev->pd;
        ic->i_mr = rds_ibdev->mr;

        ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler,
                                     rds_ib_cq_event_handler, conn,
                                     ic->i_send_ring.w_nr + 1, 0);
        if (IS_ERR(ic->i_send_cq)) {
                ret = PTR_ERR(ic->i_send_cq);
                ic->i_send_cq = NULL;
                rdsdebug("ib_create_cq send failed: %d\n", ret);
                goto out;
        }

        ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler,
                                     rds_ib_cq_event_handler, conn,
                                     ic->i_recv_ring.w_nr, 0);
        if (IS_ERR(ic->i_recv_cq)) {
                ret = PTR_ERR(ic->i_recv_cq);
                ic->i_recv_cq = NULL;
                rdsdebug("ib_create_cq recv failed: %d\n", ret);
                goto out;
        }

        ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
        if (ret) {
                rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
                goto out;
        }

        ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
        if (ret) {
                rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
                goto out;
        }

        /* XXX negotiate max send/recv with remote? */
        memset(&attr, 0, sizeof(attr));
        attr.event_handler = rds_ib_qp_event_handler;
        attr.qp_context = conn;
        /* + 1 to allow for the single ack message */
        attr.cap.max_send_wr = ic->i_send_ring.w_nr + 1;
        attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
        attr.cap.max_send_sge = rds_ibdev->max_sge;
        attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;
        attr.send_cq = ic->i_send_cq;
        attr.recv_cq = ic->i_recv_cq;

        /*
         * XXX this can fail if max_*_wr is too large? Are we supposed
         * to back off until we get a value that the hardware can support?
         */
        ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
        if (ret) {
                rdsdebug("rdma_create_qp failed: %d\n", ret);
                goto out;
        }

        ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
                                                ic->i_send_ring.w_nr *
                                                sizeof(struct rds_header),
                                                &ic->i_send_hdrs_dma, GFP_KERNEL);
        if (ic->i_send_hdrs == NULL) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent send failed\n");
                goto out;
        }

        ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
                                                ic->i_recv_ring.w_nr *
                                                sizeof(struct rds_header),
                                                &ic->i_recv_hdrs_dma, GFP_KERNEL);
        if (ic->i_recv_hdrs == NULL) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent recv failed\n");
                goto out;
        }

        ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
                                          &ic->i_ack_dma, GFP_KERNEL);
        if (ic->i_ack == NULL) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent ack failed\n");
                goto out;
        }

        ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
        if (ic->i_sends == NULL) {
                ret = -ENOMEM;
                rdsdebug("send allocation failed\n");
                goto out;
        }
        memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));

        ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
        if (ic->i_recvs == NULL) {
                ret = -ENOMEM;
                rdsdebug("recv allocation failed\n");
                goto out;
        }
        memset(ic->i_recvs, 0, ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));

        rds_ib_recv_init_ack(ic);

        rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
                 ic->i_send_cq, ic->i_recv_cq);

out:
        return ret;
}

static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
        const struct rds_ib_connect_private *dp = event->param.conn.private_data;
        u16 common;
        u32 version = 0;

        /*
         * rdma_cm private data is odd - when there is any private data in the
         * request, we will be given a pretty large buffer without telling us the
         * original size. The only way to tell the difference is by looking at
         * the contents, which are initialized to zero.
         * If the protocol version fields aren't set, this is a connection attempt
         * from an older version. This could be 3.0 or 2.0 - we can't tell.
         * We really should have changed this for OFED 1.3 :-(
         */

        /* Be paranoid. RDS always has privdata */
        if (!event->param.conn.private_data_len) {
                printk(KERN_NOTICE "RDS incoming connection has no private data, "
                       "rejecting\n");
                return 0;
        }

        /* Even if len is crap *now* I still want to check it. -ASG */
        if (event->param.conn.private_data_len < sizeof(*dp) ||
            dp->dp_protocol_major == 0)
                return RDS_PROTOCOL_3_0;

        common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
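        /* Pick the highest minor version supported by both sides: the
         * loop counts up to the most significant bit of the common mask. */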
        if (dp->dp_protocol_major == 3 && common) {
                version = RDS_PROTOCOL_3_0;
                while ((common >>= 1) != 0)
                        version++;
        } else if (printk_ratelimit()) {
                printk(KERN_NOTICE "RDS: Connection from %pI4 using "
                       "incompatible protocol version %u.%u\n",
                       &dp->dp_saddr,
                       dp->dp_protocol_major,
                       dp->dp_protocol_minor);
        }
        return version;
}

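/*
 * Handle an incoming connection request (the passive side). Validate
 * the peer's protocol version, create or reuse the rds_connection, set
 * up the QP and accept. Returns nonzero if the caller should destroy
 * the cm_id.
 */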
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
                             struct rdma_cm_event *event)
{
        __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
        __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
        const struct rds_ib_connect_private *dp = event->param.conn.private_data;
        struct rds_ib_connect_private dp_rep;
        struct rds_connection *conn = NULL;
        struct rds_ib_connection *ic = NULL;
        struct rdma_conn_param conn_param;
        u32 version;
        int err, destroy = 1;

        /* Check whether the remote protocol version matches ours. */
        version = rds_ib_protocol_compatible(event);
        if (!version)
                goto out;

        rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid "
                 "0x%llx\n", &dp->dp_saddr, &dp->dp_daddr,
                 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
                 (unsigned long long)be64_to_cpu(lguid),
                 (unsigned long long)be64_to_cpu(fguid));

        conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_ib_transport,
                               GFP_KERNEL);
        if (IS_ERR(conn)) {
                rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
                conn = NULL;
                goto out;
        }

        /*
         * The connection request may occur while the previous connection
         * still exists, e.g. in case of failover. But as connections may
         * be initiated simultaneously by both hosts, we have a random
         * backoff mechanism - see the comment above rds_queue_reconnect().
         */
        mutex_lock(&conn->c_cm_lock);
        if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
                if (rds_conn_state(conn) == RDS_CONN_UP) {
                        rdsdebug("incoming connect while connecting\n");
                        rds_conn_drop(conn);
                        rds_ib_stats_inc(s_ib_listen_closed_stale);
                } else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
                        /* Wait and see - our connect may still be succeeding */
                        rds_ib_stats_inc(s_ib_connect_raced);
                }
                mutex_unlock(&conn->c_cm_lock);
                goto out;
        }

        ic = conn->c_transport_data;

        rds_ib_set_protocol(conn, version);
        rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

        /* If the peer gave us the last packet it saw, process this as if
         * we had received a regular ACK. */
        if (dp->dp_ack_seq)
                rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

        BUG_ON(cm_id->context);
        BUG_ON(ic->i_cm_id);

        ic->i_cm_id = cm_id;
        cm_id->context = conn;

        /* We got halfway through setting up the ib_connection, if we
         * fail now, we have to take the long route out of this mess. */
        destroy = 0;

        err = rds_ib_setup_qp(conn);
        if (err) {
                rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
                goto out;
        }

        rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version);

        /* rdma_accept() calls rdma_reject() internally if it fails */
        err = rdma_accept(cm_id, &conn_param);
        mutex_unlock(&conn->c_cm_lock);
        if (err) {
                rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);
                goto out;
        }

        return 0;

out:
        rdma_reject(cm_id, NULL, 0);
        return destroy;
}

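/*
 * Initiate an outgoing connection (the active side): set up the QP and
 * send a connect request carrying our protocol version and credits.
 */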
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
        struct rds_connection *conn = cm_id->context;
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rdma_conn_param conn_param;
        struct rds_ib_connect_private dp;
        int ret;

        /* If the peer doesn't do protocol negotiation, we must
         * default to RDSv3.0 */
        rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
        ic->i_flowctl = rds_ib_sysctl_flow_control;     /* advertise flow control */

        ret = rds_ib_setup_qp(conn);
        if (ret) {
                rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
                goto out;
        }

        rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION);

        ret = rdma_connect(cm_id, &conn_param);
        if (ret)
                rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
        /* Beware - returning non-zero tells the rdma_cm to destroy
         * the cm_id. We should certainly not do it as long as we still
         * "own" the cm_id. */
        if (ret) {
                if (ic->i_cm_id == cm_id)
                        ret = 0;
        }
        return ret;
}

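/*
 * Kick off an outgoing connection: create the cm_id and start address
 * resolution. The rest of the handshake is driven by rdma_cm events.
 */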
int rds_ib_conn_connect(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct sockaddr_in src, dest;
        int ret;

        /* XXX I wonder what effect the port space has */
        /* delegate cm event handler to rdma_transport */
        ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
                                     RDMA_PS_TCP);
        if (IS_ERR(ic->i_cm_id)) {
                ret = PTR_ERR(ic->i_cm_id);
                ic->i_cm_id = NULL;
                rdsdebug("rdma_create_id() failed: %d\n", ret);
                goto out;
        }

        rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

        src.sin_family = AF_INET;
        src.sin_addr.s_addr = (__force u32)conn->c_laddr;
        src.sin_port = (__force u16)htons(0);

        dest.sin_family = AF_INET;
        dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
        dest.sin_port = (__force u16)htons(RDS_PORT);

        ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
                                (struct sockaddr *)&dest,
                                RDS_RDMA_RESOLVE_TIMEOUT_MS);
        if (ret) {
                rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
                         ret);
                rdma_destroy_id(ic->i_cm_id);
                ic->i_cm_id = NULL;
        }

out:
        return ret;
}

/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup.  In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_shutdown(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        int err = 0;

        rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
                 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
                 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

        if (ic->i_cm_id) {
                struct ib_device *dev = ic->i_cm_id->device;

                rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
                err = rdma_disconnect(ic->i_cm_id);
                if (err) {
                        /* Actually this may happen quite frequently, when
                         * an outgoing connect raced with an incoming connect.
                         */
                        rdsdebug("failed to disconnect, cm: %p err %d\n",
                                 ic->i_cm_id, err);
                }

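                /* Wait for the send and recv rings to drain before we
                 * start freeing the buffers they point at. */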
                wait_event(rds_ib_ring_empty_wait,
                           rds_ib_ring_empty(&ic->i_send_ring) &&
                           rds_ib_ring_empty(&ic->i_recv_ring));

                if (ic->i_send_hdrs)
                        ib_dma_free_coherent(dev,
                                             ic->i_send_ring.w_nr *
                                             sizeof(struct rds_header),
                                             ic->i_send_hdrs,
                                             ic->i_send_hdrs_dma);

                if (ic->i_recv_hdrs)
                        ib_dma_free_coherent(dev,
                                             ic->i_recv_ring.w_nr *
                                             sizeof(struct rds_header),
                                             ic->i_recv_hdrs,
                                             ic->i_recv_hdrs_dma);

                if (ic->i_ack)
                        ib_dma_free_coherent(dev, sizeof(struct rds_header),
                                             ic->i_ack, ic->i_ack_dma);

                if (ic->i_sends)
                        rds_ib_send_clear_ring(ic);
                if (ic->i_recvs)
                        rds_ib_recv_clear_ring(ic);

                if (ic->i_cm_id->qp)
                        rdma_destroy_qp(ic->i_cm_id);
                if (ic->i_send_cq)
                        ib_destroy_cq(ic->i_send_cq);
                if (ic->i_recv_cq)
                        ib_destroy_cq(ic->i_recv_cq);
                rdma_destroy_id(ic->i_cm_id);

                /*
                 * Move connection back to the nodev list.
                 */
                if (ic->rds_ibdev)
                        rds_ib_remove_conn(ic->rds_ibdev, conn);

                ic->i_cm_id = NULL;
                ic->i_pd = NULL;
                ic->i_mr = NULL;
                ic->i_send_cq = NULL;
                ic->i_recv_cq = NULL;
                ic->i_send_hdrs = NULL;
                ic->i_recv_hdrs = NULL;
                ic->i_ack = NULL;
        }
        BUG_ON(ic->rds_ibdev);

        /* Clear pending transmit */
        if (ic->i_rm) {
                rds_message_put(ic->i_rm);
                ic->i_rm = NULL;
        }

        /* Clear the ACK state */
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
        atomic64_set(&ic->i_ack_next, 0);
#else
        ic->i_ack_next = 0;
#endif
        ic->i_ack_recv = 0;

        /* Clear flow control state */
        ic->i_flowctl = 0;
        atomic_set(&ic->i_credits, 0);

        rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
        rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

        if (ic->i_ibinc) {
                rds_inc_put(&ic->i_ibinc->ii_inc);
                ic->i_ibinc = NULL;
        }

        vfree(ic->i_sends);
        ic->i_sends = NULL;
        vfree(ic->i_recvs);
        ic->i_recvs = NULL;
}

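/*
 * Allocate per-connection transport data. The connection starts out on
 * the nodev list; it moves to a device's list once connection setup
 * binds it to an IB device (see rds_ib_add_conn()).
 */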
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
        struct rds_ib_connection *ic;
        unsigned long flags;

        /* XXX too lazy? */
        ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL);
        if (ic == NULL)
                return -ENOMEM;

        INIT_LIST_HEAD(&ic->ib_node);
        tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
                     (unsigned long) ic);
        mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
        spin_lock_init(&ic->i_ack_lock);
#endif

        /*
         * rds_ib_conn_shutdown() waits for these to be emptied so they
         * must be initialized before it can be called.
         */
        rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
        rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

        ic->conn = conn;
        conn->c_transport_data = ic;

        spin_lock_irqsave(&ib_nodev_conns_lock, flags);
        list_add_tail(&ic->ib_node, &ib_nodev_conns);
        spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

        rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
        return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
        struct rds_ib_connection *ic = arg;
        spinlock_t *lock_ptr;

        rdsdebug("ic %p\n", ic);

        /*
         * Conn is either on a dev's list or on the nodev list.
         * A race with shutdown() or connect() would cause problems
         * (since rds_ibdev would change) but that should never happen.
         */
        lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

        spin_lock_irq(lock_ptr);
        list_del(&ic->ib_node);
        spin_unlock_irq(lock_ptr);

        kfree(ic);
}

/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
        va_list ap;

        rds_conn_drop(conn);

        va_start(ap, fmt);
        vprintk(fmt, ap);
        va_end(ap);
}