/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "rds.h"
#include "ib.h"

static unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE;
unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;

module_param(fmr_pool_size, int, 0444);
MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA");
module_param(fmr_message_size, int, 0444);
MODULE_PARM_DESC(fmr_message_size, " Max size of an RDMA transfer");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");

/*
 * we have a clumsy combination of RCU and a rwsem protecting this list
 * because it is used both in the get_mr fast path and while blocking in
 * the FMR flushing path.
 */
DECLARE_RWSEM(rds_ib_devices_lock);
struct list_head rds_ib_devices;

/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
LIST_HEAD(ib_nodev_conns);

static void rds_ib_nodev_connect(void)
{
	struct rds_ib_connection *ic;

	spin_lock(&ib_nodev_conns_lock);
	list_for_each_entry(ic, &ib_nodev_conns, ib_node)
		rds_conn_connect_if_down(ic->conn);
	spin_unlock(&ib_nodev_conns_lock);
}

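/*
 * Called when an IB device is going away: drop every connection still
 * attached to it so nothing keeps a disappearing device in use.
 */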
static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_connection *ic;
	unsigned long flags;

	spin_lock_irqsave(&rds_ibdev->spinlock, flags);
	list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
		rds_conn_drop(ic->conn);
	spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}

/*
 * rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references
 * from interrupt context so we push freeing off into a work struct in krdsd.
 */
static void rds_ib_dev_free(struct work_struct *work)
{
	struct rds_ib_ipaddr *i_ipaddr, *i_next;
	struct rds_ib_device *rds_ibdev = container_of(work,
					struct rds_ib_device, free_work);

	if (rds_ibdev->mr_pool)
		rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
	if (rds_ibdev->mr)
		ib_dereg_mr(rds_ibdev->mr);
	if (rds_ibdev->pd)
		ib_dealloc_pd(rds_ibdev->pd);

	list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
		list_del(&i_ipaddr->list);
		kfree(i_ipaddr);
	}

	kfree(rds_ibdev);
}

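/*
 * The final put must not free the device inline: freeing blocks (see
 * rds_ib_dev_free() above) and the last reference can be dropped from
 * interrupt context, so teardown is deferred to krdsd.
 */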
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
{
	BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
	if (atomic_dec_and_test(&rds_ibdev->refcount))
		queue_work(rds_wq, &rds_ibdev->free_work);
}

static void rds_ib_add_one(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;
	struct ib_device_attr *dev_attr;

	/* Only handle IB (no iWARP) devices */
	if (device->node_type != RDMA_NODE_IB_CA)
		return;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		rdsdebug("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
				 ibdev_to_node(device));
	if (!rds_ibdev)
		goto free_attr;

	spin_lock_init(&rds_ibdev->spinlock);
	atomic_set(&rds_ibdev->refcount, 1);
	INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);

	rds_ibdev->max_wrs = dev_attr->max_qp_wr;
	rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);

	rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr ?: 32;
	rds_ibdev->max_fmrs = dev_attr->max_fmr ?
			min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
			fmr_pool_size;

	rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
	rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;

	rds_ibdev->dev = device;
	rds_ibdev->pd = ib_alloc_pd(device);
	if (IS_ERR(rds_ibdev->pd)) {
		rds_ibdev->pd = NULL;
		goto put_dev;
	}

	rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(rds_ibdev->mr)) {
		rds_ibdev->mr = NULL;
		goto put_dev;
	}

	rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
	if (IS_ERR(rds_ibdev->mr_pool)) {
		rds_ibdev->mr_pool = NULL;
		goto put_dev;
	}

	INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
	INIT_LIST_HEAD(&rds_ibdev->conn_list);

	down_write(&rds_ib_devices_lock);
	list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
	up_write(&rds_ib_devices_lock);
	atomic_inc(&rds_ibdev->refcount);

	ib_set_client_data(device, &rds_ib_client, rds_ibdev);
	atomic_inc(&rds_ibdev->refcount);
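	/*
	 * The device now holds one reference for its rds_ib_devices list
	 * entry and one for the client_data pointer; the initial reference
	 * taken at allocation time is dropped at put_dev below.
	 */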

	rds_ib_nodev_connect();

put_dev:
	rds_ib_dev_put(rds_ibdev);
free_attr:
	kfree(dev_attr);
}

/*
 * New connections use this to find the device to associate with the
 * connection. It's not in the fast path so we're not concerned about the
 * performance of the IB call. (As of this writing, it uses an interrupt
 * blocking spinlock to serialize walking a per-device list of all registered
 * clients.)
 *
 * RCU is used to handle incoming connections racing with device teardown.
 * Rather than use a lock to serialize removal from the client_data and
 * getting a new reference, we use an RCU grace period. The destruction
 * path removes the device from client_data and then waits for all RCU
 * readers to finish.
 *
 * A new connection can get NULL from this if it's arriving on a
 * device that is in the process of being removed.
 */
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;

	rcu_read_lock();
	rds_ibdev = ib_get_client_data(device, &rds_ib_client);
	if (rds_ibdev)
		atomic_inc(&rds_ibdev->refcount);
	rcu_read_unlock();
	return rds_ibdev;
}

/*
 * The IB stack is letting us know that a device is going away. This can
 * happen if the underlying HCA driver is removed or if PCI hotplug is removing
 * the PCI function, for example.
 *
 * This can be called at any time and can be racing with any other RDS path.
 */
static void rds_ib_remove_one(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;

	rds_ibdev = ib_get_client_data(device, &rds_ib_client);
	if (!rds_ibdev)
		return;

	rds_ib_dev_shutdown(rds_ibdev);

	/* stop connection attempts from getting a reference to this device. */
	ib_set_client_data(device, &rds_ib_client, NULL);

	down_write(&rds_ib_devices_lock);
	list_del_rcu(&rds_ibdev->list);
	up_write(&rds_ib_devices_lock);

	/*
	 * This synchronize_rcu() waits for readers of both the ib
	 * client data and the devices list to finish before we drop
	 * both of those references.
	 */
	synchronize_rcu();
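	/*
	 * Drop the two references taken in rds_ib_add_one(): one for the
	 * rds_ib_devices list entry and one for the client_data pointer.
	 */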
	rds_ib_dev_put(rds_ibdev);
	rds_ib_dev_put(rds_ibdev);
}

struct ib_client rds_ib_client = {
	.name   = "rds_ib",
	.add    = rds_ib_add_one,
	.remove = rds_ib_remove_one
};

static int rds_ib_conn_info_visitor(struct rds_connection *conn,
				    void *buffer)
{
	struct rds_info_rdma_connection *iinfo = buffer;
	struct rds_ib_connection *ic;

	/* We will only ever look at IB transports */
	if (conn->c_trans != &rds_ib_transport)
		return 0;

	iinfo->src_addr = conn->c_laddr;
	iinfo->dst_addr = conn->c_faddr;

	memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
	memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
	if (rds_conn_state(conn) == RDS_CONN_UP) {
		struct rds_ib_device *rds_ibdev;
		struct rdma_dev_addr *dev_addr;

		ic = conn->c_transport_data;
		dev_addr = &ic->i_cm_id->route.addr.dev_addr;

		rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
		rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);

		rds_ibdev = ic->rds_ibdev;
		iinfo->max_send_wr = ic->i_send_ring.w_nr;
		iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
		iinfo->max_send_sge = rds_ibdev->max_sge;
		rds_ib_get_mr_info(rds_ibdev, iinfo);
	}
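	/* a nonzero return tells the caller that iinfo was filled in */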
	return 1;
}

static void rds_ib_ic_info(struct socket *sock, unsigned int len,
			   struct rds_info_iterator *iter,
			   struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
				rds_ib_conn_info_visitor,
				sizeof(struct rds_info_rdma_connection));
}


/*
 * Early RDS/IB was built to only bind to an address if there is an IPoIB
 * device with that address set.
 *
 * If it were me, I'd advocate for something more flexible. Sending and
 * receiving should be device-agnostic. Transports would try and maintain
 * connections between peers who have messages queued. Userspace would be
 * allowed to influence which paths have priority. We could call userspace
 * asserting this policy "routing".
 */
static int rds_ib_laddr_check(__be32 addr)
{
	int ret;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in sin;

	/* Create a CMA ID and try to bind it. This catches both
	 * IB and iWARP capable NICs.
	 */
	cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = addr;

	/* rdma_bind_addr will only succeed for IB & iWARP devices */
	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
	/* due to this, we will claim to support iWARP devices unless we
	   check node_type. */
	if (ret || !cm_id->device ||
	    cm_id->device->node_type != RDMA_NODE_IB_CA)
		ret = -EADDRNOTAVAIL;

	rdsdebug("addr %pI4 ret %d node type %d\n",
		 &addr, ret,
		 cm_id->device ? cm_id->device->node_type : -1);

	rdma_destroy_id(cm_id);

	return ret;
}

static void rds_ib_unregister_client(void)
{
	ib_unregister_client(&rds_ib_client);
	/* wait for rds_ib_dev_free() to complete */
	flush_workqueue(rds_wq);
}

void rds_ib_exit(void)
{
	rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
	rds_ib_unregister_client();
	rds_ib_destroy_nodev_conns();
	rds_ib_sysctl_exit();
	rds_ib_recv_exit();
	rds_trans_unregister(&rds_ib_transport);
}

struct rds_transport rds_ib_transport = {
	.laddr_check		= rds_ib_laddr_check,
	.xmit_complete		= rds_ib_xmit_complete,
	.xmit			= rds_ib_xmit,
	.xmit_rdma		= rds_ib_xmit_rdma,
	.xmit_atomic		= rds_ib_xmit_atomic,
	.recv			= rds_ib_recv,
	.conn_alloc		= rds_ib_conn_alloc,
	.conn_free		= rds_ib_conn_free,
	.conn_connect		= rds_ib_conn_connect,
	.conn_shutdown		= rds_ib_conn_shutdown,
	.inc_copy_to_user	= rds_ib_inc_copy_to_user,
	.inc_free		= rds_ib_inc_free,
	.cm_initiate_connect	= rds_ib_cm_initiate_connect,
	.cm_handle_connect	= rds_ib_cm_handle_connect,
	.cm_connect_complete	= rds_ib_cm_connect_complete,
	.stats_info_copy	= rds_ib_stats_info_copy,
	.exit			= rds_ib_exit,
	.get_mr			= rds_ib_get_mr,
	.sync_mr		= rds_ib_sync_mr,
	.free_mr		= rds_ib_free_mr,
	.flush_mrs		= rds_ib_flush_mrs,
	.t_owner		= THIS_MODULE,
	.t_name			= "infiniband",
	.t_type			= RDS_TRANS_IB
};

int rds_ib_init(void)
{
	int ret;

	INIT_LIST_HEAD(&rds_ib_devices);

	ret = ib_register_client(&rds_ib_client);
	if (ret)
		goto out;

	ret = rds_ib_sysctl_init();
	if (ret)
		goto out_ibreg;

	ret = rds_ib_recv_init();
	if (ret)
		goto out_sysctl;

	ret = rds_trans_register(&rds_ib_transport);
	if (ret)
		goto out_recv;

	rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);

	goto out;

out_recv:
	rds_ib_recv_exit();
out_sysctl:
	rds_ib_sysctl_exit();
out_ibreg:
	rds_ib_unregister_client();
out:
	return ret;
}

MODULE_LICENSE("GPL");