/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>

#include "rds.h"
#include "rdma.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup watchdog
 * will kick our shin.
 * Also, it seems fairer not to let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");

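/*
 * Usage sketch (assuming this file is built into the usual rds.ko module):
 * the batch size can only be chosen at load time, e.g.
 * "modprobe rds send_batch_count=128"; afterwards the 0444 permissions
 * expose it read-only at /sys/module/rds/parameters/send_batch_count.
 */
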
/*
 * Reset the send state. Caller must hold c_send_lock when calling here.
 */
void rds_send_reset(struct rds_connection *conn)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;

        if (conn->c_xmit_rm) {
                /* Tell the user the RDMA op is no longer mapped by the
                 * transport. This isn't entirely true (it's flushed out
                 * independently) but as the connection is down, there's
                 * no ongoing RDMA to/from that memory */
                rds_message_unmapped(conn->c_xmit_rm);
                rds_message_put(conn->c_xmit_rm);
                conn->c_xmit_rm = NULL;
        }
        conn->c_xmit_sg = 0;
        conn->c_xmit_hdr_off = 0;
        conn->c_xmit_data_off = 0;
        conn->c_xmit_rdma_sent = 0;

        conn->c_map_queued = 0;

        conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
        conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

        /* Mark messages as retransmissions, and move them to the send q */
        spin_lock_irqsave(&conn->c_lock, flags);
        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
                set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
        }
        list_splice_init(&conn->c_retrans, &conn->c_send_queue);
        spin_unlock_irqrestore(&conn->c_lock, flags);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 * Pro:
 *  - tx queueing is a simple fifo list
 *  - reassembly is optional and easily done by transports per conn
 *  - no per flow rx lookup at all, straight to the socket
 *  - less per-frag memory and wire overhead
 * Con:
 *  - queued acks can be delayed behind large messages
 * Depends:
 *  - small message latency is higher behind queued large messages
 *  - large message latency isn't starved by intervening small sends
 */
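/*
 * Return values, as read from the code below rather than a documented
 * contract: 0 when the send queue was drained, -EAGAIN when the caller
 * should reschedule the send worker (quota exhausted or a queuer raced
 * us), -ENOMEM when another thread already holds c_send_lock, or an
 * error returned by one of the transport xmit hooks.
 */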
int rds_send_xmit(struct rds_connection *conn)
{
        struct rds_message *rm;
        unsigned long flags;
        unsigned int tmp;
        unsigned int send_quota = send_batch_count;
        struct scatterlist *sg;
        int ret = 0;
        int was_empty = 0;
        LIST_HEAD(to_be_dropped);

        /*
         * sendmsg calls here after having queued its message on the send
         * queue. We only have one task feeding the connection at a time. If
         * another thread is already feeding the queue then we back off. This
         * avoids blocking the caller and trading per-connection data between
         * caches per message.
         *
         * The sem holder will issue a retry if they notice that someone queued
         * a message after they stopped walking the send queue but before they
         * dropped the sem.
         */
        if (!mutex_trylock(&conn->c_send_lock)) {
                rds_stats_inc(s_send_sem_contention);
                ret = -ENOMEM;
                goto out;
        }

        if (conn->c_trans->xmit_prepare)
                conn->c_trans->xmit_prepare(conn);

        /*
         * spin trying to push headers and data down the connection until
         * the connection doesn't make forward progress.
         */
        while (--send_quota) {
                /*
                 * See if we need to send a congestion map update if we're
                 * between sending messages. The send_sem protects our sole
                 * use of c_map_offset and _bytes.
                 * Note this is used only by transports that define a special
                 * xmit_cong_map function. For all others, we allocate
                 * a cong_map message and treat it just like any other send.
                 */
                if (conn->c_map_bytes) {
                        ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
                                                           conn->c_map_offset);
                        if (ret <= 0)
                                break;

                        conn->c_map_offset += ret;
                        conn->c_map_bytes -= ret;
                        if (conn->c_map_bytes)
                                continue;
                }

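                /*
                 * Each remaining pass works on the message in c_xmit_rm:
                 * first the RDMA op (if any), then the header, then the
                 * data scatterlist, with progress tracked in the c_xmit_*
                 * offsets so a partial send resumes where it left off.
                 */
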
                /* If we're done sending the current message, clear the
                 * offset and S/G temporaries.
                 */
                rm = conn->c_xmit_rm;
                if (rm != NULL &&
                    conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
                    conn->c_xmit_sg == rm->m_nents) {
                        conn->c_xmit_rm = NULL;
                        conn->c_xmit_sg = 0;
                        conn->c_xmit_hdr_off = 0;
                        conn->c_xmit_data_off = 0;
                        conn->c_xmit_rdma_sent = 0;

                        /* Release the reference to the previous message. */
                        rds_message_put(rm);
                        rm = NULL;
                }

                /* If we're asked to send a cong map update, do so.
                 */
                if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) {
                        if (conn->c_trans->xmit_cong_map != NULL) {
                                conn->c_map_offset = 0;
                                conn->c_map_bytes = sizeof(struct rds_header) +
                                        RDS_CONG_MAP_BYTES;
                                continue;
                        }

                        rm = rds_cong_update_alloc(conn);
                        if (IS_ERR(rm)) {
                                ret = PTR_ERR(rm);
                                break;
                        }

                        conn->c_xmit_rm = rm;
                }

                /*
                 * Grab the next message from the send queue, if there is one.
                 *
                 * c_xmit_rm holds a ref while we're sending this message down
                 * the connection. We can use this ref while holding the
                 * send_sem; rds_send_reset() is serialized with it.
                 */
                if (rm == NULL) {
                        unsigned int len;

                        spin_lock_irqsave(&conn->c_lock, flags);

                        if (!list_empty(&conn->c_send_queue)) {
                                rm = list_entry(conn->c_send_queue.next,
                                                struct rds_message,
                                                m_conn_item);
                                rds_message_addref(rm);

                                /*
                                 * Move the message from the send queue to the
                                 * retransmit list right away.
                                 */
                                list_move_tail(&rm->m_conn_item, &conn->c_retrans);
                        }

                        spin_unlock_irqrestore(&conn->c_lock, flags);

                        if (rm == NULL) {
                                was_empty = 1;
                                break;
                        }

                        /* Unfortunately, the way Infiniband deals with
                         * RDMA to a bad MR key is by moving the entire
                         * queue pair to error state. We could possibly
                         * recover from that, but right now we drop the
                         * connection.
                         * Therefore, we never retransmit messages with RDMA ops.
                         */
                        if (rm->m_rdma_op &&
                            test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
                                spin_lock_irqsave(&conn->c_lock, flags);
                                if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
                                        list_move(&rm->m_conn_item, &to_be_dropped);
                                spin_unlock_irqrestore(&conn->c_lock, flags);
                                rds_message_put(rm);
                                continue;
                        }

                        /* Require an ACK every once in a while */
                        len = ntohl(rm->m_inc.i_hdr.h_len);
                        if (conn->c_unacked_packets == 0 ||
                            conn->c_unacked_bytes < len) {
                                __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

                                conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
                                conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
                                rds_stats_inc(s_send_ack_required);
                        } else {
                                conn->c_unacked_bytes -= len;
                                conn->c_unacked_packets--;
                        }

                        conn->c_xmit_rm = rm;
                }

                /*
                 * Try and send an rdma message. Let's see if we can
                 * keep this simple and require that the transport either
                 * send the whole rdma or none of it.
                 */
                if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) {
                        ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op);
                        if (ret)
                                break;
                        conn->c_xmit_rdma_sent = 1;
                        /* The transport owns the mapped memory for now.
                         * You can't unmap it while it's on the send queue */
                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                }

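                /*
                 * Push the header and data. The transport's ->xmit() returns
                 * how many bytes it consumed; they are charged first against
                 * the remaining header and then against the scatterlist,
                 * advancing c_xmit_hdr_off, c_xmit_data_off and c_xmit_sg.
                 */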
                if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
                    conn->c_xmit_sg < rm->m_nents) {
                        ret = conn->c_trans->xmit(conn, rm,
                                                  conn->c_xmit_hdr_off,
                                                  conn->c_xmit_sg,
                                                  conn->c_xmit_data_off);
                        if (ret <= 0)
                                break;

                        if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
                                tmp = min_t(int, ret,
                                            sizeof(struct rds_header) -
                                            conn->c_xmit_hdr_off);
                                conn->c_xmit_hdr_off += tmp;
                                ret -= tmp;
                        }

                        sg = &rm->m_sg[conn->c_xmit_sg];
                        while (ret) {
                                tmp = min_t(int, ret, sg->length -
                                                      conn->c_xmit_data_off);
                                conn->c_xmit_data_off += tmp;
                                ret -= tmp;
                                if (conn->c_xmit_data_off == sg->length) {
                                        conn->c_xmit_data_off = 0;
                                        sg++;
                                        conn->c_xmit_sg++;
                                        BUG_ON(ret != 0 &&
                                               conn->c_xmit_sg == rm->m_nents);
                                }
                        }
                }
        }

        /* Nuke any messages we decided not to retransmit. */
        if (!list_empty(&to_be_dropped))
                rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);

        if (conn->c_trans->xmit_complete)
                conn->c_trans->xmit_complete(conn);

        /*
         * We might be racing with another sender who queued a message but
         * backed off on noticing that we held the c_send_lock. If we check
         * for queued messages after dropping the sem then either we'll
         * see the queued message or the queuer will get the sem. If we
         * notice the queued message then we trigger an immediate retry.
         *
         * We need to be careful only to do this when we stopped processing
         * the send queue because it was empty. It's the only way we
         * stop processing the loop when the transport hasn't taken
         * responsibility for forward progress.
         */
        mutex_unlock(&conn->c_send_lock);

        if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
                /* We exhausted the send quota, but there's work left to
                 * do. Return and (re-)schedule the send worker.
                 */
                ret = -EAGAIN;
        }

        if (ret == 0 && was_empty) {
                /* A simple bit test would be way faster than taking the
                 * spin lock */
                spin_lock_irqsave(&conn->c_lock, flags);
                if (!list_empty(&conn->c_send_queue)) {
                        rds_stats_inc(s_send_sem_queue_raced);
                        ret = -EAGAIN;
                }
                spin_unlock_irqrestore(&conn->c_lock, flags);
        }
out:
        return ret;
}

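/*
 * Give back send-buffer space for a message leaving the socket's send
 * queue; called with rs_lock held (see the assert below).
 */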
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
        u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        assert_spin_locked(&rs->rs_lock);

        BUG_ON(rs->rs_snd_bytes < len);
        rs->rs_snd_bytes -= len;

        if (rs->rs_snd_bytes == 0)
                rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
                                    is_acked_func is_acked)
{
        if (is_acked)
                return is_acked(rm, ack);
        return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

/*
 * Returns true if there are no messages on the send and retransmit queues
 * with a sequence number lower than the given one, i.e. everything sent
 * before that sequence has been acked. Both queues are kept in sequence
 * order, so only the first entry of each needs to be checked.
 */
int rds_send_acked_before(struct rds_connection *conn, u64 seq)
{
        struct rds_message *rm, *tmp;
        int ret = 1;

        spin_lock(&conn->c_lock);

        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
                        ret = 0;
                break;
        }

        list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
                if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
                        ret = 0;
                break;
        }

        spin_unlock(&conn->c_lock);

        return ret;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
        struct rds_sock *rs = NULL;
        struct rds_rdma_op *ro;
        struct rds_notifier *notifier;

        spin_lock(&rm->m_rs_lock);

        ro = rm->m_rdma_op;
        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
            ro && ro->r_notify && ro->r_notifier) {
                notifier = ro->r_notifier;
                rs = rm->m_rs;
                sock_hold(rds_rs_to_sk(rs));

                notifier->n_status = status;
                spin_lock(&rs->rs_lock);
                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                spin_unlock(&rs->rs_lock);

                ro->r_notifier = NULL;
        }

        spin_unlock(&rm->m_rs_lock);

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
        struct rds_rdma_op *ro;

        ro = rm->m_rdma_op;
        if (ro && ro->r_notify && ro->r_notifier) {
                ro->r_notifier->n_status = status;
                list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
                ro->r_notifier = NULL;
        }

        /* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with a remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
                                         struct rds_rdma_op *op)
{
        struct rds_message *rm, *tmp, *found = NULL;
        unsigned long flags;

        spin_lock_irqsave(&conn->c_lock, flags);

        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                if (rm->m_rdma_op == op) {
                        atomic_inc(&rm->m_refcount);
                        found = rm;
                        goto out;
                }
        }

        list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
                if (rm->m_rdma_op == op) {
                        atomic_inc(&rm->m_refcount);
                        found = rm;
                        break;
                }
        }

out:
        spin_unlock_irqrestore(&conn->c_lock, flags);

        return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it. The list
 * argument must be private to the caller, we must be able to modify it
 * without locks. The messages must have a reference held for their
 * position on the list. This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
        unsigned long flags = 0; /* silence gcc :P */
        struct rds_sock *rs = NULL;
        struct rds_message *rm;

        local_irq_save(flags);
        while (!list_empty(messages)) {
                rm = list_entry(messages->next, struct rds_message,
                                m_conn_item);
                list_del_init(&rm->m_conn_item);

                /*
                 * If we see this flag cleared then we're *sure* that someone
                 * else beat us to removing it from the sock. If we race
                 * with their flag update we'll get the lock and then really
                 * see that the flag has been cleared.
                 *
                 * The message spinlock makes sure nobody clears rm->m_rs
                 * while we're messing with it. It does not prevent the
                 * message from being removed from the socket, though.
                 */
                spin_lock(&rm->m_rs_lock);
                if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
                        goto unlock_and_drop;

                /* Messages are handled per socket: pin each socket and take
                 * its rs_lock while its own messages are being processed,
                 * dropping the previous one when we switch sockets. */
                if (rs != rm->m_rs) {
                        if (rs) {
                                spin_unlock(&rs->rs_lock);
                                rds_wake_sk_sleep(rs);
                                sock_put(rds_rs_to_sk(rs));
                        }
                        rs = rm->m_rs;
                        spin_lock(&rs->rs_lock);
                        sock_hold(rds_rs_to_sk(rs));
                }

                if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
                        struct rds_rdma_op *ro = rm->m_rdma_op;
                        struct rds_notifier *notifier;

                        list_del_init(&rm->m_sock_item);
                        rds_send_sndbuf_remove(rs, rm);

                        if (ro && ro->r_notifier && (status || ro->r_notify)) {
                                notifier = ro->r_notifier;
                                list_add_tail(&notifier->n_list,
                                              &rs->rs_notify_queue);
                                if (!notifier->n_status)
                                        notifier->n_status = status;
                                rm->m_rdma_op->r_notifier = NULL;
                        }
                        rds_message_put(rm);
                        rm->m_rs = NULL;
                }

unlock_and_drop:
                spin_unlock(&rm->m_rs_lock);
                rds_message_put(rm);
        }

        if (rs) {
                spin_unlock(&rs->rs_lock);
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
        local_irq_restore(flags);
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number. Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction. Maybe it should bail if it sees SOCK_DEAD.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
                         is_acked_func is_acked)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&conn->c_lock, flags);

        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                if (!rds_send_is_acked(rm, ack, is_acked))
                        break;

                list_move(&rm->m_conn_item, &list);
                clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        }

        /* order flag updates with spin locks */
        if (!list_empty(&list))
                smp_mb__after_clear_bit();

        spin_unlock_irqrestore(&conn->c_lock, flags);

        /* now remove the messages from the sock list as needed */
        rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

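/*
 * Drop messages queued on this socket whose destination matches @dest
 * (or every queued message when @dest is NULL), completing any pending
 * RDMA notifiers with RDS_RDMA_CANCELED and waking the socket.
 */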
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
        struct rds_message *rm, *tmp;
        struct rds_connection *conn;
        unsigned long flags, flags2;
        LIST_HEAD(list);
        int wake = 0;

        /* get all the messages we're dropping under the rs lock */
        spin_lock_irqsave(&rs->rs_lock, flags);

        list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
                if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
                             dest->sin_port != rm->m_inc.i_hdr.h_dport))
                        continue;

                wake = 1;
                list_move(&rm->m_sock_item, &list);
                rds_send_sndbuf_remove(rs, rm);
                clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);

                /* If this is an RDMA operation, notify the app. */
                __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
        }

        /* order flag updates with the rs lock */
        if (wake)
                smp_mb__after_clear_bit();

        spin_unlock_irqrestore(&rs->rs_lock, flags);

        if (wake)
                rds_wake_sk_sleep(rs);

        conn = NULL;

        /* now remove the messages from the conn list as needed */
        list_for_each_entry(rm, &list, m_sock_item) {
                /* We do this here rather than in the loop above, so that
                 * we don't have to nest m_rs_lock under rs->rs_lock */
                spin_lock_irqsave(&rm->m_rs_lock, flags2);
                rm->m_rs = NULL;
                spin_unlock_irqrestore(&rm->m_rs_lock, flags2);

                /*
                 * If we see this flag cleared then we're *sure* that someone
                 * else beat us to removing it from the conn. If we race
                 * with their flag update we'll get the lock and then really
                 * see that the flag has been cleared.
                 */
                if (!test_bit(RDS_MSG_ON_CONN, &rm->m_flags))
                        continue;

                if (conn != rm->m_inc.i_conn) {
                        if (conn)
                                spin_unlock_irqrestore(&conn->c_lock, flags);
                        conn = rm->m_inc.i_conn;
                        spin_lock_irqsave(&conn->c_lock, flags);
                }

                if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
                        list_del_init(&rm->m_conn_item);
                        rds_message_put(rm);
                }
        }

        if (conn)
                spin_unlock_irqrestore(&conn->c_lock, flags);

        while (!list_empty(&list)) {
                rm = list_entry(list.next, struct rds_message, m_sock_item);
                list_del_init(&rm->m_sock_item);

                rds_message_wait(rm);
                rds_message_put(rm);
        }
}

/*
 * we only want this to fire once so we use the caller's 'queued'. It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
                             struct rds_message *rm, __be16 sport,
                             __be16 dport, int *queued)
{
        unsigned long flags;
        u32 len;

        if (*queued)
                goto out;

        len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        /* this is the only place which holds both the socket's rs_lock
         * and the connection's c_lock */
        spin_lock_irqsave(&rs->rs_lock, flags);

        /*
         * If there were only a little space left in sndbuf and we refused to
         * queue, userspace would get -EAGAIN while poll() still indicated
         * there's send room. That can lead to bad behavior (spinning) if
         * snd_bytes isn't freed up by incoming acks. So we check the *old*
         * value of rs_snd_bytes here, allowing the last msg to exceed the
         * buffer, and poll() then knows no more data can be sent.
         */
        if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
                rs->rs_snd_bytes += len;

                /* let recv side know we are close to send space exhaustion.
                 * This is probably not the optimal way to do it, as this
                 * means we set the flag on *all* messages as soon as our
                 * throughput hits a certain threshold.
                 */
                if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
                        __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

                list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
                set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
                rds_message_addref(rm);
                rm->m_rs = rs;

                /* The code ordering is a little weird, but we're
                   trying to minimize the time we hold c_lock */
                rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
                rm->m_inc.i_conn = conn;
                rds_message_addref(rm);

                spin_lock(&conn->c_lock);
                rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
                list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
                set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
                spin_unlock(&conn->c_lock);

                rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
                         rm, len, rs, rs->rs_snd_bytes,
                         (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

                *queued = 1;
        }

        spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
        return *queued;
}

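/*
 * Walk the SOL_RDS control messages attached to this send and apply them
 * to the message; *allocated_mr tells the caller that RDS_CMSG_RDMA_MAP
 * created an MR that must be torn down again if the send fails later.
 */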
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
                         struct msghdr *msg, int *allocated_mr)
{
        struct cmsghdr *cmsg;
        int ret = 0;

        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_RDS)
                        continue;

                /* As a side effect, RDMA_DEST and RDMA_MAP will set
                 * rm->m_rdma_cookie and rm->m_rdma_mr.
                 */
                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
                        ret = rds_cmsg_rdma_args(rs, rm, cmsg);
                        break;

                case RDS_CMSG_RDMA_DEST:
                        ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
                        break;

                case RDS_CMSG_RDMA_MAP:
                        ret = rds_cmsg_rdma_map(rs, rm, cmsg);
                        if (!ret)
                                *allocated_mr = 1;
                        break;

                default:
                        return -EINVAL;
                }

                if (ret)
                        break;
        }

        return ret;
}

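/*
 * sendmsg() entry point: copy the payload from userspace, find or create
 * the destination connection, apply control messages, queue the message
 * (waiting for send-buffer space unless MSG_DONTWAIT), and kick the send
 * path.
 */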
int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                size_t payload_len)
{
        struct sock *sk = sock->sk;
        struct rds_sock *rs = rds_sk_to_rs(sk);
        struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
        __be32 daddr;
        __be16 dport;
        struct rds_message *rm = NULL;
        struct rds_connection *conn;
        int ret = 0;
        int queued = 0, allocated_mr = 0;
        int nonblock = msg->msg_flags & MSG_DONTWAIT;
        long timeo = sock_rcvtimeo(sk, nonblock);

        /* Mirror Linux UDP's mirroring of BSD error message compatibility */
        /* XXX: Perhaps MSG_MORE someday */
        if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
                printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
                ret = -EOPNOTSUPP;
                goto out;
        }

        if (msg->msg_namelen) {
                /* XXX fail non-unicast destination IPs? */
                if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
                        ret = -EINVAL;
                        goto out;
                }
                daddr = usin->sin_addr.s_addr;
                dport = usin->sin_port;
        } else {
                /* We only care about consistency with ->connect() */
                lock_sock(sk);
                daddr = rs->rs_conn_addr;
                dport = rs->rs_conn_port;
                release_sock(sk);
        }

        /* racing with another thread binding seems ok here */
        if (daddr == 0 || rs->rs_bound_addr == 0) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }

        rm = rds_message_copy_from_user(msg->msg_iov, payload_len);
        if (IS_ERR(rm)) {
                ret = PTR_ERR(rm);
                rm = NULL;
                goto out;
        }

        rm->m_daddr = daddr;

        /* rds_conn_create has a spinlock that runs with IRQ off.
         * Caching the conn in the socket helps a lot. */
        if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
                conn = rs->rs_conn;
        else {
                conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
                                                rs->rs_transport,
                                                sock->sk->sk_allocation);
                if (IS_ERR(conn)) {
                        ret = PTR_ERR(conn);
                        goto out;
                }
                rs->rs_conn = conn;
        }

        /* Parse any control messages the user may have included. */
        ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
        if (ret)
                goto out;

        if ((rm->m_rdma_cookie || rm->m_rdma_op) &&
            conn->c_trans->xmit_rdma == NULL) {
                if (printk_ratelimit())
                        printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
                               rm->m_rdma_op, conn->c_trans->xmit_rdma);
                ret = -EOPNOTSUPP;
                goto out;
        }

        /* If the connection is down, trigger a connect. We may
         * have scheduled a delayed reconnect however - in this case
         * we should not interfere.
         */
        if (rds_conn_state(conn) == RDS_CONN_DOWN &&
            !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
                queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

        ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
        if (ret)
                goto out;

        while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
                                  dport, &queued)) {
                rds_stats_inc(s_send_queue_full);
                /* XXX make sure this is reasonable */
                if (payload_len > rds_sk_sndbuf(rs)) {
                        ret = -EMSGSIZE;
                        goto out;
                }
                if (nonblock) {
                        ret = -EAGAIN;
                        goto out;
                }

                /* Sleep until rds_send_queue_rm() can queue the message,
                 * the timeout expires, or a signal arrives. */
                timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
                                                         rds_send_queue_rm(rs, conn, rm,
                                                                           rs->rs_bound_port,
                                                                           dport,
                                                                           &queued),
                                                         timeo);
                rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
                if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
                        continue;

                ret = timeo;
                if (ret == 0)
                        ret = -ETIMEDOUT;
                goto out;
        }

        /*
         * By now we've committed to the send. We reuse rds_send_worker()
         * to retry sends in the rds thread if the transport asks us to.
         */
        rds_stats_inc(s_send_queued);

        if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
                rds_send_worker(&conn->c_send_w.work);

        rds_message_put(rm);
        return payload_len;

out:
        /* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
         * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
         * or in any other way, we need to destroy the MR again */
        if (allocated_mr)
                rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

        if (rm)
                rds_message_put(rm);
        return ret;
}

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
        struct rds_message *rm;
        unsigned long flags;
        int ret = 0;

        rm = rds_message_alloc(0, GFP_ATOMIC);
        if (rm == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        rm->m_daddr = conn->c_faddr;

        /* If the connection is down, trigger a connect. We may
         * have scheduled a delayed reconnect however - in this case
         * we should not interfere.
         */
        if (rds_conn_state(conn) == RDS_CONN_DOWN &&
            !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
                queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

        ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
        if (ret)
                goto out;

        spin_lock_irqsave(&conn->c_lock, flags);
        list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
        set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        rds_message_addref(rm);
        rm->m_inc.i_conn = conn;

        rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
                                    conn->c_next_tx_seq);
        conn->c_next_tx_seq++;
        spin_unlock_irqrestore(&conn->c_lock, flags);

        rds_stats_inc(s_send_queued);
        rds_stats_inc(s_send_pong);

        queue_delayed_work(rds_wq, &conn->c_send_w, 0);
        rds_message_put(rm);
        return 0;

out:
        if (rm)
                rds_message_put(rm);
        return ret;
}