1 | /* -*- mode: c; c-basic-offset: 8; -*- |
2 | * |
3 | * vim: noexpandtab sw=8 ts=8 sts=0: |
4 | * |
5 | * Copyright (C) 2004 Oracle. All rights reserved. |
6 | * |
7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public |
9 | * License as published by the Free Software Foundation; either |
10 | * version 2 of the License, or (at your option) any later version. |
11 | * |
12 | * This program is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | * General Public License for more details. |
16 | * |
17 | * You should have received a copy of the GNU General Public |
18 | * License along with this program; if not, write to the |
19 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, |
20 | * Boston, MA 02111-1307, USA. |
21 | * |
22 | * ---- |
23 | * |
24 | * Callers for this were originally written against a very simple synchronous |
25 | * API. This implementation reflects those simple callers. Some day I'm sure |
26 | * we'll need to move to a more robust posting/callback mechanism. |
27 | * |
28 | * Transmit calls pass in kernel virtual addresses and block copying them into |
29 | * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting |
30 | * for a failed socket to time out. TX callers can also pass in a pointer to an |
31 | * 'int' which gets filled with an errno off the wire in response to the |
32 | * message they send. |
33 | * |
34 | * Handlers for unsolicited messages are registered. Each socket has a page |
35 | * that incoming data is copied into. First the header, then the data. |
36 | * Handlers are called from only one thread with a reference to this per-socket |
37 | * page. This page is destroyed after the handler call, so it can't be |
38 | * referenced beyond the call. Handlers may block but are discouraged from |
39 | * doing so. |
40 | * |
41 | * Any framing errors (bad magic, large payload lengths) close a connection. |
42 | * |
43 | * Our sock_container holds the state we associate with a socket. Its current |
44 | * framing state is held there as well as the refcounting we do around when it |
45 | * is safe to tear down the socket. The socket is only finally torn down from |
46 | * the container when the container loses all of its references -- so as long |
47 | * as you hold a ref on the container you can trust that the socket is valid |
48 | * for use with kernel socket APIs. |
49 | * |
50 | * Connections are initiated between a pair of nodes when the node with the |
51 | * higher node number gets a heartbeat callback which indicates that the lower |
52 | * numbered node has started heartbeating. The lower numbered node is passive |
53 | * and only accepts the connection if the higher numbered node is heartbeating. |
54 | */ |
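/*
 * Illustrative sketch only, not built as part of this file: the handler
 * model described above, using made-up names (my_handler, my_payload,
 * process_payload).  A handler runs against the per-socket receive page,
 * so it must consume the payload before returning; its return value is
 * carried back to the sender as the message status.
 *
 *	static int my_handler(struct o2net_msg *msg, u32 len, void *data,
 *			      void **ret_data)
 *	{
 *		struct my_payload *p = (struct my_payload *)msg->buf;
 *
 *		process_payload(p);	// copy or consume now, don't keep msg
 *		return 0;		// becomes the sender's status
 *	}
 */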
55 | |
56 | #include <linux/kernel.h> |
57 | #include <linux/jiffies.h> |
58 | #include <linux/slab.h> |
59 | #include <linux/idr.h> |
60 | #include <linux/kref.h> |
61 | #include <linux/net.h> |
62 | #include <net/tcp.h> |
63 | |
64 | #include <asm/uaccess.h> |
65 | |
66 | #include "heartbeat.h" |
67 | #include "tcp.h" |
68 | #include "nodemanager.h" |
69 | #define MLOG_MASK_PREFIX ML_TCP |
70 | #include "masklog.h" |
71 | #include "quorum.h" |
72 | |
73 | #include "tcp_internal.h" |
74 | |
75 | #define SC_NODEF_FMT "node %s (num %u) at %pI4:%u" |
76 | #define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num, \ |
77 | &sc->sc_node->nd_ipv4_address, \ |
78 | ntohs(sc->sc_node->nd_ipv4_port) |
79 | |
80 | /* |
81 | * In the following two log macros, the whitespace after the ',' just |
82 | * before ##args is intentional. Otherwise, gcc 2.95 will eat the |
83 | * previous token if args expands to nothing. |
84 | */ |
85 | #define msglog(hdr, fmt, args...) do { \ |
86 | typeof(hdr) __hdr = (hdr); \ |
87 | mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \ |
88 | "key %08x num %u] " fmt, \ |
89 | be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \ |
90 | be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \ |
91 | be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \ |
92 | be32_to_cpu(__hdr->msg_num) , ##args); \ |
93 | } while (0) |
94 | |
95 | #define sclog(sc, fmt, args...) do { \ |
96 | typeof(sc) __sc = (sc); \ |
97 | mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \ |
98 | "pg_off %zu] " fmt, __sc, \ |
99 | atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \ |
100 | __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \ |
101 | ##args); \ |
102 | } while (0) |
103 | |
104 | static DEFINE_RWLOCK(o2net_handler_lock); |
105 | static struct rb_root o2net_handler_tree = RB_ROOT; |
106 | |
107 | static struct o2net_node o2net_nodes[O2NM_MAX_NODES]; |
108 | |
109 | /* XXX someday we'll need better accounting */ |
110 | static struct socket *o2net_listen_sock = NULL; |
111 | |
112 | /* |
113 | * listen work is only queued by the listening socket callbacks on the |
114 | * o2net_wq. teardown detaches the callbacks before destroying the workqueue. |
115 | * quorum work is queued as sock containers are shut down. stop_listening |
116 | * tears down all the node's sock containers, preventing future shutdowns |
117 | * and queued quorum work, before canceling delayed quorum work and |
118 | * destroying the work queue. |
119 | */ |
120 | static struct workqueue_struct *o2net_wq; |
121 | static struct work_struct o2net_listen_work; |
122 | |
123 | static struct o2hb_callback_func o2net_hb_up, o2net_hb_down; |
124 | #define O2NET_HB_PRI 0x1 |
125 | |
126 | static struct o2net_handshake *o2net_hand; |
127 | static struct o2net_msg *o2net_keep_req, *o2net_keep_resp; |
128 | |
129 | static int o2net_sys_err_translations[O2NET_ERR_MAX] = |
130 | {[O2NET_ERR_NONE] = 0, |
131 | [O2NET_ERR_NO_HNDLR] = -ENOPROTOOPT, |
132 | [O2NET_ERR_OVERFLOW] = -EOVERFLOW, |
133 | [O2NET_ERR_DIED] = -EHOSTDOWN,}; |
134 | |
135 | /* can't quite avoid *all* internal declarations :/ */ |
136 | static void o2net_sc_connect_completed(struct work_struct *work); |
137 | static void o2net_rx_until_empty(struct work_struct *work); |
138 | static void o2net_shutdown_sc(struct work_struct *work); |
139 | static void o2net_listen_data_ready(struct sock *sk, int bytes); |
140 | static void o2net_sc_send_keep_req(struct work_struct *work); |
141 | static void o2net_idle_timer(unsigned long data); |
142 | static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); |
143 | static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc); |
144 | |
145 | #ifdef CONFIG_DEBUG_FS |
146 | static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype, |
147 | u32 msgkey, struct task_struct *task, u8 node) |
148 | { |
149 | INIT_LIST_HEAD(&nst->st_net_debug_item); |
150 | nst->st_task = task; |
151 | nst->st_msg_type = msgtype; |
152 | nst->st_msg_key = msgkey; |
153 | nst->st_node = node; |
154 | } |
155 | |
156 | static void o2net_set_nst_sock_time(struct o2net_send_tracking *nst) |
157 | { |
158 | do_gettimeofday(&nst->st_sock_time); |
159 | } |
160 | |
161 | static void o2net_set_nst_send_time(struct o2net_send_tracking *nst) |
162 | { |
163 | do_gettimeofday(&nst->st_send_time); |
164 | } |
165 | |
166 | static void o2net_set_nst_status_time(struct o2net_send_tracking *nst) |
167 | { |
168 | do_gettimeofday(&nst->st_status_time); |
169 | } |
170 | |
171 | static void o2net_set_nst_sock_container(struct o2net_send_tracking *nst, |
172 | struct o2net_sock_container *sc) |
173 | { |
174 | nst->st_sc = sc; |
175 | } |
176 | |
177 | static void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id) |
178 | { |
179 | nst->st_id = msg_id; |
180 | } |
181 | |
182 | #else /* CONFIG_DEBUG_FS */ |
183 | |
184 | static inline void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype, |
185 | u32 msgkey, struct task_struct *task, u8 node) |
186 | { |
187 | } |
188 | |
189 | static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst) |
190 | { |
191 | } |
192 | |
193 | static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst) |
194 | { |
195 | } |
196 | |
197 | static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst) |
198 | { |
199 | } |
200 | |
201 | static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst, |
202 | struct o2net_sock_container *sc) |
203 | { |
204 | } |
205 | |
206 | static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, |
207 | u32 msg_id) |
208 | { |
209 | } |
210 | |
211 | #endif /* CONFIG_DEBUG_FS */ |
212 | |
213 | static inline int o2net_reconnect_delay(void) |
214 | { |
215 | return o2nm_single_cluster->cl_reconnect_delay_ms; |
216 | } |
217 | |
218 | static inline int o2net_keepalive_delay(void) |
219 | { |
220 | return o2nm_single_cluster->cl_keepalive_delay_ms; |
221 | } |
222 | |
223 | static inline int o2net_idle_timeout(void) |
224 | { |
225 | return o2nm_single_cluster->cl_idle_timeout_ms; |
226 | } |
227 | |
228 | static inline int o2net_sys_err_to_errno(enum o2net_system_error err) |
229 | { |
230 | int trans; |
231 | BUG_ON(err >= O2NET_ERR_MAX); |
232 | trans = o2net_sys_err_translations[err]; |
233 | |
234 | /* Just in case we mess up the translation table above */ |
235 | BUG_ON(err != O2NET_ERR_NONE && trans == 0); |
236 | return trans; |
237 | } |
238 | |
239 | static struct o2net_node * o2net_nn_from_num(u8 node_num) |
240 | { |
241 | BUG_ON(node_num >= ARRAY_SIZE(o2net_nodes)); |
242 | return &o2net_nodes[node_num]; |
243 | } |
244 | |
245 | static u8 o2net_num_from_nn(struct o2net_node *nn) |
246 | { |
247 | BUG_ON(nn == NULL); |
248 | return nn - o2net_nodes; |
249 | } |
250 | |
251 | /* ------------------------------------------------------------ */ |
252 | |
253 | static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw) |
254 | { |
255 | int ret = 0; |
256 | |
257 | do { |
258 | if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) { |
259 | ret = -EAGAIN; |
260 | break; |
261 | } |
262 | spin_lock(&nn->nn_lock); |
263 | ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id); |
264 | if (ret == 0) |
265 | list_add_tail(&nsw->ns_node_item, |
266 | &nn->nn_status_list); |
267 | spin_unlock(&nn->nn_lock); |
268 | } while (ret == -EAGAIN); |
269 | |
270 | if (ret == 0) { |
271 | init_waitqueue_head(&nsw->ns_wq); |
272 | nsw->ns_sys_status = O2NET_ERR_NONE; |
273 | nsw->ns_status = 0; |
274 | } |
275 | |
276 | return ret; |
277 | } |
278 | |
279 | static void o2net_complete_nsw_locked(struct o2net_node *nn, |
280 | struct o2net_status_wait *nsw, |
281 | enum o2net_system_error sys_status, |
282 | s32 status) |
283 | { |
284 | assert_spin_locked(&nn->nn_lock); |
285 | |
286 | if (!list_empty(&nsw->ns_node_item)) { |
287 | list_del_init(&nsw->ns_node_item); |
288 | nsw->ns_sys_status = sys_status; |
289 | nsw->ns_status = status; |
290 | idr_remove(&nn->nn_status_idr, nsw->ns_id); |
291 | wake_up(&nsw->ns_wq); |
292 | } |
293 | } |
294 | |
295 | static void o2net_complete_nsw(struct o2net_node *nn, |
296 | struct o2net_status_wait *nsw, |
297 | u64 id, enum o2net_system_error sys_status, |
298 | s32 status) |
299 | { |
300 | spin_lock(&nn->nn_lock); |
301 | if (nsw == NULL) { |
302 | if (id > INT_MAX) |
303 | goto out; |
304 | |
305 | nsw = idr_find(&nn->nn_status_idr, id); |
306 | if (nsw == NULL) |
307 | goto out; |
308 | } |
309 | |
310 | o2net_complete_nsw_locked(nn, nsw, sys_status, status); |
311 | |
312 | out: |
313 | spin_unlock(&nn->nn_lock); |
314 | return; |
315 | } |
316 | |
317 | static void o2net_complete_nodes_nsw(struct o2net_node *nn) |
318 | { |
319 | struct o2net_status_wait *nsw, *tmp; |
320 | unsigned int num_kills = 0; |
321 | |
322 | assert_spin_locked(&nn->nn_lock); |
323 | |
324 | list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) { |
325 | o2net_complete_nsw_locked(nn, nsw, O2NET_ERR_DIED, 0); |
326 | num_kills++; |
327 | } |
328 | |
329 | mlog(0, "completed %d messages for node %u\n", num_kills, |
330 | o2net_num_from_nn(nn)); |
331 | } |
332 | |
333 | static int o2net_nsw_completed(struct o2net_node *nn, |
334 | struct o2net_status_wait *nsw) |
335 | { |
336 | int completed; |
337 | spin_lock(&nn->nn_lock); |
338 | completed = list_empty(&nsw->ns_node_item); |
339 | spin_unlock(&nn->nn_lock); |
340 | return completed; |
341 | } |
342 | |
343 | /* ------------------------------------------------------------ */ |
344 | |
345 | static void sc_kref_release(struct kref *kref) |
346 | { |
347 | struct o2net_sock_container *sc = container_of(kref, |
348 | struct o2net_sock_container, sc_kref); |
349 | BUG_ON(timer_pending(&sc->sc_idle_timeout)); |
350 | |
351 | sclog(sc, "releasing\n"); |
352 | |
353 | if (sc->sc_sock) { |
354 | sock_release(sc->sc_sock); |
355 | sc->sc_sock = NULL; |
356 | } |
357 | |
358 | o2nm_node_put(sc->sc_node); |
359 | sc->sc_node = NULL; |
360 | |
361 | o2net_debug_del_sc(sc); |
362 | kfree(sc); |
363 | } |
364 | |
365 | static void sc_put(struct o2net_sock_container *sc) |
366 | { |
367 | sclog(sc, "put\n"); |
368 | kref_put(&sc->sc_kref, sc_kref_release); |
369 | } |
370 | static void sc_get(struct o2net_sock_container *sc) |
371 | { |
372 | sclog(sc, "get\n"); |
373 | kref_get(&sc->sc_kref); |
374 | } |
375 | static struct o2net_sock_container *sc_alloc(struct o2nm_node *node) |
376 | { |
377 | struct o2net_sock_container *sc, *ret = NULL; |
378 | struct page *page = NULL; |
379 | |
380 | page = alloc_page(GFP_NOFS); |
381 | sc = kzalloc(sizeof(*sc), GFP_NOFS); |
382 | if (sc == NULL || page == NULL) |
383 | goto out; |
384 | |
385 | kref_init(&sc->sc_kref); |
386 | o2nm_node_get(node); |
387 | sc->sc_node = node; |
388 | |
389 | INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed); |
390 | INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty); |
391 | INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc); |
392 | INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req); |
393 | |
394 | init_timer(&sc->sc_idle_timeout); |
395 | sc->sc_idle_timeout.function = o2net_idle_timer; |
396 | sc->sc_idle_timeout.data = (unsigned long)sc; |
397 | |
398 | sclog(sc, "alloced\n"); |
399 | |
400 | ret = sc; |
401 | sc->sc_page = page; |
402 | o2net_debug_add_sc(sc); |
403 | sc = NULL; |
404 | page = NULL; |
405 | |
406 | out: |
407 | if (page) |
408 | __free_page(page); |
409 | kfree(sc); |
410 | |
411 | return ret; |
412 | } |
413 | |
414 | /* ------------------------------------------------------------ */ |
415 | |
416 | static void o2net_sc_queue_work(struct o2net_sock_container *sc, |
417 | struct work_struct *work) |
418 | { |
419 | sc_get(sc); |
420 | if (!queue_work(o2net_wq, work)) |
421 | sc_put(sc); |
422 | } |
423 | static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, |
424 | struct delayed_work *work, |
425 | int delay) |
426 | { |
427 | sc_get(sc); |
428 | if (!queue_delayed_work(o2net_wq, work, delay)) |
429 | sc_put(sc); |
430 | } |
431 | static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, |
432 | struct delayed_work *work) |
433 | { |
434 | if (cancel_delayed_work(work)) |
435 | sc_put(sc); |
436 | } |
437 | |
438 | static atomic_t o2net_connected_peers = ATOMIC_INIT(0); |
439 | |
440 | int o2net_num_connected_peers(void) |
441 | { |
442 | return atomic_read(&o2net_connected_peers); |
443 | } |
444 | |
445 | static void o2net_set_nn_state(struct o2net_node *nn, |
446 | struct o2net_sock_container *sc, |
447 | unsigned valid, int err) |
448 | { |
449 | int was_valid = nn->nn_sc_valid; |
450 | int was_err = nn->nn_persistent_error; |
451 | struct o2net_sock_container *old_sc = nn->nn_sc; |
452 | |
453 | assert_spin_locked(&nn->nn_lock); |
454 | |
455 | if (old_sc && !sc) |
456 | atomic_dec(&o2net_connected_peers); |
457 | else if (!old_sc && sc) |
458 | atomic_inc(&o2net_connected_peers); |
459 | |
460 | /* the node num comparison and single connect/accept path should stop |
461 | * a non-null sc from being overwritten with another */ |
462 | BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc); |
463 | mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid); |
464 | mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc); |
465 | |
466 | if (was_valid && !valid && err == 0) |
467 | err = -ENOTCONN; |
468 | |
469 | mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n", |
470 | o2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid, |
471 | nn->nn_persistent_error, err); |
472 | |
473 | nn->nn_sc = sc; |
474 | nn->nn_sc_valid = valid ? 1 : 0; |
475 | nn->nn_persistent_error = err; |
476 | |
477 | /* mirrors o2net_tx_can_proceed() */ |
478 | if (nn->nn_persistent_error || nn->nn_sc_valid) |
479 | wake_up(&nn->nn_sc_wq); |
480 | |
481 | if (!was_err && nn->nn_persistent_error) { |
482 | o2quo_conn_err(o2net_num_from_nn(nn)); |
483 | queue_delayed_work(o2net_wq, &nn->nn_still_up, |
484 | msecs_to_jiffies(O2NET_QUORUM_DELAY_MS)); |
485 | } |
486 | |
487 | if (was_valid && !valid) { |
488 | printk(KERN_NOTICE "o2net: no longer connected to " |
489 | SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); |
490 | o2net_complete_nodes_nsw(nn); |
491 | } |
492 | |
493 | if (!was_valid && valid) { |
494 | o2quo_conn_up(o2net_num_from_nn(nn)); |
495 | cancel_delayed_work(&nn->nn_connect_expired); |
496 | printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", |
497 | o2nm_this_node() > sc->sc_node->nd_num ? |
498 | "connected to" : "accepted connection from", |
499 | SC_NODEF_ARGS(sc)); |
500 | } |
501 | |
502 | /* trigger the connecting worker func as long as we're not valid, |
503 | * it will back off if it shouldn't connect. This can be called |
504 | * from node config teardown and so needs to be careful about |
505 | * the work queue actually being up. */ |
506 | if (!valid && o2net_wq) { |
507 | unsigned long delay; |
508 | /* delay if we're within a RECONNECT_DELAY of the |
509 | * last attempt */ |
510 | delay = (nn->nn_last_connect_attempt + |
511 | msecs_to_jiffies(o2net_reconnect_delay())) |
512 | - jiffies; |
513 | if (delay > msecs_to_jiffies(o2net_reconnect_delay())) |
514 | delay = 0; |
515 | mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay); |
516 | queue_delayed_work(o2net_wq, &nn->nn_connect_work, delay); |
517 | |
518 | /* |
519 | * Delay the expired work after idle timeout. |
520 | * |
521 | * We might have lots of failed connection attempts that run |
522 | * through here but we only cancel the connect_expired work when |
523 | * a connection attempt succeeds. So only the first enqueue of |
524 | * the connect_expired work will do anything. The rest will see |
525 | * that it's already queued and do nothing. |
526 | */ |
527 | delay += msecs_to_jiffies(o2net_idle_timeout()); |
528 | queue_delayed_work(o2net_wq, &nn->nn_connect_expired, delay); |
529 | } |
530 | |
531 | /* keep track of the nn's sc ref for the caller */ |
532 | if ((old_sc == NULL) && sc) |
533 | sc_get(sc); |
534 | if (old_sc && (old_sc != sc)) { |
535 | o2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work); |
536 | sc_put(old_sc); |
537 | } |
538 | } |
539 | |
540 | /* see o2net_register_callbacks() */ |
541 | static void o2net_data_ready(struct sock *sk, int bytes) |
542 | { |
543 | void (*ready)(struct sock *sk, int bytes); |
544 | |
545 | read_lock(&sk->sk_callback_lock); |
546 | if (sk->sk_user_data) { |
547 | struct o2net_sock_container *sc = sk->sk_user_data; |
548 | sclog(sc, "data_ready hit\n"); |
549 | do_gettimeofday(&sc->sc_tv_data_ready); |
550 | o2net_sc_queue_work(sc, &sc->sc_rx_work); |
551 | ready = sc->sc_data_ready; |
552 | } else { |
553 | ready = sk->sk_data_ready; |
554 | } |
555 | read_unlock(&sk->sk_callback_lock); |
556 | |
557 | ready(sk, bytes); |
558 | } |
559 | |
560 | /* see o2net_register_callbacks() */ |
561 | static void o2net_state_change(struct sock *sk) |
562 | { |
563 | void (*state_change)(struct sock *sk); |
564 | struct o2net_sock_container *sc; |
565 | |
566 | read_lock(&sk->sk_callback_lock); |
567 | sc = sk->sk_user_data; |
568 | if (sc == NULL) { |
569 | state_change = sk->sk_state_change; |
570 | goto out; |
571 | } |
572 | |
573 | sclog(sc, "state_change to %d\n", sk->sk_state); |
574 | |
575 | state_change = sc->sc_state_change; |
576 | |
577 | switch(sk->sk_state) { |
578 | /* ignore connecting sockets as they make progress */ |
579 | case TCP_SYN_SENT: |
580 | case TCP_SYN_RECV: |
581 | break; |
582 | case TCP_ESTABLISHED: |
583 | o2net_sc_queue_work(sc, &sc->sc_connect_work); |
584 | break; |
585 | default: |
586 | o2net_sc_queue_work(sc, &sc->sc_shutdown_work); |
587 | break; |
588 | } |
589 | out: |
590 | read_unlock(&sk->sk_callback_lock); |
591 | state_change(sk); |
592 | } |
593 | |
594 | /* |
595 | * we register callbacks so we can queue work on events before calling |
596 | * the original callbacks. our callbacks are careful to test user_data |
597 | * to discover when they've raced with o2net_unregister_callbacks(). |
598 | */ |
599 | static void o2net_register_callbacks(struct sock *sk, |
600 | struct o2net_sock_container *sc) |
601 | { |
602 | write_lock_bh(&sk->sk_callback_lock); |
603 | |
604 | /* accepted sockets inherit the old listen socket data ready */ |
605 | if (sk->sk_data_ready == o2net_listen_data_ready) { |
606 | sk->sk_data_ready = sk->sk_user_data; |
607 | sk->sk_user_data = NULL; |
608 | } |
609 | |
610 | BUG_ON(sk->sk_user_data != NULL); |
611 | sk->sk_user_data = sc; |
612 | sc_get(sc); |
613 | |
614 | sc->sc_data_ready = sk->sk_data_ready; |
615 | sc->sc_state_change = sk->sk_state_change; |
616 | sk->sk_data_ready = o2net_data_ready; |
617 | sk->sk_state_change = o2net_state_change; |
618 | |
619 | mutex_init(&sc->sc_send_lock); |
620 | |
621 | write_unlock_bh(&sk->sk_callback_lock); |
622 | } |
623 | |
624 | static int o2net_unregister_callbacks(struct sock *sk, |
625 | struct o2net_sock_container *sc) |
626 | { |
627 | int ret = 0; |
628 | |
629 | write_lock_bh(&sk->sk_callback_lock); |
630 | if (sk->sk_user_data == sc) { |
631 | ret = 1; |
632 | sk->sk_user_data = NULL; |
633 | sk->sk_data_ready = sc->sc_data_ready; |
634 | sk->sk_state_change = sc->sc_state_change; |
635 | } |
636 | write_unlock_bh(&sk->sk_callback_lock); |
637 | |
638 | return ret; |
639 | } |
640 | |
641 | /* |
642 | * this is a little helper that is called by callers who have seen a problem |
643 | * with an sc and want to detach it from the nn if someone hasn't already |
644 | * beaten them to it. if an error is given then the shutdown will be persistent |
645 | * and pending transmits will be canceled. |
646 | */ |
647 | static void o2net_ensure_shutdown(struct o2net_node *nn, |
648 | struct o2net_sock_container *sc, |
649 | int err) |
650 | { |
651 | spin_lock(&nn->nn_lock); |
652 | if (nn->nn_sc == sc) |
653 | o2net_set_nn_state(nn, NULL, 0, err); |
654 | spin_unlock(&nn->nn_lock); |
655 | } |
656 | |
657 | /* |
658 | * This work queue function performs the blocking parts of socket shutdown. A |
659 | * few paths lead here. set_nn_state will trigger this callback if it sees an |
660 | * sc detached from the nn. state_change will also trigger this callback |
661 | * directly when it sees errors. In that case we need to call set_nn_state |
662 | * ourselves as state_change couldn't get the nn_lock and call set_nn_state |
663 | * itself. |
664 | */ |
665 | static void o2net_shutdown_sc(struct work_struct *work) |
666 | { |
667 | struct o2net_sock_container *sc = |
668 | container_of(work, struct o2net_sock_container, |
669 | sc_shutdown_work); |
670 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
671 | |
672 | sclog(sc, "shutting down\n"); |
673 | |
674 | /* drop the callbacks ref and call shutdown only once */ |
675 | if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) { |
676 | /* we shouldn't flush as we're in the thread, the |
677 | * races with pending sc work structs are harmless */ |
678 | del_timer_sync(&sc->sc_idle_timeout); |
679 | o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work); |
680 | sc_put(sc); |
681 | kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR); |
682 | } |
683 | |
684 | /* not fatal so failed connects before the other guy has our |
685 | * heartbeat can be retried */ |
686 | o2net_ensure_shutdown(nn, sc, 0); |
687 | sc_put(sc); |
688 | } |
689 | |
690 | /* ------------------------------------------------------------ */ |
691 | |
692 | static int o2net_handler_cmp(struct o2net_msg_handler *nmh, u32 msg_type, |
693 | u32 key) |
694 | { |
695 | int ret = memcmp(&nmh->nh_key, &key, sizeof(key)); |
696 | |
697 | if (ret == 0) |
698 | ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type)); |
699 | |
700 | return ret; |
701 | } |
702 | |
703 | static struct o2net_msg_handler * |
704 | o2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p, |
705 | struct rb_node **ret_parent) |
706 | { |
707 | struct rb_node **p = &o2net_handler_tree.rb_node; |
708 | struct rb_node *parent = NULL; |
709 | struct o2net_msg_handler *nmh, *ret = NULL; |
710 | int cmp; |
711 | |
712 | while (*p) { |
713 | parent = *p; |
714 | nmh = rb_entry(parent, struct o2net_msg_handler, nh_node); |
715 | cmp = o2net_handler_cmp(nmh, msg_type, key); |
716 | |
717 | if (cmp < 0) |
718 | p = &(*p)->rb_left; |
719 | else if (cmp > 0) |
720 | p = &(*p)->rb_right; |
721 | else { |
722 | ret = nmh; |
723 | break; |
724 | } |
725 | } |
726 | |
727 | if (ret_p != NULL) |
728 | *ret_p = p; |
729 | if (ret_parent != NULL) |
730 | *ret_parent = parent; |
731 | |
732 | return ret; |
733 | } |
734 | |
735 | static void o2net_handler_kref_release(struct kref *kref) |
736 | { |
737 | struct o2net_msg_handler *nmh; |
738 | nmh = container_of(kref, struct o2net_msg_handler, nh_kref); |
739 | |
740 | kfree(nmh); |
741 | } |
742 | |
743 | static void o2net_handler_put(struct o2net_msg_handler *nmh) |
744 | { |
745 | kref_put(&nmh->nh_kref, o2net_handler_kref_release); |
746 | } |
747 | |
748 | /* max_len is protection for the handler func. incoming messages won't |
749 | * be given to the handler if their payload is longer than the max. */ |
750 | int o2net_register_handler(u32 msg_type, u32 key, u32 max_len, |
751 | o2net_msg_handler_func *func, void *data, |
752 | o2net_post_msg_handler_func *post_func, |
753 | struct list_head *unreg_list) |
754 | { |
755 | struct o2net_msg_handler *nmh = NULL; |
756 | struct rb_node **p, *parent; |
757 | int ret = 0; |
758 | |
759 | if (max_len > O2NET_MAX_PAYLOAD_BYTES) { |
760 | mlog(0, "max_len for message handler out of range: %u\n", |
761 | max_len); |
762 | ret = -EINVAL; |
763 | goto out; |
764 | } |
765 | |
766 | if (!msg_type) { |
767 | mlog(0, "no message type provided: %u, %p\n", msg_type, func); |
768 | ret = -EINVAL; |
769 | goto out; |
770 | |
771 | } |
772 | if (!func) { |
773 | mlog(0, "no message handler provided: %u, %p\n", |
774 | msg_type, func); |
775 | ret = -EINVAL; |
776 | goto out; |
777 | } |
778 | |
779 | nmh = kzalloc(sizeof(struct o2net_msg_handler), GFP_NOFS); |
780 | if (nmh == NULL) { |
781 | ret = -ENOMEM; |
782 | goto out; |
783 | } |
784 | |
785 | nmh->nh_func = func; |
786 | nmh->nh_func_data = data; |
787 | nmh->nh_post_func = post_func; |
788 | nmh->nh_msg_type = msg_type; |
789 | nmh->nh_max_len = max_len; |
790 | nmh->nh_key = key; |
791 | /* the tree and list get this ref.. they're both removed in |
792 | * unregister when this ref is dropped */ |
793 | kref_init(&nmh->nh_kref); |
794 | INIT_LIST_HEAD(&nmh->nh_unregister_item); |
795 | |
796 | write_lock(&o2net_handler_lock); |
797 | if (o2net_handler_tree_lookup(msg_type, key, &p, &parent)) |
798 | ret = -EEXIST; |
799 | else { |
800 | rb_link_node(&nmh->nh_node, parent, p); |
801 | rb_insert_color(&nmh->nh_node, &o2net_handler_tree); |
802 | list_add_tail(&nmh->nh_unregister_item, unreg_list); |
803 | |
804 | mlog(ML_TCP, "registered handler func %p type %u key %08x\n", |
805 | func, msg_type, key); |
806 | /* we've had some trouble with handlers seemingly vanishing. */ |
807 | mlog_bug_on_msg(o2net_handler_tree_lookup(msg_type, key, &p, |
808 | &parent) == NULL, |
809 | "couldn't find handler we *just* registerd " |
810 | "for type %u key %08x\n", msg_type, key); |
811 | } |
812 | write_unlock(&o2net_handler_lock); |
813 | if (ret) |
814 | goto out; |
815 | |
816 | out: |
817 | if (ret) |
818 | kfree(nmh); |
819 | |
820 | return ret; |
821 | } |
822 | EXPORT_SYMBOL_GPL(o2net_register_handler); |
823 | |
824 | void o2net_unregister_handler_list(struct list_head *list) |
825 | { |
826 | struct o2net_msg_handler *nmh, *n; |
827 | |
828 | write_lock(&o2net_handler_lock); |
829 | list_for_each_entry_safe(nmh, n, list, nh_unregister_item) { |
830 | mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n", |
831 | nmh->nh_func, nmh->nh_msg_type, nmh->nh_key); |
832 | rb_erase(&nmh->nh_node, &o2net_handler_tree); |
833 | list_del_init(&nmh->nh_unregister_item); |
834 | kref_put(&nmh->nh_kref, o2net_handler_kref_release); |
835 | } |
836 | write_unlock(&o2net_handler_lock); |
837 | } |
838 | EXPORT_SYMBOL_GPL(o2net_unregister_handler_list); |
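/*
 * Caller-side sketch (assumed names, not part of this file): handlers are
 * linked onto a caller-owned list at registration so a whole subsystem can
 * tear its handlers down with a single call.
 *
 *	static LIST_HEAD(my_unreg_list);
 *
 *	ret = o2net_register_handler(MY_MSG_TYPE, MY_KEY, MY_MAX_LEN,
 *				     my_handler, NULL, NULL, &my_unreg_list);
 *	if (ret)
 *		goto bail;
 *	...
 *	o2net_unregister_handler_list(&my_unreg_list);
 */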
839 | |
840 | static struct o2net_msg_handler *o2net_handler_get(u32 msg_type, u32 key) |
841 | { |
842 | struct o2net_msg_handler *nmh; |
843 | |
844 | read_lock(&o2net_handler_lock); |
845 | nmh = o2net_handler_tree_lookup(msg_type, key, NULL, NULL); |
846 | if (nmh) |
847 | kref_get(&nmh->nh_kref); |
848 | read_unlock(&o2net_handler_lock); |
849 | |
850 | return nmh; |
851 | } |
852 | |
853 | /* ------------------------------------------------------------ */ |
854 | |
855 | static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len) |
856 | { |
857 | int ret; |
858 | mm_segment_t oldfs; |
859 | struct kvec vec = { |
860 | .iov_len = len, |
861 | .iov_base = data, |
862 | }; |
863 | struct msghdr msg = { |
864 | .msg_iovlen = 1, |
865 | .msg_iov = (struct iovec *)&vec, |
866 | .msg_flags = MSG_DONTWAIT, |
867 | }; |
868 | |
869 | oldfs = get_fs(); |
870 | set_fs(get_ds()); |
871 | ret = sock_recvmsg(sock, &msg, len, msg.msg_flags); |
872 | set_fs(oldfs); |
873 | |
874 | return ret; |
875 | } |
876 | |
877 | static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec, |
878 | size_t veclen, size_t total) |
879 | { |
880 | int ret; |
881 | mm_segment_t oldfs; |
882 | struct msghdr msg = { |
883 | .msg_iov = (struct iovec *)vec, |
884 | .msg_iovlen = veclen, |
885 | }; |
886 | |
887 | if (sock == NULL) { |
888 | ret = -EINVAL; |
889 | goto out; |
890 | } |
891 | |
892 | oldfs = get_fs(); |
893 | set_fs(get_ds()); |
894 | ret = sock_sendmsg(sock, &msg, total); |
895 | set_fs(oldfs); |
896 | if (ret != total) { |
897 | mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret, |
898 | total); |
899 | if (ret >= 0) |
900 | ret = -EPIPE; /* should be smarter, I bet */ |
901 | goto out; |
902 | } |
903 | |
904 | ret = 0; |
905 | out: |
906 | if (ret < 0) |
907 | mlog(0, "returning error: %d\n", ret); |
908 | return ret; |
909 | } |
910 | |
911 | static void o2net_sendpage(struct o2net_sock_container *sc, |
912 | void *kmalloced_virt, |
913 | size_t size) |
914 | { |
915 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
916 | ssize_t ret; |
917 | |
918 | while (1) { |
919 | mutex_lock(&sc->sc_send_lock); |
920 | ret = sc->sc_sock->ops->sendpage(sc->sc_sock, |
921 | virt_to_page(kmalloced_virt), |
922 | (long)kmalloced_virt & ~PAGE_MASK, |
923 | size, MSG_DONTWAIT); |
924 | mutex_unlock(&sc->sc_send_lock); |
925 | if (ret == size) |
926 | break; |
927 | if (ret == (ssize_t)-EAGAIN) { |
928 | mlog(0, "sendpage of size %zu to " SC_NODEF_FMT |
929 | " returned EAGAIN\n", size, SC_NODEF_ARGS(sc)); |
930 | cond_resched(); |
931 | continue; |
932 | } |
933 | mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT |
934 | " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret); |
935 | o2net_ensure_shutdown(nn, sc, 0); |
936 | break; |
937 | } |
938 | } |
939 | |
940 | static void o2net_init_msg(struct o2net_msg *msg, u16 data_len, u16 msg_type, u32 key) |
941 | { |
942 | memset(msg, 0, sizeof(struct o2net_msg)); |
943 | msg->magic = cpu_to_be16(O2NET_MSG_MAGIC); |
944 | msg->data_len = cpu_to_be16(data_len); |
945 | msg->msg_type = cpu_to_be16(msg_type); |
946 | msg->sys_status = cpu_to_be32(O2NET_ERR_NONE); |
947 | msg->status = 0; |
948 | msg->key = cpu_to_be32(key); |
949 | } |
950 | |
951 | static int o2net_tx_can_proceed(struct o2net_node *nn, |
952 | struct o2net_sock_container **sc_ret, |
953 | int *error) |
954 | { |
955 | int ret = 0; |
956 | |
957 | spin_lock(&nn->nn_lock); |
958 | if (nn->nn_persistent_error) { |
959 | ret = 1; |
960 | *sc_ret = NULL; |
961 | *error = nn->nn_persistent_error; |
962 | } else if (nn->nn_sc_valid) { |
963 | kref_get(&nn->nn_sc->sc_kref); |
964 | |
965 | ret = 1; |
966 | *sc_ret = nn->nn_sc; |
967 | *error = 0; |
968 | } |
969 | spin_unlock(&nn->nn_lock); |
970 | |
971 | return ret; |
972 | } |
973 | |
974 | int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, |
975 | size_t caller_veclen, u8 target_node, int *status) |
976 | { |
977 | int ret; |
978 | struct o2net_msg *msg = NULL; |
979 | size_t veclen, caller_bytes = 0; |
980 | struct kvec *vec = NULL; |
981 | struct o2net_sock_container *sc = NULL; |
982 | struct o2net_node *nn = o2net_nn_from_num(target_node); |
983 | struct o2net_status_wait nsw = { |
984 | .ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item), |
985 | }; |
986 | struct o2net_send_tracking nst; |
987 | |
988 | o2net_init_nst(&nst, msg_type, key, current, target_node); |
989 | |
990 | if (o2net_wq == NULL) { |
991 | mlog(0, "attempt to tx without o2netd running\n"); |
992 | ret = -ESRCH; |
993 | goto out; |
994 | } |
995 | |
996 | if (caller_veclen == 0) { |
997 | mlog(0, "bad kvec array length\n"); |
998 | ret = -EINVAL; |
999 | goto out; |
1000 | } |
1001 | |
1002 | caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen); |
1003 | if (caller_bytes > O2NET_MAX_PAYLOAD_BYTES) { |
1004 | mlog(0, "total payload len %zu too large\n", caller_bytes); |
1005 | ret = -EINVAL; |
1006 | goto out; |
1007 | } |
1008 | |
1009 | if (target_node == o2nm_this_node()) { |
1010 | ret = -ELOOP; |
1011 | goto out; |
1012 | } |
1013 | |
1014 | o2net_debug_add_nst(&nst); |
1015 | |
1016 | o2net_set_nst_sock_time(&nst); |
1017 | |
1018 | wait_event(nn->nn_sc_wq, o2net_tx_can_proceed(nn, &sc, &ret)); |
1019 | if (ret) |
1020 | goto out; |
1021 | |
1022 | o2net_set_nst_sock_container(&nst, sc); |
1023 | |
1024 | veclen = caller_veclen + 1; |
1025 | vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC); |
1026 | if (vec == NULL) { |
1027 | mlog(0, "failed to %zu element kvec!\n", veclen); |
1028 | ret = -ENOMEM; |
1029 | goto out; |
1030 | } |
1031 | |
1032 | msg = kmalloc(sizeof(struct o2net_msg), GFP_ATOMIC); |
1033 | if (!msg) { |
1034 | mlog(0, "failed to allocate a o2net_msg!\n"); |
1035 | ret = -ENOMEM; |
1036 | goto out; |
1037 | } |
1038 | |
1039 | o2net_init_msg(msg, caller_bytes, msg_type, key); |
1040 | |
1041 | vec[0].iov_len = sizeof(struct o2net_msg); |
1042 | vec[0].iov_base = msg; |
1043 | memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec)); |
1044 | |
1045 | ret = o2net_prep_nsw(nn, &nsw); |
1046 | if (ret) |
1047 | goto out; |
1048 | |
1049 | msg->msg_num = cpu_to_be32(nsw.ns_id); |
1050 | o2net_set_nst_msg_id(&nst, nsw.ns_id); |
1051 | |
1052 | o2net_set_nst_send_time(&nst); |
1053 | |
1054 | /* finally, convert the message header to network byte-order |
1055 | * and send */ |
1056 | mutex_lock(&sc->sc_send_lock); |
1057 | ret = o2net_send_tcp_msg(sc->sc_sock, vec, veclen, |
1058 | sizeof(struct o2net_msg) + caller_bytes); |
1059 | mutex_unlock(&sc->sc_send_lock); |
1060 | msglog(msg, "sending returned %d\n", ret); |
1061 | if (ret < 0) { |
1062 | mlog(0, "error returned from o2net_send_tcp_msg=%d\n", ret); |
1063 | goto out; |
1064 | } |
1065 | |
1066 | /* wait on other node's handler */ |
1067 | o2net_set_nst_status_time(&nst); |
1068 | wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw)); |
1069 | |
1070 | /* Note that we avoid overwriting the caller's status return |
1071 | * variable if a system error was reported on the other |
1072 | * side. Callers beware. */ |
1073 | ret = o2net_sys_err_to_errno(nsw.ns_sys_status); |
1074 | if (status && !ret) |
1075 | *status = nsw.ns_status; |
1076 | |
1077 | mlog(0, "woken, returning system status %d, user status %d\n", |
1078 | ret, nsw.ns_status); |
1079 | out: |
1080 | o2net_debug_del_nst(&nst); /* must be before dropping sc and node */ |
1081 | if (sc) |
1082 | sc_put(sc); |
1083 | if (vec) |
1084 | kfree(vec); |
1085 | if (msg) |
1086 | kfree(msg); |
1087 | o2net_complete_nsw(nn, &nsw, 0, 0, 0); |
1088 | return ret; |
1089 | } |
1090 | EXPORT_SYMBOL_GPL(o2net_send_message_vec); |
1091 | |
1092 | int o2net_send_message(u32 msg_type, u32 key, void *data, u32 len, |
1093 | u8 target_node, int *status) |
1094 | { |
1095 | struct kvec vec = { |
1096 | .iov_base = data, |
1097 | .iov_len = len, |
1098 | }; |
1099 | return o2net_send_message_vec(msg_type, key, &vec, 1, |
1100 | target_node, status); |
1101 | } |
1102 | EXPORT_SYMBOL_GPL(o2net_send_message); |
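/*
 * Send-side sketch (assumed names): the return value carries local and
 * transport errors, while *status is only filled in with the remote
 * handler's return value once the message has made the round trip.
 *
 *	int status = 0, ret;
 *
 *	ret = o2net_send_message(MY_MSG_TYPE, MY_KEY, &req, sizeof(req),
 *				 target_node, &status);
 *	if (ret < 0)
 *		return ret;	// couldn't send or no status came back
 *	if (status < 0)
 *		return status;	// remote handler rejected the request
 */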
1103 | |
1104 | static int o2net_send_status_magic(struct socket *sock, struct o2net_msg *hdr, |
1105 | enum o2net_system_error syserr, int err) |
1106 | { |
1107 | struct kvec vec = { |
1108 | .iov_base = hdr, |
1109 | .iov_len = sizeof(struct o2net_msg), |
1110 | }; |
1111 | |
1112 | BUG_ON(syserr >= O2NET_ERR_MAX); |
1113 | |
1114 | /* leave other fields intact from the incoming message, msg_num |
1115 | * in particular */ |
1116 | hdr->sys_status = cpu_to_be32(syserr); |
1117 | hdr->status = cpu_to_be32(err); |
1118 | hdr->magic = cpu_to_be16(O2NET_MSG_STATUS_MAGIC); // twiddle the magic |
1119 | hdr->data_len = 0; |
1120 | |
1121 | msglog(hdr, "about to send status magic %d\n", err); |
1122 | /* hdr has been in host byteorder this whole time */ |
1123 | return o2net_send_tcp_msg(sock, &vec, 1, sizeof(struct o2net_msg)); |
1124 | } |
1125 | |
1126 | /* this returns -errno if the header was unknown or too large, etc. |
1127 | * after this is called the buffer is reused for the next message */ |
1128 | static int o2net_process_message(struct o2net_sock_container *sc, |
1129 | struct o2net_msg *hdr) |
1130 | { |
1131 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
1132 | int ret = 0, handler_status; |
1133 | enum o2net_system_error syserr; |
1134 | struct o2net_msg_handler *nmh = NULL; |
1135 | void *ret_data = NULL; |
1136 | |
1137 | msglog(hdr, "processing message\n"); |
1138 | |
1139 | o2net_sc_postpone_idle(sc); |
1140 | |
1141 | switch(be16_to_cpu(hdr->magic)) { |
1142 | case O2NET_MSG_STATUS_MAGIC: |
1143 | /* special type for returning message status */ |
1144 | o2net_complete_nsw(nn, NULL, |
1145 | be32_to_cpu(hdr->msg_num), |
1146 | be32_to_cpu(hdr->sys_status), |
1147 | be32_to_cpu(hdr->status)); |
1148 | goto out; |
1149 | case O2NET_MSG_KEEP_REQ_MAGIC: |
1150 | o2net_sendpage(sc, o2net_keep_resp, |
1151 | sizeof(*o2net_keep_resp)); |
1152 | goto out; |
1153 | case O2NET_MSG_KEEP_RESP_MAGIC: |
1154 | goto out; |
1155 | case O2NET_MSG_MAGIC: |
1156 | break; |
1157 | default: |
1158 | msglog(hdr, "bad magic\n"); |
1159 | ret = -EINVAL; |
1160 | goto out; |
1161 | break; |
1162 | } |
1163 | |
1164 | /* find a handler for it */ |
1165 | handler_status = 0; |
1166 | nmh = o2net_handler_get(be16_to_cpu(hdr->msg_type), |
1167 | be32_to_cpu(hdr->key)); |
1168 | if (!nmh) { |
1169 | mlog(ML_TCP, "couldn't find handler for type %u key %08x\n", |
1170 | be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key)); |
1171 | syserr = O2NET_ERR_NO_HNDLR; |
1172 | goto out_respond; |
1173 | } |
1174 | |
1175 | syserr = O2NET_ERR_NONE; |
1176 | |
1177 | if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len) |
1178 | syserr = O2NET_ERR_OVERFLOW; |
1179 | |
1180 | if (syserr != O2NET_ERR_NONE) |
1181 | goto out_respond; |
1182 | |
1183 | do_gettimeofday(&sc->sc_tv_func_start); |
1184 | sc->sc_msg_key = be32_to_cpu(hdr->key); |
1185 | sc->sc_msg_type = be16_to_cpu(hdr->msg_type); |
1186 | handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) + |
1187 | be16_to_cpu(hdr->data_len), |
1188 | nmh->nh_func_data, &ret_data); |
1189 | do_gettimeofday(&sc->sc_tv_func_stop); |
1190 | |
1191 | out_respond: |
1192 | /* this destroys the hdr, so don't use it after this */ |
1193 | mutex_lock(&sc->sc_send_lock); |
1194 | ret = o2net_send_status_magic(sc->sc_sock, hdr, syserr, |
1195 | handler_status); |
1196 | mutex_unlock(&sc->sc_send_lock); |
1197 | hdr = NULL; |
1198 | mlog(0, "sending handler status %d, syserr %d returned %d\n", |
1199 | handler_status, syserr, ret); |
1200 | |
1201 | if (nmh) { |
1202 | BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL); |
1203 | if (nmh->nh_post_func) |
1204 | (nmh->nh_post_func)(handler_status, nmh->nh_func_data, |
1205 | ret_data); |
1206 | } |
1207 | |
1208 | out: |
1209 | if (nmh) |
1210 | o2net_handler_put(nmh); |
1211 | return ret; |
1212 | } |
1213 | |
1214 | static int o2net_check_handshake(struct o2net_sock_container *sc) |
1215 | { |
1216 | struct o2net_handshake *hand = page_address(sc->sc_page); |
1217 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
1218 | |
1219 | if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) { |
1220 | mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol " |
1221 | "version %llu but %llu is required, disconnecting\n", |
1222 | SC_NODEF_ARGS(sc), |
1223 | (unsigned long long)be64_to_cpu(hand->protocol_version), |
1224 | O2NET_PROTOCOL_VERSION); |
1225 | |
1226 | /* don't bother reconnecting if it's the wrong version. */ |
1227 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); |
1228 | return -1; |
1229 | } |
1230 | |
1231 | /* |
1232 | * Ensure timeouts are consistent with other nodes, otherwise |
1233 | * we can end up with one node thinking that the other must be down, |
1234 | * but isn't. This can ultimately cause corruption. |
1235 | */ |
1236 | if (be32_to_cpu(hand->o2net_idle_timeout_ms) != |
1237 | o2net_idle_timeout()) { |
1238 | mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of " |
1239 | "%u ms, but we use %u ms locally. disconnecting\n", |
1240 | SC_NODEF_ARGS(sc), |
1241 | be32_to_cpu(hand->o2net_idle_timeout_ms), |
1242 | o2net_idle_timeout()); |
1243 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); |
1244 | return -1; |
1245 | } |
1246 | |
1247 | if (be32_to_cpu(hand->o2net_keepalive_delay_ms) != |
1248 | o2net_keepalive_delay()) { |
1249 | mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of " |
1250 | "%u ms, but we use %u ms locally. disconnecting\n", |
1251 | SC_NODEF_ARGS(sc), |
1252 | be32_to_cpu(hand->o2net_keepalive_delay_ms), |
1253 | o2net_keepalive_delay()); |
1254 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); |
1255 | return -1; |
1256 | } |
1257 | |
1258 | if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) != |
1259 | O2HB_MAX_WRITE_TIMEOUT_MS) { |
1260 | mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of " |
1261 | "%u ms, but we use %u ms locally. disconnecting\n", |
1262 | SC_NODEF_ARGS(sc), |
1263 | be32_to_cpu(hand->o2hb_heartbeat_timeout_ms), |
1264 | O2HB_MAX_WRITE_TIMEOUT_MS); |
1265 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); |
1266 | return -1; |
1267 | } |
1268 | |
1269 | sc->sc_handshake_ok = 1; |
1270 | |
1271 | spin_lock(&nn->nn_lock); |
1272 | /* set valid and queue the idle timers only if it hasn't been |
1273 | * shut down already */ |
1274 | if (nn->nn_sc == sc) { |
1275 | o2net_sc_reset_idle_timer(sc); |
1276 | atomic_set(&nn->nn_timeout, 0); |
1277 | o2net_set_nn_state(nn, sc, 1, 0); |
1278 | } |
1279 | spin_unlock(&nn->nn_lock); |
1280 | |
1281 | /* shift everything up as though it wasn't there */ |
1282 | sc->sc_page_off -= sizeof(struct o2net_handshake); |
1283 | if (sc->sc_page_off) |
1284 | memmove(hand, hand + 1, sc->sc_page_off); |
1285 | |
1286 | return 0; |
1287 | } |
1288 | |
1289 | /* this demuxes the queued rx bytes into header or payload bits and calls |
1290 | * handlers as each full message is read off the socket. it returns -error, |
1291 | * == 0 eof, or > 0 for progress made.*/ |
1292 | static int o2net_advance_rx(struct o2net_sock_container *sc) |
1293 | { |
1294 | struct o2net_msg *hdr; |
1295 | int ret = 0; |
1296 | void *data; |
1297 | size_t datalen; |
1298 | |
1299 | sclog(sc, "receiving\n"); |
1300 | do_gettimeofday(&sc->sc_tv_advance_start); |
1301 | |
1302 | if (unlikely(sc->sc_handshake_ok == 0)) { |
1303 | if(sc->sc_page_off < sizeof(struct o2net_handshake)) { |
1304 | data = page_address(sc->sc_page) + sc->sc_page_off; |
1305 | datalen = sizeof(struct o2net_handshake) - sc->sc_page_off; |
1306 | ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen); |
1307 | if (ret > 0) |
1308 | sc->sc_page_off += ret; |
1309 | } |
1310 | |
1311 | if (sc->sc_page_off == sizeof(struct o2net_handshake)) { |
1312 | o2net_check_handshake(sc); |
1313 | if (unlikely(sc->sc_handshake_ok == 0)) |
1314 | ret = -EPROTO; |
1315 | } |
1316 | goto out; |
1317 | } |
1318 | |
1319 | /* do we need more header? */ |
1320 | if (sc->sc_page_off < sizeof(struct o2net_msg)) { |
1321 | data = page_address(sc->sc_page) + sc->sc_page_off; |
1322 | datalen = sizeof(struct o2net_msg) - sc->sc_page_off; |
1323 | ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen); |
1324 | if (ret > 0) { |
1325 | sc->sc_page_off += ret; |
1326 | /* only swab incoming here.. we can |
1327 | * only get here once as we cross from |
1328 | * being under to over */ |
1329 | if (sc->sc_page_off == sizeof(struct o2net_msg)) { |
1330 | hdr = page_address(sc->sc_page); |
1331 | if (be16_to_cpu(hdr->data_len) > |
1332 | O2NET_MAX_PAYLOAD_BYTES) |
1333 | ret = -EOVERFLOW; |
1334 | } |
1335 | } |
1336 | if (ret <= 0) |
1337 | goto out; |
1338 | } |
1339 | |
1340 | if (sc->sc_page_off < sizeof(struct o2net_msg)) { |
1341 | /* oof, still don't have a header */ |
1342 | goto out; |
1343 | } |
1344 | |
1345 | /* this was swabbed above when we first read it */ |
1346 | hdr = page_address(sc->sc_page); |
1347 | |
1348 | msglog(hdr, "at page_off %zu\n", sc->sc_page_off); |
1349 | |
1350 | /* do we need more payload? */ |
1351 | if (sc->sc_page_off - sizeof(struct o2net_msg) < be16_to_cpu(hdr->data_len)) { |
1352 | /* need more payload */ |
1353 | data = page_address(sc->sc_page) + sc->sc_page_off; |
1354 | datalen = (sizeof(struct o2net_msg) + be16_to_cpu(hdr->data_len)) - |
1355 | sc->sc_page_off; |
1356 | ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen); |
1357 | if (ret > 0) |
1358 | sc->sc_page_off += ret; |
1359 | if (ret <= 0) |
1360 | goto out; |
1361 | } |
1362 | |
1363 | if (sc->sc_page_off - sizeof(struct o2net_msg) == be16_to_cpu(hdr->data_len)) { |
1364 | /* we can only get here once, the first time we read |
1365 | * the payload.. so set ret to progress if the handler |
1366 | * works out. after calling this the message is toast */ |
1367 | ret = o2net_process_message(sc, hdr); |
1368 | if (ret == 0) |
1369 | ret = 1; |
1370 | sc->sc_page_off = 0; |
1371 | } |
1372 | |
1373 | out: |
1374 | sclog(sc, "ret = %d\n", ret); |
1375 | do_gettimeofday(&sc->sc_tv_advance_stop); |
1376 | return ret; |
1377 | } |
1378 | |
1379 | /* this work func is triggered by data ready. it reads until it can read no |
1380 | * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing |
1381 | * our work the work struct will be marked and we'll be called again. */ |
1382 | static void o2net_rx_until_empty(struct work_struct *work) |
1383 | { |
1384 | struct o2net_sock_container *sc = |
1385 | container_of(work, struct o2net_sock_container, sc_rx_work); |
1386 | int ret; |
1387 | |
1388 | do { |
1389 | ret = o2net_advance_rx(sc); |
1390 | } while (ret > 0); |
1391 | |
1392 | if (ret <= 0 && ret != -EAGAIN) { |
1393 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
1394 | sclog(sc, "saw error %d, closing\n", ret); |
1395 | /* not permanent so a failed handshake read can be retried */ |
1396 | o2net_ensure_shutdown(nn, sc, 0); |
1397 | } |
1398 | |
1399 | sc_put(sc); |
1400 | } |
1401 | |
1402 | static int o2net_set_nodelay(struct socket *sock) |
1403 | { |
1404 | int ret, val = 1; |
1405 | mm_segment_t oldfs; |
1406 | |
1407 | oldfs = get_fs(); |
1408 | set_fs(KERNEL_DS); |
1409 | |
1410 | /* |
1411 | * Dear unsuspecting programmer, |
1412 | * |
1413 | * Don't use sock_setsockopt() for SOL_TCP. It doesn't check its level |
1414 | * argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will |
1415 | * silently turn into SO_DEBUG. |
1416 | * |
1417 | * Yours, |
1418 | * Keeper of hilariously fragile interfaces. |
1419 | */ |
1420 | ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, |
1421 | (char __user *)&val, sizeof(val)); |
1422 | |
1423 | set_fs(oldfs); |
1424 | return ret; |
1425 | } |
1426 | |
1427 | static void o2net_initialize_handshake(void) |
1428 | { |
1429 | o2net_hand->o2hb_heartbeat_timeout_ms = cpu_to_be32( |
1430 | O2HB_MAX_WRITE_TIMEOUT_MS); |
1431 | o2net_hand->o2net_idle_timeout_ms = cpu_to_be32(o2net_idle_timeout()); |
1432 | o2net_hand->o2net_keepalive_delay_ms = cpu_to_be32( |
1433 | o2net_keepalive_delay()); |
1434 | o2net_hand->o2net_reconnect_delay_ms = cpu_to_be32( |
1435 | o2net_reconnect_delay()); |
1436 | } |
1437 | |
1438 | /* ------------------------------------------------------------ */ |
1439 | |
1440 | /* called when a connect completes and after a sock is accepted. the |
1441 | * rx path will see the response and mark the sc valid */ |
1442 | static void o2net_sc_connect_completed(struct work_struct *work) |
1443 | { |
1444 | struct o2net_sock_container *sc = |
1445 | container_of(work, struct o2net_sock_container, |
1446 | sc_connect_work); |
1447 | |
1448 | mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n", |
1449 | (unsigned long long)O2NET_PROTOCOL_VERSION, |
1450 | (unsigned long long)be64_to_cpu(o2net_hand->connector_id)); |
1451 | |
1452 | o2net_initialize_handshake(); |
1453 | o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand)); |
1454 | sc_put(sc); |
1455 | } |
1456 | |
1457 | /* this is called as a work_struct func. */ |
1458 | static void o2net_sc_send_keep_req(struct work_struct *work) |
1459 | { |
1460 | struct o2net_sock_container *sc = |
1461 | container_of(work, struct o2net_sock_container, |
1462 | sc_keepalive_work.work); |
1463 | |
1464 | o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req)); |
1465 | sc_put(sc); |
1466 | } |
1467 | |
1468 | /* socket shutdown does a del_timer_sync against this as it tears down. |
1469 | * we can't start this timer until we've got to the point in sc buildup |
1470 | * where shutdown is going to be involved */ |
1471 | static void o2net_idle_timer(unsigned long data) |
1472 | { |
1473 | struct o2net_sock_container *sc = (struct o2net_sock_container *)data; |
1474 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
1475 | struct timeval now; |
1476 | |
1477 | do_gettimeofday(&now); |
1478 | |
1479 | printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " |
1480 | "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), |
1481 | o2net_idle_timeout() / 1000, |
1482 | o2net_idle_timeout() % 1000); |
1483 | mlog(ML_NOTICE, "here are some times that might help debug the " |
1484 | "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " |
1485 | "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", |
1486 | sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, |
1487 | now.tv_sec, (long) now.tv_usec, |
1488 | sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, |
1489 | sc->sc_tv_advance_start.tv_sec, |
1490 | (long) sc->sc_tv_advance_start.tv_usec, |
1491 | sc->sc_tv_advance_stop.tv_sec, |
1492 | (long) sc->sc_tv_advance_stop.tv_usec, |
1493 | sc->sc_msg_key, sc->sc_msg_type, |
1494 | sc->sc_tv_func_start.tv_sec, (long) sc->sc_tv_func_start.tv_usec, |
1495 | sc->sc_tv_func_stop.tv_sec, (long) sc->sc_tv_func_stop.tv_usec); |
1496 | |
1497 | /* |
1498 | * Initialize the nn_timeout so that the next connection attempt |
1499 | * will continue in o2net_start_connect. |
1500 | */ |
1501 | atomic_set(&nn->nn_timeout, 1); |
1502 | |
1503 | o2net_sc_queue_work(sc, &sc->sc_shutdown_work); |
1504 | } |
1505 | |
1506 | static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc) |
1507 | { |
1508 | o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work); |
1509 | o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work, |
1510 | msecs_to_jiffies(o2net_keepalive_delay())); |
1511 | do_gettimeofday(&sc->sc_tv_timer); |
1512 | mod_timer(&sc->sc_idle_timeout, |
1513 | jiffies + msecs_to_jiffies(o2net_idle_timeout())); |
1514 | } |
1515 | |
1516 | static void o2net_sc_postpone_idle(struct o2net_sock_container *sc) |
1517 | { |
1518 | /* Only push out an existing timer */ |
1519 | if (timer_pending(&sc->sc_idle_timeout)) |
1520 | o2net_sc_reset_idle_timer(sc); |
1521 | } |
1522 | |
1523 | /* this work func is kicked whenever a path sets the nn state which doesn't |
1524 | * have valid set. This includes seeing hb come up, losing a connection, |
1525 | * having a connect attempt fail, etc. This centralizes the logic which decides |
1526 | * if a connect attempt should be made or if we should give up and all future |
1527 | * transmit attempts should fail */ |
1528 | static void o2net_start_connect(struct work_struct *work) |
1529 | { |
1530 | struct o2net_node *nn = |
1531 | container_of(work, struct o2net_node, nn_connect_work.work); |
1532 | struct o2net_sock_container *sc = NULL; |
1533 | struct o2nm_node *node = NULL, *mynode = NULL; |
1534 | struct socket *sock = NULL; |
1535 | struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; |
1536 | int ret = 0, stop; |
1537 | unsigned int timeout; |
1538 | |
1539 | /* if we're greater we initiate tx, otherwise we accept */ |
1540 | if (o2nm_this_node() <= o2net_num_from_nn(nn)) |
1541 | goto out; |
1542 | |
1543 | /* watch for racing with tearing a node down */ |
1544 | node = o2nm_get_node_by_num(o2net_num_from_nn(nn)); |
1545 | if (node == NULL) { |
1546 | ret = 0; |
1547 | goto out; |
1548 | } |
1549 | |
1550 | mynode = o2nm_get_node_by_num(o2nm_this_node()); |
1551 | if (mynode == NULL) { |
1552 | ret = 0; |
1553 | goto out; |
1554 | } |
1555 | |
1556 | spin_lock(&nn->nn_lock); |
1557 | /* |
1558 | * see if we already have one pending or have given up. |
1559 | * For nn_timeout, it is set when we close the connection |
1560 | * because of the idle time out. So it means that we have |
1561 | * at least connected to that node successfully once, |
1562 | * now try to connect to it again. |
1563 | */ |
1564 | timeout = atomic_read(&nn->nn_timeout); |
1565 | stop = (nn->nn_sc || |
1566 | (nn->nn_persistent_error && |
1567 | (nn->nn_persistent_error != -ENOTCONN || timeout == 0))); |
1568 | spin_unlock(&nn->nn_lock); |
1569 | if (stop) |
1570 | goto out; |
1571 | |
1572 | nn->nn_last_connect_attempt = jiffies; |
1573 | |
1574 | sc = sc_alloc(node); |
1575 | if (sc == NULL) { |
1576 | mlog(0, "couldn't allocate sc\n"); |
1577 | ret = -ENOMEM; |
1578 | goto out; |
1579 | } |
1580 | |
1581 | ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); |
1582 | if (ret < 0) { |
1583 | mlog(0, "can't create socket: %d\n", ret); |
1584 | goto out; |
1585 | } |
1586 | sc->sc_sock = sock; /* freed by sc_kref_release */ |
1587 | |
1588 | sock->sk->sk_allocation = GFP_ATOMIC; |
1589 | |
1590 | myaddr.sin_family = AF_INET; |
1591 | myaddr.sin_addr.s_addr = mynode->nd_ipv4_address; |
1592 | myaddr.sin_port = htons(0); /* any port */ |
1593 | |
1594 | ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr, |
1595 | sizeof(myaddr)); |
1596 | if (ret) { |
1597 | mlog(ML_ERROR, "bind failed with %d at address %pI4\n", |
1598 | ret, &mynode->nd_ipv4_address); |
1599 | goto out; |
1600 | } |
1601 | |
1602 | ret = o2net_set_nodelay(sc->sc_sock); |
1603 | if (ret) { |
1604 | mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret); |
1605 | goto out; |
1606 | } |
1607 | |
1608 | o2net_register_callbacks(sc->sc_sock->sk, sc); |
1609 | |
1610 | spin_lock(&nn->nn_lock); |
1611 | /* handshake completion will set nn->nn_sc_valid */ |
1612 | o2net_set_nn_state(nn, sc, 0, 0); |
1613 | spin_unlock(&nn->nn_lock); |
1614 | |
1615 | remoteaddr.sin_family = AF_INET; |
1616 | remoteaddr.sin_addr.s_addr = node->nd_ipv4_address; |
1617 | remoteaddr.sin_port = node->nd_ipv4_port; |
1618 | |
1619 | ret = sc->sc_sock->ops->connect(sc->sc_sock, |
1620 | (struct sockaddr *)&remoteaddr, |
1621 | sizeof(remoteaddr), |
1622 | O_NONBLOCK); |
1623 | if (ret == -EINPROGRESS) |
1624 | ret = 0; |
1625 | |
1626 | out: |
1627 | 	if (ret && sc) { |
1628 | 		mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed " |
1629 | 		     "with errno %d\n", SC_NODEF_ARGS(sc), ret); |
1630 | 		/* pass an err of 0 so that set_nn_state will queue |
1631 | 		 * another connect attempt; sc is checked above because |
1632 | 		 * it is still NULL when sc_alloc() failed */ |
1633 | 		o2net_ensure_shutdown(nn, sc, 0); |
1634 | 	} |
1635 | if (sc) |
1636 | sc_put(sc); |
1637 | if (node) |
1638 | o2nm_node_put(node); |
1639 | if (mynode) |
1640 | o2nm_node_put(mynode); |
1641 | |
1642 | return; |
1643 | } |
1644 | |
1645 | static void o2net_connect_expired(struct work_struct *work) |
1646 | { |
1647 | struct o2net_node *nn = |
1648 | container_of(work, struct o2net_node, nn_connect_expired.work); |
1649 | |
1650 | spin_lock(&nn->nn_lock); |
1651 | if (!nn->nn_sc_valid) { |
1652 | mlog(ML_ERROR, "no connection established with node %u after " |
1653 | 		     "%u.%03u seconds, giving up and returning errors.\n", |
1654 | o2net_num_from_nn(nn), |
1655 | o2net_idle_timeout() / 1000, |
1656 | o2net_idle_timeout() % 1000); |
1657 | |
1658 | o2net_set_nn_state(nn, NULL, 0, -ENOTCONN); |
1659 | } |
1660 | spin_unlock(&nn->nn_lock); |
1661 | } |
1662 | |
1663 | static void o2net_still_up(struct work_struct *work) |
1664 | { |
1665 | struct o2net_node *nn = |
1666 | container_of(work, struct o2net_node, nn_still_up.work); |
1667 | |
1668 | o2quo_hb_still_up(o2net_num_from_nn(nn)); |
1669 | } |
1670 | |
1671 | /* ------------------------------------------------------------ */ |
1672 | |
1673 | void o2net_disconnect_node(struct o2nm_node *node) |
1674 | { |
1675 | struct o2net_node *nn = o2net_nn_from_num(node->nd_num); |
1676 | |
1677 | /* don't reconnect until it's heartbeating again */ |
1678 | spin_lock(&nn->nn_lock); |
1679 | atomic_set(&nn->nn_timeout, 0); |
1680 | o2net_set_nn_state(nn, NULL, 0, -ENOTCONN); |
1681 | spin_unlock(&nn->nn_lock); |
1682 | |
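	/* cancel any queued connect, connect-timeout and keepalive work
	 * for this node and wait for anything already running to finish */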
1683 | if (o2net_wq) { |
1684 | cancel_delayed_work(&nn->nn_connect_expired); |
1685 | cancel_delayed_work(&nn->nn_connect_work); |
1686 | cancel_delayed_work(&nn->nn_still_up); |
1687 | flush_workqueue(o2net_wq); |
1688 | } |
1689 | } |
1690 | |
1691 | static void o2net_hb_node_down_cb(struct o2nm_node *node, int node_num, |
1692 | void *data) |
1693 | { |
1694 | o2quo_hb_down(node_num); |
1695 | |
1696 | if (node_num != o2nm_this_node()) |
1697 | o2net_disconnect_node(node); |
1698 | |
1699 | BUG_ON(atomic_read(&o2net_connected_peers) < 0); |
1700 | } |
1701 | |
1702 | static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num, |
1703 | void *data) |
1704 | { |
1705 | struct o2net_node *nn = o2net_nn_from_num(node_num); |
1706 | |
1707 | o2quo_hb_up(node_num); |
1708 | |
1709 | /* ensure an immediate connect attempt */ |
1710 | nn->nn_last_connect_attempt = jiffies - |
1711 | (msecs_to_jiffies(o2net_reconnect_delay()) + 1); |
1712 | |
1713 | if (node_num != o2nm_this_node()) { |
1714 | 		/* believe it or not, accept and the node heartbeat check |
1715 | 		 * can succeed for this node before we get here.. so |
1716 | 		 * only use set_nn_state to clear the persistent error |
1717 | 		 * if that hasn't already happened */ |
1718 | spin_lock(&nn->nn_lock); |
1719 | atomic_set(&nn->nn_timeout, 0); |
1720 | if (nn->nn_persistent_error) |
1721 | o2net_set_nn_state(nn, NULL, 0, 0); |
1722 | spin_unlock(&nn->nn_lock); |
1723 | } |
1724 | } |
1725 | |
1726 | void o2net_unregister_hb_callbacks(void) |
1727 | { |
1728 | o2hb_unregister_callback(NULL, &o2net_hb_up); |
1729 | o2hb_unregister_callback(NULL, &o2net_hb_down); |
1730 | } |
1731 | |
1732 | int o2net_register_hb_callbacks(void) |
1733 | { |
1734 | int ret; |
1735 | |
1736 | o2hb_setup_callback(&o2net_hb_down, O2HB_NODE_DOWN_CB, |
1737 | o2net_hb_node_down_cb, NULL, O2NET_HB_PRI); |
1738 | o2hb_setup_callback(&o2net_hb_up, O2HB_NODE_UP_CB, |
1739 | o2net_hb_node_up_cb, NULL, O2NET_HB_PRI); |
1740 | |
1741 | ret = o2hb_register_callback(NULL, &o2net_hb_up); |
1742 | if (ret == 0) |
1743 | ret = o2hb_register_callback(NULL, &o2net_hb_down); |
1744 | |
1745 | if (ret) |
1746 | o2net_unregister_hb_callbacks(); |
1747 | |
1748 | return ret; |
1749 | } |
1750 | |
1751 | /* ------------------------------------------------------------ */ |
1752 | |
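/* Accept a single pending connection on the listening socket.  The peer must
 * be a known, heartbeating, higher-numbered node that doesn't already have a
 * connection; if all of that holds, the new socket is installed in the node's
 * state and our handshake is sent. */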
1753 | static int o2net_accept_one(struct socket *sock) |
1754 | { |
1755 | int ret, slen; |
1756 | struct sockaddr_in sin; |
1757 | struct socket *new_sock = NULL; |
1758 | struct o2nm_node *node = NULL; |
1759 | struct o2net_sock_container *sc = NULL; |
1760 | struct o2net_node *nn; |
1761 | |
1762 | BUG_ON(sock == NULL); |
1763 | ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type, |
1764 | sock->sk->sk_protocol, &new_sock); |
1765 | if (ret) |
1766 | goto out; |
1767 | |
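	/* sock_create_lite() hands back a bare socket; borrow the listening
	 * socket's type and ops before asking it to accept */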
1768 | new_sock->type = sock->type; |
1769 | new_sock->ops = sock->ops; |
1770 | ret = sock->ops->accept(sock, new_sock, O_NONBLOCK); |
1771 | if (ret < 0) |
1772 | goto out; |
1773 | |
1774 | new_sock->sk->sk_allocation = GFP_ATOMIC; |
1775 | |
1776 | ret = o2net_set_nodelay(new_sock); |
1777 | if (ret) { |
1778 | mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret); |
1779 | goto out; |
1780 | } |
1781 | |
1782 | slen = sizeof(sin); |
1783 | ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin, |
1784 | &slen, 1); |
1785 | if (ret < 0) |
1786 | goto out; |
1787 | |
1788 | node = o2nm_get_node_by_ip(sin.sin_addr.s_addr); |
1789 | if (node == NULL) { |
1790 | mlog(ML_NOTICE, "attempt to connect from unknown node at %pI4:%d\n", |
1791 | &sin.sin_addr.s_addr, ntohs(sin.sin_port)); |
1792 | ret = -EINVAL; |
1793 | goto out; |
1794 | } |
1795 | |
1796 | if (o2nm_this_node() > node->nd_num) { |
1797 | mlog(ML_NOTICE, "unexpected connect attempted from a lower " |
1798 | 		     "numbered node '%s' at %pI4:%d with num %u\n", |
1799 | node->nd_name, &sin.sin_addr.s_addr, |
1800 | ntohs(sin.sin_port), node->nd_num); |
1801 | ret = -EINVAL; |
1802 | goto out; |
1803 | } |
1804 | |
1805 | /* this happens all the time when the other node sees our heartbeat |
1806 | * and tries to connect before we see their heartbeat */ |
1807 | if (!o2hb_check_node_heartbeating_from_callback(node->nd_num)) { |
1808 | mlog(ML_CONN, "attempt to connect from node '%s' at " |
1809 | "%pI4:%d but it isn't heartbeating\n", |
1810 | node->nd_name, &sin.sin_addr.s_addr, |
1811 | ntohs(sin.sin_port)); |
1812 | ret = -EINVAL; |
1813 | goto out; |
1814 | } |
1815 | |
1816 | nn = o2net_nn_from_num(node->nd_num); |
1817 | |
1818 | spin_lock(&nn->nn_lock); |
1819 | if (nn->nn_sc) |
1820 | ret = -EBUSY; |
1821 | else |
1822 | ret = 0; |
1823 | spin_unlock(&nn->nn_lock); |
1824 | if (ret) { |
1825 | mlog(ML_NOTICE, "attempt to connect from node '%s' at " |
1826 | "%pI4:%d but it already has an open connection\n", |
1827 | node->nd_name, &sin.sin_addr.s_addr, |
1828 | ntohs(sin.sin_port)); |
1829 | goto out; |
1830 | } |
1831 | |
1832 | sc = sc_alloc(node); |
1833 | if (sc == NULL) { |
1834 | ret = -ENOMEM; |
1835 | goto out; |
1836 | } |
1837 | |
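	/* hand the accepted socket over to the container; clearing new_sock
	 * keeps the error path below from releasing it a second time */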
1838 | sc->sc_sock = new_sock; |
1839 | new_sock = NULL; |
1840 | |
1841 | spin_lock(&nn->nn_lock); |
1842 | atomic_set(&nn->nn_timeout, 0); |
1843 | o2net_set_nn_state(nn, sc, 0, 0); |
1844 | spin_unlock(&nn->nn_lock); |
1845 | |
1846 | o2net_register_callbacks(sc->sc_sock->sk, sc); |
1847 | o2net_sc_queue_work(sc, &sc->sc_rx_work); |
1848 | |
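	/* send our handshake to the newly accepted peer; it carries the
	 * protocol version set up in o2net_init() and the peer checks it
	 * before normal messages flow */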
1849 | o2net_initialize_handshake(); |
1850 | o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand)); |
1851 | |
1852 | out: |
1853 | if (new_sock) |
1854 | sock_release(new_sock); |
1855 | if (node) |
1856 | o2nm_node_put(node); |
1857 | if (sc) |
1858 | sc_put(sc); |
1859 | return ret; |
1860 | } |
1861 | |
1862 | static void o2net_accept_many(struct work_struct *work) |
1863 | { |
1864 | struct socket *sock = o2net_listen_sock; |
1865 | while (o2net_accept_one(sock) == 0) |
1866 | cond_resched(); |
1867 | } |
1868 | |
1869 | static void o2net_listen_data_ready(struct sock *sk, int bytes) |
1870 | { |
1871 | void (*ready)(struct sock *sk, int bytes); |
1872 | |
1873 | read_lock(&sk->sk_callback_lock); |
1874 | ready = sk->sk_user_data; |
1875 | if (ready == NULL) { /* check for teardown race */ |
1876 | ready = sk->sk_data_ready; |
1877 | goto out; |
1878 | } |
1879 | |
1880 | 	/* ->sk_data_ready is also called for a newly established child socket |
1881 | 	 * before it has been accepted and the acceptor has set up its |
1882 | 	 * data_ready.. we only want to queue listen work for our listening |
1883 | 	 * socket */ |
1884 | if (sk->sk_state == TCP_LISTEN) { |
1885 | mlog(ML_TCP, "bytes: %d\n", bytes); |
1886 | queue_work(o2net_wq, &o2net_listen_work); |
1887 | } |
1888 | |
1889 | out: |
1890 | read_unlock(&sk->sk_callback_lock); |
1891 | ready(sk, bytes); |
1892 | } |
1893 | |
1894 | static int o2net_open_listening_sock(__be32 addr, __be16 port) |
1895 | { |
1896 | struct socket *sock = NULL; |
1897 | int ret; |
1898 | struct sockaddr_in sin = { |
1899 | 		.sin_family = AF_INET, |
1900 | .sin_addr = { .s_addr = addr }, |
1901 | .sin_port = port, |
1902 | }; |
1903 | |
1904 | ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); |
1905 | if (ret < 0) { |
1906 | mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret); |
1907 | goto out; |
1908 | } |
1909 | |
1910 | sock->sk->sk_allocation = GFP_ATOMIC; |
1911 | |
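	/* stash the original data_ready callback in sk_user_data so
	 * o2net_listen_data_ready() can chain to it and
	 * o2net_stop_listening() can restore it */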
1912 | write_lock_bh(&sock->sk->sk_callback_lock); |
1913 | sock->sk->sk_user_data = sock->sk->sk_data_ready; |
1914 | sock->sk->sk_data_ready = o2net_listen_data_ready; |
1915 | write_unlock_bh(&sock->sk->sk_callback_lock); |
1916 | |
1917 | o2net_listen_sock = sock; |
1918 | INIT_WORK(&o2net_listen_work, o2net_accept_many); |
1919 | |
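	/* the moral equivalent of SO_REUSEADDR: lets a restarted node rebind
	 * its address without waiting for old sockets to drain */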
1920 | sock->sk->sk_reuse = 1; |
1921 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); |
1922 | if (ret < 0) { |
1923 | mlog(ML_ERROR, "unable to bind socket at %pI4:%u, " |
1924 | "ret=%d\n", &addr, ntohs(port), ret); |
1925 | goto out; |
1926 | } |
1927 | |
1928 | ret = sock->ops->listen(sock, 64); |
1929 | if (ret < 0) { |
1930 | mlog(ML_ERROR, "unable to listen on %pI4:%u, ret=%d\n", |
1931 | &addr, ntohs(port), ret); |
1932 | } |
1933 | |
1934 | out: |
1935 | if (ret) { |
1936 | o2net_listen_sock = NULL; |
1937 | if (sock) |
1938 | sock_release(sock); |
1939 | } |
1940 | return ret; |
1941 | } |
1942 | |
1943 | /* |
1944 | * called from node manager when we should bring up our network listening |
1945 | * socket. node manager handles all the serialization to only call this |
1946 | * once and to match it with o2net_stop_listening(). note, |
1947 | * o2nm_this_node() doesn't work yet as we're being called while it |
1948 | * is being set up. |
1949 | */ |
1950 | int o2net_start_listening(struct o2nm_node *node) |
1951 | { |
1952 | int ret = 0; |
1953 | |
1954 | BUG_ON(o2net_wq != NULL); |
1955 | BUG_ON(o2net_listen_sock != NULL); |
1956 | |
1957 | mlog(ML_KTHREAD, "starting o2net thread...\n"); |
1958 | o2net_wq = create_singlethread_workqueue("o2net"); |
1959 | if (o2net_wq == NULL) { |
1960 | mlog(ML_ERROR, "unable to launch o2net thread\n"); |
1961 | return -ENOMEM; /* ? */ |
1962 | } |
1963 | |
1964 | ret = o2net_open_listening_sock(node->nd_ipv4_address, |
1965 | node->nd_ipv4_port); |
1966 | if (ret) { |
1967 | destroy_workqueue(o2net_wq); |
1968 | o2net_wq = NULL; |
1969 | } else |
1970 | o2quo_conn_up(node->nd_num); |
1971 | |
1972 | return ret; |
1973 | } |
1974 | |
1975 | /* again, o2nm_this_node() doesn't work here as we're involved in |
1976 | * tearing it down */ |
1977 | void o2net_stop_listening(struct o2nm_node *node) |
1978 | { |
1979 | struct socket *sock = o2net_listen_sock; |
1980 | size_t i; |
1981 | |
1982 | BUG_ON(o2net_wq == NULL); |
1983 | BUG_ON(o2net_listen_sock == NULL); |
1984 | |
1985 | /* stop the listening socket from generating work */ |
1986 | write_lock_bh(&sock->sk->sk_callback_lock); |
1987 | sock->sk->sk_data_ready = sock->sk->sk_user_data; |
1988 | sock->sk->sk_user_data = NULL; |
1989 | write_unlock_bh(&sock->sk->sk_callback_lock); |
1990 | |
1991 | for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) { |
1992 | 		struct o2nm_node *peer = o2nm_get_node_by_num(i); |
1993 | 		if (peer) { |
1994 | 			o2net_disconnect_node(peer); |
1995 | 			o2nm_node_put(peer); |
1996 | 		} |
1997 | } |
1998 | |
1999 | /* finish all work and tear down the work queue */ |
2000 | mlog(ML_KTHREAD, "waiting for o2net thread to exit....\n"); |
2001 | destroy_workqueue(o2net_wq); |
2002 | o2net_wq = NULL; |
2003 | |
2004 | sock_release(o2net_listen_sock); |
2005 | o2net_listen_sock = NULL; |
2006 | |
2007 | o2quo_conn_err(node->nd_num); |
2008 | } |
2009 | |
2010 | /* ------------------------------------------------------------ */ |
2011 | |
2012 | int o2net_init(void) |
2013 | { |
2014 | unsigned long i; |
2015 | |
2016 | o2quo_init(); |
2017 | |
2018 | if (o2net_debugfs_init()) |
2019 | return -ENOMEM; |
2020 | |
2021 | o2net_hand = kzalloc(sizeof(struct o2net_handshake), GFP_KERNEL); |
2022 | o2net_keep_req = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL); |
2023 | o2net_keep_resp = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL); |
2024 | if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp) { |
2025 | kfree(o2net_hand); |
2026 | kfree(o2net_keep_req); |
2027 | kfree(o2net_keep_resp); |
2028 | return -ENOMEM; |
2029 | } |
2030 | |
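	/* the handshake and keepalive messages are allocated once here and
	 * shared, read-only, by every connection */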
2031 | o2net_hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION); |
2032 | o2net_hand->connector_id = cpu_to_be64(1); |
2033 | |
2034 | o2net_keep_req->magic = cpu_to_be16(O2NET_MSG_KEEP_REQ_MAGIC); |
2035 | o2net_keep_resp->magic = cpu_to_be16(O2NET_MSG_KEEP_RESP_MAGIC); |
2036 | |
2037 | for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) { |
2038 | struct o2net_node *nn = o2net_nn_from_num(i); |
2039 | |
2040 | atomic_set(&nn->nn_timeout, 0); |
2041 | spin_lock_init(&nn->nn_lock); |
2042 | INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect); |
2043 | INIT_DELAYED_WORK(&nn->nn_connect_expired, |
2044 | o2net_connect_expired); |
2045 | INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up); |
2046 | 		/* until we see hb from a node we'll return -ENOTCONN */ |
2047 | nn->nn_persistent_error = -ENOTCONN; |
2048 | init_waitqueue_head(&nn->nn_sc_wq); |
2049 | idr_init(&nn->nn_status_idr); |
2050 | INIT_LIST_HEAD(&nn->nn_status_list); |
2051 | } |
2052 | |
2053 | return 0; |
2054 | } |
2055 | |
2056 | void o2net_exit(void) |
2057 | { |
2058 | o2quo_exit(); |
2059 | kfree(o2net_hand); |
2060 | kfree(o2net_keep_req); |
2061 | kfree(o2net_keep_resp); |
2062 | o2net_debugfs_exit(); |
2063 | } |
2064 |