/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...)	printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...)	do { (void)(0); } while (0)
#endif

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2
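
/* A path counts as expired once it is still marked active, its expiry
 * time has passed and it has not been pinned with MESH_PATH_FIXED.
 */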
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
			      time_after(jiffies, mpath->exp_time) && \
			      !(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table. The write lock is only needed
 * when modifying the number of buckets of a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)

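/*
 * mesh_table_alloc - allocate a hash table with 2^size_order buckets, one
 * spinlock per bucket and a random salt for the hash function.  Returns
 * NULL if any of the allocations fails.
 */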
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}
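
/*
 * mesh_table_free - empty every hash bucket and free the table itself.
 * When @free_leafs is true the mesh_path structures and the known gates
 * list are released as well; when false only the nodes are freed, which
 * is what a table retired after a resize needs, since the new table
 * still points at the same mesh_path leaves.
 */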
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}
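
/*
 * mesh_table_grow - copy every node of @oldtbl into @newtbl.  Returns
 * -EAGAIN when the old table has not yet outgrown its target mean chain
 * length and -ENOMEM when copying a node fails; in both cases the caller
 * is left to dispose of @newtbl.
 */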
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
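
/*
 * prepare_for_gate - rewrite a queued frame so it can be forwarded to a
 * mesh gate.  If the frame carries no Address Extension field yet, room
 * is made for one and the original addr3/addr4 pair is preserved in
 * eaddr1/eaddr2; then the receiver, transmitter and destination addresses
 * are pointed at the gate's next hop, our interface and @dst_addr.
 */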
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff_head gateq, failq;
	unsigned long flags;
	int num_skbs;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	num_skbs = skb_queue_len(&failq);

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy) {
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg("Mpath queue for gate %pM has %d frames\n",
			gate_mpath->dst,
			skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
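
/*
 * mpath_lookup - search one hash bucket for a path to @dst owned by
 * @sdata, deactivating the path on the fly if it has expired.  Must be
 * called under rcu_read_lock().
 */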
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    compare_ether_addr(dst, mpath->dst) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
		  mpath->sdata->name, mpath->dst,
		  mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			kfree_rcu(gate, rcu);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
				  "%d known gates\n", mpath->sdata->name,
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (compare_ether_addr(dst, sdata->vif.addr) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    compare_ether_addr(dst, mpath->dst) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}
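
/*
 * Table growth happens under the resize write lock: allocate a table of
 * twice the current size, copy every node across, publish the new table
 * with rcu_assign_pointer() and free the old one after a grace period.
 */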
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}
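
/*
 * mpp_path_add - add a proxy path: record that the external destination
 * @dst is reachable through the proxying mesh point @mpp.  Mirrors
 * mesh_path_add(), but the entry lives in the mpp_paths table.
 */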
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (compare_ether_addr(dst, sdata->vif.addr) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    compare_ether_addr(dst, mpath->dst) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}
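
/*
 * mesh_path_node_reclaim - deferred destruction of a path node, invoked
 * once all RCU readers that could still see it have finished.
 */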
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;
	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths as well as mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    compare_ether_addr(addr, mpath->dst) == 0) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg("Not forwarding %p\n", gate->mpath);
			mpath_dbg("flags %x\n", gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg("Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}
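
/*
 * mesh_pathtbl_init - allocate the initial mesh path and MPP tables along
 * with their (initially empty) known-gates lists.
 */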
int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}