/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
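
/* Example configuration (illustrative; assumes the standard iproute2 "tc"
 * front end, which encodes these options into the netlink attributes
 * parsed by netem_change() below):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 * adds 100ms of delay with +/- 10ms of 25%-correlated jitter and 0.3%
 * uncorrelated random loss.
 */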

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};
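
/* Note on units: the u32 probability fields above (loss, duplicate,
 * reorder, corrupt, and the clg transition probabilities) are fixed-point
 * fractions where 0 means 0% and ~0U means 100%; userspace (e.g. tc)
 * scales percentages into this range before sending them down.
 */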

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};

/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp
 * to hold a rb_node structure.
 *
 * If struct sk_buff layout is changed, the following checks will complain.
 */
static struct rb_node *netem_rb_node(struct sk_buff *skb)
{
	BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0);
	BUILD_BUG_ON(offsetof(struct sk_buff, prev) !=
		     offsetof(struct sk_buff, next) + sizeof(skb->next));
	BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) !=
		     offsetof(struct sk_buff, prev) + sizeof(skb->prev));
	BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) +
					      sizeof(skb->prev) +
					      sizeof(skb->tstamp));
	return (struct rb_node *)&skb->next;
}

static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return (struct sk_buff *)rb;
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
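
/* Worked example: with rho = 0x80000000 (~0.5 in fixed point), each call
 * returns roughly (U + last) / 2 for a fresh uniform draw U, so successive
 * values wander gradually instead of jumping; rho == 0 degenerates to a
 * plain prandom_u32() and rho == ~0U repeats the previous value.
 */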

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
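
/* Transition summary for the generator above (a1..a5 are loaded from the
 * tc_netem_gimodel fields p13, p31, p32, p14, p23 in get_loss_clg()):
 * from the gap-period TX state, p14 yields a single isolated loss and
 * p13 starts a loss burst; within a burst, p31 returns to the gap TX
 * state, p32 resumes transmission inside the burst, and p23 drops back
 * into loss from the burst TX state.
 */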

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
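
/* Parameter mapping for the generator above (see get_loss_clg()):
 * a1 = p (good->bad), a2 = r (bad->good), a3 = h (transmit probability
 * while bad, hence the "> a3" loss test), a4 = 1-k (loss probability
 * while good). Setting h = 0 and k = 1 reduces this to the Simple
 * Gilbert model described in reference [2] above.
 */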

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (also used for the GI model):
		 * a packet is dropped whenever the Markov 4-state loss
		 * generator signals a loss.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm: a packet is dropped
		 * whenever the Gilbert-Elliot loss generator signals a loss.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
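
/* Worked example: table entries are samples of the normalized target
 * distribution scaled by NETEM_DIST_SCALE (8192).  With mu = 100ms,
 * sigma = 10ms and a table entry t = 8192 (one standard deviation), the
 * split multiplication above computes sigma * t / NETEM_DIST_SCALE
 * without intermediate overflow, giving 100ms + 10ms = 110ms.
 */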

static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
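
/* Example: emulating an ATM-like link (48-byte cell payload plus 5 bytes
 * of header) with cell_size 48 and cell_overhead 5, a 1500-byte packet
 * occupies ceil(1500 / 48) = 32 cells, so its transmission time is
 * charged as 32 * (48 + 5) = 1696 bytes at q->rate bytes per second.
 */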

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		skb->next = NULL;
		skb->prev = NULL;
		kfree_skb(skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(netem_rb_node(nskb), parent, p);
	rb_insert_color(netem_rb_node(nskb), &q->t_root);
	sch->q.qlen++;
}
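
/* Note: the ">=" comparison above sends packets with equal time_to_send
 * keys to the right subtree, so packets sharing a deadline stay in
 * arrival order and rb_first() always yields the earliest deadline with
 * ties broken FIFO.
 */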

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			sch->qstats.drops++; /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
			return qdisc_drop(skb, sch);

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_reshape_fail(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (!skb_queue_empty(&sch->q))
				last = skb_peek_tail(&sch->q);
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

	return NET_XMIT_SUCCESS;
}
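
/* Reordering example: with "gap 5" and "reorder 25%", four packets in a
 * row take the delayed tfifo path above; starting with the fifth, each
 * packet has a ~25% chance (subject to reorder correlation) of being sent
 * with no delay, jumping ahead of the delayed ones so the receiver sees
 * it out of order. An immediate send resets the gap counter.
 */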

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);

	if (!len) {
		struct rb_node *p = rb_first(&q->t_root);

		if (p) {
			struct sk_buff *skb = netem_rb_to_skb(p);

			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			len = qdisc_pkt_len(skb);
			sch->qstats.backlog -= len;
			kfree_skb(skb);
		}
	}
	if (!len && q->qdisc && q->qdisc->ops->drop)
		len = q->qdisc->ops->drop(q->qdisc);
	if (len)
		sch->qstats.drops++;

	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = __skb_dequeue(&sch->q);
	if (skb) {
deliver:
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		qdisc_unthrottled(sch);
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* has the scheduled send time arrived? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						sch->qstats.drops++;
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	if (d) {
		if (is_vmalloc_addr(d))
			vfree(d);
		else
			kfree(d);
	}
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
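
/* The table itself comes from userspace: iproute2 ships pre-computed
 * distribution files (e.g. normal, pareto, paretonormal) and a maketable
 * tool that converts experimental data into the NETEM_DIST_SCALE-scaled
 * s16 samples delivered in TCA_NETEM_DELAY_DIST.
 */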

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
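
/* Layout note: unlike most qdiscs, netem's TCA_OPTIONS carries a legacy
 * struct tc_netem_qopt first, with the netlink attributes packed after
 * it; parse_attr() above skips NLA_ALIGN(len) bytes before parsing,
 * which is why netem_change() cannot use a plain nested-attribute parse.
 */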

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* restore clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	if (*old) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");