| 1 | From 7bd90773f89001ea4960ed47676b550137f3facb Mon Sep 17 00:00:00 2001 |
| 2 | From: Eric Dumazet <edumazet@google.com> |
| 3 | Date: Wed, 16 May 2012 04:39:09 +0000 |
| 4 | Subject: [PATCH] fq_codel: should use qdisc backlog as threshold |
| 5 | |
| 6 | commit 865ec5523dadbedefbc5710a68969f686a28d928 upstream. |
| 7 | |
| 8 | codel_should_drop() logic allows a packet being not dropped if queue |
| 9 | size is under max packet size. |
| 10 | |
| 11 | In fq_codel, we have two possible backlogs: the qdisc global one, and |
| 12 | the flow local one. |
| 13 | |
| 14 | The meaningful one for codel_should_drop() should be the global backlog, |
| 15 | not the per-flow one, so that thin flows can have a non-zero drop/mark |
| 16 | probability. |
| 17 | |
| 18 | Signed-off-by: Eric Dumazet <edumazet@google.com> |
| 19 | Cc: Dave Taht <dave.taht@bufferbloat.net> |
| 20 | Cc: Kathleen Nichols <nichols@pollere.com> |
| 21 | Cc: Van Jacobson <van@pollere.net> |
| 22 | Signed-off-by: David S. Miller <davem@davemloft.net> |
| 23 | --- |
| 24 | include/net/codel.h | 15 +++++++-------- |
| 25 | net/sched/sch_codel.c | 4 ++-- |
| 26 | net/sched/sch_fq_codel.c | 5 +++-- |
| 27 | 3 files changed, 12 insertions(+), 12 deletions(-) |
| 28 | |
| 29 | --- a/include/net/codel.h |
| 30 | +++ b/include/net/codel.h |
| 31 | @@ -205,7 +205,7 @@ static codel_time_t codel_control_law(co |
| 32 | |
| 33 | |
| 34 | static bool codel_should_drop(const struct sk_buff *skb, |
| 35 | - unsigned int *backlog, |
| 36 | + struct Qdisc *sch, |
| 37 | struct codel_vars *vars, |
| 38 | struct codel_params *params, |
| 39 | struct codel_stats *stats, |
| 40 | @@ -219,13 +219,13 @@ static bool codel_should_drop(const stru |
| 41 | } |
| 42 | |
| 43 | vars->ldelay = now - codel_get_enqueue_time(skb); |
| 44 | - *backlog -= qdisc_pkt_len(skb); |
| 45 | + sch->qstats.backlog -= qdisc_pkt_len(skb); |
| 46 | |
| 47 | if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket)) |
| 48 | stats->maxpacket = qdisc_pkt_len(skb); |
| 49 | |
| 50 | if (codel_time_before(vars->ldelay, params->target) || |
| 51 | - *backlog <= stats->maxpacket) { |
| 52 | + sch->qstats.backlog <= stats->maxpacket) { |
| 53 | /* went below - stay below for at least interval */ |
| 54 | vars->first_above_time = 0; |
| 55 | return false; |
| 56 | @@ -249,8 +249,7 @@ static struct sk_buff *codel_dequeue(str |
| 57 | struct codel_params *params, |
| 58 | struct codel_vars *vars, |
| 59 | struct codel_stats *stats, |
| 60 | - codel_skb_dequeue_t dequeue_func, |
| 61 | - u32 *backlog) |
| 62 | + codel_skb_dequeue_t dequeue_func) |
| 63 | { |
| 64 | struct sk_buff *skb = dequeue_func(vars, sch); |
| 65 | codel_time_t now; |
| 66 | @@ -261,7 +260,7 @@ static struct sk_buff *codel_dequeue(str |
| 67 | return skb; |
| 68 | } |
| 69 | now = codel_get_time(); |
| 70 | - drop = codel_should_drop(skb, backlog, vars, params, stats, now); |
| 71 | + drop = codel_should_drop(skb, sch, vars, params, stats, now); |
| 72 | if (vars->dropping) { |
| 73 | if (!drop) { |
| 74 | /* sojourn time below target - leave dropping state */ |
| 75 | @@ -292,7 +291,7 @@ static struct sk_buff *codel_dequeue(str |
| 76 | qdisc_drop(skb, sch); |
| 77 | stats->drop_count++; |
| 78 | skb = dequeue_func(vars, sch); |
| 79 | - if (!codel_should_drop(skb, backlog, |
| 80 | + if (!codel_should_drop(skb, sch, |
| 81 | vars, params, stats, now)) { |
| 82 | /* leave dropping state */ |
| 83 | vars->dropping = false; |
| 84 | @@ -313,7 +312,7 @@ static struct sk_buff *codel_dequeue(str |
| 85 | stats->drop_count++; |
| 86 | |
| 87 | skb = dequeue_func(vars, sch); |
| 88 | - drop = codel_should_drop(skb, backlog, vars, params, |
| 89 | + drop = codel_should_drop(skb, sch, vars, params, |
| 90 | stats, now); |
| 91 | } |
| 92 | vars->dropping = true; |
| 93 | --- a/net/sched/sch_codel.c |
| 94 | +++ b/net/sched/sch_codel.c |
| 95 | @@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_deque |
| 96 | struct codel_sched_data *q = qdisc_priv(sch); |
| 97 | struct sk_buff *skb; |
| 98 | |
| 99 | - skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, |
| 100 | - dequeue, &sch->qstats.backlog); |
| 101 | + skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue); |
| 102 | + |
| 103 | /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0, |
| 104 | * or HTB crashes. Defer it for next round. |
| 105 | */ |
| 106 | --- a/net/sched/sch_fq_codel.c |
| 107 | +++ b/net/sched/sch_fq_codel.c |
| 108 | @@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_bu |
| 109 | */ |
| 110 | static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) |
| 111 | { |
| 112 | + struct fq_codel_sched_data *q = qdisc_priv(sch); |
| 113 | struct fq_codel_flow *flow; |
| 114 | struct sk_buff *skb = NULL; |
| 115 | |
| 116 | flow = container_of(vars, struct fq_codel_flow, cvars); |
| 117 | if (flow->head) { |
| 118 | skb = dequeue_head(flow); |
| 119 | - sch->qstats.backlog -= qdisc_pkt_len(skb); |
| 120 | + q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); |
| 121 | sch->q.qlen--; |
| 122 | } |
| 123 | return skb; |
| 124 | @@ -256,7 +257,7 @@ begin: |
| 125 | prev_ecn_mark = q->cstats.ecn_mark; |
| 126 | |
| 127 | skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats, |
| 128 | - dequeue, &q->backlogs[flow - q->flows]); |
| 129 | + dequeue); |
| 130 | |
| 131 | flow->dropped += q->cstats.drop_count - prev_drop_count; |
| 132 | flow->dropped += q->cstats.ecn_mark - prev_ecn_mark; |
| 133 | |