diff options
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/act_ct.c    |  4
-rw-r--r-- | net/sched/act_vlan.c  |  1
-rw-r--r-- | net/sched/sch_cake.c  | 64
-rw-r--r-- | net/sched/sch_fq.c    |  4
-rw-r--r-- | net/sched/sch_netem.c |  9
-rw-r--r-- | net/sched/sch_taprio.c|  4
6 files changed, 37 insertions(+), 49 deletions(-)
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 3ba8e7e739b5..2197eb625658 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -44,8 +44,6 @@ static DEFINE_MUTEX(zones_mutex); struct zones_ht_key { struct net *net; u16 zone; - /* Note : pad[] must be the last field. */ - u8 pad[]; }; struct tcf_ct_flow_table { @@ -62,7 +60,7 @@ struct tcf_ct_flow_table { static const struct rhashtable_params zones_params = { .head_offset = offsetof(struct tcf_ct_flow_table, node), .key_offset = offsetof(struct tcf_ct_flow_table, key), - .key_len = offsetof(struct zones_ht_key, pad), + .key_len = offsetofend(struct zones_ht_key, zone), .automatic_shrinking = true, }; diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 22f4b1e8ade9..383bf18b6862 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -96,6 +96,7 @@ out: if (skb_at_tc_ingress(skb)) skb_pull_rcsum(skb, skb->mac_len); + skb_reset_mac_len(skb); return action; drop: diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 9602dafe32e6..f2f9b75008bb 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -361,8 +361,24 @@ static const u8 besteffort[] = { static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7}; static const u8 bulk_order[] = {1, 0, 2, 3}; +/* There is a big difference in timing between the accurate values placed in the + * cache and the approximations given by a single Newton step for small count + * values, particularly when stepping from count 1 to 2 or vice versa. Hence, + * these values are calculated using eight Newton steps, using the + * implementation below. Above 16, a single Newton step gives sufficient + * accuracy in either direction, given the precision stored. + * + * The magnitude of the error when stepping up to count 2 is such as to give the + * value that *should* have been produced at count 4. 
+ */ + #define REC_INV_SQRT_CACHE (16) -static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0}; +static const u32 inv_sqrt_cache[REC_INV_SQRT_CACHE] = { + ~0, ~0, 3037000500, 2479700525, + 2147483647, 1920767767, 1753413056, 1623345051, + 1518500250, 1431655765, 1358187914, 1294981364, + 1239850263, 1191209601, 1147878294, 1108955788 +}; /* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2) @@ -388,47 +404,14 @@ static void cobalt_newton_step(struct cobalt_vars *vars) static void cobalt_invsqrt(struct cobalt_vars *vars) { if (vars->count < REC_INV_SQRT_CACHE) - vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count]; + vars->rec_inv_sqrt = inv_sqrt_cache[vars->count]; else cobalt_newton_step(vars); } -/* There is a big difference in timing between the accurate values placed in - * the cache and the approximations given by a single Newton step for small - * count values, particularly when stepping from count 1 to 2 or vice versa. - * Above 16, a single Newton step gives sufficient accuracy in either - * direction, given the precision stored. - * - * The magnitude of the error when stepping up to count 2 is such as to give - * the value that *should* have been produced at count 4. 
- */ - -static void cobalt_cache_init(void) -{ - struct cobalt_vars v; - - memset(&v, 0, sizeof(v)); - v.rec_inv_sqrt = ~0U; - cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt; - - for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) { - cobalt_newton_step(&v); - cobalt_newton_step(&v); - cobalt_newton_step(&v); - cobalt_newton_step(&v); - - cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt; - } -} - static void cobalt_vars_init(struct cobalt_vars *vars) { memset(vars, 0, sizeof(*vars)); - - if (!cobalt_rec_inv_sqrt_cache[0]) { - cobalt_cache_init(); - cobalt_rec_inv_sqrt_cache[0] = ~0; - } } /* CoDel control_law is t + interval/sqrt(count) @@ -786,12 +769,15 @@ skip_hash: * queue, accept the collision, update the host tags. */ q->way_collisions++; - if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { - q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--; - q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--; - } allocate_src = cake_dsrc(flow_mode); allocate_dst = cake_ddst(flow_mode); + + if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { + if (allocate_src) + q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--; + if (allocate_dst) + q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--; + } found: /* reserve queue for future packets in same flow */ reduced_hash = outer_hash + k; diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 238974725679..19a49af5a9e5 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -663,7 +663,9 @@ begin: pband = &q->band_flows[q->band_nr]; pband->credit = min(pband->credit + pband->quantum, pband->quantum); - goto begin; + if (pband->credit > 0) + goto begin; + retry = 0; } if (q->time_next_delayed_flow != ~0ULL) qdisc_watchdog_schedule_range_ns(&q->watchdog, diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 0f8d581438c3..39382ee1e331 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -742,11 +742,10 @@ deliver: err = 
qdisc_enqueue(skb, q->qdisc, &to_free); kfree_skb_list(to_free); - if (err != NET_XMIT_SUCCESS && - net_xmit_drop_count(err)) { - qdisc_qstats_drop(sch); - qdisc_tree_reduce_backlog(sch, 1, - pkt_len); + if (err != NET_XMIT_SUCCESS) { + if (net_xmit_drop_count(err)) + qdisc_qstats_drop(sch); + qdisc_tree_reduce_backlog(sch, 1, pkt_len); } goto tfifo_dequeue; } diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index cc2df9f8c14a..8498d0606b24 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1952,7 +1952,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, goto unlock; } - rcu_assign_pointer(q->admin_sched, new_admin); + /* Not going to race against advance_sched(), but still */ + admin = rcu_replace_pointer(q->admin_sched, new_admin, + lockdep_rtnl_is_held()); if (admin) call_rcu(&admin->rcu, taprio_free_sched_cb); } else { |