Diffstat (limited to 'net/sched/sch_gred.c')
 -rw-r--r--  net/sched/sch_gred.c | 65
 1 file changed, 39 insertions(+), 26 deletions(-)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 621dc6afde8f..1073c76d05c4 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -56,6 +56,7 @@ struct gred_sched {
 	u32 		DPs;
 	u32 		def;
 	struct red_vars wred_set;
+	struct tc_gred_qopt_offload *opt;
 };
 
 static inline int gred_wred_mode(struct gred_sched *table)
@@ -311,48 +312,50 @@ static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
-	struct tc_gred_qopt_offload opt = {
-		.command	= command,
-		.handle		= sch->handle,
-		.parent		= sch->parent,
-	};
+	struct tc_gred_qopt_offload *opt = table->opt;
 
 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
 		return;
 
+	memset(opt, 0, sizeof(*opt));
+	opt->command = command;
+	opt->handle = sch->handle;
+	opt->parent = sch->parent;
+
 	if (command == TC_GRED_REPLACE) {
 		unsigned int i;
 
-		opt.set.grio_on = gred_rio_mode(table);
-		opt.set.wred_on = gred_wred_mode(table);
-		opt.set.dp_cnt = table->DPs;
-		opt.set.dp_def = table->def;
+		opt->set.grio_on = gred_rio_mode(table);
+		opt->set.wred_on = gred_wred_mode(table);
+		opt->set.dp_cnt = table->DPs;
+		opt->set.dp_def = table->def;
 
 		for (i = 0; i < table->DPs; i++) {
 			struct gred_sched_data *q = table->tab[i];
 
 			if (!q)
 				continue;
 
-			opt.set.tab[i].present = true;
-			opt.set.tab[i].limit = q->limit;
-			opt.set.tab[i].prio = q->prio;
-			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
-			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
-			opt.set.tab[i].is_ecn = gred_use_ecn(q);
-			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
-			opt.set.tab[i].probability = q->parms.max_P;
-			opt.set.tab[i].backlog = &q->backlog;
+			opt->set.tab[i].present = true;
+			opt->set.tab[i].limit = q->limit;
+			opt->set.tab[i].prio = q->prio;
+			opt->set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
+			opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
+			opt->set.tab[i].is_ecn = gred_use_ecn(q);
+			opt->set.tab[i].is_harddrop = gred_use_harddrop(q);
+			opt->set.tab[i].probability = q->parms.max_P;
+			opt->set.tab[i].backlog = &q->backlog;
 		}
 
-		opt.set.qstats = &sch->qstats;
+		opt->set.qstats = &sch->qstats;
 	}
 
-	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
+	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
 }
 
 static int gred_offload_dump_stats(struct Qdisc *sch)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct tc_gred_qopt_offload *hw_stats;
+	u64 bytes = 0, packets = 0;
 	unsigned int i;
 	int ret;
@@ -364,9 +367,11 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
 	hw_stats->handle = sch->handle;
 	hw_stats->parent = sch->parent;
 
-	for (i = 0; i < MAX_DPs; i++)
+	for (i = 0; i < MAX_DPs; i++) {
+		gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
 		if (table->tab[i])
 			hw_stats->stats.xstats[i] = &table->tab[i]->stats;
+	}
 
 	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
 	/* Even if driver returns failure adjust the stats - in case offload
@@ -375,19 +380,19 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
 	for (i = 0; i < MAX_DPs; i++) {
 		if (!table->tab[i])
 			continue;
-		table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
-		table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
+		table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
+		table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
 		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
 
-		_bstats_update(&sch->bstats,
-			       hw_stats->stats.bstats[i].bytes,
-			       hw_stats->stats.bstats[i].packets);
+		bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
+		packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
 		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
 		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
 		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
 		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
 		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
 	}
+	_bstats_update(&sch->bstats, bytes, packets);
 
 	kfree(hw_stats);
 	return ret;
@@ -728,6 +733,7 @@ err_unlock_free:
 static int gred_init(struct Qdisc *sch, struct nlattr *opt,
 		     struct netlink_ext_ack *extack)
 {
+	struct gred_sched *table = qdisc_priv(sch);
 	struct nlattr *tb[TCA_GRED_MAX + 1];
 	int err;
 
@@ -751,6 +757,12 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt,
 		sch->limit = qdisc_dev(sch)->tx_queue_len
 		             * psched_mtu(qdisc_dev(sch));
 
+	if (qdisc_dev(sch)->netdev_ops->ndo_setup_tc) {
+		table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL);
+		if (!table->opt)
+			return -ENOMEM;
+	}
+
 	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
 }
 
@@ -907,6 +919,7 @@ static void gred_destroy(struct Qdisc *sch)
 			gred_destroy_vq(table->tab[i]);
 	}
 	gred_offload(sch, TC_GRED_DESTROY);
+	kfree(table->opt);
 }
 
 static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
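
The core change in the diff above is that gred_offload() no longer builds struct tc_gred_qopt_offload on the kernel stack: gred_init() allocates it once with kzalloc(), gred_offload() clears and refills it on every call, and gred_destroy() frees it; per-DP byte/packet counters are also summed into locals and pushed with a single _bstats_update(). Below is a minimal userspace C sketch of that allocate-once/clear-and-reuse lifecycle only. All names in it (big_offload_opt, ctx_init, ctx_offload, ctx_destroy) are hypothetical illustrations, not the kernel API, and the sketch is not a substitute for the patched code.

/*
 * Illustrative sketch (hypothetical names): a configuration struct too large
 * for comfortable on-stack use is allocated once at setup, cleared and reused
 * on every call, and freed at teardown.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct big_offload_opt {
	int command;
	unsigned int handle;
	unsigned int parent;
	/* a large embedded table is what makes a stack copy undesirable */
	struct { int limit, prio, min, max; } tab[16];
};

struct ctx {
	struct big_offload_opt *opt;	/* allocated once, reused per call */
};

static int ctx_init(struct ctx *c)
{
	c->opt = calloc(1, sizeof(*c->opt));	/* mirrors kzalloc() in gred_init() */
	return c->opt ? 0 : -1;
}

static void ctx_offload(struct ctx *c, int command)
{
	struct big_offload_opt *opt = c->opt;

	/* clear and refill instead of building a fresh struct on the stack */
	memset(opt, 0, sizeof(*opt));
	opt->command = command;
	opt->handle = 0x8001;
	opt->parent = 0xffff;
	printf("offload cmd=%d handle=%x\n", opt->command, opt->handle);
}

static void ctx_destroy(struct ctx *c)
{
	free(c->opt);	/* mirrors kfree(table->opt) in gred_destroy() */
}

int main(void)
{
	struct ctx c;

	if (ctx_init(&c))
		return 1;
	ctx_offload(&c, 1);
	ctx_offload(&c, 2);
	ctx_destroy(&c);
	return 0;
}

The trade-off illustrated here matches the diff: one extra pointer and a heap allocation per qdisc in exchange for keeping a large, rarely-used structure out of every gred_offload() stack frame.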