| author | Linus Torvalds <[email protected]> | 2018-02-08 14:39:29 -0800 |
|---|---|---|
| committer | Linus Torvalds <[email protected]> | 2018-02-08 14:39:29 -0800 |
| commit | 9d21874da8ec0e0043c85cde8dda173e74ffc24d | |
| tree | 8c02dce2b04ff5d70cafbd82a8bff383f2a49604 /net/sched/act_api.c | |
| parent | 4ed8244ef8847c8ad7414e1a12ba45fef7998721 | |
| parent | ac665d9423474e64e64b34b0e2cea43601b50d7d | |
Merge branch 'idr-2018-02-06' of git://git.infradead.org/users/willy/linux-dax
Pull idr updates from Matthew Wilcox:
- test-suite improvements
- replace the extended API by improving the normal API
- performance improvement for IDRs which are 1-based rather than
0-based
- add documentation
* 'idr-2018-02-06' of git://git.infradead.org/users/willy/linux-dax:
idr: Add documentation
idr: Make 1-based IDRs more efficient
idr: Warn if old iterators see large IDs
idr: Rename idr_for_each_entry_ext
idr: Remove idr_alloc_ext
cls_u32: Convert to idr_alloc_u32
cls_u32: Reinstate cyclic allocation
cls_flower: Convert to idr_alloc_u32
cls_bpf: Convert to use idr_alloc_u32
cls_basic: Convert to use idr_alloc_u32
cls_api: Convert to idr_alloc_u32
net sched actions: Convert to use idr_alloc_u32
idr: Add idr_alloc_u32 helper
idr: Delete idr_find_ext function
idr: Delete idr_replace_ext function
idr: Delete idr_remove_ext function
IDR test suite: Check handling negative end correctly
idr test suite: Fix ida_test_random()
radix tree test suite: Remove ARRAY_SIZE
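
For readers following the conversions listed above, the core of the change is the new allocation helper: idr_alloc_u32() takes the starting ID by reference, writes the allocated ID back through the same u32, and treats the upper bound as inclusive. The sketch below shows the calling convention only; it is not taken from the series itself, and the IDR, lock and function names are invented for illustration.

```c
/*
 * Minimal sketch (not part of the series) of the conversion pattern the
 * branch applies: callers of the removed idr_alloc_ext() move to
 * idr_alloc_u32().  Only the idr_*(), spinlock and gfp symbols are real
 * kernel API; example_idr, example_lock and example_alloc() are made up.
 */
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

/* Allocate an ID in [1, UINT_MAX]; 1-based ranges are what the series
 * optimises.  Returns 0 and stores the chosen ID in *id on success.
 */
static int example_alloc(void *ptr, u32 *id)
{
	int err;

	idr_preload(GFP_KERNEL);	/* preallocate nodes outside the lock */
	spin_lock_bh(&example_lock);
	*id = 1;			/* lowest acceptable ID */
	err = idr_alloc_u32(&example_idr, ptr, id, UINT_MAX, GFP_ATOMIC);
	spin_unlock_bh(&example_lock);
	idr_preload_end();

	return err;			/* 0, -ENOMEM or -ENOSPC */
}
```

Because the result comes back through the caller's own u32, converted callers such as tcf_idr_create() below no longer need a separate unsigned long to receive the index.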
Diffstat (limited to 'net/sched/act_api.c')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | net/sched/act_api.c | 72 |

1 file changed, 31 insertions, 41 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 52622a3d2517..eba6682727dd 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -78,7 +78,7 @@ static void free_tcf(struct tc_action *p)
 static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
 {
 	spin_lock_bh(&idrinfo->lock);
-	idr_remove_ext(&idrinfo->action_idr, p->tcfa_index);
+	idr_remove(&idrinfo->action_idr, p->tcfa_index);
 	spin_unlock_bh(&idrinfo->lock);
 	gen_kill_estimator(&p->tcfa_rate_est);
 	free_tcf(p);
@@ -124,7 +124,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 
 	s_i = cb->args[0];
 
-	idr_for_each_entry_ext(idr, p, id) {
+	idr_for_each_entry_ul(idr, p, id) {
 		index++;
 		if (index < s_i)
 			continue;
@@ -181,7 +181,7 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 	if (nla_put_string(skb, TCA_KIND, ops->kind))
 		goto nla_put_failure;
 
-	idr_for_each_entry_ext(idr, p, id) {
+	idr_for_each_entry_ul(idr, p, id) {
 		ret = __tcf_idr_release(p, false, true);
 		if (ret == ACT_P_DELETED) {
 			module_put(ops->owner);
@@ -222,7 +222,7 @@ static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
 	struct tc_action *p = NULL;
 
 	spin_lock_bh(&idrinfo->lock);
-	p = idr_find_ext(&idrinfo->action_idr, index);
+	p = idr_find(&idrinfo->action_idr, index);
 	spin_unlock_bh(&idrinfo->lock);
 
 	return p;
@@ -274,7 +274,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 	struct idr *idr = &idrinfo->action_idr;
 	int err = -ENOMEM;
-	unsigned long idr_index;
 
 	if (unlikely(!p))
 		return -ENOMEM;
@@ -284,45 +283,28 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 
 	if (cpustats) {
 		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
-		if (!p->cpu_bstats) {
-err1:
-			kfree(p);
-			return err;
-		}
-		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
-		if (!p->cpu_qstats) {
-err2:
-			free_percpu(p->cpu_bstats);
+		if (!p->cpu_bstats)
 			goto err1;
-		}
+		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+		if (!p->cpu_qstats)
+			goto err2;
 	}
 	spin_lock_init(&p->tcfa_lock);
+	idr_preload(GFP_KERNEL);
+	spin_lock_bh(&idrinfo->lock);
 	/* user doesn't specify an index */
 	if (!index) {
-		idr_preload(GFP_KERNEL);
-		spin_lock_bh(&idrinfo->lock);
-		err = idr_alloc_ext(idr, NULL, &idr_index, 1, 0,
-				    GFP_ATOMIC);
-		spin_unlock_bh(&idrinfo->lock);
-		idr_preload_end();
-		if (err) {
-err3:
-			free_percpu(p->cpu_qstats);
-			goto err2;
-		}
-		p->tcfa_index = idr_index;
+		index = 1;
+		err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC);
 	} else {
-		idr_preload(GFP_KERNEL);
-		spin_lock_bh(&idrinfo->lock);
-		err = idr_alloc_ext(idr, NULL, NULL, index, index + 1,
-				    GFP_ATOMIC);
-		spin_unlock_bh(&idrinfo->lock);
-		idr_preload_end();
-		if (err)
-			goto err3;
-		p->tcfa_index = index;
+		err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
 	}
+	spin_unlock_bh(&idrinfo->lock);
+	idr_preload_end();
+	if (err)
+		goto err3;
+	p->tcfa_index = index;
 
 	p->tcfa_tm.install = jiffies;
 	p->tcfa_tm.lastuse = jiffies;
 	p->tcfa_tm.firstuse = 0;
@@ -330,9 +312,8 @@ err3:
 		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
 					&p->tcfa_rate_est,
 					&p->tcfa_lock, NULL, est);
-		if (err) {
-			goto err3;
-		}
+		if (err)
+			goto err4;
 	}
 
 	p->idrinfo = idrinfo;
@@ -340,6 +321,15 @@ err3:
 	INIT_LIST_HEAD(&p->list);
 	*a = p;
 	return 0;
+err4:
+	idr_remove(idr, index);
+err3:
+	free_percpu(p->cpu_qstats);
+err2:
+	free_percpu(p->cpu_bstats);
+err1:
+	kfree(p);
+	return err;
 }
 EXPORT_SYMBOL(tcf_idr_create);
 
@@ -348,7 +338,7 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 
 	spin_lock_bh(&idrinfo->lock);
-	idr_replace_ext(&idrinfo->action_idr, a, a->tcfa_index);
+	idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
 	spin_unlock_bh(&idrinfo->lock);
 }
 EXPORT_SYMBOL(tcf_idr_insert);
@@ -361,7 +351,7 @@ void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
 	int ret;
 	unsigned long id = 1;
 
-	idr_for_each_entry_ext(idr, p, id) {
+	idr_for_each_entry_ul(idr, p, id) {
 		ret = __tcf_idr_release(p, false, true);
 		if (ret == ACT_P_DELETED)
 			module_put(ops->owner);
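
The reworked tcf_idr_create() in the diff above reserves the index with a NULL placeholder while holding idrinfo->lock, and the initialised action is only published later through idr_replace() in tcf_idr_insert(). The sketch below shows that reserve-then-publish pattern in isolation, assuming a kernel build; apart from the idr_*(), spinlock and gfp symbols, every name is hypothetical.

```c
/*
 * Sketch of the reserve-then-publish pattern used above.  A single
 * idr_alloc_u32() call covers both the "pick any free index" and
 * "caller requested a specific index" cases, which is what lets
 * tcf_idr_create() drop its duplicated preload/lock sequences.
 * example_reserve() and example_publish() are invented names.
 */
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static int example_reserve(struct idr *idr, spinlock_t *lock, u32 *index)
{
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(lock);
	if (!*index) {
		/* no index requested: take any free ID in [1, UINT_MAX] */
		*index = 1;
		err = idr_alloc_u32(idr, NULL, index, UINT_MAX, GFP_ATOMIC);
	} else {
		/* exact index requested: min == max pins the allocation */
		err = idr_alloc_u32(idr, NULL, index, *index, GFP_ATOMIC);
	}
	spin_unlock_bh(lock);
	idr_preload_end();

	return err;
}

/* Once the object is fully set up, swap it in for the NULL placeholder;
 * lookups that run before publication simply see NULL for this ID.
 */
static void example_publish(struct idr *idr, spinlock_t *lock,
			    void *obj, u32 index)
{
	spin_lock_bh(lock);
	idr_replace(idr, obj, index);
	spin_unlock_bh(lock);
}
```

Storing NULL first keeps the index reserved without exposing a half-initialised object, mirroring the split between tcf_idr_create() and tcf_idr_insert() in the diff.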