1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * net/sched/act_api.c Packet action API.
4 *
5 * Author: Jamal Hadi Salim
6 */
7
8 #include <linux/types.h>
9 #include <linux/kernel.h>
10 #include <linux/string.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/skbuff.h>
14 #include <linux/init.h>
15 #include <linux/kmod.h>
16 #include <linux/err.h>
17 #include <linux/module.h>
18 #include <net/net_namespace.h>
19 #include <net/sock.h>
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/act_api.h>
23 #include <net/netlink.h>
24
25 #ifdef CONFIG_INET
26 DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
27 EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
28 #endif
29
30 int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
31 {
32 #ifdef CONFIG_INET
33 if (static_branch_unlikely(&tcf_frag_xmit_count))
34 return sch_frag_xmit_hook(skb, xmit);
35 #endif
36
37 return xmit(skb);
38 }
39 EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);
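/* Usage sketch (illustrative only; example_forward is a hypothetical caller):
 * an action that forwards packets typically hands dev_queue_xmit() to this
 * helper, so sch_frag_xmit_hook() is used transparently whenever the static
 * key above is enabled.
 *
 *	static int example_forward(struct sk_buff *skb)
 *	{
 *		return tcf_dev_queue_xmit(skb, dev_queue_xmit);
 *	}
 */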
40
41 static void tcf_action_goto_chain_exec(const struct tc_action *a,
42 struct tcf_result *res)
43 {
44 const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
45
46 res->goto_tp = rcu_dereference_bh(chain->filter_chain);
47 }
48
49 static void tcf_free_cookie_rcu(struct rcu_head *p)
50 {
51 struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
52
53 kfree(cookie->data);
54 kfree(cookie);
55 }
56
57 static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
58 struct tc_cookie *new_cookie)
59 {
60 struct tc_cookie *old;
61
62 old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
63 if (old)
64 call_rcu(&old->rcu, tcf_free_cookie_rcu);
65 }
66
67 int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
68 struct tcf_chain **newchain,
69 struct netlink_ext_ack *extack)
70 {
71 int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
72 u32 chain_index;
73
74 if (!opcode)
75 ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
76 else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
77 ret = 0;
78 if (ret) {
79 NL_SET_ERR_MSG(extack, "invalid control action");
80 goto end;
81 }
82
83 if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
84 chain_index = action & TC_ACT_EXT_VAL_MASK;
85 if (!tp || !newchain) {
86 ret = -EINVAL;
87 NL_SET_ERR_MSG(extack,
88 "can't goto NULL proto/chain");
89 goto end;
90 }
91 *newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
92 if (!*newchain) {
93 ret = -ENOMEM;
94 NL_SET_ERR_MSG(extack,
95 "can't allocate goto_chain");
96 }
97 }
98 end:
99 return ret;
100 }
101 EXPORT_SYMBOL(tcf_action_check_ctrlact);
102
103 struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
104 struct tcf_chain *goto_chain)
105 {
106 a->tcfa_action = action;
107 goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
108 return goto_chain;
109 }
110 EXPORT_SYMBOL(tcf_action_set_ctrlact);
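/* Hedged sketch of how an action's ->init() is expected to pair the two
 * helpers above (only the tcf_* calls are real symbols; parm, act, tp and
 * extack are assumed to come from the caller):
 *
 *	struct tcf_chain *goto_ch = NULL;
 *	int err;
 *
 *	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
 *	if (err < 0)
 *		return err;
 *
 *	spin_lock_bh(&act->tcfa_lock);
 *	goto_ch = tcf_action_set_ctrlact(act, parm->action, goto_ch);
 *	spin_unlock_bh(&act->tcfa_lock);
 *	if (goto_ch)
 *		tcf_chain_put_by_act(goto_ch);
 */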
111
112 /* XXX: For standalone actions, we don't need an RCU grace period either, because
113 * actions are always connected to filters and filters are already destroyed in
114 * RCU callbacks, so after an RCU grace period actions are already disconnected
115 * from filters. Readers can no longer find us.
116 */
117 static void free_tcf(struct tc_action *p)
118 {
119 struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
120
121 free_percpu(p->cpu_bstats);
122 free_percpu(p->cpu_bstats_hw);
123 free_percpu(p->cpu_qstats);
124
125 tcf_set_action_cookie(&p->act_cookie, NULL);
126 if (chain)
127 tcf_chain_put_by_act(chain);
128
129 kfree(p);
130 }
131
132 static void tcf_action_cleanup(struct tc_action *p)
133 {
134 if (p->ops->cleanup)
135 p->ops->cleanup(p);
136
137 gen_kill_estimator(&p->tcfa_rate_est);
138 free_tcf(p);
139 }
140
141 static int __tcf_action_put(struct tc_action *p, bool bind)
142 {
143 struct tcf_idrinfo *idrinfo = p->idrinfo;
144
145 if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
146 if (bind)
147 atomic_dec(&p->tcfa_bindcnt);
148 idr_remove(&idrinfo->action_idr, p->tcfa_index);
149 mutex_unlock(&idrinfo->lock);
150
151 tcf_action_cleanup(p);
152 return 1;
153 }
154
155 if (bind)
156 atomic_dec(&p->tcfa_bindcnt);
157
158 return 0;
159 }
160
161 static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
162 {
163 int ret = 0;
164
165 /* Release with strict==1 and bind==0 is only called through the act API
166 * interface (classifiers always bind). The only case when an action with a
167 * positive reference count and a zero bind count can exist is when it was
168 * also created with the act API (unbinding the last classifier will destroy
169 * the action if it was created by a classifier). So the only case when the
170 * bind count can change after the initial check is when an unbound action is
171 * destroyed by the act API while a classifier concurrently binds to an action
172 * with the same id. This results either in creation of a new action (same
173 * behavior as before), or in reuse of the existing action if the concurrent
174 * process increments the reference count before the action is deleted. Both
175 * scenarios are acceptable.
176 */
177 if (p) {
178 if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
179 return -EPERM;
180
181 if (__tcf_action_put(p, bind))
182 ret = ACT_P_DELETED;
183 }
184
185 return ret;
186 }
187
188 int tcf_idr_release(struct tc_action *a, bool bind)
189 {
190 const struct tc_action_ops *ops = a->ops;
191 int ret;
192
193 ret = __tcf_idr_release(a, bind, false);
194 if (ret == ACT_P_DELETED)
195 module_put(ops->owner);
196 return ret;
197 }
198 EXPORT_SYMBOL(tcf_idr_release);
199
200 static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
201 {
202 struct tc_cookie *act_cookie;
203 u32 cookie_len = 0;
204
205 rcu_read_lock();
206 act_cookie = rcu_dereference(act->act_cookie);
207
208 if (act_cookie)
209 cookie_len = nla_total_size(act_cookie->len);
210 rcu_read_unlock();
211
212 return nla_total_size(0) /* action number nested */
213 + nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
214 + cookie_len /* TCA_ACT_COOKIE */
215 + nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
216 + nla_total_size(0) /* TCA_ACT_STATS nested */
217 + nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
218 /* TCA_STATS_BASIC */
219 + nla_total_size_64bit(sizeof(struct gnet_stats_basic))
220 /* TCA_STATS_PKT64 */
221 + nla_total_size_64bit(sizeof(u64))
222 /* TCA_STATS_QUEUE */
223 + nla_total_size_64bit(sizeof(struct gnet_stats_queue))
224 + nla_total_size(0) /* TCA_OPTIONS nested */
225 + nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
226 }
227
228 static size_t tcf_action_full_attrs_size(size_t sz)
229 {
230 return NLMSG_HDRLEN /* struct nlmsghdr */
231 + sizeof(struct tcamsg)
232 + nla_total_size(0) /* TCA_ACT_TAB nested */
233 + sz;
234 }
235
236 static size_t tcf_action_fill_size(const struct tc_action *act)
237 {
238 size_t sz = tcf_action_shared_attrs_size(act);
239
240 if (act->ops->get_fill_size)
241 return act->ops->get_fill_size(act) + sz;
242 return sz;
243 }
244
245 static int
246 tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
247 {
248 unsigned char *b = skb_tail_pointer(skb);
249 struct tc_cookie *cookie;
250
251 if (nla_put_string(skb, TCA_KIND, a->ops->kind))
252 goto nla_put_failure;
253 if (tcf_action_copy_stats(skb, a, 0))
254 goto nla_put_failure;
255 if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
256 goto nla_put_failure;
257
258 rcu_read_lock();
259 cookie = rcu_dereference(a->act_cookie);
260 if (cookie) {
261 if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
262 rcu_read_unlock();
263 goto nla_put_failure;
264 }
265 }
266 rcu_read_unlock();
267
268 return 0;
269
270 nla_put_failure:
271 nlmsg_trim(skb, b);
272 return -1;
273 }
274
275 static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
276 struct netlink_callback *cb)
277 {
278 int err = 0, index = -1, s_i = 0, n_i = 0;
279 u32 act_flags = cb->args[2];
280 unsigned long jiffy_since = cb->args[3];
281 struct nlattr *nest;
282 struct idr *idr = &idrinfo->action_idr;
283 struct tc_action *p;
284 unsigned long id = 1;
285 unsigned long tmp;
286
287 mutex_lock(&idrinfo->lock);
288
289 s_i = cb->args[0];
290
291 idr_for_each_entry_ul(idr, p, tmp, id) {
292 index++;
293 if (index < s_i)
294 continue;
295 if (IS_ERR(p))
296 continue;
297
298 if (jiffy_since &&
299 time_after(jiffy_since,
300 (unsigned long)p->tcfa_tm.lastuse))
301 continue;
302
303 nest = nla_nest_start_noflag(skb, n_i);
304 if (!nest) {
305 index--;
306 goto nla_put_failure;
307 }
308 err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
309 tcf_action_dump_terse(skb, p, true) :
310 tcf_action_dump_1(skb, p, 0, 0);
311 if (err < 0) {
312 index--;
313 nlmsg_trim(skb, nest);
314 goto done;
315 }
316 nla_nest_end(skb, nest);
317 n_i++;
318 if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
319 n_i >= TCA_ACT_MAX_PRIO)
320 goto done;
321 }
322 done:
323 if (index >= 0)
324 cb->args[0] = index + 1;
325
326 mutex_unlock(&idrinfo->lock);
327 if (n_i) {
328 if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
329 cb->args[1] = n_i;
330 }
331 return n_i;
332
333 nla_put_failure:
334 nla_nest_cancel(skb, nest);
335 goto done;
336 }
337
338 static int tcf_idr_release_unsafe(struct tc_action *p)
339 {
340 if (atomic_read(&p->tcfa_bindcnt) > 0)
341 return -EPERM;
342
343 if (refcount_dec_and_test(&p->tcfa_refcnt)) {
344 idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
345 tcf_action_cleanup(p);
346 return ACT_P_DELETED;
347 }
348
349 return 0;
350 }
351
352 static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
353 const struct tc_action_ops *ops)
354 {
355 struct nlattr *nest;
356 int n_i = 0;
357 int ret = -EINVAL;
358 struct idr *idr = &idrinfo->action_idr;
359 struct tc_action *p;
360 unsigned long id = 1;
361 unsigned long tmp;
362
363 nest = nla_nest_start_noflag(skb, 0);
364 if (nest == NULL)
365 goto nla_put_failure;
366 if (nla_put_string(skb, TCA_KIND, ops->kind))
367 goto nla_put_failure;
368
369 mutex_lock(&idrinfo->lock);
370 idr_for_each_entry_ul(idr, p, tmp, id) {
371 if (IS_ERR(p))
372 continue;
373 ret = tcf_idr_release_unsafe(p);
374 if (ret == ACT_P_DELETED) {
375 module_put(ops->owner);
376 n_i++;
377 } else if (ret < 0) {
378 mutex_unlock(&idrinfo->lock);
379 goto nla_put_failure;
380 }
381 }
382 mutex_unlock(&idrinfo->lock);
383
384 ret = nla_put_u32(skb, TCA_FCNT, n_i);
385 if (ret)
386 goto nla_put_failure;
387 nla_nest_end(skb, nest);
388
389 return n_i;
390 nla_put_failure:
391 nla_nest_cancel(skb, nest);
392 return ret;
393 }
394
395 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
396 struct netlink_callback *cb, int type,
397 const struct tc_action_ops *ops,
398 struct netlink_ext_ack *extack)
399 {
400 struct tcf_idrinfo *idrinfo = tn->idrinfo;
401
402 if (type == RTM_DELACTION) {
403 return tcf_del_walker(idrinfo, skb, ops);
404 } else if (type == RTM_GETACTION) {
405 return tcf_dump_walker(idrinfo, skb, cb);
406 } else {
407 WARN(1, "tcf_generic_walker: unknown command %d\n", type);
408 NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
409 return -EINVAL;
410 }
411 }
412 EXPORT_SYMBOL(tcf_generic_walker);
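/* A minimal sketch of the ->walk() callback most action modules provide;
 * it simply forwards to tcf_generic_walker() (example_net_id and the
 * function name are hypothetical):
 *
 *	static int example_act_walker(struct net *net, struct sk_buff *skb,
 *				      struct netlink_callback *cb, int type,
 *				      const struct tc_action_ops *ops,
 *				      struct netlink_ext_ack *extack)
 *	{
 *		struct tc_action_net *tn = net_generic(net, example_net_id);
 *
 *		return tcf_generic_walker(tn, skb, cb, type, ops, extack);
 *	}
 */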
413
414 int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
415 {
416 struct tcf_idrinfo *idrinfo = tn->idrinfo;
417 struct tc_action *p;
418
419 mutex_lock(&idrinfo->lock);
420 p = idr_find(&idrinfo->action_idr, index);
421 if (IS_ERR(p))
422 p = NULL;
423 else if (p)
424 refcount_inc(&p->tcfa_refcnt);
425 mutex_unlock(&idrinfo->lock);
426
427 if (p) {
428 *a = p;
429 return true;
430 }
431 return false;
432 }
433 EXPORT_SYMBOL(tcf_idr_search);
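/* Likewise, ->lookup() is usually a thin wrapper around tcf_idr_search()
 * (hypothetical names again):
 *
 *	static int example_act_search(struct net *net, struct tc_action **a,
 *				      u32 index)
 *	{
 *		struct tc_action_net *tn = net_generic(net, example_net_id);
 *
 *		return tcf_idr_search(tn, a, index);
 *	}
 */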
434
435 static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
436 {
437 struct tc_action *p;
438 int ret = 0;
439
440 mutex_lock(&idrinfo->lock);
441 p = idr_find(&idrinfo->action_idr, index);
442 if (!p) {
443 mutex_unlock(&idrinfo->lock);
444 return -ENOENT;
445 }
446
447 if (!atomic_read(&p->tcfa_bindcnt)) {
448 if (refcount_dec_and_test(&p->tcfa_refcnt)) {
449 struct module *owner = p->ops->owner;
450
451 WARN_ON(p != idr_remove(&idrinfo->action_idr,
452 p->tcfa_index));
453 mutex_unlock(&idrinfo->lock);
454
455 tcf_action_cleanup(p);
456 module_put(owner);
457 return 0;
458 }
459 ret = 0;
460 } else {
461 ret = -EPERM;
462 }
463
464 mutex_unlock(&idrinfo->lock);
465 return ret;
466 }
467
468 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
469 struct tc_action **a, const struct tc_action_ops *ops,
470 int bind, bool cpustats, u32 flags)
471 {
472 struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
473 struct tcf_idrinfo *idrinfo = tn->idrinfo;
474 int err = -ENOMEM;
475
476 if (unlikely(!p))
477 return -ENOMEM;
478 refcount_set(&p->tcfa_refcnt, 1);
479 if (bind)
480 atomic_set(&p->tcfa_bindcnt, 1);
481
482 if (cpustats) {
483 p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
484 if (!p->cpu_bstats)
485 goto err1;
486 p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
487 if (!p->cpu_bstats_hw)
488 goto err2;
489 p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
490 if (!p->cpu_qstats)
491 goto err3;
492 }
493 gnet_stats_basic_sync_init(&p->tcfa_bstats);
494 gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
495 spin_lock_init(&p->tcfa_lock);
496 p->tcfa_index = index;
497 p->tcfa_tm.install = jiffies;
498 p->tcfa_tm.lastuse = jiffies;
499 p->tcfa_tm.firstuse = 0;
500 p->tcfa_flags = flags & TCA_ACT_FLAGS_USER_MASK;
501 if (est) {
502 err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
503 &p->tcfa_rate_est,
504 &p->tcfa_lock, false, est);
505 if (err)
506 goto err4;
507 }
508
509 p->idrinfo = idrinfo;
510 __module_get(ops->owner);
511 p->ops = ops;
512 *a = p;
513 return 0;
514 err4:
515 free_percpu(p->cpu_qstats);
516 err3:
517 free_percpu(p->cpu_bstats_hw);
518 err2:
519 free_percpu(p->cpu_bstats);
520 err1:
521 kfree(p);
522 return err;
523 }
524 EXPORT_SYMBOL(tcf_idr_create);
525
526 int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
527 struct nlattr *est, struct tc_action **a,
528 const struct tc_action_ops *ops, int bind,
529 u32 flags)
530 {
531 /* Set cpustats according to the action's flags. */
532 return tcf_idr_create(tn, index, est, a, ops, bind,
533 !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
534 }
535 EXPORT_SYMBOL(tcf_idr_create_from_flags);
536
537 /* Clean up an idr index that was allocated but not initialized. */
538
539 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
540 {
541 struct tcf_idrinfo *idrinfo = tn->idrinfo;
542
543 mutex_lock(&idrinfo->lock);
544 /* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
545 WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
546 mutex_unlock(&idrinfo->lock);
547 }
548 EXPORT_SYMBOL(tcf_idr_cleanup);
549
550 /* Check if an action with the specified index exists. If the action is
551 * found, increment its reference and bind counters, and return 1. Otherwise,
552 * insert a temporary error pointer (to prevent concurrent users from
553 * inserting actions with the same index) and return 0.
554 */
555
556 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
557 struct tc_action **a, int bind)
558 {
559 struct tcf_idrinfo *idrinfo = tn->idrinfo;
560 struct tc_action *p;
561 int ret;
562
563 again:
564 mutex_lock(&idrinfo->lock);
565 if (*index) {
566 p = idr_find(&idrinfo->action_idr, *index);
567 if (IS_ERR(p)) {
568 /* This means that another process allocated the
569 * index but has not assigned the pointer yet.
570 */
571 mutex_unlock(&idrinfo->lock);
572 goto again;
573 }
574
575 if (p) {
576 refcount_inc(&p->tcfa_refcnt);
577 if (bind)
578 atomic_inc(&p->tcfa_bindcnt);
579 *a = p;
580 ret = 1;
581 } else {
582 *a = NULL;
583 ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
584 *index, GFP_KERNEL);
585 if (!ret)
586 idr_replace(&idrinfo->action_idr,
587 ERR_PTR(-EBUSY), *index);
588 }
589 } else {
590 *index = 1;
591 *a = NULL;
592 ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
593 UINT_MAX, GFP_KERNEL);
594 if (!ret)
595 idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
596 *index);
597 }
598 mutex_unlock(&idrinfo->lock);
599 return ret;
600 }
601 EXPORT_SYMBOL(tcf_idr_check_alloc);
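/* Hedged sketch of how the idr helpers above normally combine inside an
 * action's ->init(): tcf_idr_check_alloc() either binds to an existing index
 * or reserves a new one, tcf_idr_create_from_flags() fills the reservation,
 * and the error paths undo whichever step succeeded (act_example_ops is a
 * hypothetical ops structure; attribute parsing and the bind-only path are
 * omitted):
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (err < 0)
 *		return err;
 *	if (!err) {
 *		err = tcf_idr_create_from_flags(tn, index, est, a,
 *						&act_example_ops, bind, flags);
 *		if (err) {
 *			tcf_idr_cleanup(tn, index);
 *			return err;
 *		}
 *		ret = ACT_P_CREATED;
 *	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
 *		tcf_idr_release(*a, bind);
 *		return -EEXIST;
 *	}
 */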
602
603 void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
604 struct tcf_idrinfo *idrinfo)
605 {
606 struct idr *idr = &idrinfo->action_idr;
607 struct tc_action *p;
608 int ret;
609 unsigned long id = 1;
610 unsigned long tmp;
611
612 idr_for_each_entry_ul(idr, p, tmp, id) {
613 ret = __tcf_idr_release(p, false, true);
614 if (ret == ACT_P_DELETED)
615 module_put(ops->owner);
616 else if (ret < 0)
617 return;
618 }
619 idr_destroy(&idrinfo->action_idr);
620 }
621 EXPORT_SYMBOL(tcf_idrinfo_destroy);
622
623 static LIST_HEAD(act_base);
624 static DEFINE_RWLOCK(act_mod_lock);
625
626 int tcf_register_action(struct tc_action_ops *act,
627 struct pernet_operations *ops)
628 {
629 struct tc_action_ops *a;
630 int ret;
631
632 if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
633 return -EINVAL;
634
635 /* We have to register pernet ops before making the action ops visible,
636 * otherwise tcf_action_init_1() could get a partially initialized
637 * netns.
638 */
639 ret = register_pernet_subsys(ops);
640 if (ret)
641 return ret;
642
643 write_lock(&act_mod_lock);
644 list_for_each_entry(a, &act_base, head) {
645 if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
646 write_unlock(&act_mod_lock);
647 unregister_pernet_subsys(ops);
648 return -EEXIST;
649 }
650 }
651 list_add_tail(&act->head, &act_base);
652 write_unlock(&act_mod_lock);
653
654 return 0;
655 }
656 EXPORT_SYMBOL(tcf_register_action);
657
658 int tcf_unregister_action(struct tc_action_ops *act,
659 struct pernet_operations *ops)
660 {
661 struct tc_action_ops *a;
662 int err = -ENOENT;
663
664 write_lock(&act_mod_lock);
665 list_for_each_entry(a, &act_base, head) {
666 if (a == act) {
667 list_del(&act->head);
668 err = 0;
669 break;
670 }
671 }
672 write_unlock(&act_mod_lock);
673 if (!err)
674 unregister_pernet_subsys(ops);
675 return err;
676 }
677 EXPORT_SYMBOL(tcf_unregister_action);
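/* Registration sketch: action modules call the two functions above from
 * their module init/exit, passing their pernet operations so per-netns state
 * exists before the ops become visible (both ops structures are hypothetical
 * placeholders):
 *
 *	static int __init example_act_init_module(void)
 *	{
 *		return tcf_register_action(&act_example_ops, &example_net_ops);
 *	}
 *
 *	static void __exit example_act_cleanup_module(void)
 *	{
 *		tcf_unregister_action(&act_example_ops, &example_net_ops);
 *	}
 *
 *	module_init(example_act_init_module);
 *	module_exit(example_act_cleanup_module);
 */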
678
679 /* lookup by name */
680 static struct tc_action_ops *tc_lookup_action_n(char *kind)
681 {
682 struct tc_action_ops *a, *res = NULL;
683
684 if (kind) {
685 read_lock(&act_mod_lock);
686 list_for_each_entry(a, &act_base, head) {
687 if (strcmp(kind, a->kind) == 0) {
688 if (try_module_get(a->owner))
689 res = a;
690 break;
691 }
692 }
693 read_unlock(&act_mod_lock);
694 }
695 return res;
696 }
697
698 /* lookup by nlattr */
699 static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
700 {
701 struct tc_action_ops *a, *res = NULL;
702
703 if (kind) {
704 read_lock(&act_mod_lock);
705 list_for_each_entry(a, &act_base, head) {
706 if (nla_strcmp(kind, a->kind) == 0) {
707 if (try_module_get(a->owner))
708 res = a;
709 break;
710 }
711 }
712 read_unlock(&act_mod_lock);
713 }
714 return res;
715 }
716
717 /* TCA_ACT_MAX_PRIO is 32, so we can count up to 32 actions */
718 #define TCA_ACT_MAX_PRIO_MASK 0x1FF
719 int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
720 int nr_actions, struct tcf_result *res)
721 {
722 u32 jmp_prgcnt = 0;
723 u32 jmp_ttl = TCA_ACT_MAX_PRIO; /*matches actions per filter */
724 int i;
725 int ret = TC_ACT_OK;
726
727 if (skb_skip_tc_classify(skb))
728 return TC_ACT_OK;
729
730 restart_act_graph:
731 for (i = 0; i < nr_actions; i++) {
732 const struct tc_action *a = actions[i];
733
734 if (jmp_prgcnt > 0) {
735 jmp_prgcnt -= 1;
736 continue;
737 }
738 repeat:
739 ret = a->ops->act(skb, a, res);
740 if (ret == TC_ACT_REPEAT)
741 goto repeat; /* we need a ttl - JHS */
742
743 if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
744 jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
745 if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
746 /* faulty opcode, stop pipeline */
747 return TC_ACT_OK;
748 } else {
749 jmp_ttl -= 1;
750 if (jmp_ttl > 0)
751 goto restart_act_graph;
752 else /* faulty graph, stop pipeline */
753 return TC_ACT_OK;
754 }
755 } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
756 if (unlikely(!rcu_access_pointer(a->goto_chain))) {
757 net_warn_ratelimited("can't go to NULL chain!\n");
758 return TC_ACT_SHOT;
759 }
760 tcf_action_goto_chain_exec(a, res);
761 }
762
763 if (ret != TC_ACT_PIPE)
764 break;
765 }
766
767 return ret;
768 }
769 EXPORT_SYMBOL(tcf_action_exec);
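/* Control-flow note for tcf_action_exec(): a TC_ACT_JUMP verdict carries its
 * operand in the low bits, so an action returning, for example,
 *
 *	return TC_ACT_JUMP | 2;
 *
 * makes the loop restart and skip the first two actions of the array, with
 * jmp_ttl bounding the number of such restarts. A TC_ACT_GOTO_CHAIN verdict
 * instead resumes classification at the chain resolved by
 * tcf_action_goto_chain_exec().
 */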
770
771 int tcf_action_destroy(struct tc_action *actions[], int bind)
772 {
773 const struct tc_action_ops *ops;
774 struct tc_action *a;
775 int ret = 0, i;
776
777 for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
778 a = actions[i];
779 actions[i] = NULL;
780 ops = a->ops;
781 ret = __tcf_idr_release(a, bind, true);
782 if (ret == ACT_P_DELETED)
783 module_put(ops->owner);
784 else if (ret < 0)
785 return ret;
786 }
787 return ret;
788 }
789
790 static int tcf_action_put(struct tc_action *p)
791 {
792 return __tcf_action_put(p, false);
793 }
794
795 /* Put all actions in this array, skipping NULL entries. */
796 static void tcf_action_put_many(struct tc_action *actions[])
797 {
798 int i;
799
800 for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
801 struct tc_action *a = actions[i];
802 const struct tc_action_ops *ops;
803
804 if (!a)
805 continue;
806 ops = a->ops;
807 if (tcf_action_put(a))
808 module_put(ops->owner);
809 }
810 }
811
812 int
813 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
814 {
815 return a->ops->dump(skb, a, bind, ref);
816 }
817
818 int
819 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
820 {
821 int err = -EINVAL;
822 unsigned char *b = skb_tail_pointer(skb);
823 struct nlattr *nest;
824
825 if (tcf_action_dump_terse(skb, a, false))
826 goto nla_put_failure;
827
828 if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
829 nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
830 a->hw_stats, TCA_ACT_HW_STATS_ANY))
831 goto nla_put_failure;
832
833 if (a->used_hw_stats_valid &&
834 nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
835 a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
836 goto nla_put_failure;
837
838 if (a->tcfa_flags &&
839 nla_put_bitfield32(skb, TCA_ACT_FLAGS,
840 a->tcfa_flags, a->tcfa_flags))
841 goto nla_put_failure;
842
843 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
844 if (nest == NULL)
845 goto nla_put_failure;
846 err = tcf_action_dump_old(skb, a, bind, ref);
847 if (err > 0) {
848 nla_nest_end(skb, nest);
849 return err;
850 }
851
852 nla_put_failure:
853 nlmsg_trim(skb, b);
854 return -1;
855 }
856 EXPORT_SYMBOL(tcf_action_dump_1);
857
858 int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
859 int bind, int ref, bool terse)
860 {
861 struct tc_action *a;
862 int err = -EINVAL, i;
863 struct nlattr *nest;
864
865 for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
866 a = actions[i];
867 nest = nla_nest_start_noflag(skb, i + 1);
868 if (nest == NULL)
869 goto nla_put_failure;
870 err = terse ? tcf_action_dump_terse(skb, a, false) :
871 tcf_action_dump_1(skb, a, bind, ref);
872 if (err < 0)
873 goto errout;
874 nla_nest_end(skb, nest);
875 }
876
877 return 0;
878
879 nla_put_failure:
880 err = -EINVAL;
881 errout:
882 nla_nest_cancel(skb, nest);
883 return err;
884 }
885
886 static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
887 {
888 struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
889 if (!c)
890 return NULL;
891
892 c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
893 if (!c->data) {
894 kfree(c);
895 return NULL;
896 }
897 c->len = nla_len(tb[TCA_ACT_COOKIE]);
898
899 return c;
900 }
901
902 static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
903 {
904 struct nla_bitfield32 hw_stats_bf;
905
906 /* If the user did not pass the attr, that means they do
907 * not care about the type. Return "any" in that case,
908 * which selects all supported types.
909 */
910 if (!hw_stats_attr)
911 return TCA_ACT_HW_STATS_ANY;
912 hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
913 return hw_stats_bf.value;
914 }
915
916 static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
917 [TCA_ACT_KIND] = { .type = NLA_STRING },
918 [TCA_ACT_INDEX] = { .type = NLA_U32 },
919 [TCA_ACT_COOKIE] = { .type = NLA_BINARY,
920 .len = TC_COOKIE_MAX_SIZE },
921 [TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
922 [TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS),
923 [TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
924 };
925
926 void tcf_idr_insert_many(struct tc_action *actions[])
927 {
928 int i;
929
930 for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
931 struct tc_action *a = actions[i];
932 struct tcf_idrinfo *idrinfo;
933
934 if (!a)
935 continue;
936 idrinfo = a->idrinfo;
937 mutex_lock(&idrinfo->lock);
938 /* Replace the ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if the
939 * action was just created; otherwise this is just a nop.
940 */
941 idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
942 mutex_unlock(&idrinfo->lock);
943 }
944 }
945
946 struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
947 bool rtnl_held,
948 struct netlink_ext_ack *extack)
949 {
950 struct nlattr *tb[TCA_ACT_MAX + 1];
951 struct tc_action_ops *a_o;
952 char act_name[IFNAMSIZ];
953 struct nlattr *kind;
954 int err;
955
956 if (!police) {
957 err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
958 tcf_action_policy, extack);
959 if (err < 0)
960 return ERR_PTR(err);
961 err = -EINVAL;
962 kind = tb[TCA_ACT_KIND];
963 if (!kind) {
964 NL_SET_ERR_MSG(extack, "TC action kind must be specified");
965 return ERR_PTR(err);
966 }
967 if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
968 NL_SET_ERR_MSG(extack, "TC action name too long");
969 return ERR_PTR(err);
970 }
971 } else {
972 if (strlcpy(act_name, "police", IFNAMSIZ) >= IFNAMSIZ) {
973 NL_SET_ERR_MSG(extack, "TC action name too long");
974 return ERR_PTR(-EINVAL);
975 }
976 }
977
978 a_o = tc_lookup_action_n(act_name);
979 if (a_o == NULL) {
980 #ifdef CONFIG_MODULES
981 if (rtnl_held)
982 rtnl_unlock();
983 request_module("act_%s", act_name);
984 if (rtnl_held)
985 rtnl_lock();
986
987 a_o = tc_lookup_action_n(act_name);
988
989 /* We dropped the RTNL semaphore in order to
990 * perform the module load. So, even if we
991 * succeeded in loading the module we have to
992 * tell the caller to replay the request. We
993 * indicate this using -EAGAIN.
994 */
995 if (a_o != NULL) {
996 module_put(a_o->owner);
997 return ERR_PTR(-EAGAIN);
998 }
999 #endif
1000 NL_SET_ERR_MSG(extack, "Failed to load TC action module");
1001 return ERR_PTR(-ENOENT);
1002 }
1003
1004 return a_o;
1005 }
1006
1007 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
1008 struct nlattr *nla, struct nlattr *est,
1009 struct tc_action_ops *a_o, int *init_res,
1010 u32 flags, struct netlink_ext_ack *extack)
1011 {
1012 bool police = flags & TCA_ACT_FLAGS_POLICE;
1013 struct nla_bitfield32 userflags = { 0, 0 };
1014 u8 hw_stats = TCA_ACT_HW_STATS_ANY;
1015 struct nlattr *tb[TCA_ACT_MAX + 1];
1016 struct tc_cookie *cookie = NULL;
1017 struct tc_action *a;
1018 int err;
1019
1020 /* backward compatibility for policer */
1021 if (!police) {
1022 err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1023 tcf_action_policy, extack);
1024 if (err < 0)
1025 return ERR_PTR(err);
1026 if (tb[TCA_ACT_COOKIE]) {
1027 cookie = nla_memdup_cookie(tb);
1028 if (!cookie) {
1029 NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
1030 err = -ENOMEM;
1031 goto err_out;
1032 }
1033 }
1034 hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
1035 if (tb[TCA_ACT_FLAGS])
1036 userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
1037
1038 err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
1039 userflags.value | flags, extack);
1040 } else {
1041 err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,
1042 extack);
1043 }
1044 if (err < 0)
1045 goto err_out;
1046 *init_res = err;
1047
1048 if (!police && tb[TCA_ACT_COOKIE])
1049 tcf_set_action_cookie(&a->act_cookie, cookie);
1050
1051 if (!police)
1052 a->hw_stats = hw_stats;
1053
1054 return a;
1055
1056 err_out:
1057 if (cookie) {
1058 kfree(cookie->data);
1059 kfree(cookie);
1060 }
1061 return ERR_PTR(err);
1062 }
1063
1064 /* Returns the number of initialized actions or a negative error. */
1065
1066 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
1067 struct nlattr *est, struct tc_action *actions[],
1068 int init_res[], size_t *attr_size, u32 flags,
1069 struct netlink_ext_ack *extack)
1070 {
1071 struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
1072 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1073 struct tc_action *act;
1074 size_t sz = 0;
1075 int err;
1076 int i;
1077
1078 err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1079 extack);
1080 if (err < 0)
1081 return err;
1082
1083 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1084 struct tc_action_ops *a_o;
1085
1086 a_o = tc_action_load_ops(tb[i], flags & TCA_ACT_FLAGS_POLICE,
1087 !(flags & TCA_ACT_FLAGS_NO_RTNL),
1088 extack);
1089 if (IS_ERR(a_o)) {
1090 err = PTR_ERR(a_o);
1091 goto err_mod;
1092 }
1093 ops[i - 1] = a_o;
1094 }
1095
1096 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1097 act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
1098 &init_res[i - 1], flags, extack);
1099 if (IS_ERR(act)) {
1100 err = PTR_ERR(act);
1101 goto err;
1102 }
1103 sz += tcf_action_fill_size(act);
1104 /* Start from index 0 */
1105 actions[i - 1] = act;
1106 }
1107
1108 /* We have to commit them all together, because if any error happened in
1109 * between, we could not handle the failure gracefully.
1110 */
1111 tcf_idr_insert_many(actions);
1112
1113 *attr_size = tcf_action_full_attrs_size(sz);
1114 err = i - 1;
1115 goto err_mod;
1116
1117 err:
1118 tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
1119 err_mod:
1120 for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
1121 if (ops[i])
1122 module_put(ops[i]->owner);
1123 }
1124 return err;
1125 }
1126
1127 void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
1128 u64 drops, bool hw)
1129 {
1130 if (a->cpu_bstats) {
1131 _bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
1132
1133 this_cpu_ptr(a->cpu_qstats)->drops += drops;
1134
1135 if (hw)
1136 _bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
1137 bytes, packets);
1138 return;
1139 }
1140
1141 _bstats_update(&a->tcfa_bstats, bytes, packets);
1142 a->tcfa_qstats.drops += drops;
1143 if (hw)
1144 _bstats_update(&a->tcfa_bstats_hw, bytes, packets);
1145 }
1146 EXPORT_SYMBOL(tcf_action_update_stats);
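/* Hedged sketch of a ->stats_update() callback, the usual caller of
 * tcf_action_update_stats() when drivers report offloaded counters (the
 * callback name and exact signature here are assumptions):
 *
 *	static void example_stats_update(struct tc_action *a, u64 bytes,
 *					 u64 packets, u64 drops,
 *					 u64 lastuse, bool hw)
 *	{
 *		tcf_action_update_stats(a, bytes, packets, drops, hw);
 *		a->tcfa_tm.lastuse = max_t(u64, a->tcfa_tm.lastuse, lastuse);
 *	}
 */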
1147
1148 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
1149 int compat_mode)
1150 {
1151 int err = 0;
1152 struct gnet_dump d;
1153
1154 if (p == NULL)
1155 goto errout;
1156
1157 /* compat_mode being true specifies a call that is supposed
1158 * to add additional backward compatibility statistic TLVs.
1159 */
1160 if (compat_mode) {
1161 if (p->type == TCA_OLD_COMPAT)
1162 err = gnet_stats_start_copy_compat(skb, 0,
1163 TCA_STATS,
1164 TCA_XSTATS,
1165 &p->tcfa_lock, &d,
1166 TCA_PAD);
1167 else
1168 return 0;
1169 } else
1170 err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
1171 &p->tcfa_lock, &d, TCA_ACT_PAD);
1172
1173 if (err < 0)
1174 goto errout;
1175
1176 if (gnet_stats_copy_basic(&d, p->cpu_bstats,
1177 &p->tcfa_bstats, false) < 0 ||
1178 gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
1179 &p->tcfa_bstats_hw, false) < 0 ||
1180 gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
1181 gnet_stats_copy_queue(&d, p->cpu_qstats,
1182 &p->tcfa_qstats,
1183 p->tcfa_qstats.qlen) < 0)
1184 goto errout;
1185
1186 if (gnet_stats_finish_copy(&d) < 0)
1187 goto errout;
1188
1189 return 0;
1190
1191 errout:
1192 return -1;
1193 }
1194
1195 static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
1196 u32 portid, u32 seq, u16 flags, int event, int bind,
1197 int ref)
1198 {
1199 struct tcamsg *t;
1200 struct nlmsghdr *nlh;
1201 unsigned char *b = skb_tail_pointer(skb);
1202 struct nlattr *nest;
1203
1204 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
1205 if (!nlh)
1206 goto out_nlmsg_trim;
1207 t = nlmsg_data(nlh);
1208 t->tca_family = AF_UNSPEC;
1209 t->tca__pad1 = 0;
1210 t->tca__pad2 = 0;
1211
1212 nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1213 if (!nest)
1214 goto out_nlmsg_trim;
1215
1216 if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
1217 goto out_nlmsg_trim;
1218
1219 nla_nest_end(skb, nest);
1220
1221 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1222 return skb->len;
1223
1224 out_nlmsg_trim:
1225 nlmsg_trim(skb, b);
1226 return -1;
1227 }
1228
1229 static int
1230 tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
1231 struct tc_action *actions[], int event,
1232 struct netlink_ext_ack *extack)
1233 {
1234 struct sk_buff *skb;
1235
1236 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1237 if (!skb)
1238 return -ENOBUFS;
1239 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
1240 0, 1) <= 0) {
1241 NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1242 kfree_skb(skb);
1243 return -EINVAL;
1244 }
1245
1246 return rtnl_unicast(skb, net, portid);
1247 }
1248
1249 static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
1250 struct nlmsghdr *n, u32 portid,
1251 struct netlink_ext_ack *extack)
1252 {
1253 struct nlattr *tb[TCA_ACT_MAX + 1];
1254 const struct tc_action_ops *ops;
1255 struct tc_action *a;
1256 int index;
1257 int err;
1258
1259 err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1260 tcf_action_policy, extack);
1261 if (err < 0)
1262 goto err_out;
1263
1264 err = -EINVAL;
1265 if (tb[TCA_ACT_INDEX] == NULL ||
1266 nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
1267 NL_SET_ERR_MSG(extack, "Invalid TC action index value");
1268 goto err_out;
1269 }
1270 index = nla_get_u32(tb[TCA_ACT_INDEX]);
1271
1272 err = -EINVAL;
1273 ops = tc_lookup_action(tb[TCA_ACT_KIND]);
1274 if (!ops) { /* could happen in batch of actions */
1275 NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
1276 goto err_out;
1277 }
1278 err = -ENOENT;
1279 if (ops->lookup(net, &a, index) == 0) {
1280 NL_SET_ERR_MSG(extack, "TC action with specified index not found");
1281 goto err_mod;
1282 }
1283
1284 module_put(ops->owner);
1285 return a;
1286
1287 err_mod:
1288 module_put(ops->owner);
1289 err_out:
1290 return ERR_PTR(err);
1291 }
1292
1293 static int tca_action_flush(struct net *net, struct nlattr *nla,
1294 struct nlmsghdr *n, u32 portid,
1295 struct netlink_ext_ack *extack)
1296 {
1297 struct sk_buff *skb;
1298 unsigned char *b;
1299 struct nlmsghdr *nlh;
1300 struct tcamsg *t;
1301 struct netlink_callback dcb;
1302 struct nlattr *nest;
1303 struct nlattr *tb[TCA_ACT_MAX + 1];
1304 const struct tc_action_ops *ops;
1305 struct nlattr *kind;
1306 int err = -ENOMEM;
1307
1308 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1309 if (!skb)
1310 return err;
1311
1312 b = skb_tail_pointer(skb);
1313
1314 err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1315 tcf_action_policy, extack);
1316 if (err < 0)
1317 goto err_out;
1318
1319 err = -EINVAL;
1320 kind = tb[TCA_ACT_KIND];
1321 ops = tc_lookup_action(kind);
1322 if (!ops) { /*some idjot trying to flush unknown action */
1323 NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
1324 goto err_out;
1325 }
1326
1327 nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
1328 sizeof(*t), 0);
1329 if (!nlh) {
1330 NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
1331 goto out_module_put;
1332 }
1333 t = nlmsg_data(nlh);
1334 t->tca_family = AF_UNSPEC;
1335 t->tca__pad1 = 0;
1336 t->tca__pad2 = 0;
1337
1338 nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1339 if (!nest) {
1340 NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
1341 goto out_module_put;
1342 }
1343
1344 err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
1345 if (err <= 0) {
1346 nla_nest_cancel(skb, nest);
1347 goto out_module_put;
1348 }
1349
1350 nla_nest_end(skb, nest);
1351
1352 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1353 nlh->nlmsg_flags |= NLM_F_ROOT;
1354 module_put(ops->owner);
1355 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1356 n->nlmsg_flags & NLM_F_ECHO);
1357 if (err < 0)
1358 NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");
1359
1360 return err;
1361
1362 out_module_put:
1363 module_put(ops->owner);
1364 err_out:
1365 kfree_skb(skb);
1366 return err;
1367 }
1368
1369 static int tcf_action_delete(struct net *net, struct tc_action *actions[])
1370 {
1371 int i;
1372
1373 for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
1374 struct tc_action *a = actions[i];
1375 const struct tc_action_ops *ops = a->ops;
1376 /* Actions can be deleted concurrently so we must save their
1377 * type and id to search again after reference is released.
1378 */
1379 struct tcf_idrinfo *idrinfo = a->idrinfo;
1380 u32 act_index = a->tcfa_index;
1381
1382 actions[i] = NULL;
1383 if (tcf_action_put(a)) {
1384 /* last reference, action was deleted concurrently */
1385 module_put(ops->owner);
1386 } else {
1387 int ret;
1388
1389 /* now do the delete */
1390 ret = tcf_idr_delete_index(idrinfo, act_index);
1391 if (ret < 0)
1392 return ret;
1393 }
1394 }
1395 return 0;
1396 }
1397
1398 static int
1399 tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
1400 u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1401 {
1402 int ret;
1403 struct sk_buff *skb;
1404
1405 skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1406 GFP_KERNEL);
1407 if (!skb)
1408 return -ENOBUFS;
1409
1410 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
1411 0, 2) <= 0) {
1412 NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
1413 kfree_skb(skb);
1414 return -EINVAL;
1415 }
1416
1417 /* now do the delete */
1418 ret = tcf_action_delete(net, actions);
1419 if (ret < 0) {
1420 NL_SET_ERR_MSG(extack, "Failed to delete TC action");
1421 kfree_skb(skb);
1422 return ret;
1423 }
1424
1425 ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1426 n->nlmsg_flags & NLM_F_ECHO);
1427 return ret;
1428 }
1429
1430 static int
1431 tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
1432 u32 portid, int event, struct netlink_ext_ack *extack)
1433 {
1434 int i, ret;
1435 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1436 struct tc_action *act;
1437 size_t attr_size = 0;
1438 struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
1439
1440 ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1441 extack);
1442 if (ret < 0)
1443 return ret;
1444
1445 if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
1446 if (tb[1])
1447 return tca_action_flush(net, tb[1], n, portid, extack);
1448
1449 NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
1450 return -EINVAL;
1451 }
1452
1453 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1454 act = tcf_action_get_1(net, tb[i], n, portid, extack);
1455 if (IS_ERR(act)) {
1456 ret = PTR_ERR(act);
1457 goto err;
1458 }
1459 attr_size += tcf_action_fill_size(act);
1460 actions[i - 1] = act;
1461 }
1462
1463 attr_size = tcf_action_full_attrs_size(attr_size);
1464
1465 if (event == RTM_GETACTION)
1466 ret = tcf_get_notify(net, portid, n, actions, event, extack);
1467 else { /* delete */
1468 ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
1469 if (ret)
1470 goto err;
1471 return 0;
1472 }
1473 err:
1474 tcf_action_put_many(actions);
1475 return ret;
1476 }
1477
1478 static int
1479 tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
1480 u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1481 {
1482 struct sk_buff *skb;
1483
1484 skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1485 GFP_KERNEL);
1486 if (!skb)
1487 return -ENOBUFS;
1488
1489 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
1490 RTM_NEWACTION, 0, 0) <= 0) {
1491 NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1492 kfree_skb(skb);
1493 return -EINVAL;
1494 }
1495
1496 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1497 n->nlmsg_flags & NLM_F_ECHO);
1498 }
1499
1500 static int tcf_action_add(struct net *net, struct nlattr *nla,
1501 struct nlmsghdr *n, u32 portid, u32 flags,
1502 struct netlink_ext_ack *extack)
1503 {
1504 size_t attr_size = 0;
1505 int loop, ret, i;
1506 struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
1507 int init_res[TCA_ACT_MAX_PRIO] = {};
1508
1509 for (loop = 0; loop < 10; loop++) {
1510 ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
1511 &attr_size, flags, extack);
1512 if (ret != -EAGAIN)
1513 break;
1514 }
1515
1516 if (ret < 0)
1517 return ret;
1518 ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
1519
1520 /* only put existing actions */
1521 for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
1522 if (init_res[i] == ACT_P_CREATED)
1523 actions[i] = NULL;
1524 tcf_action_put_many(actions);
1525
1526 return ret;
1527 }
1528
1529 static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
1530 [TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
1531 TCA_ACT_FLAG_TERSE_DUMP),
1532 [TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
1533 };
1534
1535 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
1536 struct netlink_ext_ack *extack)
1537 {
1538 struct net *net = sock_net(skb->sk);
1539 struct nlattr *tca[TCA_ROOT_MAX + 1];
1540 u32 portid = NETLINK_CB(skb).portid;
1541 u32 flags = 0;
1542 int ret = 0;
1543
1544 if ((n->nlmsg_type != RTM_GETACTION) &&
1545 !netlink_capable(skb, CAP_NET_ADMIN))
1546 return -EPERM;
1547
1548 ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
1549 TCA_ROOT_MAX, NULL, extack);
1550 if (ret < 0)
1551 return ret;
1552
1553 if (tca[TCA_ACT_TAB] == NULL) {
1554 NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
1555 return -EINVAL;
1556 }
1557
1558 /* n->nlmsg_flags & NLM_F_CREATE */
1559 switch (n->nlmsg_type) {
1560 case RTM_NEWACTION:
1561 /* We are going to assume all other flags
1562 * imply create only if it doesn't exist.
1563 * Note that CREATE | EXCL implies that,
1564 * but since we want to avoid ambiguity (e.g. when flags
1565 * is zero) we just set this.
1566 */
1567 if (n->nlmsg_flags & NLM_F_REPLACE)
1568 flags = TCA_ACT_FLAGS_REPLACE;
1569 ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
1570 extack);
1571 break;
1572 case RTM_DELACTION:
1573 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1574 portid, RTM_DELACTION, extack);
1575 break;
1576 case RTM_GETACTION:
1577 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1578 portid, RTM_GETACTION, extack);
1579 break;
1580 default:
1581 BUG();
1582 }
1583
1584 return ret;
1585 }
1586
1587 static struct nlattr *find_dump_kind(struct nlattr **nla)
1588 {
1589 struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
1590 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1591 struct nlattr *kind;
1592
1593 tb1 = nla[TCA_ACT_TAB];
1594 if (tb1 == NULL)
1595 return NULL;
1596
1597 if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
1598 return NULL;
1599
1600 if (tb[1] == NULL)
1601 return NULL;
1602 if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
1603 return NULL;
1604 kind = tb2[TCA_ACT_KIND];
1605
1606 return kind;
1607 }
1608
1609 static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1610 {
1611 struct net *net = sock_net(skb->sk);
1612 struct nlmsghdr *nlh;
1613 unsigned char *b = skb_tail_pointer(skb);
1614 struct nlattr *nest;
1615 struct tc_action_ops *a_o;
1616 int ret = 0;
1617 struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
1618 struct nlattr *tb[TCA_ROOT_MAX + 1];
1619 struct nlattr *count_attr = NULL;
1620 unsigned long jiffy_since = 0;
1621 struct nlattr *kind = NULL;
1622 struct nla_bitfield32 bf;
1623 u32 msecs_since = 0;
1624 u32 act_count = 0;
1625
1626 ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
1627 TCA_ROOT_MAX, tcaa_policy, cb->extack);
1628 if (ret < 0)
1629 return ret;
1630
1631 kind = find_dump_kind(tb);
1632 if (kind == NULL) {
1633 pr_info("tc_dump_action: action bad kind\n");
1634 return 0;
1635 }
1636
1637 a_o = tc_lookup_action(kind);
1638 if (a_o == NULL)
1639 return 0;
1640
1641 cb->args[2] = 0;
1642 if (tb[TCA_ROOT_FLAGS]) {
1643 bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
1644 cb->args[2] = bf.value;
1645 }
1646
1647 if (tb[TCA_ROOT_TIME_DELTA]) {
1648 msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
1649 }
1650
1651 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1652 cb->nlh->nlmsg_type, sizeof(*t), 0);
1653 if (!nlh)
1654 goto out_module_put;
1655
1656 if (msecs_since)
1657 jiffy_since = jiffies - msecs_to_jiffies(msecs_since);
1658
1659 t = nlmsg_data(nlh);
1660 t->tca_family = AF_UNSPEC;
1661 t->tca__pad1 = 0;
1662 t->tca__pad2 = 0;
1663 cb->args[3] = jiffy_since;
1664 count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
1665 if (!count_attr)
1666 goto out_module_put;
1667
1668 nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1669 if (nest == NULL)
1670 goto out_module_put;
1671
1672 ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
1673 if (ret < 0)
1674 goto out_module_put;
1675
1676 if (ret > 0) {
1677 nla_nest_end(skb, nest);
1678 ret = skb->len;
1679 act_count = cb->args[1];
1680 memcpy(nla_data(count_attr), &act_count, sizeof(u32));
1681 cb->args[1] = 0;
1682 } else
1683 nlmsg_trim(skb, b);
1684
1685 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1686 if (NETLINK_CB(cb->skb).portid && ret)
1687 nlh->nlmsg_flags |= NLM_F_MULTI;
1688 module_put(a_o->owner);
1689 return skb->len;
1690
1691 out_module_put:
1692 module_put(a_o->owner);
1693 nlmsg_trim(skb, b);
1694 return skb->len;
1695 }
1696
1697 static int __init tc_action_init(void)
1698 {
1699 rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
1700 rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
1701 rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
1702 0);
1703
1704 return 0;
1705 }
1706
1707 subsys_initcall(tc_action_init);
1708