// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_police.c	Input police filter
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */
8
9 #include <linux/module.h>
10 #include <linux/types.h>
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/errno.h>
14 #include <linux/skbuff.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <net/act_api.h>
19 #include <net/netlink.h>
20 #include <net/pkt_cls.h>
21 #include <net/tc_act/tc_police.h>
22 #include <net/tc_wrapper.h>
23
/* Each policer is serialized by its individual spinlock */
25
26 static struct tc_action_ops act_police_ops;
27
/* Netlink attribute policy for TCA_POLICE_*.  The packet-per-second rate
 * and burst attributes must be non-zero when present (.min = 1).
 */
static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
	[TCA_POLICE_RATE] = { .len = TC_RTAB_SIZE },
	[TCA_POLICE_PEAKRATE] = { .len = TC_RTAB_SIZE },
	[TCA_POLICE_AVRATE] = { .type = NLA_U32 },
	[TCA_POLICE_RESULT] = { .type = NLA_U32 },
	[TCA_POLICE_RATE64] = { .type = NLA_U64 },
	[TCA_POLICE_PEAKRATE64] = { .type = NLA_U64 },
	[TCA_POLICE_PKTRATE64] = { .type = NLA_U64, .min = 1 },
	[TCA_POLICE_PKTBURST64] = { .type = NLA_U64, .min = 1 },
};
38
/* Create a new police action or replace the parameters of an existing one
 * from netlink attributes.
 *
 * Returns ACT_P_CREATED when a new action instance was allocated, 0 on a
 * successful update (or a plain bind to an existing instance), or a
 * negative errno on failure.  On failure any IDR slot, rate tables and
 * goto-chain reference acquired along the way are released via the
 * failure/release_idr cleanup ladder.
 */
static int tcf_police_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	int ret = 0, tcfp_result = TC_ACT_OK, err, size;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_POLICE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_police *parm;
	struct tcf_police *police;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
	struct tc_action_net *tn = net_generic(net, act_police_ops.net_id);
	struct tcf_police_params *new;
	bool exists = false;
	u32 index;
	u64 rate64, prate64;
	u64 pps, ppsburst;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_POLICE_MAX, nla,
					  police_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_POLICE_TBF] == NULL)
		return -EINVAL;
	/* TCA_POLICE_TBF may carry either the current tc_police layout or
	 * the legacy tc_police_compat one; both sizes are accepted.
	 */
	size = nla_len(tb[TCA_POLICE_TBF]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return -EINVAL;

	parm = nla_data(tb[TCA_POLICE_TBF]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	/* Binding to an already-existing action needs no further setup. */
	if (exists && bind)
		return 0;

	if (!exists) {
		ret = tcf_idr_create(tn, index, NULL, a,
				     &act_police_ops, bind, true, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
		spin_lock_init(&(to_police(*a)->tcfp_lock));
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	police = to_police(*a);
	if (parm->rate.rate) {
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
		if (R_tab == NULL)
			goto failure;

		/* A peak rate only makes sense together with a base rate. */
		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE], NULL);
			if (P_tab == NULL)
				goto failure;
		}
	}

	if (est) {
		err = gen_replace_estimator(&police->tcf_bstats,
					    police->common.cpu_bstats,
					    &police->tcf_rate_est,
					    &police->tcf_lock,
					    false, est);
		if (err)
			goto failure;
	} else if (tb[TCA_POLICE_AVRATE] &&
		   (ret == ACT_P_CREATED ||
		    !gen_estimator_active(&police->tcf_rate_est))) {
		/* AVRATE policing requires a rate estimator; reject when
		 * none is configured and none is already active.
		 */
		err = -EINVAL;
		goto failure;
	}

	if (tb[TCA_POLICE_RESULT]) {
		tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
		if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
			NL_SET_ERR_MSG(extack,
				       "goto chain not allowed on fallback");
			err = -EINVAL;
			goto failure;
		}
	}

	/* Packet-per-second rate and burst come as a pair. */
	if ((tb[TCA_POLICE_PKTRATE64] && !tb[TCA_POLICE_PKTBURST64]) ||
	    (!tb[TCA_POLICE_PKTRATE64] && tb[TCA_POLICE_PKTBURST64])) {
		NL_SET_ERR_MSG(extack,
			       "Both or neither packet-per-second burst and rate must be provided");
		err = -EINVAL;
		goto failure;
	}

	if (tb[TCA_POLICE_PKTRATE64] && R_tab) {
		NL_SET_ERR_MSG(extack,
			       "packet-per-second and byte-per-second rate limits not allowed in same action");
		err = -EINVAL;
		goto failure;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (unlikely(!new)) {
		err = -ENOMEM;
		goto failure;
	}

	/* No failure allowed after this point */
	new->tcfp_result = tcfp_result;
	new->tcfp_mtu = parm->mtu;
	if (!new->tcfp_mtu) {
		/* No explicit MTU: effectively unlimited, or the largest
		 * length representable by the rate table's cell log.
		 */
		new->tcfp_mtu = ~0;
		if (R_tab)
			new->tcfp_mtu = 255 << R_tab->rate.cell_log;
	}
	if (R_tab) {
		new->rate_present = true;
		/* The 64-bit rate attribute (if present) overrides the
		 * 32-bit value embedded in tc_ratespec.
		 */
		rate64 = tb[TCA_POLICE_RATE64] ?
			 nla_get_u64(tb[TCA_POLICE_RATE64]) : 0;
		psched_ratecfg_precompute(&new->rate, &R_tab->rate, rate64);
		qdisc_put_rtab(R_tab);
	} else {
		new->rate_present = false;
	}
	if (P_tab) {
		new->peak_present = true;
		prate64 = tb[TCA_POLICE_PEAKRATE64] ?
			  nla_get_u64(tb[TCA_POLICE_PEAKRATE64]) : 0;
		psched_ratecfg_precompute(&new->peak, &P_tab->rate, prate64);
		qdisc_put_rtab(P_tab);
	} else {
		new->peak_present = false;
	}

	new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
	if (new->peak_present)
		new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
							 new->tcfp_mtu);

	if (tb[TCA_POLICE_AVRATE])
		new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);

	if (tb[TCA_POLICE_PKTRATE64]) {
		pps = nla_get_u64(tb[TCA_POLICE_PKTRATE64]);
		ppsburst = nla_get_u64(tb[TCA_POLICE_PKTBURST64]);
		new->pps_present = true;
		new->tcfp_pkt_burst = PSCHED_TICKS2NS(ppsburst);
		psched_ppscfg_precompute(&new->ppsrate, pps);
	}

	/* Publish: reset the token-bucket state under tcfp_lock (nested
	 * inside tcf_lock), then swap the params pointer via RCU.
	 */
	spin_lock_bh(&police->tcf_lock);
	spin_lock_bh(&police->tcfp_lock);
	police->tcfp_t_c = ktime_get_ns();
	police->tcfp_toks = new->tcfp_burst;
	if (new->peak_present)
		police->tcfp_ptoks = new->tcfp_mtu_ptoks;
	spin_unlock_bh(&police->tcfp_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	new = rcu_replace_pointer(police->params,
				  new,
				  lockdep_is_held(&police->tcf_lock));
	spin_unlock_bh(&police->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	/* 'new' now holds the previous params (NULL on first create). */
	if (new)
		kfree_rcu(new, rcu);

	return ret;

failure:
	qdisc_put_rtab(P_tab);
	qdisc_put_rtab(R_tab);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}
231
tcf_police_mtu_check(struct sk_buff * skb,u32 limit)232 static bool tcf_police_mtu_check(struct sk_buff *skb, u32 limit)
233 {
234 u32 len;
235
236 if (skb_is_gso(skb))
237 return skb_gso_validate_mac_len(skb, limit);
238
239 len = qdisc_pkt_len(skb);
240 if (skb_at_tc_ingress(skb))
241 len += skb->mac_len;
242
243 return len <= limit;
244 }
245
/* Fast-path policing of one skb.
 *
 * Parameters are read under RCU (BH context); the token-bucket state is
 * serialized by tcfp_lock.  Returns the conform result (p->tcfp_result)
 * when the packet is within profile, otherwise the action's configured
 * exceed verdict (tcf_action).
 */
TC_INDIRECT_SCOPE int tcf_police_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	struct tcf_police *police = to_police(a);
	s64 now, toks, ppstoks = 0, ptoks = 0;
	struct tcf_police_params *p;
	int ret;

	tcf_lastuse_update(&police->tcf_tm);
	bstats_update(this_cpu_ptr(police->common.cpu_bstats), skb);

	/* Default verdict is the exceed action; overwritten with the
	 * conform result when the packet passes all checks below.
	 */
	ret = READ_ONCE(police->tcf_action);
	p = rcu_dereference_bh(police->params);

	if (p->tcfp_ewma_rate) {
		struct gnet_stats_rate_est64 sample;

		/* Average-rate policing: exceed when no estimator sample
		 * is available or the measured rate reaches the limit.
		 */
		if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
		    sample.bps >= p->tcfp_ewma_rate)
			goto inc_overlimits;
	}

	if (tcf_police_mtu_check(skb, p->tcfp_mtu)) {
		if (!p->rate_present && !p->pps_present) {
			/* MTU-only policer: nothing to charge. */
			ret = p->tcfp_result;
			goto end;
		}

		now = ktime_get_ns();
		spin_lock_bh(&police->tcfp_lock);
		/* Credit the elapsed time, capped at one full burst. */
		toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
		if (p->peak_present) {
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > p->tcfp_mtu_ptoks)
				ptoks = p->tcfp_mtu_ptoks;
			ptoks -= (s64)psched_l2t_ns(&p->peak,
						    qdisc_pkt_len(skb));
		}
		if (p->rate_present) {
			toks += police->tcfp_toks;
			if (toks > p->tcfp_burst)
				toks = p->tcfp_burst;
			toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
		} else if (p->pps_present) {
			ppstoks = min_t(s64, now - police->tcfp_t_c, p->tcfp_pkt_burst);
			ppstoks += police->tcfp_pkttoks;
			if (ppstoks > p->tcfp_pkt_burst)
				ppstoks = p->tcfp_pkt_burst;
			ppstoks -= (s64)psched_pkt2t_ns(&p->ppsrate, 1);
		}
		/* OR of the signed buckets is >= 0 iff none went negative:
		 * the packet conforms, so commit the new bucket state.
		 */
		if ((toks | ptoks | ppstoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			police->tcfp_pkttoks = ppstoks;
			spin_unlock_bh(&police->tcfp_lock);
			ret = p->tcfp_result;
			goto inc_drops;
		}
		spin_unlock_bh(&police->tcfp_lock);
	}

inc_overlimits:
	qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
inc_drops:
	/* Reached both on exceed (fallthrough from above) and on conform;
	 * a drop is only counted when the final verdict is SHOT.
	 */
	if (ret == TC_ACT_SHOT)
		qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
end:
	return ret;
}
317
tcf_police_cleanup(struct tc_action * a)318 static void tcf_police_cleanup(struct tc_action *a)
319 {
320 struct tcf_police *police = to_police(a);
321 struct tcf_police_params *p;
322
323 p = rcu_dereference_protected(police->params, 1);
324 if (p)
325 kfree_rcu(p, rcu);
326 }
327
/* Fold hardware-reported counters into the action's software stats and
 * advance the last-used timestamp (monotonically).
 */
static void tcf_police_stats_update(struct tc_action *a,
				    u64 bytes, u64 packets, u64 drops,
				    u64 lastuse, bool hw)
{
	struct tcf_t *tm = &to_police(a)->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	if (lastuse > tm->lastuse)
		tm->lastuse = lastuse;
}
338
tcf_police_dump(struct sk_buff * skb,struct tc_action * a,int bind,int ref)339 static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
340 int bind, int ref)
341 {
342 unsigned char *b = skb_tail_pointer(skb);
343 struct tcf_police *police = to_police(a);
344 struct tcf_police_params *p;
345 struct tc_police opt = {
346 .index = police->tcf_index,
347 .refcnt = refcount_read(&police->tcf_refcnt) - ref,
348 .bindcnt = atomic_read(&police->tcf_bindcnt) - bind,
349 };
350 struct tcf_t t;
351
352 spin_lock_bh(&police->tcf_lock);
353 opt.action = police->tcf_action;
354 p = rcu_dereference_protected(police->params,
355 lockdep_is_held(&police->tcf_lock));
356 opt.mtu = p->tcfp_mtu;
357 opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
358 if (p->rate_present) {
359 psched_ratecfg_getrate(&opt.rate, &p->rate);
360 if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) &&
361 nla_put_u64_64bit(skb, TCA_POLICE_RATE64,
362 police->params->rate.rate_bytes_ps,
363 TCA_POLICE_PAD))
364 goto nla_put_failure;
365 }
366 if (p->peak_present) {
367 psched_ratecfg_getrate(&opt.peakrate, &p->peak);
368 if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) &&
369 nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64,
370 police->params->peak.rate_bytes_ps,
371 TCA_POLICE_PAD))
372 goto nla_put_failure;
373 }
374 if (p->pps_present) {
375 if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64,
376 police->params->ppsrate.rate_pkts_ps,
377 TCA_POLICE_PAD))
378 goto nla_put_failure;
379 if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64,
380 PSCHED_NS2TICKS(p->tcfp_pkt_burst),
381 TCA_POLICE_PAD))
382 goto nla_put_failure;
383 }
384 if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
385 goto nla_put_failure;
386 if (p->tcfp_result &&
387 nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
388 goto nla_put_failure;
389 if (p->tcfp_ewma_rate &&
390 nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
391 goto nla_put_failure;
392
393 tcf_tm_dump(&t, &police->tcf_tm);
394 if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
395 goto nla_put_failure;
396 spin_unlock_bh(&police->tcf_lock);
397
398 return skb->len;
399
400 nla_put_failure:
401 spin_unlock_bh(&police->tcf_lock);
402 nlmsg_trim(skb, b);
403 return -1;
404 }
405
/* Map a tc verdict onto the matching FLOW_ACTION_* id for offload.
 * For GOTO_CHAIN and JUMP the embedded value is returned via @extval.
 * Returns -EOPNOTSUPP (setting an extack message) for verdicts that
 * cannot be offloaded.
 */
static int tcf_police_act_to_flow_act(int tc_act, u32 *extval,
				      struct netlink_ext_ack *extack)
{
	int act_id = -EOPNOTSUPP;

	if (!TC_ACT_EXT_OPCODE(tc_act)) {
		switch (tc_act) {
		case TC_ACT_OK:
			act_id = FLOW_ACTION_ACCEPT;
			break;
		case TC_ACT_SHOT:
			act_id = FLOW_ACTION_DROP;
			break;
		case TC_ACT_PIPE:
			act_id = FLOW_ACTION_PIPE;
			break;
		case TC_ACT_RECLASSIFY:
			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform/exceed action is \"reclassify\"");
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
			break;
		}
	} else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_GOTO_CHAIN)) {
		act_id = FLOW_ACTION_GOTO;
		*extval = tc_act & TC_ACT_EXT_VAL_MASK;
	} else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_JUMP)) {
		act_id = FLOW_ACTION_JUMP;
		*extval = tc_act & TC_ACT_EXT_VAL_MASK;
	} else if (tc_act == TC_ACT_UNSPEC) {
		act_id = FLOW_ACTION_CONTINUE;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
	}

	return act_id;
}
436
/* Fill the flow offload description for this police action.
 *
 * With bind=true, populates a flow_action_entry (rates, bursts, MTU and
 * the conform/exceed verdict mapping) and bumps *index_inc; otherwise
 * only reports FLOW_ACTION_POLICE in the flow_offload_action.  Returns
 * 0 on success or a negative errno if a verdict cannot be offloaded.
 */
static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;
		struct tcf_police *police = to_police(act);
		struct tcf_police_params *p;
		int act_id;

		/* NOTE(review): assumes the caller holds tcf_lock, as
		 * stated by lockdep_is_held() — confirm this holds for
		 * every offload setup path.
		 */
		p = rcu_dereference_protected(police->params,
					      lockdep_is_held(&police->tcf_lock));

		entry->id = FLOW_ACTION_POLICE;
		entry->police.burst = tcf_police_burst(act);
		entry->police.rate_bytes_ps =
			tcf_police_rate_bytes_ps(act);
		entry->police.peakrate_bytes_ps = tcf_police_peakrate_bytes_ps(act);
		entry->police.avrate = tcf_police_tcfp_ewma_rate(act);
		entry->police.overhead = tcf_police_rate_overhead(act);
		entry->police.burst_pkt = tcf_police_burst_pkt(act);
		entry->police.rate_pkt_ps =
			tcf_police_rate_pkt_ps(act);
		entry->police.mtu = tcf_police_tcfp_mtu(act);

		/* Exceed verdict comes from the action itself... */
		act_id = tcf_police_act_to_flow_act(police->tcf_action,
						    &entry->police.exceed.extval,
						    extack);
		if (act_id < 0)
			return act_id;

		entry->police.exceed.act_id = act_id;

		/* ...while the conform verdict is the configured result. */
		act_id = tcf_police_act_to_flow_act(p->tcfp_result,
						    &entry->police.notexceed.extval,
						    extack);
		if (act_id < 0)
			return act_id;

		entry->police.notexceed.act_id = act_id;

		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_POLICE;
	}

	return 0;
}
487
/* Module metadata. */
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");
491
/* Ops table for the "police" action; registered from police_init_module(). */
static struct tc_action_ops act_police_ops = {
	.kind = "police",
	.id = TCA_ID_POLICE,
	.owner = THIS_MODULE,
	.stats_update = tcf_police_stats_update,
	.act = tcf_police_act,
	.dump = tcf_police_dump,
	.init = tcf_police_init,
	.cleanup = tcf_police_cleanup,
	.offload_act_setup = tcf_police_offload_act_setup,
	.size = sizeof(struct tcf_police),
};
504
police_init_net(struct net * net)505 static __net_init int police_init_net(struct net *net)
506 {
507 struct tc_action_net *tn = net_generic(net, act_police_ops.net_id);
508
509 return tc_action_net_init(net, tn, &act_police_ops);
510 }
511
/* Tear down per-netns action state for a batch of exiting namespaces. */
static void __net_exit police_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_police_ops.net_id);
}
516
/* Pernet hooks: one tc_action_net instance per network namespace. */
static struct pernet_operations police_net_ops = {
	.init = police_init_net,
	.exit_batch = police_exit_net,
	.id = &act_police_ops.net_id,
	.size = sizeof(struct tc_action_net),
};
523
/* Register the police action (and its pernet ops) at module load. */
static int __init police_init_module(void)
{
	return tcf_register_action(&act_police_ops, &police_net_ops);
}
528
/* Unregister the police action on module unload. */
static void __exit police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops, &police_net_ops);
}
533
/* Module entry and exit points. */
module_init(police_init_module);
module_exit(police_cleanup_module);
536