1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/err.h>
3 #include <linux/igmp.h>
4 #include <linux/kernel.h>
5 #include <linux/netdevice.h>
6 #include <linux/rculist.h>
7 #include <linux/skbuff.h>
8 #include <linux/if_ether.h>
9 #include <net/ip.h>
10 #include <net/netlink.h>
11 #include <net/switchdev.h>
12 #if IS_ENABLED(CONFIG_IPV6)
13 #include <net/ipv6.h>
14 #include <net/addrconf.h>
15 #endif
16
17 #include "br_private.h"
18
19 static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port * pmctx,unsigned long * timer)20 br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
21 unsigned long *timer)
22 {
23 *timer = br_timer_value(&pmctx->ip4_mc_router_timer);
24 return !hlist_unhashed(&pmctx->ip4_rlist);
25 }
26
/* IPv6 counterpart of br_ip4_rports_get_timer(). When IPv6 support is
 * compiled out it degenerates to a stub reporting no timer and no
 * router-port membership.
 */
static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}
39
__br_rports_one_size(void)40 static size_t __br_rports_one_size(void)
41 {
42 return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
43 nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
44 nla_total_size(sizeof(u8)) + /* MDBA_ROUTER_PATTR_TYPE */
45 nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
46 nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
47 nla_total_size(sizeof(u32)); /* MDBA_ROUTER_PATTR_VID */
48 }
49
/* br_rports_size - netlink size needed to dump all router ports
 * @brmctx: global (bridge) or per-vlan multicast context
 *
 * Walks both the IPv4 and (when enabled) IPv6 multicast router lists
 * under RCU and sums the per-entry attribute sizes, plus the enclosing
 * MDBA_ROUTER nest.
 */
size_t br_rports_size(const struct net_bridge_mcast *brmctx)
{
	struct net_bridge_mcast_port *pmctx;
	size_t size = nla_total_size(0); /* MDBA_ROUTER */

	rcu_read_lock();
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
				 ip4_rlist)
		size += __br_rports_one_size();

#if IS_ENABLED(CONFIG_IPV6)
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
				 ip6_rlist)
		size += __br_rports_one_size();
#endif
	rcu_read_unlock();

	return size;
}
69
/* br_rports_fill_info - dump multicast router ports into a netlink skb
 * @skb: destination message
 * @brmctx: global (bridge) or per-vlan multicast context to dump
 *
 * Emits an MDBA_ROUTER nest with one MDBA_ROUTER_PORT entry per bridge
 * port that currently acts as an IPv4 and/or IPv6 multicast router.
 * Returns 0 on success (including when there is nothing to dump) or
 * -EMSGSIZE when the message runs out of room; partial nests are
 * cancelled before returning the error.
 */
int br_rports_fill_info(struct sk_buff *skb,
			const struct net_bridge_mcast *brmctx)
{
	u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	/* nothing to report when router ports are disabled or absent */
	if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
		struct net_bridge_mcast_port *pmctx;

		if (vid) {
			struct net_bridge_vlan *v;

			/* per-vlan context: only ports member of the vlan */
			v = br_vlan_find(nbp_vlan_group(p), vid);
			if (!v)
				continue;
			pmctx = &v->port_mcast_ctx;
		} else {
			pmctx = &p->multicast_ctx;
		}

		have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		/* legacy PATTR_TIMER reports the larger of the two timers */
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_ctx.multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer)) ||
		    (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
134
__mdb_entry_fill_flags(struct br_mdb_entry * e,unsigned char flags)135 static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
136 {
137 e->state = flags & MDB_PG_FLAGS_PERMANENT;
138 e->flags = 0;
139 if (flags & MDB_PG_FLAGS_OFFLOAD)
140 e->flags |= MDB_FLAGS_OFFLOAD;
141 if (flags & MDB_PG_FLAGS_FAST_LEAVE)
142 e->flags |= MDB_FLAGS_FAST_LEAVE;
143 if (flags & MDB_PG_FLAGS_STAR_EXCL)
144 e->flags |= MDB_FLAGS_STAR_EXCL;
145 if (flags & MDB_PG_FLAGS_BLOCKED)
146 e->flags |= MDB_FLAGS_BLOCKED;
147 }
148
/* Convert a UAPI br_mdb_entry (plus an optional MDBE_ATTR_SOURCE
 * attribute) into the bridge's internal br_ip group key. A protocol
 * other than IPv4/IPv6 is treated as an L2 (MAC address) group.
 */
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}

}
173
/* Dump the source list of port group @p as an MDBA_MDB_EATTR_SRC_LIST
 * nest: one MDBA_MDB_SRCLIST_ENTRY per source carrying its address and
 * remaining timer. An empty source list emits nothing. Returns 0 or
 * -EMSGSIZE (partial nests are cancelled first).
 */
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	/* walked under RCU or with the bridge multicast lock held */
	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			/* unknown protocol: skip this source entry */
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
229
/* Serialize one MDB entry into an MDBA_MDB_ENTRY_INFO nest.
 * @mp: the multicast group entry
 * @p: the port group to describe, or NULL for the host-joined entry
 *     (then the bridge device itself and the group timer are reported)
 *
 * Besides the legacy br_mdb_entry blob this adds the group timer,
 * optional source address, routing protocol, filter mode and - when the
 * negotiated IGMP/MLD version supports per-source state - the nested
 * source list. Returns 0 or -EMSGSIZE.
 */
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		/* host join: report the bridge device and group timer */
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	else
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		/* per-source dumping only makes sense with IGMPv3 */
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* per-source dumping only makes sense with MLDv2 */
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}
315
/* Dump all MDB entries of one bridge device into @skb, resuming from
 * the position stored in cb->args[1] (entry index) and cb->args[2]
 * (port group index within that entry). On a full skb, the current
 * position is saved back into cb->args so the next dump call resumes
 * where this one stopped. Returns 0 when done or -EMSGSIZE.
 */
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		/* skip entries already dumped in a previous call */
		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		/* the host-joined entry is dumped before any port groups */
		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		      pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				/* keep what fit; resume from pidx later */
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}
380
/* Strict validation of an RTM_GETMDB dump request: the header must be a
 * bare br_port_msg with no device filter and no trailing attributes.
 * Returns 0 if valid, -EINVAL (with extack message) otherwise.
 */
static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}
403
/* RTM_GETMDB dump handler: iterate all bridge masters in the netns
 * under RCU and emit one multi-part message per bridge containing its
 * MDB entries and multicast router ports. cb->args[0] holds the bridge
 * index to resume from; cb->args[1]/[2] the intra-bridge position.
 */
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (netif_is_bridge_master(dev)) {
			struct net_bridge *br = netdev_priv(dev);
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, &br->multicast_ctx) < 0)
				goto out;

			/* bridge fully dumped; next one starts fresh */
			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	/* close a partially filled message so it is still parseable */
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}
460
/* Build a complete RTM_NEWMDB/RTM_DELMDB notification message for one
 * MDB entry (@mp) and optional port group (@pg, NULL for host join)
 * into @skb. Returns 0 on success or -EMSGSIZE after cancelling the
 * partially built message.
 */
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
500
/* Estimate the netlink message size needed for an MDB notification
 * about port group @pg (NULL means a host-joined entry with only the
 * base attributes). Accounts for the optional source address, group
 * mode and per-source nested list depending on the group protocol and
 * the active IGMP/MLD version.
 */
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		/* IGMPv2 carries no per-source state */
		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		/* MLDv1 carries no per-source state */
		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}
554
br_mdb_notify(struct net_device * dev,struct net_bridge_mdb_entry * mp,struct net_bridge_port_group * pg,int type)555 void br_mdb_notify(struct net_device *dev,
556 struct net_bridge_mdb_entry *mp,
557 struct net_bridge_port_group *pg,
558 int type)
559 {
560 struct net *net = dev_net(dev);
561 struct sk_buff *skb;
562 int err = -ENOBUFS;
563
564 br_switchdev_mdb_notify(dev, mp, pg, type);
565
566 skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
567 if (!skb)
568 goto errout;
569
570 err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
571 if (err < 0) {
572 kfree_skb(skb);
573 goto errout;
574 }
575
576 rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
577 return;
578 errout:
579 rtnl_set_sk_err(net, RTNLGRP_MDB, err);
580 }
581
/* Build a router-port notification message: an MDBA_ROUTER nest with a
 * single MDBA_ROUTER_PORT entry carrying @ifindex and, when non-zero,
 * the vlan id. Returns 0 or -EMSGSIZE after cancelling the partial
 * message.
 */
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u16 vid, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlattr *nest, *port_nest;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
	if (!port_nest)
		goto end;
	if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	nla_nest_end(skb, port_nest);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
626
rtnl_rtr_nlmsg_size(void)627 static inline size_t rtnl_rtr_nlmsg_size(void)
628 {
629 return NLMSG_ALIGN(sizeof(struct br_port_msg))
630 + nla_total_size(sizeof(__u32))
631 + nla_total_size(sizeof(u16));
632 }
633
/* Notify RTNLGRP_MDB listeners that a port's multicast router state
 * changed. @pmctx may be NULL (then ifindex 0 is reported); a vlan
 * context also reports the vlan id. Failures are recorded on the
 * rtnetlink socket.
 */
void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;
	u16 vid;

	ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
	vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
							      0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
				      NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
663
/* Validate a user-supplied br_mdb_entry: non-zero ifindex, a multicast
 * (but not reserved/link-local) group address matching its protocol
 * (IPv4, IPv6 or L2 with proto 0), a known state and a legal vlan id.
 * Sets an extack message and returns false on the first violation.
 */
static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
			       struct netlink_ext_ack *extack)
{
	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
		return false;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
			return false;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
			return false;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
			return false;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
			return false;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
		return false;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
		return false;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
		return false;
	}

	return true;
}
710
is_valid_mdb_source(struct nlattr * attr,__be16 proto,struct netlink_ext_ack * extack)711 static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
712 struct netlink_ext_ack *extack)
713 {
714 switch (proto) {
715 case htons(ETH_P_IP):
716 if (nla_len(attr) != sizeof(struct in_addr)) {
717 NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
718 return false;
719 }
720 if (ipv4_is_multicast(nla_get_in_addr(attr))) {
721 NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
722 return false;
723 }
724 break;
725 #if IS_ENABLED(CONFIG_IPV6)
726 case htons(ETH_P_IPV6): {
727 struct in6_addr src;
728
729 if (nla_len(attr) != sizeof(struct in6_addr)) {
730 NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
731 return false;
732 }
733 src = nla_get_in6_addr(attr);
734 if (ipv6_addr_is_multicast(&src)) {
735 NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
736 return false;
737 }
738 break;
739 }
740 #endif
741 default:
742 NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
743 return false;
744 }
745
746 return true;
747 }
748
/* Policy for MDBA_SET_ENTRY_ATTRS: the source is a binary blob sized
 * between an IPv4 and an IPv6 address; the exact length vs. protocol
 * match is checked later in is_valid_mdb_source().
 */
static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};
754
/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request.
 * On success *pdev is the bridge master device, *pentry points into the
 * message's validated MDBA_SET_ENTRY payload and mdb_attrs[] holds the
 * parsed MDBA_SET_ENTRY_ATTRS attributes (zeroed when absent).
 * Returns 0 or a negative errno with an extack message set.
 */
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry,
			struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
		return -ENODEV;
	}

	if (!netif_is_bridge_master(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}
	if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry, extack))
		return -EINVAL;
	*pentry = entry;

	if (tb[MDBA_SET_ENTRY_ATTRS]) {
		err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
				       tb[MDBA_SET_ENTRY_ATTRS],
				       br_mdbe_attrs_pol, extack);
		if (err)
			return err;
		if (mdb_attrs[MDBE_ATTR_SOURCE] &&
		    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
					 entry->addr.proto, extack))
			return -EINVAL;
	} else {
		/* callers rely on mdb_attrs[] being initialized */
		memset(mdb_attrs, 0,
		       sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
	}

	return 0;
}
821
/* Pick the multicast context an entry should be added under: the global
 * bridge context when vlan snooping is off, otherwise the per-vlan
 * context of entry->vid. Returns NULL (with extack message) when vlan
 * snooping is on but no usable vlan context exists.
 */
static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
			const struct br_mdb_entry *entry,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast *brmctx = NULL;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		brmctx = &br->multicast_ctx;
		goto out;
	}

	if (!entry->vid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
		goto out;
	}

	v = br_vlan_find(br_vlan_group(br), entry->vid);
	if (!v) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
		goto out;
	}
	if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
		goto out;
	}
	brmctx = &v->br_mcast_ctx;
out:
	return brmctx;
}
853
/* Add a static MDB group entry, called with br->multicast_lock held.
 * @port: the member port, or NULL for a host join on the bridge itself.
 *
 * Creates the group if needed, rejects duplicate joins, inserts the new
 * port group in the (pointer-ordered) port list, arms the membership
 * timer for temporary entries, sends an RTM_NEWMDB notification and
 * cross-links (*,G)/(S,G) entries for correct replication.
 * Returns 0 or a negative errno with an extack message.
 */
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_mdb_entry *entry,
			    struct nlattr **mdb_attrs,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *mp, *star_mp;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mcast *brmctx;
	struct br_ip group, star_group;
	unsigned long now = jiffies;
	unsigned char flags = 0;
	u8 filter_mode;
	int err;

	__mdb_entry_to_br_ip(entry, &group, mdb_attrs);

	brmctx = __br_mdb_choose_context(br, entry, extack);
	if (!brmctx)
		return -EINVAL;

	/* host join errors which can happen before creating the group */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			return -EINVAL;
		}
	}

	if (br_group_is_l2(&group) && entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		return -EINVAL;
	}

	mp = br_mdb_ip_get(br, &group);
	if (!mp) {
		mp = br_multicast_new_group(br, &group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(brmctx, mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	/* find the insertion point in the pointer-ordered port list */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == port) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
			return -EEXIST;
		}
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
						       MCAST_INCLUDE;

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	p = br_multicast_new_port_group(port, &group, *pp, flags, NULL,
					filter_mode, RTPROT_STATIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		return -ENOMEM;
	}
	rcu_assign_pointer(*pp, p);
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
	/* if we are adding a new EXCLUDE port group (*,G) it needs to be also
	 * added to all S,G entries for proper replication, if we are adding
	 * a new INCLUDE port (S,G) then all of *,G EXCLUDE ports need to be
	 * added to it for proper replication
	 */
	if (br_multicast_should_handle_mode(brmctx, group.proto)) {
		switch (filter_mode) {
		case MCAST_EXCLUDE:
			br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
			break;
		case MCAST_INCLUDE:
			star_group = p->key.addr;
			memset(&star_group.src, 0, sizeof(star_group.src));
			star_mp = br_mdb_ip_get(br, &star_group);
			if (star_mp)
				br_multicast_sg_add_exclude_ports(star_mp, p);
			break;
		}
	}

	return 0;
}
964
__br_mdb_add(struct net * net,struct net_bridge * br,struct net_bridge_port * p,struct br_mdb_entry * entry,struct nlattr ** mdb_attrs,struct netlink_ext_ack * extack)965 static int __br_mdb_add(struct net *net, struct net_bridge *br,
966 struct net_bridge_port *p,
967 struct br_mdb_entry *entry,
968 struct nlattr **mdb_attrs,
969 struct netlink_ext_ack *extack)
970 {
971 int ret;
972
973 spin_lock_bh(&br->multicast_lock);
974 ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
975 spin_unlock_bh(&br->multicast_lock);
976
977 return ret;
978 }
979
/* RTM_NEWMDB handler: resolve the bridge and (optionally) the member
 * port from the request, validate their state, then add the entry. With
 * vlan filtering enabled and no vlan given, the entry is installed on
 * every vlan configured on the port/bridge. Returns 0 or a negative
 * errno with an extack message.
 */
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (!netif_running(br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	/* entry->ifindex == bridge ifindex means a host join */
	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		p = br_port_get_rtnl(pdev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
		if (p->state == BR_STATE_DISABLED) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
	}

	return err;
}
1051
/* Delete one MDB entry under the bridge multicast lock. A request whose
 * ifindex matches the bridge device removes the host join; otherwise
 * the matching (non-disabled) port group is removed. Returns 0 on
 * success or -EINVAL when nothing matching was found.
 */
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip, mdb_attrs);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		/* no members left: let the group expire immediately */
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		if (p->key.port->state == BR_STATE_DISABLED)
			goto unlock;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
1099
/* RTM_DELMDB handler: resolve the bridge and (optionally) the member
 * port, then delete the entry. With vlan filtering enabled and no vlan
 * given, deletion is attempted on every configured vlan (the last
 * attempt's result is returned). Returns 0 or a negative errno.
 */
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* entry->ifindex == bridge ifindex means a host leave */
	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry, mdb_attrs);
		}
	} else {
		err = __br_mdb_del(br, entry, mdb_attrs);
	}

	return err;
}
1146
/* Register the bridge MDB rtnetlink handlers (dump, add, delete). */
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}
1153
/* Unregister the bridge MDB rtnetlink handlers registered in
 * br_mdb_init().
 */
void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}
1160