1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
4 * Copyright (c) 2008-2009 Marvell Semiconductor
5 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
6 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
7 */
8
9 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/list.h>
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/of.h>
16 #include <linux/of_net.h>
17 #include <net/devlink.h>
18
19 #include "dsa_priv.h"
20
21 static DEFINE_MUTEX(dsa2_mutex);
22 LIST_HEAD(dsa_tree_list);
23
24 /* Track the bridges with forwarding offload enabled */
25 static unsigned long dsa_fwd_offloading_bridges;
26
27 /**
28 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
29 * @dst: collection of struct dsa_switch devices to notify.
30 * @e: event, must be of type DSA_NOTIFIER_*
31 * @v: event-specific value.
32 *
33 * Given a struct dsa_switch_tree, this can be used to run a function once for
34 * each member DSA switch. The only alternative way of traversing the tree is
35 * through its ports list, which does not uniquely list the member switches.
36 */
37 int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
38 {
39 struct raw_notifier_head *nh = &dst->nh;
40 int err;
41
42 err = raw_notifier_call_chain(nh, e, v);
43
44 return notifier_to_errno(err);
45 }
46
47 /**
48 * dsa_broadcast - Notify all DSA trees in the system.
49 * @e: event, must be of type DSA_NOTIFIER_*
50 * @v: event-specific value.
51 *
52 * Can be used to notify the switching fabric of events such as cross-chip
53 * bridging between disjoint trees (such as islands of tagger-compatible
54 * switches bridged by an incompatible middle switch).
55 *
56 * WARNING: this function is not reliable during probe time, because probing
57 * between trees is asynchronous and not all DSA trees might have probed.
58 */
59 int dsa_broadcast(unsigned long e, void *v)
60 {
61 struct dsa_switch_tree *dst;
62 int err = 0;
63
64 list_for_each_entry(dst, &dsa_tree_list, list) {
65 err = dsa_tree_notify(dst, e, v);
66 if (err)
67 break;
68 }
69
70 return err;
71 }
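
/* Illustrative sketch (inferred from the rest of DSA, not from this file
 * alone) of how the two helpers above are used: a cross-chip event is packed
 * into its dsa_notifier_*_info structure and then either scoped to one tree
 * or sent to all of them, e.g.
 *
 *	err = dsa_tree_notify(dst, DSA_NOTIFIER_BRIDGE_JOIN, &info);
 *	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
 */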
72
73 /**
74 * dsa_lag_map() - Map LAG netdev to a linear LAG ID
75 * @dst: Tree in which to record the mapping.
76 * @lag: Netdev that is to be mapped to an ID.
77 *
78 * dsa_lag_id/dsa_lag_dev can then be used to translate between the
79 * two spaces. The size of the mapping space is determined by the
80 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
81 * it unset if it is not needed, in which case these functions become
82 * no-ops.
83 */
84 void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
85 {
86 unsigned int id;
87
88 if (dsa_lag_id(dst, lag) >= 0)
89 /* Already mapped */
90 return;
91
92 for (id = 0; id < dst->lags_len; id++) {
93 if (!dsa_lag_dev(dst, id)) {
94 dst->lags[id] = lag;
95 return;
96 }
97 }
98
99 /* No IDs left, which is OK. Some drivers do not need it. The
100 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
101 * returns an error for this device when joining the LAG. The
102 * driver can then return -EOPNOTSUPP back to DSA, which will
103 * fall back to a software LAG.
104 */
105 }
106
107 /**
108 * dsa_lag_unmap() - Remove a LAG ID mapping
109 * @dst: Tree in which the mapping is recorded.
110 * @lag: Netdev that was mapped.
111 *
112 * As there may be multiple users of the mapping, it is only removed
113 * if there are no other references to it.
114 */
115 void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
116 {
117 struct dsa_port *dp;
118 unsigned int id;
119
120 dsa_lag_foreach_port(dp, dst, lag)
121 /* There are remaining users of this mapping */
122 return;
123
124 dsa_lags_foreach_id(id, dst) {
125 if (dsa_lag_dev(dst, id) == lag) {
126 dst->lags[id] = NULL;
127 break;
128 }
129 }
130 }
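
/* Usage note (inferred from the LAG offload paths elsewhere in DSA):
 * dsa_lag_map() and dsa_lag_unmap() are expected to be called from the LAG
 * join/leave handlers (dsa_port_lag_join()/dsa_port_lag_leave() in port.c),
 * so a mapping stays alive for as long as at least one port is a member of
 * the LAG netdev.
 */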
131
132 static int dsa_bridge_num_find(const struct net_device *bridge_dev)
133 {
134 struct dsa_switch_tree *dst;
135 struct dsa_port *dp;
136
137 /* When preparing the offload for a port, it will have a valid
138 * dp->bridge_dev pointer but a not-yet-valid dp->bridge_num.
139 * However, there might be other ports having the same dp->bridge_dev
140 * and a valid dp->bridge_num, so just ignore this port.
141 */
142 list_for_each_entry(dst, &dsa_tree_list, list)
143 list_for_each_entry(dp, &dst->ports, list)
144 if (dp->bridge_dev == bridge_dev &&
145 dp->bridge_num != -1)
146 return dp->bridge_num;
147
148 return -1;
149 }
150
151 int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
152 {
153 int bridge_num = dsa_bridge_num_find(bridge_dev);
154
155 if (bridge_num < 0) {
156 /* First port that offloads TX forwarding for this bridge */
157 bridge_num = find_first_zero_bit(&dsa_fwd_offloading_bridges,
158 DSA_MAX_NUM_OFFLOADING_BRIDGES);
159 if (bridge_num >= max)
160 return -1;
161
162 set_bit(bridge_num, &dsa_fwd_offloading_bridges);
163 }
164
165 return bridge_num;
166 }
167
168 void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num)
169 {
170 /* Check if the bridge is still in use, otherwise it is time
171 * to clean it up so we can reuse this bridge_num later.
172 */
173 if (dsa_bridge_num_find(bridge_dev) < 0)
174 clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
175 }
176
177 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
178 {
179 struct dsa_switch_tree *dst;
180 struct dsa_port *dp;
181
182 list_for_each_entry(dst, &dsa_tree_list, list) {
183 if (dst->index != tree_index)
184 continue;
185
186 list_for_each_entry(dp, &dst->ports, list) {
187 if (dp->ds->index != sw_index)
188 continue;
189
190 return dp->ds;
191 }
192 }
193
194 return NULL;
195 }
196 EXPORT_SYMBOL_GPL(dsa_switch_find);
197
198 static struct dsa_switch_tree *dsa_tree_find(int index)
199 {
200 struct dsa_switch_tree *dst;
201
202 list_for_each_entry(dst, &dsa_tree_list, list)
203 if (dst->index == index)
204 return dst;
205
206 return NULL;
207 }
208
209 static struct dsa_switch_tree *dsa_tree_alloc(int index)
210 {
211 struct dsa_switch_tree *dst;
212
213 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
214 if (!dst)
215 return NULL;
216
217 dst->index = index;
218
219 INIT_LIST_HEAD(&dst->rtable);
220
221 INIT_LIST_HEAD(&dst->ports);
222
223 INIT_LIST_HEAD(&dst->list);
224 list_add_tail(&dst->list, &dsa_tree_list);
225
226 kref_init(&dst->refcount);
227
228 return dst;
229 }
230
231 static void dsa_tree_free(struct dsa_switch_tree *dst)
232 {
233 if (dst->tag_ops)
234 dsa_tag_driver_put(dst->tag_ops);
235 list_del(&dst->list);
236 kfree(dst);
237 }
238
239 static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
240 {
241 if (dst)
242 kref_get(&dst->refcount);
243
244 return dst;
245 }
246
247 static struct dsa_switch_tree *dsa_tree_touch(int index)
248 {
249 struct dsa_switch_tree *dst;
250
251 dst = dsa_tree_find(index);
252 if (dst)
253 return dsa_tree_get(dst);
254 else
255 return dsa_tree_alloc(index);
256 }
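
/* Either branch above leaves the caller owning exactly one reference:
 * dsa_tree_alloc() starts the refcount at 1 via kref_init(), while
 * dsa_tree_get() takes an extra reference on an already existing tree.
 */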
257
258 static void dsa_tree_release(struct kref *ref)
259 {
260 struct dsa_switch_tree *dst;
261
262 dst = container_of(ref, struct dsa_switch_tree, refcount);
263
264 dsa_tree_free(dst);
265 }
266
267 static void dsa_tree_put(struct dsa_switch_tree *dst)
268 {
269 if (dst)
270 kref_put(&dst->refcount, dsa_tree_release);
271 }
272
273 static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
274 struct device_node *dn)
275 {
276 struct dsa_port *dp;
277
278 list_for_each_entry(dp, &dst->ports, list)
279 if (dp->dn == dn)
280 return dp;
281
282 return NULL;
283 }
284
285 static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
286 struct dsa_port *link_dp)
287 {
288 struct dsa_switch *ds = dp->ds;
289 struct dsa_switch_tree *dst;
290 struct dsa_link *dl;
291
292 dst = ds->dst;
293
294 list_for_each_entry(dl, &dst->rtable, list)
295 if (dl->dp == dp && dl->link_dp == link_dp)
296 return dl;
297
298 dl = kzalloc(sizeof(*dl), GFP_KERNEL);
299 if (!dl)
300 return NULL;
301
302 dl->dp = dp;
303 dl->link_dp = link_dp;
304
305 INIT_LIST_HEAD(&dl->list);
306 list_add_tail(&dl->list, &dst->rtable);
307
308 return dl;
309 }
310
311 static bool dsa_port_setup_routing_table(struct dsa_port *dp)
312 {
313 struct dsa_switch *ds = dp->ds;
314 struct dsa_switch_tree *dst = ds->dst;
315 struct device_node *dn = dp->dn;
316 struct of_phandle_iterator it;
317 struct dsa_port *link_dp;
318 struct dsa_link *dl;
319 int err;
320
321 of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
322 link_dp = dsa_tree_find_port_by_node(dst, it.node);
323 if (!link_dp) {
324 of_node_put(it.node);
325 return false;
326 }
327
328 dl = dsa_link_touch(dp, link_dp);
329 if (!dl) {
330 of_node_put(it.node);
331 return false;
332 }
333 }
334
335 return true;
336 }
337
338 static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
339 {
340 bool complete = true;
341 struct dsa_port *dp;
342
343 list_for_each_entry(dp, &dst->ports, list) {
344 if (dsa_port_is_dsa(dp)) {
345 complete = dsa_port_setup_routing_table(dp);
346 if (!complete)
347 break;
348 }
349 }
350
351 return complete;
352 }
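
/* An incomplete routing table is not an error: it just means that a switch
 * referenced by a "link" phandle has not probed yet. dsa_tree_setup()
 * returns early in that case, and the whole tree is set up again once the
 * last switch registers.
 */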
353
354 static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
355 {
356 struct dsa_port *dp;
357
358 list_for_each_entry(dp, &dst->ports, list)
359 if (dsa_port_is_cpu(dp))
360 return dp;
361
362 return NULL;
363 }
364
365 /* Assign the default CPU port (the first one in the tree) to all ports of the
366 * fabric which don't already have one as part of their own switch.
367 */
368 static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
369 {
370 struct dsa_port *cpu_dp, *dp;
371
372 cpu_dp = dsa_tree_find_first_cpu(dst);
373 if (!cpu_dp) {
374 pr_err("DSA: tree %d has no CPU port\n", dst->index);
375 return -EINVAL;
376 }
377
378 list_for_each_entry(dp, &dst->ports, list) {
379 if (dp->cpu_dp)
380 continue;
381
382 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
383 dp->cpu_dp = cpu_dp;
384 }
385
386 return 0;
387 }
388
389 /* Perform initial assignment of CPU ports to user ports and DSA links in the
390 * fabric, giving preference to CPU ports local to each switch. Default to
391 * using the first CPU port in the switch tree if the port does not have a CPU
392 * port local to this switch.
393 */
394 static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
395 {
396 struct dsa_port *cpu_dp, *dp;
397
398 list_for_each_entry(cpu_dp, &dst->ports, list) {
399 if (!dsa_port_is_cpu(cpu_dp))
400 continue;
401
402 /* Prefer a local CPU port */
403 dsa_switch_for_each_port(dp, cpu_dp->ds) {
404 /* Prefer the first local CPU port found */
405 if (dp->cpu_dp)
406 continue;
407
408 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
409 dp->cpu_dp = cpu_dp;
410 }
411 }
412
413 return dsa_tree_setup_default_cpu(dst);
414 }
415
416 static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
417 {
418 struct dsa_port *dp;
419
420 list_for_each_entry(dp, &dst->ports, list)
421 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
422 dp->cpu_dp = NULL;
423 }
424
425 static int dsa_port_setup(struct dsa_port *dp)
426 {
427 struct devlink_port *dlp = &dp->devlink_port;
428 bool dsa_port_link_registered = false;
429 struct dsa_switch *ds = dp->ds;
430 bool dsa_port_enabled = false;
431 int err = 0;
432
433 if (dp->setup)
434 return 0;
435
436 mutex_init(&dp->addr_lists_lock);
437 INIT_LIST_HEAD(&dp->fdbs);
438 INIT_LIST_HEAD(&dp->mdbs);
439
440 if (ds->ops->port_setup) {
441 err = ds->ops->port_setup(ds, dp->index);
442 if (err)
443 return err;
444 }
445
446 switch (dp->type) {
447 case DSA_PORT_TYPE_UNUSED:
448 dsa_port_disable(dp);
449 break;
450 case DSA_PORT_TYPE_CPU:
451 err = dsa_port_link_register_of(dp);
452 if (err)
453 break;
454 dsa_port_link_registered = true;
455
456 err = dsa_port_enable(dp, NULL);
457 if (err)
458 break;
459 dsa_port_enabled = true;
460
461 break;
462 case DSA_PORT_TYPE_DSA:
463 err = dsa_port_link_register_of(dp);
464 if (err)
465 break;
466 dsa_port_link_registered = true;
467
468 err = dsa_port_enable(dp, NULL);
469 if (err)
470 break;
471 dsa_port_enabled = true;
472
473 break;
474 case DSA_PORT_TYPE_USER:
475 of_get_mac_address(dp->dn, dp->mac);
476 err = dsa_slave_create(dp);
477 if (err)
478 break;
479
480 devlink_port_type_eth_set(dlp, dp->slave);
481 break;
482 }
483
484 if (err && dsa_port_enabled)
485 dsa_port_disable(dp);
486 if (err && dsa_port_link_registered)
487 dsa_port_link_unregister_of(dp);
488 if (err) {
489 if (ds->ops->port_teardown)
490 ds->ops->port_teardown(ds, dp->index);
491 return err;
492 }
493
494 dp->setup = true;
495
496 return 0;
497 }
498
499 static int dsa_port_devlink_setup(struct dsa_port *dp)
500 {
501 struct devlink_port *dlp = &dp->devlink_port;
502 struct dsa_switch_tree *dst = dp->ds->dst;
503 struct devlink_port_attrs attrs = {};
504 struct devlink *dl = dp->ds->devlink;
505 const unsigned char *id;
506 unsigned char len;
507 int err;
508
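/* The tree index doubles as the devlink switch ID, so every port of every
 * switch within one tree reports the same phys_switch_id to user space.
 */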
509 id = (const unsigned char *)&dst->index;
510 len = sizeof(dst->index);
511
512 attrs.phys.port_number = dp->index;
513 memcpy(attrs.switch_id.id, id, len);
514 attrs.switch_id.id_len = len;
515 memset(dlp, 0, sizeof(*dlp));
516
517 switch (dp->type) {
518 case DSA_PORT_TYPE_UNUSED:
519 attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
520 break;
521 case DSA_PORT_TYPE_CPU:
522 attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
523 break;
524 case DSA_PORT_TYPE_DSA:
525 attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
526 break;
527 case DSA_PORT_TYPE_USER:
528 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
529 break;
530 }
531
532 devlink_port_attrs_set(dlp, &attrs);
533 err = devlink_port_register(dl, dlp, dp->index);
534
535 if (!err)
536 dp->devlink_port_setup = true;
537
538 return err;
539 }
540
541 static void dsa_port_teardown(struct dsa_port *dp)
542 {
543 struct devlink_port *dlp = &dp->devlink_port;
544 struct dsa_switch *ds = dp->ds;
545 struct dsa_mac_addr *a, *tmp;
546
547 if (!dp->setup)
548 return;
549
550 if (ds->ops->port_teardown)
551 ds->ops->port_teardown(ds, dp->index);
552
553 devlink_port_type_clear(dlp);
554
555 switch (dp->type) {
556 case DSA_PORT_TYPE_UNUSED:
557 break;
558 case DSA_PORT_TYPE_CPU:
559 dsa_port_disable(dp);
560 dsa_port_link_unregister_of(dp);
561 break;
562 case DSA_PORT_TYPE_DSA:
563 dsa_port_disable(dp);
564 dsa_port_link_unregister_of(dp);
565 break;
566 case DSA_PORT_TYPE_USER:
567 if (dp->slave) {
568 dsa_slave_destroy(dp->slave);
569 dp->slave = NULL;
570 }
571 break;
572 }
573
574 list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
575 list_del(&a->list);
576 kfree(a);
577 }
578
579 list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
580 list_del(&a->list);
581 kfree(a);
582 }
583
584 dp->setup = false;
585 }
586
587 static void dsa_port_devlink_teardown(struct dsa_port *dp)
588 {
589 struct devlink_port *dlp = &dp->devlink_port;
590
591 if (dp->devlink_port_setup)
592 devlink_port_unregister(dlp);
593 dp->devlink_port_setup = false;
594 }
595
596 /* Destroy the current devlink port, and create a new one which has the UNUSED
597 * flavour. At this point, any call to ds->ops->port_setup has been already
598 * balanced out by a call to ds->ops->port_teardown, so we know that any
599 * devlink port regions the driver had are now unregistered. We then call its
600 * ds->ops->port_setup again, in order for the driver to re-create them on the
601 * new devlink port.
602 */
603 static int dsa_port_reinit_as_unused(struct dsa_port *dp)
604 {
605 struct dsa_switch *ds = dp->ds;
606 int err;
607
608 dsa_port_devlink_teardown(dp);
609 dp->type = DSA_PORT_TYPE_UNUSED;
610 err = dsa_port_devlink_setup(dp);
611 if (err)
612 return err;
613
614 if (ds->ops->port_setup) {
615 /* On error, leave the devlink port registered,
616 * dsa_switch_teardown will clean it up later.
617 */
618 err = ds->ops->port_setup(ds, dp->index);
619 if (err)
620 return err;
621 }
622
623 return 0;
624 }
625
626 static int dsa_devlink_info_get(struct devlink *dl,
627 struct devlink_info_req *req,
628 struct netlink_ext_ack *extack)
629 {
630 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
631
632 if (ds->ops->devlink_info_get)
633 return ds->ops->devlink_info_get(ds, req, extack);
634
635 return -EOPNOTSUPP;
636 }
637
638 static int dsa_devlink_sb_pool_get(struct devlink *dl,
639 unsigned int sb_index, u16 pool_index,
640 struct devlink_sb_pool_info *pool_info)
641 {
642 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
643
644 if (!ds->ops->devlink_sb_pool_get)
645 return -EOPNOTSUPP;
646
647 return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
648 pool_info);
649 }
650
651 static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
652 u16 pool_index, u32 size,
653 enum devlink_sb_threshold_type threshold_type,
654 struct netlink_ext_ack *extack)
655 {
656 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
657
658 if (!ds->ops->devlink_sb_pool_set)
659 return -EOPNOTSUPP;
660
661 return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
662 threshold_type, extack);
663 }
664
665 static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
666 unsigned int sb_index, u16 pool_index,
667 u32 *p_threshold)
668 {
669 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
670 int port = dsa_devlink_port_to_port(dlp);
671
672 if (!ds->ops->devlink_sb_port_pool_get)
673 return -EOPNOTSUPP;
674
675 return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
676 pool_index, p_threshold);
677 }
678
679 static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
680 unsigned int sb_index, u16 pool_index,
681 u32 threshold,
682 struct netlink_ext_ack *extack)
683 {
684 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
685 int port = dsa_devlink_port_to_port(dlp);
686
687 if (!ds->ops->devlink_sb_port_pool_set)
688 return -EOPNOTSUPP;
689
690 return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
691 pool_index, threshold, extack);
692 }
693
694 static int
695 dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
696 unsigned int sb_index, u16 tc_index,
697 enum devlink_sb_pool_type pool_type,
698 u16 *p_pool_index, u32 *p_threshold)
699 {
700 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
701 int port = dsa_devlink_port_to_port(dlp);
702
703 if (!ds->ops->devlink_sb_tc_pool_bind_get)
704 return -EOPNOTSUPP;
705
706 return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
707 tc_index, pool_type,
708 p_pool_index, p_threshold);
709 }
710
711 static int
712 dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
713 unsigned int sb_index, u16 tc_index,
714 enum devlink_sb_pool_type pool_type,
715 u16 pool_index, u32 threshold,
716 struct netlink_ext_ack *extack)
717 {
718 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
719 int port = dsa_devlink_port_to_port(dlp);
720
721 if (!ds->ops->devlink_sb_tc_pool_bind_set)
722 return -EOPNOTSUPP;
723
724 return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
725 tc_index, pool_type,
726 pool_index, threshold,
727 extack);
728 }
729
730 static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
731 unsigned int sb_index)
732 {
733 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
734
735 if (!ds->ops->devlink_sb_occ_snapshot)
736 return -EOPNOTSUPP;
737
738 return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
739 }
740
741 static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
742 unsigned int sb_index)
743 {
744 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
745
746 if (!ds->ops->devlink_sb_occ_max_clear)
747 return -EOPNOTSUPP;
748
749 return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
750 }
751
752 static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
753 unsigned int sb_index,
754 u16 pool_index, u32 *p_cur,
755 u32 *p_max)
756 {
757 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
758 int port = dsa_devlink_port_to_port(dlp);
759
760 if (!ds->ops->devlink_sb_occ_port_pool_get)
761 return -EOPNOTSUPP;
762
763 return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
764 pool_index, p_cur, p_max);
765 }
766
767 static int
768 dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
769 unsigned int sb_index, u16 tc_index,
770 enum devlink_sb_pool_type pool_type,
771 u32 *p_cur, u32 *p_max)
772 {
773 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
774 int port = dsa_devlink_port_to_port(dlp);
775
776 if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
777 return -EOPNOTSUPP;
778
779 return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
780 sb_index, tc_index,
781 pool_type, p_cur,
782 p_max);
783 }
784
785 static const struct devlink_ops dsa_devlink_ops = {
786 .info_get = dsa_devlink_info_get,
787 .sb_pool_get = dsa_devlink_sb_pool_get,
788 .sb_pool_set = dsa_devlink_sb_pool_set,
789 .sb_port_pool_get = dsa_devlink_sb_port_pool_get,
790 .sb_port_pool_set = dsa_devlink_sb_port_pool_set,
791 .sb_tc_pool_bind_get = dsa_devlink_sb_tc_pool_bind_get,
792 .sb_tc_pool_bind_set = dsa_devlink_sb_tc_pool_bind_set,
793 .sb_occ_snapshot = dsa_devlink_sb_occ_snapshot,
794 .sb_occ_max_clear = dsa_devlink_sb_occ_max_clear,
795 .sb_occ_port_pool_get = dsa_devlink_sb_occ_port_pool_get,
796 .sb_occ_tc_port_bind_get = dsa_devlink_sb_occ_tc_port_bind_get,
797 };
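
/* All ops above are thin wrappers: they only translate the devlink or
 * devlink_port handle back into a dsa_switch (plus port index) and delegate
 * to the corresponding ds->ops callback, returning -EOPNOTSUPP when the
 * driver does not implement it.
 */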
798
799 static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
800 {
801 const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
802 struct dsa_switch_tree *dst = ds->dst;
803 struct dsa_port *cpu_dp;
804 int err;
805
806 if (tag_ops->proto == dst->default_proto)
807 return 0;
808
809 dsa_switch_for_each_cpu_port(cpu_dp, ds) {
810 rtnl_lock();
811 err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
812 tag_ops->proto);
813 rtnl_unlock();
814 if (err) {
815 dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
816 tag_ops->name, ERR_PTR(err));
817 return err;
818 }
819 }
820
821 return 0;
822 }
823
824 static int dsa_switch_setup(struct dsa_switch *ds)
825 {
826 struct dsa_devlink_priv *dl_priv;
827 struct dsa_port *dp;
828 int err;
829
830 if (ds->setup)
831 return 0;
832
833 /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
834 * driver and before ops->setup() has run, since the switch drivers and
835 * the slave MDIO bus driver rely on this mask to decide whether or not
836 * to probe PHY devices.
837 */
838 ds->phys_mii_mask |= dsa_user_ports(ds);
839
840 /* Add the switch to devlink before calling setup, so that setup can
841 * add dpipe tables
842 */
843 ds->devlink =
844 devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
845 if (!ds->devlink)
846 return -ENOMEM;
847 dl_priv = devlink_priv(ds->devlink);
848 dl_priv->ds = ds;
849
850 /* Set up devlink port instances now, so that the switch
851 * ->setup() op can register regions etc. against the ports
852 */
853 dsa_switch_for_each_port(dp, ds) {
854 err = dsa_port_devlink_setup(dp);
855 if (err)
856 goto unregister_devlink_ports;
857 }
858
859 err = dsa_switch_register_notifier(ds);
860 if (err)
861 goto unregister_devlink_ports;
862
863 ds->configure_vlan_while_not_filtering = true;
864
865 err = ds->ops->setup(ds);
866 if (err < 0)
867 goto unregister_notifier;
868
869 err = dsa_switch_setup_tag_protocol(ds);
870 if (err)
871 goto teardown;
872
873 if (!ds->slave_mii_bus && ds->ops->phy_read) {
874 ds->slave_mii_bus = mdiobus_alloc();
875 if (!ds->slave_mii_bus) {
876 err = -ENOMEM;
877 goto teardown;
878 }
879
880 dsa_slave_mii_bus_init(ds);
881
882 err = mdiobus_register(ds->slave_mii_bus);
883 if (err < 0)
884 goto free_slave_mii_bus;
885 }
886
887 ds->setup = true;
888 devlink_register(ds->devlink);
889 return 0;
890
891 free_slave_mii_bus:
892 if (ds->slave_mii_bus && ds->ops->phy_read)
893 mdiobus_free(ds->slave_mii_bus);
894 teardown:
895 if (ds->ops->teardown)
896 ds->ops->teardown(ds);
897 unregister_notifier:
898 dsa_switch_unregister_notifier(ds);
899 unregister_devlink_ports:
900 dsa_switch_for_each_port(dp, ds)
901 dsa_port_devlink_teardown(dp);
902 devlink_free(ds->devlink);
903 ds->devlink = NULL;
904 return err;
905 }
906
907 static void dsa_switch_teardown(struct dsa_switch *ds)
908 {
909 struct dsa_port *dp;
910
911 if (!ds->setup)
912 return;
913
914 if (ds->devlink)
915 devlink_unregister(ds->devlink);
916
917 if (ds->slave_mii_bus && ds->ops->phy_read) {
918 mdiobus_unregister(ds->slave_mii_bus);
919 mdiobus_free(ds->slave_mii_bus);
920 ds->slave_mii_bus = NULL;
921 }
922
923 if (ds->ops->teardown)
924 ds->ops->teardown(ds);
925
926 dsa_switch_unregister_notifier(ds);
927
928 if (ds->devlink) {
929 dsa_switch_for_each_port(dp, ds)
930 dsa_port_devlink_teardown(dp);
931 devlink_free(ds->devlink);
932 ds->devlink = NULL;
933 }
934
935 ds->setup = false;
936 }
937
938 /* First tear down the non-shared, then the shared ports. This ensures that
939 * all work items scheduled by our switchdev handlers for user ports have
940 * completed before we destroy the refcounting kept on the shared ports.
941 */
942 static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
943 {
944 struct dsa_port *dp;
945
946 list_for_each_entry(dp, &dst->ports, list)
947 if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
948 dsa_port_teardown(dp);
949
950 dsa_flush_workqueue();
951
952 list_for_each_entry(dp, &dst->ports, list)
953 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
954 dsa_port_teardown(dp);
955 }
956
957 static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
958 {
959 struct dsa_port *dp;
960
961 list_for_each_entry(dp, &dst->ports, list)
962 dsa_switch_teardown(dp->ds);
963 }
964
965 static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
966 {
967 struct dsa_port *dp;
968 int err;
969
970 list_for_each_entry(dp, &dst->ports, list) {
971 err = dsa_switch_setup(dp->ds);
972 if (err)
973 goto teardown;
974 }
975
976 list_for_each_entry(dp, &dst->ports, list) {
977 err = dsa_port_setup(dp);
978 if (err) {
979 err = dsa_port_reinit_as_unused(dp);
980 if (err)
981 goto teardown;
982 }
983 }
984
985 return 0;
986
987 teardown:
988 dsa_tree_teardown_ports(dst);
989
990 dsa_tree_teardown_switches(dst);
991
992 return err;
993 }
994
995 static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
996 {
997 struct dsa_port *dp;
998 int err;
999
1000 list_for_each_entry(dp, &dst->ports, list) {
1001 if (dsa_port_is_cpu(dp)) {
1002 err = dsa_master_setup(dp->master, dp);
1003 if (err)
1004 return err;
1005 }
1006 }
1007
1008 return 0;
1009 }
1010
1011 static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
1012 {
1013 struct dsa_port *dp;
1014
1015 list_for_each_entry(dp, &dst->ports, list)
1016 if (dsa_port_is_cpu(dp))
1017 dsa_master_teardown(dp->master);
1018 }
1019
1020 static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
1021 {
1022 unsigned int len = 0;
1023 struct dsa_port *dp;
1024
1025 list_for_each_entry(dp, &dst->ports, list) {
1026 if (dp->ds->num_lag_ids > len)
1027 len = dp->ds->num_lag_ids;
1028 }
1029
1030 if (!len)
1031 return 0;
1032
1033 dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
1034 if (!dst->lags)
1035 return -ENOMEM;
1036
1037 dst->lags_len = len;
1038 return 0;
1039 }
1040
1041 static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
1042 {
1043 kfree(dst->lags);
1044 }
1045
1046 static int dsa_tree_setup(struct dsa_switch_tree *dst)
1047 {
1048 bool complete;
1049 int err;
1050
1051 if (dst->setup) {
1052 pr_err("DSA: tree %d already setup! Disjoint trees?\n",
1053 dst->index);
1054 return -EEXIST;
1055 }
1056
1057 complete = dsa_tree_setup_routing_table(dst);
1058 if (!complete)
1059 return 0;
1060
1061 err = dsa_tree_setup_cpu_ports(dst);
1062 if (err)
1063 return err;
1064
1065 err = dsa_tree_setup_switches(dst);
1066 if (err)
1067 goto teardown_cpu_ports;
1068
1069 err = dsa_tree_setup_master(dst);
1070 if (err)
1071 goto teardown_switches;
1072
1073 err = dsa_tree_setup_lags(dst);
1074 if (err)
1075 goto teardown_master;
1076
1077 dst->setup = true;
1078
1079 pr_info("DSA: tree %d setup\n", dst->index);
1080
1081 return 0;
1082
1083 teardown_master:
1084 dsa_tree_teardown_master(dst);
1085 teardown_switches:
1086 dsa_tree_teardown_ports(dst);
1087 dsa_tree_teardown_switches(dst);
1088 teardown_cpu_ports:
1089 dsa_tree_teardown_cpu_ports(dst);
1090
1091 return err;
1092 }
1093
1094 static void dsa_tree_teardown(struct dsa_switch_tree *dst)
1095 {
1096 struct dsa_link *dl, *next;
1097
1098 if (!dst->setup)
1099 return;
1100
1101 dsa_tree_teardown_lags(dst);
1102
1103 dsa_tree_teardown_master(dst);
1104
1105 dsa_tree_teardown_ports(dst);
1106
1107 dsa_tree_teardown_switches(dst);
1108
1109 dsa_tree_teardown_cpu_ports(dst);
1110
1111 list_for_each_entry_safe(dl, next, &dst->rtable, list) {
1112 list_del(&dl->list);
1113 kfree(dl);
1114 }
1115
1116 pr_info("DSA: tree %d torn down\n", dst->index);
1117
1118 dst->setup = false;
1119 }
1120
1121 /* Since the dsa/tagging sysfs device attribute is per master, the assumption
1122 * is that all DSA switches within a tree share the same tagger, otherwise
1123 * they would have formed disjoint trees (different "dsa,member" values).
1124 */
1125 int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
1126 struct net_device *master,
1127 const struct dsa_device_ops *tag_ops,
1128 const struct dsa_device_ops *old_tag_ops)
1129 {
1130 struct dsa_notifier_tag_proto_info info;
1131 struct dsa_port *dp;
1132 int err = -EBUSY;
1133
1134 if (!rtnl_trylock())
1135 return restart_syscall();
1136
1137 /* At the moment we don't allow changing the tag protocol under
1138 * traffic. The rtnl_mutex also happens to serialize concurrent
1139 * attempts to change the tagging protocol. If we ever lift the IFF_UP
1140 * restriction, there needs to be another mutex which serializes this.
1141 */
1142 if (master->flags & IFF_UP)
1143 goto out_unlock;
1144
1145 list_for_each_entry(dp, &dst->ports, list) {
1146 if (!dsa_port_is_user(dp))
1147 continue;
1148
1149 if (dp->slave->flags & IFF_UP)
1150 goto out_unlock;
1151 }
1152
1153 info.tag_ops = tag_ops;
1154 err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
1155 if (err)
1156 goto out_unwind_tagger;
1157
1158 dst->tag_ops = tag_ops;
1159
1160 rtnl_unlock();
1161
1162 return 0;
1163
1164 out_unwind_tagger:
1165 info.tag_ops = old_tag_ops;
1166 dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
1167 out_unlock:
1168 rtnl_unlock();
1169 return err;
1170 }
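
/* Illustrative example (interface and tagger names made up for this sketch):
 * assuming a driver that implements .change_tag_protocol and a DSA master
 * called eth0, the whole tree can be moved to another tagger, with the
 * master and all user ports down, via
 *
 *	echo ocelot-8021q > /sys/class/net/eth0/dsa/tagging
 *
 * which reaches this function through the sysfs store handler of the DSA
 * master (see master.c).
 */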
1171
1172 static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
1173 {
1174 struct dsa_switch_tree *dst = ds->dst;
1175 struct dsa_port *dp;
1176
1177 dsa_switch_for_each_port(dp, ds)
1178 if (dp->index == index)
1179 return dp;
1180
1181 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1182 if (!dp)
1183 return NULL;
1184
1185 dp->ds = ds;
1186 dp->index = index;
1187 dp->bridge_num = -1;
1188
1189 INIT_LIST_HEAD(&dp->list);
1190 list_add_tail(&dp->list, &dst->ports);
1191
1192 return dp;
1193 }
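
/* bridge_num is initialized to -1 above, i.e. the "not offloading TX
 * forwarding for any bridge" value that dsa_bridge_num_find() relies on.
 */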
1194
1195 static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
1196 {
1197 if (!name)
1198 name = "eth%d";
1199
1200 dp->type = DSA_PORT_TYPE_USER;
1201 dp->name = name;
1202
1203 return 0;
1204 }
1205
1206 static int dsa_port_parse_dsa(struct dsa_port *dp)
1207 {
1208 dp->type = DSA_PORT_TYPE_DSA;
1209
1210 return 0;
1211 }
1212
1213 static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
1214 struct net_device *master)
1215 {
1216 enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
1217 struct dsa_switch *mds, *ds = dp->ds;
1218 unsigned int mdp_upstream;
1219 struct dsa_port *mdp;
1220
1221 /* It is possible to stack DSA switches onto one another. When that
1222 * happens, the switch driver may want to know if its tagging protocol
1223 * is going to work in such a configuration.
1224 */
1225 if (dsa_slave_dev_check(master)) {
1226 mdp = dsa_slave_to_port(master);
1227 mds = mdp->ds;
1228 mdp_upstream = dsa_upstream_port(mds, mdp->index);
1229 tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
1230 DSA_TAG_PROTO_NONE);
1231 }
1232
1233 /* If the master device is not itself a DSA slave in a disjoint DSA
1234 * tree, then return immediately.
1235 */
1236 return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
1237 }
1238
1239 static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
1240 const char *user_protocol)
1241 {
1242 struct dsa_switch *ds = dp->ds;
1243 struct dsa_switch_tree *dst = ds->dst;
1244 const struct dsa_device_ops *tag_ops;
1245 enum dsa_tag_protocol default_proto;
1246
1247 /* Find out which protocol the switch would prefer. */
1248 default_proto = dsa_get_tag_protocol(dp, master);
1249 if (dst->default_proto) {
1250 if (dst->default_proto != default_proto) {
1251 dev_err(ds->dev,
1252 "A DSA switch tree can have only one tagging protocol\n");
1253 return -EINVAL;
1254 }
1255 } else {
1256 dst->default_proto = default_proto;
1257 }
1258
1259 /* See if the user wants to override that preference. */
1260 if (user_protocol) {
1261 if (!ds->ops->change_tag_protocol) {
1262 dev_err(ds->dev, "Tag protocol cannot be modified\n");
1263 return -EINVAL;
1264 }
1265
1266 tag_ops = dsa_find_tagger_by_name(user_protocol);
1267 } else {
1268 tag_ops = dsa_tag_driver_get(default_proto);
1269 }
1270
1271 if (IS_ERR(tag_ops)) {
1272 if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
1273 return -EPROBE_DEFER;
1274
1275 dev_warn(ds->dev, "No tagger for this switch\n");
1276 return PTR_ERR(tag_ops);
1277 }
1278
1279 if (dst->tag_ops) {
1280 if (dst->tag_ops != tag_ops) {
1281 dev_err(ds->dev,
1282 "A DSA switch tree can have only one tagging protocol\n");
1283
1284 dsa_tag_driver_put(tag_ops);
1285 return -EINVAL;
1286 }
1287
1288 /* In the case of multiple CPU ports per switch, the tagging
1289 * protocol is still reference-counted only per switch tree.
1290 */
1291 dsa_tag_driver_put(tag_ops);
1292 } else {
1293 dst->tag_ops = tag_ops;
1294 }
1295
1296 dp->master = master;
1297 dp->type = DSA_PORT_TYPE_CPU;
1298 dsa_port_set_tag_protocol(dp, dst->tag_ops);
1299 dp->dst = dst;
1300
1301 /* At this point, the tree may be configured to use a different
1302 * tagger than the one chosen by the switch driver during
1303 * .setup, in the case when a user selects a custom protocol
1304 * through the DT.
1305 *
1306 * This is resolved by syncing the driver with the tree in
1307 * dsa_switch_setup_tag_protocol once .setup has run and the
1308 * driver is ready to accept calls to .change_tag_protocol. If
1309 * the driver does not support the custom protocol at that
1310 * point, the tree is wholly rejected, thereby ensuring that the
1311 * tree and driver are always in agreement on the protocol to
1312 * use.
1313 */
1314 return 0;
1315 }
1316
1317 static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
1318 {
1319 struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
1320 const char *name = of_get_property(dn, "label", NULL);
1321 bool link = of_property_read_bool(dn, "link");
1322
1323 dp->dn = dn;
1324
1325 if (ethernet) {
1326 struct net_device *master;
1327 const char *user_protocol;
1328
1329 master = of_find_net_device_by_node(ethernet);
1330 if (!master)
1331 return -EPROBE_DEFER;
1332
1333 user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
1334 return dsa_port_parse_cpu(dp, master, user_protocol);
1335 }
1336
1337 if (link)
1338 return dsa_port_parse_dsa(dp);
1339
1340 return dsa_port_parse_user(dp, name);
1341 }
1342
1343 static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
1344 struct device_node *dn)
1345 {
1346 struct device_node *ports, *port;
1347 struct dsa_port *dp;
1348 int err = 0;
1349 u32 reg;
1350
1351 ports = of_get_child_by_name(dn, "ports");
1352 if (!ports) {
1353 /* The second possibility is "ethernet-ports" */
1354 ports = of_get_child_by_name(dn, "ethernet-ports");
1355 if (!ports) {
1356 dev_err(ds->dev, "no ports child node found\n");
1357 return -EINVAL;
1358 }
1359 }
1360
1361 for_each_available_child_of_node(ports, port) {
1362 err = of_property_read_u32(port, "reg", &reg);
1363 if (err) {
1364 of_node_put(port);
1365 goto out_put_node;
1366 }
1367
1368 if (reg >= ds->num_ports) {
1369 dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
1370 port, reg, ds->num_ports);
1371 of_node_put(port);
1372 err = -EINVAL;
1373 goto out_put_node;
1374 }
1375
1376 dp = dsa_to_port(ds, reg);
1377
1378 err = dsa_port_parse_of(dp, port);
1379 if (err) {
1380 of_node_put(port);
1381 goto out_put_node;
1382 }
1383 }
1384
1385 out_put_node:
1386 of_node_put(ports);
1387 return err;
1388 }
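
/* Abridged example (made-up node names) of the device tree layout parsed by
 * dsa_port_parse_of() and dsa_switch_parse_ports_of() above:
 *
 *	ports {
 *		port@0 {
 *			reg = <0>;
 *			label = "lan1";          user port
 *		};
 *		port@5 {
 *			reg = <5>;
 *			ethernet = <&gmac0>;     CPU port
 *		};
 *		port@6 {
 *			reg = <6>;
 *			link = <&switch1port6>;  DSA link to another switch
 *		};
 *	};
 */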
1389
1390 static int dsa_switch_parse_member_of(struct dsa_switch *ds,
1391 struct device_node *dn)
1392 {
1393 u32 m[2] = { 0, 0 };
1394 int sz;
1395
1396 /* Don't error out if this optional property isn't found */
1397 sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
1398 if (sz < 0 && sz != -EINVAL)
1399 return sz;
1400
1401 ds->index = m[1];
1402
1403 ds->dst = dsa_tree_touch(m[0]);
1404 if (!ds->dst)
1405 return -ENOMEM;
1406
1407 if (dsa_switch_find(ds->dst->index, ds->index)) {
1408 dev_err(ds->dev,
1409 "A DSA switch with index %d already exists in tree %d\n",
1410 ds->index, ds->dst->index);
1411 return -EEXIST;
1412 }
1413
1414 if (ds->dst->last_switch < ds->index)
1415 ds->dst->last_switch = ds->index;
1416
1417 return 0;
1418 }
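
/* The optional "dsa,member" property is a pair of cells, <tree switch>:
 * e.g. dsa,member = <0 1> makes this device switch 1 of tree 0. When the
 * property is absent, the switch defaults to index 0 of tree 0.
 */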
1419
1420 static int dsa_switch_touch_ports(struct dsa_switch *ds)
1421 {
1422 struct dsa_port *dp;
1423 int port;
1424
1425 for (port = 0; port < ds->num_ports; port++) {
1426 dp = dsa_port_touch(ds, port);
1427 if (!dp)
1428 return -ENOMEM;
1429 }
1430
1431 return 0;
1432 }
1433
1434 static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
1435 {
1436 int err;
1437
1438 err = dsa_switch_parse_member_of(ds, dn);
1439 if (err)
1440 return err;
1441
1442 err = dsa_switch_touch_ports(ds);
1443 if (err)
1444 return err;
1445
1446 return dsa_switch_parse_ports_of(ds, dn);
1447 }
1448
1449 static int dsa_port_parse(struct dsa_port *dp, const char *name,
1450 struct device *dev)
1451 {
1452 if (!strcmp(name, "cpu")) {
1453 struct net_device *master;
1454
1455 master = dsa_dev_to_net_device(dev);
1456 if (!master)
1457 return -EPROBE_DEFER;
1458
1459 dev_put(master);
1460
1461 return dsa_port_parse_cpu(dp, master, NULL);
1462 }
1463
1464 if (!strcmp(name, "dsa"))
1465 return dsa_port_parse_dsa(dp);
1466
1467 return dsa_port_parse_user(dp, name);
1468 }
1469
1470 static int dsa_switch_parse_ports(struct dsa_switch *ds,
1471 struct dsa_chip_data *cd)
1472 {
1473 bool valid_name_found = false;
1474 struct dsa_port *dp;
1475 struct device *dev;
1476 const char *name;
1477 unsigned int i;
1478 int err;
1479
1480 for (i = 0; i < DSA_MAX_PORTS; i++) {
1481 name = cd->port_names[i];
1482 dev = cd->netdev[i];
1483 dp = dsa_to_port(ds, i);
1484
1485 if (!name)
1486 continue;
1487
1488 err = dsa_port_parse(dp, name, dev);
1489 if (err)
1490 return err;
1491
1492 valid_name_found = true;
1493 }
1494
1495 if (!valid_name_found && i == DSA_MAX_PORTS)
1496 return -EINVAL;
1497
1498 return 0;
1499 }
1500
1501 static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
1502 {
1503 int err;
1504
1505 ds->cd = cd;
1506
1507 /* We don't support interconnected switches nor multiple trees via
1508 * platform data, so this is the unique switch of the tree.
1509 */
1510 ds->index = 0;
1511 ds->dst = dsa_tree_touch(0);
1512 if (!ds->dst)
1513 return -ENOMEM;
1514
1515 err = dsa_switch_touch_ports(ds);
1516 if (err)
1517 return err;
1518
1519 return dsa_switch_parse_ports(ds, cd);
1520 }
1521
1522 static void dsa_switch_release_ports(struct dsa_switch *ds)
1523 {
1524 struct dsa_port *dp, *next;
1525
1526 dsa_switch_for_each_port_safe(dp, next, ds) {
1527 list_del(&dp->list);
1528 kfree(dp);
1529 }
1530 }
1531
1532 static int dsa_switch_probe(struct dsa_switch *ds)
1533 {
1534 struct dsa_switch_tree *dst;
1535 struct dsa_chip_data *pdata;
1536 struct device_node *np;
1537 int err;
1538
1539 if (!ds->dev)
1540 return -ENODEV;
1541
1542 pdata = ds->dev->platform_data;
1543 np = ds->dev->of_node;
1544
1545 if (!ds->num_ports)
1546 return -EINVAL;
1547
1548 if (np) {
1549 err = dsa_switch_parse_of(ds, np);
1550 if (err)
1551 dsa_switch_release_ports(ds);
1552 } else if (pdata) {
1553 err = dsa_switch_parse(ds, pdata);
1554 if (err)
1555 dsa_switch_release_ports(ds);
1556 } else {
1557 err = -ENODEV;
1558 }
1559
1560 if (err)
1561 return err;
1562
1563 dst = ds->dst;
1564 dsa_tree_get(dst);
1565 err = dsa_tree_setup(dst);
1566 if (err) {
1567 dsa_switch_release_ports(ds);
1568 dsa_tree_put(dst);
1569 }
1570
1571 return err;
1572 }
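
/* Reference counting note: the parse step (via dsa_tree_touch()) and the
 * dsa_tree_get() above each take one reference on the tree, and
 * dsa_register_switch() below drops one of them again. A successfully
 * registered switch therefore keeps a single long-lived reference, which
 * dsa_switch_remove() releases.
 */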
1573
1574 int dsa_register_switch(struct dsa_switch *ds)
1575 {
1576 int err;
1577
1578 mutex_lock(&dsa2_mutex);
1579 err = dsa_switch_probe(ds);
1580 dsa_tree_put(ds->dst);
1581 mutex_unlock(&dsa2_mutex);
1582
1583 return err;
1584 }
1585 EXPORT_SYMBOL_GPL(dsa_register_switch);
1586
1587 static void dsa_switch_remove(struct dsa_switch *ds)
1588 {
1589 struct dsa_switch_tree *dst = ds->dst;
1590
1591 dsa_tree_teardown(dst);
1592 dsa_switch_release_ports(ds);
1593 dsa_tree_put(dst);
1594 }
1595
1596 void dsa_unregister_switch(struct dsa_switch *ds)
1597 {
1598 mutex_lock(&dsa2_mutex);
1599 dsa_switch_remove(ds);
1600 mutex_unlock(&dsa2_mutex);
1601 }
1602 EXPORT_SYMBOL_GPL(dsa_unregister_switch);
1603
1604 /* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
1605 * blocking that operation from completion, due to the dev_hold taken inside
1606 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
1607 * the DSA master, so that the system can reboot successfully.
1608 */
1609 void dsa_switch_shutdown(struct dsa_switch *ds)
1610 {
1611 struct net_device *master, *slave_dev;
1612 LIST_HEAD(unregister_list);
1613 struct dsa_port *dp;
1614
1615 mutex_lock(&dsa2_mutex);
1616 rtnl_lock();
1617
1618 dsa_switch_for_each_user_port(dp, ds) {
1619 master = dp->cpu_dp->master;
1620 slave_dev = dp->slave;
1621
1622 netdev_upper_dev_unlink(master, slave_dev);
1623 /* Just unlinking ourselves as uppers of the master is not
1624 * sufficient. When the master net device unregisters, that will
1625 * also call dev_close, which we will catch as NETDEV_GOING_DOWN
1626 * and trigger a dev_close on our own devices (dsa_slave_close).
1627 * In turn, that will call dev_mc_unsync on the master's net
1628 * device. If the master is also a DSA switch port, this will
1629 * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
1630 * its own master. Lockdep will complain about the fact that
1631 * all cascaded masters have the same dsa_master_addr_list_lock_key,
1632 * which it normally would not do if the cascaded masters would
1633 * be in a proper upper/lower relationship, which we've just
1634 * destroyed.
1635 * To suppress the lockdep warnings, let's actually unregister
1636 * the DSA slave interfaces too, to avoid the nonsensical
1637 * multicast address list synchronization on shutdown.
1638 */
1639 unregister_netdevice_queue(slave_dev, &unregister_list);
1640 }
1641 unregister_netdevice_many(&unregister_list);
1642
1643 rtnl_unlock();
1644 mutex_unlock(&dsa2_mutex);
1645 }
1646 EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
1647