// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2016 John Crispin <john@phrozen.org>
 */

#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/if_bridge.h>

#include "qca8k.h"

#define MIB_DESC(_s, _o, _n)	\
	{			\
		.size = (_s),	\
		.offset = (_o),	\
		.name = (_n),	\
	}

const struct qca8k_mib_desc ar8327_mib[] = {
	MIB_DESC(1, 0x00, "RxBroad"),
	MIB_DESC(1, 0x04, "RxPause"),
	MIB_DESC(1, 0x08, "RxMulti"),
	MIB_DESC(1, 0x0c, "RxFcsErr"),
	MIB_DESC(1, 0x10, "RxAlignErr"),
	MIB_DESC(1, 0x14, "RxRunt"),
	MIB_DESC(1, 0x18, "RxFragment"),
	MIB_DESC(1, 0x1c, "Rx64Byte"),
	MIB_DESC(1, 0x20, "Rx128Byte"),
	MIB_DESC(1, 0x24, "Rx256Byte"),
	MIB_DESC(1, 0x28, "Rx512Byte"),
	MIB_DESC(1, 0x2c, "Rx1024Byte"),
	MIB_DESC(1, 0x30, "Rx1518Byte"),
	MIB_DESC(1, 0x34, "RxMaxByte"),
	MIB_DESC(1, 0x38, "RxTooLong"),
	MIB_DESC(2, 0x3c, "RxGoodByte"),
	MIB_DESC(2, 0x44, "RxBadByte"),
	MIB_DESC(1, 0x4c, "RxOverFlow"),
	MIB_DESC(1, 0x50, "Filtered"),
	MIB_DESC(1, 0x54, "TxBroad"),
	MIB_DESC(1, 0x58, "TxPause"),
	MIB_DESC(1, 0x5c, "TxMulti"),
	MIB_DESC(1, 0x60, "TxUnderRun"),
	MIB_DESC(1, 0x64, "Tx64Byte"),
	MIB_DESC(1, 0x68, "Tx128Byte"),
	MIB_DESC(1, 0x6c, "Tx256Byte"),
	MIB_DESC(1, 0x70, "Tx512Byte"),
	MIB_DESC(1, 0x74, "Tx1024Byte"),
	MIB_DESC(1, 0x78, "Tx1518Byte"),
	MIB_DESC(1, 0x7c, "TxMaxByte"),
	MIB_DESC(1, 0x80, "TxOverSize"),
	MIB_DESC(2, 0x84, "TxByte"),
	MIB_DESC(1, 0x8c, "TxCollision"),
	MIB_DESC(1, 0x90, "TxAbortCol"),
	MIB_DESC(1, 0x94, "TxMultiCol"),
	MIB_DESC(1, 0x98, "TxSingleCol"),
	MIB_DESC(1, 0x9c, "TxExcDefer"),
	MIB_DESC(1, 0xa0, "TxDefer"),
	MIB_DESC(1, 0xa4, "TxLateCol"),
	MIB_DESC(1, 0xa8, "RXUnicast"),
	MIB_DESC(1, 0xac, "TXUnicast"),
};

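/* Thin wrappers around the switch regmap. All register accesses in this
 * file go through these helpers, so the underlying bus implementation is
 * hidden behind priv->regmap.
 */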
int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
{
	return regmap_read(priv->regmap, reg, val);
}

int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
{
	return regmap_write(priv->regmap, reg, val);
}

int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
	return regmap_update_bits(priv->regmap, reg, mask, write_val);
}

static const struct regmap_range qca8k_readable_ranges[] = {
	regmap_reg_range(0x0000, 0x00e4), /* Global control */
	regmap_reg_range(0x0100, 0x0168), /* EEE control */
	regmap_reg_range(0x0200, 0x0270), /* Parser control */
	regmap_reg_range(0x0400, 0x0454), /* ACL */
	regmap_reg_range(0x0600, 0x0718), /* Lookup */
	regmap_reg_range(0x0800, 0x0b70), /* QM */
	regmap_reg_range(0x0c00, 0x0c80), /* PKT */
	regmap_reg_range(0x0e00, 0x0e98), /* L3 */
	regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
	regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
	regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
	regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
	regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
	regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
	regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
};

const struct regmap_access_table qca8k_readable_table = {
	.yes_ranges = qca8k_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
};

static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
	u32 val;

	return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
					QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
}

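/* Read back one ARL (FDB) entry from the three ATU data registers and
 * unpack it into a struct qca8k_fdb. The bit positions noted inline refer
 * to the ATU entry as laid out across ATU_DATA0..2.
 */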
static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
	u32 reg[QCA8K_ATU_TABLE_SIZE];
	int ret;

	/* load the ARL table into an array */
	ret = regmap_bulk_read(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
			       QCA8K_ATU_TABLE_SIZE);
	if (ret)
		return ret;

	/* vid - 83:72 */
	fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
	/* aging - 67:64 */
	fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
	/* portmask - 54:48 */
	fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
	/* mac - 47:0 */
	fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
	fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
	fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
	fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
	fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
	fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);

	return 0;
}

static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
			    const u8 *mac, u8 aging)
{
	u32 reg[QCA8K_ATU_TABLE_SIZE] = { 0 };

	/* vid - 83:72 */
	reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
	/* aging - 67:64 */
	reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
	/* portmask - 54:48 */
	reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
	/* mac - 47:0 */
	reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
	reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);

	/* load the array into the ARL table */
	regmap_bulk_write(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
			  QCA8K_ATU_TABLE_SIZE);
}

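/* Trigger an ATU operation: write the command (and optionally a port) to
 * the ATU function register with the BUSY bit set, then poll until the
 * hardware clears BUSY. For LOAD commands the FULL bit is checked to
 * detect a table-full condition.
 */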
static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
			    int port)
{
	u32 reg;
	int ret;

	/* Set the command and FDB index */
	reg = QCA8K_ATU_FUNC_BUSY;
	reg |= cmd;
	if (port >= 0) {
		reg |= QCA8K_ATU_FUNC_PORT_EN;
		reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
	}

	/* Write the function register triggering the table access */
	ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
	if (ret)
		return ret;

	/* wait for completion */
	ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
	if (ret)
		return ret;

	/* Check for table full violation when adding an entry */
	if (cmd == QCA8K_FDB_LOAD) {
		ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
		if (ret < 0)
			return ret;
		if (reg & QCA8K_ATU_FUNC_FULL)
			return -1;
	}

	return 0;
}

static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
			  int port)
{
	int ret;

	qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
	if (ret < 0)
		return ret;

	return qca8k_fdb_read(priv, fdb);
}

static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
			 u16 port_mask, u16 vid, u8 aging)
{
	int ret;

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_write(priv, vid, port_mask, mac, aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
			 u16 port_mask, u16 vid)
{
	int ret;

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_write(priv, vid, port_mask, mac, 0);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

void qca8k_fdb_flush(struct qca8k_priv *priv)
{
	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
	mutex_unlock(&priv->reg_mutex);
}

static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
				       const u8 *mac, u16 vid)
{
	struct qca8k_fdb fdb = { 0 };
	int ret;

	mutex_lock(&priv->reg_mutex);

	qca8k_fdb_write(priv, vid, 0, mac, 0);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
	if (ret < 0)
		goto exit;

	ret = qca8k_fdb_read(priv, &fdb);
	if (ret < 0)
		goto exit;

	/* Rule exists. Delete it first */
	if (fdb.aging) {
		ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
		if (ret)
			goto exit;
	} else {
		/* New entry. Initialize it as static */
		fdb.aging = QCA8K_ATU_STATUS_STATIC;
	}

	/* Add port to fdb portmask */
	fdb.port_mask |= port_mask;

	qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}

static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
				    const u8 *mac, u16 vid)
{
	struct qca8k_fdb fdb = { 0 };
	int ret;

	mutex_lock(&priv->reg_mutex);

	qca8k_fdb_write(priv, vid, 0, mac, 0);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
	if (ret < 0)
		goto exit;

	ret = qca8k_fdb_read(priv, &fdb);
	if (ret < 0)
		goto exit;

	/* Rule doesn't exist. Why delete? */
	if (!fdb.aging) {
		ret = -EINVAL;
		goto exit;
	}

	ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
	if (ret)
		goto exit;

	/* Only port in the rule is this port. Don't re-insert */
	if (fdb.port_mask == port_mask)
		goto exit;

	/* Remove port from port mask */
	fdb.port_mask &= ~port_mask;

	qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}

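/* Trigger a VTU (VLAN table) operation for the given VID, mirroring the
 * ATU access sequence: set BUSY together with the command, wait for BUSY
 * to clear, and report -ENOMEM if a LOAD hit a full table.
 */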
static int qca8k_vlan_access(struct qca8k_priv *priv,
			     enum qca8k_vlan_cmd cmd, u16 vid)
{
	u32 reg;
	int ret;

	/* Set the command and VLAN index */
	reg = QCA8K_VTU_FUNC1_BUSY;
	reg |= cmd;
	reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);

	/* Write the function register triggering the table access */
	ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
	if (ret)
		return ret;

	/* wait for completion */
	ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
	if (ret)
		return ret;

	/* Check for table full violation when adding an entry */
	if (cmd == QCA8K_VLAN_LOAD) {
		ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
		if (ret < 0)
			return ret;
		if (reg & QCA8K_VTU_FUNC1_FULL)
			return -ENOMEM;
	}

	return 0;
}

static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
			  bool untagged)
{
	u32 reg;
	int ret;

	/* We do the right thing with VLAN 0 and treat it as untagged while
	 * preserving the tag on egress.
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&priv->reg_mutex);
	ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
	if (ret < 0)
		goto out;

	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
	if (ret < 0)
		goto out;
	reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
	reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
	if (untagged)
		reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
	else
		reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);

	ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
	if (ret)
		goto out;
	ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);

out:
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
{
	u32 reg, mask;
	int ret, i;
	bool del;

	mutex_lock(&priv->reg_mutex);
	ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
	if (ret < 0)
		goto out;

	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
	if (ret < 0)
		goto out;
	reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
	reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);

	/* Check if we're the last member to be removed */
	del = true;
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);

		if ((reg & mask) != mask) {
			del = false;
			break;
		}
	}

	if (del) {
		ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
	} else {
		ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
		if (ret)
			goto out;
		ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
	}

out:
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

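/* Reset and enable the MIB block: flush all counters, wait for the flush
 * to complete, set the MIB_CPU_KEEP bit and finally enable the MIB module.
 */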
int qca8k_mib_init(struct qca8k_priv *priv)
{
	int ret;

	mutex_lock(&priv->reg_mutex);
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
				 QCA8K_MIB_BUSY);
	if (ret)
		goto exit;

	ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
	if (ret)
		goto exit;

	ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
	if (ret)
		goto exit;

	ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}

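/* Enable or disable the MAC of a port by toggling its TX/RX MAC bits.
 * Ports 1-5 have an internal PHY, so the LINK_AUTO bit is toggled
 * together with the MAC bits for those ports only.
 */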
void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
{
	u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;

	/* Port 0 and 6 have no internal PHY */
	if (port > 0 && port < 6)
		mask |= QCA8K_PORT_STATUS_LINK_AUTO;

	if (enable)
		regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
	else
		regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
}

void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
		       uint8_t *data)
{
	struct qca8k_priv *priv = ds->priv;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < priv->info->mib_count; i++)
		strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
			ETH_GSTRING_LEN);
}

void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
			     uint64_t *data)
{
	struct qca8k_priv *priv = ds->priv;
	const struct qca8k_mib_desc *mib;
	u32 reg, i, val;
	u32 hi = 0;
	int ret;

	if (priv->mgmt_master && priv->info->ops->autocast_mib &&
	    priv->info->ops->autocast_mib(ds, port, data) > 0)
		return;

	for (i = 0; i < priv->info->mib_count; i++) {
		mib = &ar8327_mib[i];
		reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;

		ret = qca8k_read(priv, reg, &val);
		if (ret < 0)
			continue;

		if (mib->size == 2) {
			ret = qca8k_read(priv, reg + 4, &hi);
			if (ret < 0)
				continue;
		}

		data[i] = val;
		if (mib->size == 2)
			data[i] |= (u64)hi << 32;
	}
}

int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct qca8k_priv *priv = ds->priv;

	if (sset != ETH_SS_STATS)
		return 0;

	return priv->info->mib_count;
}

int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
		      struct ethtool_eee *eee)
{
	u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
	struct qca8k_priv *priv = ds->priv;
	u32 reg;
	int ret;

	mutex_lock(&priv->reg_mutex);
	ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
	if (ret < 0)
		goto exit;

	if (eee->eee_enabled)
		reg |= lpi_en;
	else
		reg &= ~lpi_en;
	ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}

int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
		      struct ethtool_eee *e)
{
	/* Nothing to do on the port's MAC */
	return 0;
}

void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct qca8k_priv *priv = ds->priv;
	u32 stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
		break;
	case BR_STATE_BLOCKING:
		stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
		break;
	case BR_STATE_LISTENING:
		stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
		break;
	case BR_STATE_LEARNING:
		stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
		break;
	}

	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
		  QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
}

int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
			   struct dsa_bridge bridge,
			   bool *tx_fwd_offload,
			   struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	int port_mask, cpu_port;
	int i, ret;

	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	port_mask = BIT(cpu_port);

	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		if (dsa_is_cpu_port(ds, i))
			continue;
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;
		/* Add this port to the portvlan mask of the other ports
		 * in the bridge
		 */
		ret = regmap_set_bits(priv->regmap,
				      QCA8K_PORT_LOOKUP_CTRL(i),
				      BIT(port));
		if (ret)
			return ret;
		if (i != port)
			port_mask |= BIT(i);
	}

	/* Add all other ports to this port's portvlan mask */
	ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
			QCA8K_PORT_LOOKUP_MEMBER, port_mask);

	return ret;
}

void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
			     struct dsa_bridge bridge)
{
	struct qca8k_priv *priv = ds->priv;
	int cpu_port, i;

	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;

	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		if (dsa_is_cpu_port(ds, i))
			continue;
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;
		/* Remove this port from the portvlan mask of the other
		 * ports in the bridge
		 */
		regmap_clear_bits(priv->regmap,
				  QCA8K_PORT_LOOKUP_CTRL(i),
				  BIT(port));
	}

	/* Set the cpu port to be the only one in the portvlan mask of
	 * this port
	 */
	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
		  QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
}

void qca8k_port_fast_age(struct dsa_switch *ds, int port)
{
	struct qca8k_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
	mutex_unlock(&priv->reg_mutex);
}

int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct qca8k_priv *priv = ds->priv;
	unsigned int secs = msecs / 1000;
	u32 val;

	/* AGE_TIME reg is set in 7s steps */
	val = secs / 7;

	/* Handle the case of val being 0 so that learning is NOT
	 * disabled
	 */
	if (!val)
		val = 1;

	return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
				  QCA8K_ATU_AGE_TIME_MASK,
				  QCA8K_ATU_AGE_TIME(val));
}

int qca8k_port_enable(struct dsa_switch *ds, int port,
		      struct phy_device *phy)
{
	struct qca8k_priv *priv = ds->priv;

	qca8k_port_set_status(priv, port, 1);
	priv->port_enabled_map |= BIT(port);

	if (dsa_is_user_port(ds, port))
		phy_support_asym_pause(phy);

	return 0;
}

void qca8k_port_disable(struct dsa_switch *ds, int port)
{
	struct qca8k_priv *priv = ds->priv;

	qca8k_port_set_status(priv, port, 0);
	priv->port_enabled_map &= ~BIT(port);
}

int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct qca8k_priv *priv = ds->priv;
	int ret;

	/* We only have a general MTU setting.
	 * DSA always sets the CPU port's MTU to the largest MTU of the slave
	 * ports.
	 * Setting the MTU just for the CPU port is therefore sufficient to
	 * correctly set a value for every port.
	 */
	if (!dsa_is_cpu_port(ds, port))
		return 0;

	/* To change the MAX_FRAME_SIZE the cpu ports must be off or
	 * the switch panics.
	 * Turn off both cpu ports before applying the new value to prevent
	 * this.
	 */
	if (priv->port_enabled_map & BIT(0))
		qca8k_port_set_status(priv, 0, 0);

	if (priv->port_enabled_map & BIT(6))
		qca8k_port_set_status(priv, 6, 0);

	/* Include L2 header / FCS length */
	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
			  ETH_HLEN + ETH_FCS_LEN);

	if (priv->port_enabled_map & BIT(0))
		qca8k_port_set_status(priv, 0, 1);

	if (priv->port_enabled_map & BIT(6))
		qca8k_port_set_status(priv, 6, 1);

	return ret;
}

int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
{
	return QCA8K_MAX_MTU;
}

int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
			  u16 port_mask, u16 vid)
{
	/* Set the vid to the port vlan id if no vid is set */
	if (!vid)
		vid = QCA8K_PORT_VID_DEF;

	return qca8k_fdb_add(priv, addr, port_mask, vid,
			     QCA8K_ATU_STATUS_STATIC);
}

int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
		       const unsigned char *addr, u16 vid,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	u16 port_mask = BIT(port);

	return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
}

int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
		       const unsigned char *addr, u16 vid,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	u16 port_mask = BIT(port);

	if (!vid)
		vid = QCA8K_PORT_VID_DEF;

	return qca8k_fdb_del(priv, addr, port_mask, vid);
}

int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
			dsa_fdb_dump_cb_t *cb, void *data)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	struct qca8k_fdb _fdb = { 0 };
	int cnt = QCA8K_NUM_FDB_RECORDS;
	bool is_static;
	int ret = 0;

	mutex_lock(&priv->reg_mutex);
	while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
		if (!_fdb.aging)
			break;
		is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
		ret = cb(_fdb.mac, _fdb.vid, is_static, data);
		if (ret)
			break;
	}
	mutex_unlock(&priv->reg_mutex);

	return 0;
}

int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
		       const struct switchdev_obj_port_mdb *mdb,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = ds->priv;
	const u8 *addr = mdb->addr;
	u16 vid = mdb->vid;

	return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
}

int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
		       const struct switchdev_obj_port_mdb *mdb,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = ds->priv;
	const u8 *addr = mdb->addr;
	u16 vid = mdb->vid;

	return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
}

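/* The switch has a single global monitor (mirror) port. A new mirror rule
 * is only accepted if no monitor port is configured yet (the field reads
 * 0xF in that case) or if it targets the already configured monitor port.
 */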
int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
			  struct dsa_mall_mirror_tc_entry *mirror,
			  bool ingress, struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	int monitor_port, ret;
	u32 reg, val;

	/* Check for an existing entry */
	if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
		return -EEXIST;

	ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
	if (ret)
		return ret;

	/* QCA83xx can have only one port set to mirror mode.
	 * Check that the correct port is requested and return an error
	 * otherwise. When no mirror port is set, the value reads 0xF.
	 */
	monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
	if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
		return -EEXIST;

	/* Set the monitor port */
	val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
			 mirror->to_local_port);
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
				 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
	if (ret)
		return ret;

	if (ingress) {
		reg = QCA8K_PORT_LOOKUP_CTRL(port);
		val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
	} else {
		reg = QCA8K_REG_PORT_HOL_CTRL1(port);
		val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
	}

	ret = regmap_update_bits(priv->regmap, reg, val, val);
	if (ret)
		return ret;

	/* Track mirror port for tx and rx to decide when the
	 * mirror port has to be disabled.
	 */
	if (ingress)
		priv->mirror_rx |= BIT(port);
	else
		priv->mirror_tx |= BIT(port);

	return 0;
}

void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
			   struct dsa_mall_mirror_tc_entry *mirror)
{
	struct qca8k_priv *priv = ds->priv;
	u32 reg, val;
	int ret;

	if (mirror->ingress) {
		reg = QCA8K_PORT_LOOKUP_CTRL(port);
		val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
	} else {
		reg = QCA8K_REG_PORT_HOL_CTRL1(port);
		val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
	}

	ret = regmap_clear_bits(priv->regmap, reg, val);
	if (ret)
		goto err;

	if (mirror->ingress)
		priv->mirror_rx &= ~BIT(port);
	else
		priv->mirror_tx &= ~BIT(port);

	/* No port set to send packet to mirror port. Disable mirror port */
	if (!priv->mirror_rx && !priv->mirror_tx) {
		val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
		ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
					 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
		if (ret)
			goto err;
	}

	return;
err:
	dev_err(priv->dev, "Failed to del mirror port from %d", port);
}

int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
			      bool vlan_filtering,
			      struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	int ret;

	if (vlan_filtering) {
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
				QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
				QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
	} else {
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
				QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
				QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
	}

	return ret;
}

int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
			const struct switchdev_obj_port_vlan *vlan,
			struct netlink_ext_ack *extack)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
	if (ret) {
		dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
		return ret;
	}

	if (pvid) {
		ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
				QCA8K_EGREES_VLAN_PORT_MASK(port),
				QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
		if (ret)
			return ret;

		ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
				  QCA8K_PORT_VLAN_CVID(vlan->vid) |
				  QCA8K_PORT_VLAN_SVID(vlan->vid));
	}

	return ret;
}

int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
			const struct switchdev_obj_port_vlan *vlan)
{
	struct qca8k_priv *priv = ds->priv;
	int ret;

	ret = qca8k_vlan_del(priv, port, vlan->vid);
	if (ret)
		dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);

	return ret;
}

static bool qca8k_lag_can_offload(struct dsa_switch *ds,
				  struct dsa_lag lag,
				  struct netdev_lag_upper_info *info,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp;
	int members = 0;

	if (!lag.id)
		return false;

	dsa_lag_foreach_port(dp, ds->dst, &lag)
		/* Includes the port joining the LAG */
		members++;

	if (members > QCA8K_NUM_PORTS_FOR_LAG) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload more than 4 LAG ports");
		return false;
	}

	if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only offload LAG using hash TX type");
		return false;
	}

	if (info->hash_type != NETDEV_LAG_HASH_L2 &&
	    info->hash_type != NETDEV_LAG_HASH_L23) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only offload L2 or L2+L3 TX hash");
		return false;
	}

	return true;
}

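/* Program the trunk hash mode. The hash mode register is global, so a new
 * LAG may only request a different hash mode when it is the only LAG
 * currently configured on the switch.
 */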
static int qca8k_lag_setup_hash(struct dsa_switch *ds,
				struct dsa_lag lag,
				struct netdev_lag_upper_info *info)
{
	struct net_device *lag_dev = lag.dev;
	struct qca8k_priv *priv = ds->priv;
	bool unique_lag = true;
	unsigned int i;
	u32 hash = 0;

	switch (info->hash_type) {
	case NETDEV_LAG_HASH_L23:
		hash |= QCA8K_TRUNK_HASH_SIP_EN;
		hash |= QCA8K_TRUNK_HASH_DIP_EN;
		fallthrough;
	case NETDEV_LAG_HASH_L2:
		hash |= QCA8K_TRUNK_HASH_SA_EN;
		hash |= QCA8K_TRUNK_HASH_DA_EN;
		break;
	default: /* We should NEVER reach this */
		return -EOPNOTSUPP;
	}

	/* Check if we are the only configured LAG */
	dsa_lags_foreach_id(i, ds->dst)
		if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
			unique_lag = false;
			break;
		}

	/* The hash mode is global, so the same hash mode must be used by
	 * all 4 possible LAGs.
	 * If we are the only configured LAG we can set whatever hash mode
	 * we want.
	 * To change the hash mode, all LAGs must be removed first and the
	 * new mode applied with the last one.
	 */
	if (unique_lag) {
		priv->lag_hash_mode = hash;
	} else if (priv->lag_hash_mode != hash) {
		netdev_err(lag_dev, "Error: Mismatched hash mode across different LAGs is not supported\n");
		return -EOPNOTSUPP;
	}

	return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
				  QCA8K_TRUNK_HASH_MASK, hash);
}

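/* Update the trunk registers when a port joins or leaves a LAG: adjust the
 * member bitmap in TRUNK_CTRL0 (disabling the trunk entirely when the
 * bitmap becomes empty), then claim or release one of the per-trunk member
 * ID slots for this port.
 */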
static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
				     struct dsa_lag lag, bool delete)
{
	struct qca8k_priv *priv = ds->priv;
	int ret, id, i;
	u32 val;

	/* DSA LAG IDs are one-based, hardware is zero-based */
	id = lag.id - 1;

	/* Read current port member */
	ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
	if (ret)
		return ret;

	/* Shift val to the correct trunk */
	val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
	val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
	if (delete)
		val &= ~BIT(port);
	else
		val |= BIT(port);

	/* Update port member. With empty portmap disable trunk */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
				 QCA8K_REG_GOL_TRUNK_MEMBER(id) |
				 QCA8K_REG_GOL_TRUNK_EN(id),
				 !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
				 val << QCA8K_REG_GOL_TRUNK_SHIFT(id));

	/* Search for an empty member slot when adding, or for this port's
	 * slot when deleting
	 */
	for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
		ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
		if (ret)
			return ret;

		val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
		val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;

		if (delete) {
			/* If the member is flagged as disabled assume this
			 * slot is empty
			 */
			if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
				continue;

			val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
			if (val != port)
				continue;
		} else {
			/* If the member is flagged as enabled assume this
			 * slot is already taken
			 */
			if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
				continue;
		}

		/* We have found the member to add/remove */
		break;
	}

	/* Set port in the correct port mask or disable port if in delete mode */
	return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
				  QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
				  QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
				  !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
				  port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
}

int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
			struct netdev_lag_upper_info *info,
			struct netlink_ext_ack *extack)
{
	int ret;

	if (!qca8k_lag_can_offload(ds, lag, info, extack))
		return -EOPNOTSUPP;

	ret = qca8k_lag_setup_hash(ds, lag, info);
	if (ret)
		return ret;

	return qca8k_lag_refresh_portmap(ds, port, lag, false);
}

int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
			 struct dsa_lag lag)
{
	return qca8k_lag_refresh_portmap(ds, port, lag, true);
}

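/* Read the MASK_CTRL register to verify that the detected switch ID
 * matches the one expected from priv->info, and stash the silicon
 * revision for the internal PHY driver.
 */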
int qca8k_read_switch_id(struct qca8k_priv *priv)
{
	u32 val;
	u8 id;
	int ret;

	if (!priv->info)
		return -ENODEV;

	ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
	if (ret < 0)
		return -ENODEV;

	id = QCA8K_MASK_CTRL_DEVICE_ID(val);
	if (id != priv->info->id) {
		dev_err(priv->dev,
			"Switch id detected %x but expected %x",
			id, priv->info->id);
		return -ENODEV;
	}

	priv->switch_id = id;

	/* Save revision to communicate to the internal PHY driver */
	priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);

	return 0;
}