// SPDX-License-Identifier: GPL-2.0
/* Copyright 2020 NXP
 */
#include <net/tc_act/tc_gate.h>
#include <linux/dsa/8021q.h>
#include "sja1105_vl.h"

#define SJA1105_SIZE_VL_STATUS			8

/* Insert into the global gate list, sorted by gate action time. */
static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
				     struct sja1105_rule *rule,
				     u8 gate_state, s64 entry_time,
				     struct netlink_ext_ack *extack)
{
	struct sja1105_gate_entry *e;
	int rc;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->rule = rule;
	e->gate_state = gate_state;
	e->interval = entry_time;

	if (list_empty(&gating_cfg->entries)) {
		list_add(&e->list, &gating_cfg->entries);
	} else {
		struct sja1105_gate_entry *p;

		list_for_each_entry(p, &gating_cfg->entries, list) {
			if (p->interval == e->interval) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Gate conflict");
				rc = -EBUSY;
				goto err;
			}

			if (e->interval < p->interval)
				break;
		}
		list_add(&e->list, p->list.prev);
	}

	gating_cfg->num_entries++;

	return 0;
err:
	kfree(e);
	return rc;
}

/* The gate entries contain absolute times in their e->interval field. Convert
 * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
 */
static void
sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
				    u64 cycle_time)
{
	struct sja1105_gate_entry *last_e;
	struct sja1105_gate_entry *e;
	struct list_head *prev;

	list_for_each_entry(e, &gating_cfg->entries, list) {
		struct sja1105_gate_entry *p;

		prev = e->list.prev;

		if (prev == &gating_cfg->entries)
			continue;

		p = list_entry(prev, struct sja1105_gate_entry, list);
		p->interval = e->interval - p->interval;
	}
	last_e = list_last_entry(&gating_cfg->entries,
				 struct sja1105_gate_entry, list);
	last_e->interval = cycle_time - last_e->interval;
}

static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
{
	struct sja1105_gate_entry *e, *n;

	list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
		list_del(&e->list);
		kfree(e);
	}
}

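/* Build one global gating subschedule out of all time-triggered VL rules.
 * The rule with the largest cycle time dictates the base time and cycle time
 * of the subschedule; every rule's gate entries are then replayed at
 * multiples of that rule's own cycle time until the largest cycle time is
 * covered.
 *
 * Illustrative example (made-up numbers): rule A has a 400 us cycle with one
 * gate entry at offset 0, rule B has a 200 us cycle with one gate entry at
 * offset 50 us, and both share the same base time. The composed subschedule
 * spans 400 us and contains A's entry at 0 plus B's entry at 50 us and at
 * 250 us.
 */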
static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
					      struct netlink_ext_ack *extack)
{
	struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
	struct sja1105_rule *rule;
	s64 max_cycle_time = 0;
	s64 its_base_time = 0;
	int i, rc = 0;

	sja1105_free_gating_config(gating_cfg);

	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		if (rule->type != SJA1105_RULE_VL)
			continue;
		if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
			continue;

		if (max_cycle_time < rule->vl.cycle_time) {
			max_cycle_time = rule->vl.cycle_time;
			its_base_time = rule->vl.base_time;
		}
	}

	if (!max_cycle_time)
		return 0;

	dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
		max_cycle_time, its_base_time);

	gating_cfg->base_time = its_base_time;
	gating_cfg->cycle_time = max_cycle_time;
	gating_cfg->num_entries = 0;

	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		s64 time;
		s64 rbt;

		if (rule->type != SJA1105_RULE_VL)
			continue;
		if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
			continue;

		/* Calculate the difference between this gating schedule's
		 * base time, and the base time of the gating schedule with the
		 * longest cycle time. We call it the relative base time (rbt).
		 */
		rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
				       its_base_time);
		rbt -= its_base_time;

		time = rbt;

		for (i = 0; i < rule->vl.num_entries; i++) {
			u8 gate_state = rule->vl.entries[i].gate_state;
			s64 entry_time = time;

			while (entry_time < max_cycle_time) {
				rc = sja1105_insert_gate_entry(gating_cfg, rule,
							       gate_state,
							       entry_time,
							       extack);
				if (rc)
					goto err;

				entry_time += rule->vl.cycle_time;
			}
			time += rule->vl.entries[i].interval;
		}
	}

	sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);

	return 0;
err:
	sja1105_free_gating_config(gating_cfg);
	return rc;
}

/* The switch flow classification core implements TTEthernet, which 'thinks' in
 * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
 * However it also has one other operating mode (VLLUPFORMAT=0) where it acts
 * somewhat closer to a pre-standard implementation of IEEE 802.1Qci
 * (Per-Stream Filtering and Policing), which is what the driver is going to be
 * implementing.
 *
 *                                 VL Lookup
 *        Key = {DMAC && VLANID   +---------+  Key = { (DMAC[47:16] & VLMASK ==
 *               && VLAN PCP      |         |                        VLMARKER)
 *               && INGRESS PORT} +---------+                     (both fixed)
 *            (exact match,            |        && DMAC[15:0] == VLID
 *         all specified in rule)      |               (specified in rule)
 *                                     v        && INGRESS PORT }
 *                               ------------
 *                    0 (PSFP)  /            \  1 (ARINC664)
 *                 +-----------/  VLLUPFORMAT \----------+
 *                 |           \    (fixed)   /          |
 *                 |            \            /           |
 *  0 (forwarding) v             ------------            |
 *           ------------                                |
 *          /            \  1 (QoS classification)       |
 *     +---/  ISCRITICAL  \-----------+                  |
 *     |   \  (per rule)  /           |                  |
 *     |    \            /    VLID taken from     VLID taken from
 *     v     ------------     index of rule      contents of rule
 *  select                    that matched        that matched
 * DESTPORTS                        |                    |
 *     |                            +---------+----------+
 *     |                                      |
 *     |                                      v
 *     |                                VL Forwarding
 *     |                              (indexed by VLID)
 *     |                                 +---------+
 *     |                  +--------------|         |
 *     |                  |  select TYPE +---------+
 *     |                  v
 *     |          0 (rate       ------------      1 (time
 *     |       constrained)    /            \     triggered)
 *     |            +---------/     TYPE     \------------+
 *     |            |         \  (per VLID)  /            |
 *     |            v          \            /             v
 *     |      VL Policing       ------------        VL Policing
 *     | (indexed by VLID)                      (indexed by VLID)
 *     |      +---------+                          +---------+
 *     |      | TYPE=0  |                          | TYPE=1  |
 *     |      +---------+                          +---------+
 *     |   select SHARINDX                      select SHARINDX to
 *     |    to rate-limit                     re-enter VL Forwarding
 *     |   groups of VL's                   with new VLID for egress
 *     |    to same quota                             |
 *     |         |                                    |
 *     |   select MAXLEN -> exceed => drop  select MAXLEN -> exceed => drop
 *     |         |                                    |
 *     |         v                                    v
 *     |    VL Forwarding                       VL Forwarding
 *     | (indexed by SHARINDX)              (indexed by SHARINDX)
 *     |      +---------+                          +---------+
 *     |      | TYPE=0  |                          | TYPE=1  |
 *     |      +---------+                          +---------+
 *     |   select PRIORITY,                     select PRIORITY,
 *     | PARTITION, DESTPORTS                 PARTITION, DESTPORTS
 *     |         |                                    |
 *     |         v                                    v
 *     |     VL Policing                         VL Policing
 *     | (indexed by SHARINDX)              (indexed by SHARINDX)
 *     |      +---------+                          +---------+
 *     |      | TYPE=0  |                          | TYPE=1  |
 *     |      +---------+                          +---------+
 *     |         |                                    |
 *     |         v                                    |
 *     |   select BAG, -> exceed => drop              |
 *     |     JITTER                                   v
 *     |        |       ----------------------------------------------
 *     |        |      /    Reception Window is open for this VL       \
 *     |        |     /    (the Schedule Table executes an entry i      \
 *     |        |    /   M <= i < N, for which these conditions hold):   \  no
 *     |        | +-/                                                      \-+
 *     |        | |yes \    WINST[M] == 1 && WINSTINDEX[M] == VLID        /  |
 *     |        | |     \   WINEND[N] == 1 && WINSTINDEX[N] == VLID      /   |
 *     |        | |      \                                              /    |
 *     |        | |       \ (the VL window has opened and not yet closed)/   |
 *     |        | |        ----------------------------------------------   |
 *     |        | v                                                          v
 *     |        | dispatch to DESTPORTS when the Schedule Table            drop
 *     |        | executes an entry i with TXEN == 1 && VLINDEX == i
 *     v        v
 *  dispatch immediately to DESTPORTS
 *
 * The per-port classification key is always composed of {DMAC, VID, PCP} and
 * is non-maskable. This 'looks like' the NULL stream identification function
 * from IEEE 802.1CB clause 6, except for the extra VLAN PCP. When the switch
 * ports operate as VLAN-unaware, we do allow the user to not specify the VLAN
 * ID and PCP, and then the port-based defaults will be used.
 *
 * In TTEthernet, routing is something that needs to be done manually for each
 * Virtual Link. So the flow action must always include one of:
 *    a. 'redirect', 'trap' or 'drop': select the egress port list
 * Additionally, the following actions may be applied on a Virtual Link,
 * turning it into 'critical' traffic:
 *    b. 'police': turn it into a rate-constrained VL, with bandwidth limitation
 *       given by the maximum frame length, bandwidth allocation gap (BAG) and
 *       maximum jitter.
 *    c. 'gate': turn it into a time-triggered VL, which can only be received
 *       and forwarded according to a given schedule.
 */
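/* A rough, illustrative sketch of how these actions might be expressed from
 * user space with tc-flower. Interface names, MAC address and VLAN values
 * below are made up; see tc-flower(8), tc-mirred(8) and tc-gate(8) for the
 * authoritative syntax.
 *
 *   # a. 'redirect': steer a {DMAC, VID, PCP} flow to a fixed egress port
 *   tc qdisc add dev swp2 clsact
 *   tc filter add dev swp2 ingress protocol 802.1Q flower skip_sw \
 *           dst_mac 42:be:24:9b:76:20 vlan_id 100 vlan_prio 6 \
 *           action mirred egress redirect dev swp3
 *
 *   # c. 'gate': only accept the flow during the offloaded time windows
 *   tc filter add dev swp2 ingress protocol 802.1Q flower skip_sw \
 *           dst_mac 42:be:24:9b:76:20 vlan_id 100 vlan_prio 6 \
 *           action gate index 1 base-time 0 \
 *           sched-entry open  100000 -1 -1 \
 *           sched-entry close 300000 -1 -1
 */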

static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
				 struct sja1105_vl_lookup_entry *b)
{
	if (a->macaddr < b->macaddr)
		return true;
	if (a->macaddr > b->macaddr)
		return false;
	if (a->vlanid < b->vlanid)
		return true;
	if (a->vlanid > b->vlanid)
		return false;
	if (a->port < b->port)
		return true;
	if (a->port > b->port)
		return false;
	if (a->vlanprior < b->vlanprior)
		return true;
	if (a->vlanprior > b->vlanprior)
		return false;
	/* Keys are equal */
	return false;
}

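/* Rebuild the static config tables which describe Virtual Links: the VL
 * Lookup table (one entry per rule and per ingress port), and, if at least
 * one critical (policed or time-triggered) VL exists, the VL Policing,
 * VL Forwarding and VL Forwarding Parameters tables, which are indexed first
 * by the position in the VL Lookup table and then re-indexed through the
 * rule's SHARINDX.
 */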
static int sja1105_init_virtual_links(struct sja1105_private *priv,
				      struct netlink_ext_ack *extack)
{
	struct sja1105_vl_policing_entry *vl_policing;
	struct sja1105_vl_forwarding_entry *vl_fwd;
	struct sja1105_vl_lookup_entry *vl_lookup;
	bool have_critical_virtual_links = false;
	struct sja1105_table *table;
	struct sja1105_rule *rule;
	int num_virtual_links = 0;
	int max_sharindx = 0;
	int i, j, k;

	/* Figure out the dimensioning of the problem */
	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		if (rule->type != SJA1105_RULE_VL)
			continue;
		/* Each VL lookup entry matches on a single ingress port */
		num_virtual_links += hweight_long(rule->port_mask);

		if (rule->vl.type != SJA1105_VL_NONCRITICAL)
			have_critical_virtual_links = true;
		if (max_sharindx < rule->vl.sharindx)
			max_sharindx = rule->vl.sharindx;
	}

	if (num_virtual_links > SJA1105_MAX_VL_LOOKUP_COUNT) {
		NL_SET_ERR_MSG_MOD(extack, "Not enough VL entries available");
		return -ENOSPC;
	}

	if (max_sharindx + 1 > SJA1105_MAX_VL_LOOKUP_COUNT) {
		NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
		return -ENOSPC;
	}

	max_sharindx = max_t(int, num_virtual_links, max_sharindx) + 1;

	/* Discard previous VL Lookup Table */
	table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous VL Policing Table */
	table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous VL Forwarding Table */
	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous VL Forwarding Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Nothing to do */
	if (!num_virtual_links)
		return 0;

	/* Pre-allocate space in the static config tables */

	/* VL Lookup Table */
	table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
	table->entries = kcalloc(num_virtual_links,
				 table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_virtual_links;
	vl_lookup = table->entries;

	k = 0;

	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		unsigned long port;

		if (rule->type != SJA1105_RULE_VL)
			continue;

		for_each_set_bit(port, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
			vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
			vl_lookup[k].port = port;
			vl_lookup[k].macaddr = rule->key.vl.dmac;
			if (rule->key.type == SJA1105_KEY_VLAN_AWARE_VL) {
				vl_lookup[k].vlanid = rule->key.vl.vid;
				vl_lookup[k].vlanprior = rule->key.vl.pcp;
			} else {
				struct dsa_port *dp = dsa_to_port(priv->ds, port);
				u16 vid = dsa_tag_8021q_rx_vid(dp);

				vl_lookup[k].vlanid = vid;
				vl_lookup[k].vlanprior = 0;
			}
			/* For critical VLs, the DESTPORTS mask is taken from
			 * the VL Forwarding Table, so no point in putting it
			 * in the VL Lookup Table
			 */
			if (rule->vl.type == SJA1105_VL_NONCRITICAL)
				vl_lookup[k].destports = rule->vl.destports;
			else
				vl_lookup[k].iscritical = true;
			vl_lookup[k].flow_cookie = rule->cookie;
			k++;
		}
	}

	/* UM10944.pdf chapter 4.2.3 VL Lookup table:
	 * "the entries in the VL Lookup table must be sorted in ascending
	 * order (i.e. the smallest value must be loaded first) according to
	 * the following sort order: MACADDR, VLANID, PORT, VLANPRIOR."
	 */
	for (i = 0; i < num_virtual_links; i++) {
		struct sja1105_vl_lookup_entry *a = &vl_lookup[i];

		for (j = i + 1; j < num_virtual_links; j++) {
			struct sja1105_vl_lookup_entry *b = &vl_lookup[j];

			if (sja1105_vl_key_lower(b, a)) {
				struct sja1105_vl_lookup_entry tmp = *a;

				*a = *b;
				*b = tmp;
			}
		}
	}

	if (!have_critical_virtual_links)
		return 0;

	/* VL Policing Table */
	table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
	table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = max_sharindx;
	vl_policing = table->entries;

	/* VL Forwarding Table */
	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
	table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = max_sharindx;
	vl_fwd = table->entries;

	/* VL Forwarding Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = 1;

	for (i = 0; i < num_virtual_links; i++) {
		unsigned long cookie = vl_lookup[i].flow_cookie;
		struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);

		if (rule->vl.type == SJA1105_VL_NONCRITICAL)
			continue;
		if (rule->vl.type == SJA1105_VL_TIME_TRIGGERED) {
			int sharindx = rule->vl.sharindx;

			vl_policing[i].type = 1;
			vl_policing[i].sharindx = sharindx;
			vl_policing[i].maxlen = rule->vl.maxlen;
			vl_policing[sharindx].type = 1;

			vl_fwd[i].type = 1;
			vl_fwd[sharindx].type = 1;
			vl_fwd[sharindx].priority = rule->vl.ipv;
			vl_fwd[sharindx].partition = 0;
			vl_fwd[sharindx].destports = rule->vl.destports;
		}
	}

	sja1105_frame_memory_partitioning(priv);

	return 0;
}

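/* Offload a 'redirect', 'trap' or 'drop' action as a non-critical Virtual
 * Link: find or create the rule for this cookie, add the ingress port to its
 * port mask, and rebuild the VL tables. On failure, the port is removed
 * again and the rule is freed if no other port still references it.
 */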
int sja1105_vl_redirect(struct sja1105_private *priv, int port,
			struct netlink_ext_ack *extack, unsigned long cookie,
			struct sja1105_key *key, unsigned long destports,
			bool append)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct dsa_port *dp = dsa_to_port(priv->ds, port);
	bool vlan_aware = dsa_port_is_vlan_filtering(dp);
	int rc;

	if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only redirect based on DMAC");
		return -EOPNOTSUPP;
	} else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only redirect based on {DMAC, VID, PCP}");
		return -EOPNOTSUPP;
	}

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_VL;
		rule->key = *key;
		list_add(&rule->list, &priv->flow_block.rules);
	}

	rule->port_mask |= BIT(port);
	if (append)
		rule->vl.destports |= destports;
	else
		rule->vl.destports = destports;

	rc = sja1105_init_virtual_links(priv, extack);
	if (rc) {
		rule->port_mask &= ~BIT(port);
		if (!rule->port_mask) {
			list_del(&rule->list);
			kfree(rule);
		}
	}

	return rc;
}

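/* Remove one ingress port from a Virtual Link rule (freeing the rule once no
 * port references it any longer), then push the updated gating subschedule,
 * VL tables and schedule to hardware via a static config reload.
 */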
int sja1105_vl_delete(struct sja1105_private *priv, int port,
		      struct sja1105_rule *rule, struct netlink_ext_ack *extack)
{
	int rc;

	rule->port_mask &= ~BIT(port);
	if (!rule->port_mask) {
		list_del(&rule->list);
		kfree(rule);
	}

	rc = sja1105_compose_gating_subschedule(priv, extack);
	if (rc)
		return rc;

	rc = sja1105_init_virtual_links(priv, extack);
	if (rc)
		return rc;

	rc = sja1105_init_scheduling(priv);
	if (rc < 0)
		return rc;

	return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
}

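/* Offload a tc-gate action as a time-triggered Virtual Link. The base time,
 * cycle time and every gate interval must be multiples of the schedule tick
 * (sja1105_delta_to_ns(1), i.e. 200 ns), each interval is capped at
 * SJA1105_TAS_MAX_DELTA ticks (roughly 52 ms), IntervalOctetMax cannot be
 * offloaded, and all entries of one VL must share a single internal priority
 * value (IPV). The per-rule schedule is then merged into the global gating
 * subschedule and checked for conflicts against any tc-taprio schedule.
 */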
int sja1105_vl_gate(struct sja1105_private *priv, int port,
		    struct netlink_ext_ack *extack, unsigned long cookie,
		    struct sja1105_key *key, u32 index, s32 prio,
		    u64 base_time, u64 cycle_time, u64 cycle_time_ext,
		    u32 num_entries, struct action_gate_entry *entries)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct dsa_port *dp = dsa_to_port(priv->ds, port);
	bool vlan_aware = dsa_port_is_vlan_filtering(dp);
	int ipv = -1;
	int i, rc;
	s32 rem;

	if (cycle_time_ext) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cycle time extension not supported");
		return -EOPNOTSUPP;
	}

	div_s64_rem(base_time, sja1105_delta_to_ns(1), &rem);
	if (rem) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Base time must be multiple of 200 ns");
		return -ERANGE;
	}

	div_s64_rem(cycle_time, sja1105_delta_to_ns(1), &rem);
	if (rem) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cycle time must be multiple of 200 ns");
		return -ERANGE;
	}

	if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only gate based on DMAC");
		return -EOPNOTSUPP;
	} else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only gate based on {DMAC, VID, PCP}");
		return -EOPNOTSUPP;
	}

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		list_add(&rule->list, &priv->flow_block.rules);
		rule->cookie = cookie;
		rule->type = SJA1105_RULE_VL;
		rule->key = *key;
		rule->vl.type = SJA1105_VL_TIME_TRIGGERED;
		rule->vl.sharindx = index;
		rule->vl.base_time = base_time;
		rule->vl.cycle_time = cycle_time;
		rule->vl.num_entries = num_entries;
		rule->vl.entries = kcalloc(num_entries,
					   sizeof(struct action_gate_entry),
					   GFP_KERNEL);
		if (!rule->vl.entries) {
			rc = -ENOMEM;
			goto out;
		}

		for (i = 0; i < num_entries; i++) {
			div_s64_rem(entries[i].interval,
				    sja1105_delta_to_ns(1), &rem);
			if (rem) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Interval must be multiple of 200 ns");
				rc = -ERANGE;
				goto out;
			}

			if (!entries[i].interval) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Interval cannot be zero");
				rc = -ERANGE;
				goto out;
			}

			if (ns_to_sja1105_delta(entries[i].interval) >
			    SJA1105_TAS_MAX_DELTA) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Maximum interval is 52 ms");
				rc = -ERANGE;
				goto out;
			}

			if (entries[i].maxoctets != -1) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Cannot offload IntervalOctetMax");
				rc = -EOPNOTSUPP;
				goto out;
			}

			if (ipv == -1) {
				ipv = entries[i].ipv;
			} else if (ipv != entries[i].ipv) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Only support a single IPV per VL");
				rc = -EOPNOTSUPP;
				goto out;
			}

			rule->vl.entries[i] = entries[i];
		}

		if (ipv == -1) {
			if (key->type == SJA1105_KEY_VLAN_AWARE_VL)
				ipv = key->vl.pcp;
			else
				ipv = 0;
		}

		/* TODO: support per-flow MTU */
		rule->vl.maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
		rule->vl.ipv = ipv;
	}

	rule->port_mask |= BIT(port);

	rc = sja1105_compose_gating_subschedule(priv, extack);
	if (rc)
		goto out;

	rc = sja1105_init_virtual_links(priv, extack);
	if (rc)
		goto out;

	if (sja1105_gating_check_conflicts(priv, -1, extack)) {
		NL_SET_ERR_MSG_MOD(extack, "Conflict with tc-taprio schedule");
		rc = -ERANGE;
		goto out;
	}

out:
	if (rc) {
		rule->port_mask &= ~BIT(port);
		if (!rule->port_mask) {
			list_del(&rule->list);
			kfree(rule->vl.entries);
			kfree(rule);
		}
	}

	return rc;
}

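/* Return the position of the rule matching this {port, key} tuple within the
 * sorted VL Lookup table, or -1 if it cannot be found. In PSFP mode, this
 * index is the VLID the hardware uses, so it also selects the per-VL status
 * area read by sja1105_vl_stats().
 */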
static int sja1105_find_vlid(struct sja1105_private *priv, int port,
			     struct sja1105_key *key)
{
	struct sja1105_vl_lookup_entry *vl_lookup;
	struct sja1105_table *table;
	int i;

	if (WARN_ON(key->type != SJA1105_KEY_VLAN_AWARE_VL &&
		    key->type != SJA1105_KEY_VLAN_UNAWARE_VL))
		return -1;

	table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
	vl_lookup = table->entries;

	for (i = 0; i < table->entry_count; i++) {
		if (key->type == SJA1105_KEY_VLAN_AWARE_VL) {
			if (vl_lookup[i].port == port &&
			    vl_lookup[i].macaddr == key->vl.dmac &&
			    vl_lookup[i].vlanid == key->vl.vid &&
			    vl_lookup[i].vlanprior == key->vl.pcp)
				return i;
		} else {
			if (vl_lookup[i].port == port &&
			    vl_lookup[i].macaddr == key->vl.dmac)
				return i;
		}
	}

	return -1;
}

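/* Report flow statistics for a time-triggered VL. The 8-byte per-VL status
 * area is read over SPI, and the UNRELEASED, TIMINGERR and LENGTHERR counters
 * are summed up and exposed as the flow's packet count, reported as a delta
 * against the values cached in the rule.
 */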
int sja1105_vl_stats(struct sja1105_private *priv, int port,
		     struct sja1105_rule *rule, struct flow_stats *stats,
		     struct netlink_ext_ack *extack)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 buf[SJA1105_SIZE_VL_STATUS] = {0};
	u64 unreleased;
	u64 timingerr;
	u64 lengtherr;
	int vlid, rc;
	u64 pkts;

	if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
		return 0;

	vlid = sja1105_find_vlid(priv, port, &rule->key);
	if (vlid < 0)
		return 0;

	rc = sja1105_xfer_buf(priv, SPI_READ, regs->vl_status + 2 * vlid, buf,
			      SJA1105_SIZE_VL_STATUS);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "SPI access failed");
		return rc;
	}

	sja1105_unpack(buf, &timingerr,  31, 16, SJA1105_SIZE_VL_STATUS);
	sja1105_unpack(buf, &unreleased, 15,  0, SJA1105_SIZE_VL_STATUS);
	sja1105_unpack(buf, &lengtherr,  47, 32, SJA1105_SIZE_VL_STATUS);

	pkts = timingerr + unreleased + lengtherr;

	flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, 0,
			  jiffies - rule->vl.stats.lastused,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);

	rule->vl.stats.pkts = pkts;
	rule->vl.stats.lastused = jiffies;

	return 0;
}