1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2020 Mellanox Technologies.
3
4 #include <linux/mlx5/driver.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/mlx5/fs.h>
7
8 #include "lib/fs_chains.h"
9 #include "fs_ft_pool.h"
10 #include "en/mapping.h"
11 #include "fs_core.h"
12 #include "en_tc.h"
13
/* Convenience accessors for struct mlx5_fs_chains members */
#define chains_lock(chains) ((chains)->lock)
#define chains_ht(chains) ((chains)->chains_ht)
#define prios_ht(chains) ((chains)->prios_ht)
#define tc_default_ft(chains) ((chains)->tc_default_ft)
#define tc_end_ft(chains) ((chains)->tc_end_ft)
/* fs_core prio hosting the unmanaged per-chain tables for a given namespace */
#define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
				  FDB_TC_OFFLOAD : MLX5E_TC_PRIO)
/* Fixed table size used only for the special end-of-range ("nf ft") chain */
#define FT_TBL_SZ (64 * 1024)
22
/* Top-level state for one chains/prios hierarchy layered on top of fs_core. */
struct mlx5_fs_chains {
	struct mlx5_core_dev *dev;

	struct rhashtable chains_ht;	/* chain number -> struct fs_chain */
	struct rhashtable prios_ht;	/* struct prio_key -> struct prio */
	/* Protects above chains_ht and prios_ht */
	struct mutex lock;

	struct mlx5_flow_table *tc_default_ft;	/* miss target of the nf-ft chain */
	struct mlx5_flow_table *tc_end_ft;	/* miss target of regular chains */
	struct mapping_ctx *chains_mapping;	/* chain number <-> restore id */

	enum mlx5_flow_namespace_type ns;
	u32 group_num;	/* max autogroups per created flow table */
	u32 flags;	/* MLX5_CHAINS_* capability flags */
};
39
/* Refcounted per-chain object, keyed by chain number in chains_ht. */
struct fs_chain {
	struct rhash_head node;

	u32 chain;	/* chain number; rhashtable key */

	int ref;	/* taken by each prio belonging to this chain */
	int id;		/* restore-mapping id allocated for this chain */

	struct mlx5_fs_chains *chains;	/* back-pointer to owning instance */
	struct list_head prios_list;	/* prios of this chain, sorted by (prio, level) */
	struct mlx5_flow_handle *restore_rule;	/* FDB only; NULL for NIC RX */
	struct mlx5_modify_hdr *miss_modify_hdr;	/* writes id to reg on miss */
};
53
/* Composite rhashtable key identifying one table in the hierarchy. */
struct prio_key {
	u32 chain;
	u32 prio;
	u32 level;
};
59
/* Refcounted (chain, prio, level) table plus its miss plumbing. */
struct prio {
	struct rhash_head node;	/* membership in prios_ht */
	struct list_head list;	/* position in owning chain's prios_list */

	struct prio_key key;

	int ref;	/* taken by each mlx5_chains_get_table() caller */

	struct fs_chain *chain;	/* owning chain (holds a ref on it) */
	struct mlx5_flow_table *ft;	/* the table itself */
	struct mlx5_flow_table *next_ft;	/* current miss destination */
	struct mlx5_flow_group *miss_group;	/* group hosting the miss rule */
	struct mlx5_flow_handle *miss_rule;	/* forwards misses to next_ft */
};
74
/* chains_ht parameters: key is the raw chain number embedded in fs_chain. */
static const struct rhashtable_params chain_params = {
	.head_offset = offsetof(struct fs_chain, node),
	.key_offset = offsetof(struct fs_chain, chain),
	.key_len = sizeof_field(struct fs_chain, chain),
	.automatic_shrinking = true,
};
81
/* prios_ht parameters: key is the full (chain, prio, level) triple. */
static const struct rhashtable_params prio_params = {
	.head_offset = offsetof(struct prio, node),
	.key_offset = offsetof(struct prio, key),
	.key_len = sizeof_field(struct prio, key),
	.automatic_shrinking = true,
};
88
mlx5_chains_prios_supported(struct mlx5_fs_chains * chains)89 bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
90 {
91 return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
92 }
93
mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains * chains)94 bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
95 {
96 return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
97 }
98
mlx5_chains_backwards_supported(struct mlx5_fs_chains * chains)99 bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
100 {
101 return mlx5_chains_prios_supported(chains) &&
102 mlx5_chains_ignore_flow_level_supported(chains);
103 }
104
mlx5_chains_get_chain_range(struct mlx5_fs_chains * chains)105 u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
106 {
107 if (!mlx5_chains_prios_supported(chains))
108 return 1;
109
110 if (mlx5_chains_ignore_flow_level_supported(chains))
111 return UINT_MAX - 1;
112
113 /* We should get here only for eswitch case */
114 return FDB_TC_MAX_CHAIN;
115 }
116
mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains * chains)117 u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
118 {
119 return mlx5_chains_get_chain_range(chains) + 1;
120 }
121
mlx5_chains_get_prio_range(struct mlx5_fs_chains * chains)122 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
123 {
124 if (!mlx5_chains_prios_supported(chains))
125 return 1;
126
127 if (mlx5_chains_ignore_flow_level_supported(chains))
128 return UINT_MAX;
129
130 /* We should get here only for eswitch case */
131 return FDB_TC_MAX_PRIO;
132 }
133
mlx5_chains_get_level_range(struct mlx5_fs_chains * chains)134 static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
135 {
136 if (mlx5_chains_ignore_flow_level_supported(chains))
137 return UINT_MAX;
138
139 /* Same value for FDB and NIC RX tables */
140 return FDB_TC_LEVELS_PER_PRIO;
141 }
142
143 void
mlx5_chains_set_end_ft(struct mlx5_fs_chains * chains,struct mlx5_flow_table * ft)144 mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
145 struct mlx5_flow_table *ft)
146 {
147 tc_end_ft(chains) = ft;
148 }
149
150 static struct mlx5_flow_table *
mlx5_chains_create_table(struct mlx5_fs_chains * chains,u32 chain,u32 prio,u32 level)151 mlx5_chains_create_table(struct mlx5_fs_chains *chains,
152 u32 chain, u32 prio, u32 level)
153 {
154 struct mlx5_flow_table_attr ft_attr = {};
155 struct mlx5_flow_namespace *ns;
156 struct mlx5_flow_table *ft;
157 int sz;
158
159 if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
160 ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
161 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
162
163 sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
164 ft_attr.max_fte = sz;
165
166 /* We use tc_default_ft(chains) as the table's next_ft till
167 * ignore_flow_level is allowed on FT creation and not just for FTEs.
168 * Instead caller should add an explicit miss rule if needed.
169 */
170 ft_attr.next_ft = tc_default_ft(chains);
171
172 /* The root table(chain 0, prio 1, level 0) is required to be
173 * connected to the previous fs_core managed prio.
174 * We always create it, as a managed table, in order to align with
175 * fs_core logic.
176 */
177 if (!mlx5_chains_ignore_flow_level_supported(chains) ||
178 (chain == 0 && prio == 1 && level == 0)) {
179 ft_attr.level = level;
180 ft_attr.prio = prio - 1;
181 ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
182 mlx5_get_fdb_sub_ns(chains->dev, chain) :
183 mlx5_get_flow_namespace(chains->dev, chains->ns);
184 } else {
185 ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
186 ft_attr.prio = ns_to_chains_fs_prio(chains->ns);
187 /* Firmware doesn't allow us to create another level 0 table,
188 * so we create all unmanaged tables as level 1.
189 *
190 * To connect them, we use explicit miss rules with
191 * ignore_flow_level. Caller is responsible to create
192 * these rules (if needed).
193 */
194 ft_attr.level = 1;
195 ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
196 }
197
198 ft_attr.autogroup.num_reserved_entries = 2;
199 ft_attr.autogroup.max_num_groups = chains->group_num;
200 ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
201 if (IS_ERR(ft)) {
202 mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
203 (int)PTR_ERR(ft), chain, prio, level, sz);
204 return ft;
205 }
206
207 return ft;
208 }
209
210 static int
create_chain_restore(struct fs_chain * chain)211 create_chain_restore(struct fs_chain *chain)
212 {
213 struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
214 char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
215 struct mlx5_fs_chains *chains = chain->chains;
216 enum mlx5e_tc_attr_to_reg chain_to_reg;
217 struct mlx5_modify_hdr *mod_hdr;
218 u32 index;
219 int err;
220
221 if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
222 !mlx5_chains_prios_supported(chains))
223 return 0;
224
225 err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
226 if (err)
227 return err;
228 if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
229 /* we got the special default flow tag id, so we won't know
230 * if we actually marked the packet with the restore rule
231 * we create.
232 *
233 * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
234 */
235 err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
236 mapping_remove(chains->chains_mapping, MLX5_FS_DEFAULT_FLOW_TAG);
237 if (err)
238 return err;
239 }
240
241 chain->id = index;
242
243 if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
244 chain_to_reg = CHAIN_TO_REG;
245 chain->restore_rule = esw_add_restore_rule(esw, chain->id);
246 if (IS_ERR(chain->restore_rule)) {
247 err = PTR_ERR(chain->restore_rule);
248 goto err_rule;
249 }
250 } else if (chains->ns == MLX5_FLOW_NAMESPACE_KERNEL) {
251 /* For NIC RX we don't need a restore rule
252 * since we write the metadata to reg_b
253 * that is passed to SW directly.
254 */
255 chain_to_reg = NIC_CHAIN_TO_REG;
256 } else {
257 err = -EINVAL;
258 goto err_rule;
259 }
260
261 MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
262 MLX5_SET(set_action_in, modact, field,
263 mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
264 MLX5_SET(set_action_in, modact, offset,
265 mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset);
266 MLX5_SET(set_action_in, modact, length,
267 mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen == 32 ?
268 0 : mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen);
269 MLX5_SET(set_action_in, modact, data, chain->id);
270 mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
271 1, modact);
272 if (IS_ERR(mod_hdr)) {
273 err = PTR_ERR(mod_hdr);
274 goto err_mod_hdr;
275 }
276 chain->miss_modify_hdr = mod_hdr;
277
278 return 0;
279
280 err_mod_hdr:
281 if (!IS_ERR_OR_NULL(chain->restore_rule))
282 mlx5_del_flow_rules(chain->restore_rule);
283 err_rule:
284 /* Datapath can't find this mapping, so we can safely remove it */
285 mapping_remove(chains->chains_mapping, chain->id);
286 return err;
287 }
288
/* Undo create_chain_restore(): delete the restore rule (if any), free the
 * modify header, and release the chain's restore-mapping id.
 * miss_modify_hdr doubles as the "restore was set up" marker.
 */
static void destroy_chain_restore(struct fs_chain *chain)
{
	struct mlx5_fs_chains *chains = chain->chains;

	/* Nothing was allocated for this chain (see create_chain_restore) */
	if (!chain->miss_modify_hdr)
		return;

	/* restore_rule is only created for the FDB namespace */
	if (chain->restore_rule)
		mlx5_del_flow_rules(chain->restore_rule);

	mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
	mapping_remove(chains->chains_mapping, chain->id);
}
302
/* Allocate a new fs_chain for @chain, set up its restore machinery and
 * publish it in chains_ht. Called under chains_lock (via
 * mlx5_chains_get_table() -> mlx5_chains_create_prio()).
 * Returns the new chain with ref == 0, or an ERR_PTR().
 */
static struct fs_chain *
mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
{
	struct fs_chain *chain_s = NULL;
	int err;

	chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
	if (!chain_s)
		return ERR_PTR(-ENOMEM);

	chain_s->chains = chains;
	chain_s->chain = chain;
	INIT_LIST_HEAD(&chain_s->prios_list);

	err = create_chain_restore(chain_s);
	if (err)
		goto err_restore;

	err = rhashtable_insert_fast(&chains_ht(chains), &chain_s->node,
				     chain_params);
	if (err)
		goto err_insert;

	return chain_s;

err_insert:
	destroy_chain_restore(chain_s);
err_restore:
	kvfree(chain_s);
	return ERR_PTR(err);
}
334
/* Unpublish and free a chain whose refcount dropped to zero.
 * Removal from the hashtable happens first so no new lookup can find
 * the chain while its restore resources are being torn down.
 */
static void
mlx5_chains_destroy_chain(struct fs_chain *chain)
{
	struct mlx5_fs_chains *chains = chain->chains;

	rhashtable_remove_fast(&chains_ht(chains), &chain->node,
			       chain_params);

	destroy_chain_restore(chain);
	kvfree(chain);
}
346
347 static struct fs_chain *
mlx5_chains_get_chain(struct mlx5_fs_chains * chains,u32 chain)348 mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
349 {
350 struct fs_chain *chain_s;
351
352 chain_s = rhashtable_lookup_fast(&chains_ht(chains), &chain,
353 chain_params);
354 if (!chain_s) {
355 chain_s = mlx5_chains_create_chain(chains, chain);
356 if (IS_ERR(chain_s))
357 return chain_s;
358 }
359
360 chain_s->ref++;
361
362 return chain_s;
363 }
364
/* Add a catch-all (NULL spec) rule in @ft forwarding misses to @next_ft.
 * When the miss exits the chain toward the end table, the rule also applies
 * the chain's modify header so SW can restore the chain id later.
 * Returns the rule handle or an ERR_PTR().
 */
static struct mlx5_flow_handle *
mlx5_chains_add_miss_rule(struct fs_chain *chain,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_table *next_ft)
{
	struct mlx5_fs_chains *chains = chain->chains;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act act = {};

	act.flags = FLOW_ACT_NO_APPEND;
	/* next_ft may sit at a lower or equal level than ft */
	if (mlx5_chains_ignore_flow_level_supported(chain->chains))
		act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = next_ft;

	/* Leaving towards the end table: mark the packet with the chain id */
	if (next_ft == tc_end_ft(chains) &&
	    chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
	    mlx5_chains_prios_supported(chains)) {
		act.modify_hdr = chain->miss_modify_hdr;
		act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
}
391
/* Repoint the miss rules of the prios preceding @prio so they forward to
 * @next_ft (used both when a new level-0 prio is inserted and when one is
 * removed). Only level-0 prios need this; higher levels already point at
 * the following prio's level-0 table.
 *
 * Two-phase for atomicity: first add all the replacement miss rules, then
 * — only if every add succeeded — delete the old rules and swap pointers.
 * On failure the newly added rules are rolled back and nothing changes.
 * Returns 0 on success or negative errno.
 */
static int
mlx5_chains_update_prio_prevs(struct prio *prio,
			      struct mlx5_flow_table *next_ft)
{
	struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
	struct fs_chain *chain = prio->chain;
	struct prio *pos;
	int n = 0, err;

	if (prio->key.level)
		return 0;

	/* Iterate in reverse order until reaching the level 0 rule of
	 * the previous priority, adding all the miss rules first, so we can
	 * revert them if any of them fails.
	 */
	pos = prio;
	list_for_each_entry_continue_reverse(pos,
					     &chain->prios_list,
					     list) {
		miss_rules[n] = mlx5_chains_add_miss_rule(chain,
							  pos->ft,
							  next_ft);
		if (IS_ERR(miss_rules[n])) {
			err = PTR_ERR(miss_rules[n]);
			goto err_prev_rule;
		}

		n++;
		/* level 0 of the previous prio: done */
		if (!pos->key.level)
			break;
	}

	/* Success, delete old miss rules, and update the pointers. */
	n = 0;
	pos = prio;
	list_for_each_entry_continue_reverse(pos,
					     &chain->prios_list,
					     list) {
		mlx5_del_flow_rules(pos->miss_rule);

		pos->miss_rule = miss_rules[n];
		pos->next_ft = next_ft;

		n++;
		if (!pos->key.level)
			break;
	}

	return 0;

err_prev_rule:
	/* Roll back the rules added so far; existing plumbing is untouched */
	while (--n >= 0)
		mlx5_del_flow_rules(miss_rules[n]);

	return err;
}
449
450 static void
mlx5_chains_put_chain(struct fs_chain * chain)451 mlx5_chains_put_chain(struct fs_chain *chain)
452 {
453 if (--chain->ref == 0)
454 mlx5_chains_destroy_chain(chain);
455 }
456
/* Create the table for (chain, prio, level) and wire it into the chain:
 * pick its miss destination from the sorted prios list, create the table,
 * its reserved miss group and miss rule, insert it into prios_ht and the
 * list, then reconnect the preceding prios to point at the new table.
 * Called under chains_lock. Returns the new prio (ref == 0) or ERR_PTR().
 */
static struct prio *
mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
			u32 chain, u32 prio, u32 level)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_table *next_ft;
	struct mlx5_flow_table *ft;
	struct fs_chain *chain_s;
	struct list_head *pos;
	struct prio *prio_s;
	u32 *flow_group_in;
	int err;

	/* Takes a reference on the chain, creating it if needed */
	chain_s = mlx5_chains_get_chain(chains, chain);
	if (IS_ERR(chain_s))
		return ERR_CAST(chain_s);

	prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!prio_s || !flow_group_in) {
		err = -ENOMEM;
		goto err_alloc;
	}

	/* Chain's prio list is sorted by prio and level.
	 * And all levels of some prio point to the next prio's level 0.
	 * Example list (prio, level):
	 * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
	 * In hardware, we will we have the following pointers:
	 * (3,0) -> (5,0) -> (7,0) -> Slow path
	 * (3,1) -> (5,0)
	 * (5,1) -> (7,0)
	 * (6,1) -> (7,0)
	 */

	/* Default miss for each chain: */
	next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
		  tc_default_ft(chains) :
		  tc_end_ft(chains);
	list_for_each(pos, &chain_s->prios_list) {
		struct prio *p = list_entry(pos, struct prio, list);

		/* exit on first pos that is larger */
		if (prio < p->key.prio || (prio == p->key.prio &&
					   level < p->key.level)) {
			/* Get next level 0 table */
			next_ft = p->key.level == 0 ? p->ft : p->next_ft;
			break;
		}
	}

	ft = mlx5_chains_create_table(chains, chain, prio, level);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_create;
	}

	/* Miss group occupies the two reserved entries at the table's end
	 * (see num_reserved_entries in mlx5_chains_create_table()).
	 */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
		 ft->max_fte - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft->max_fte - 1);
	miss_group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss_group)) {
		err = PTR_ERR(miss_group);
		goto err_group;
	}

	/* Add miss rule to next_ft */
	miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
	if (IS_ERR(miss_rule)) {
		err = PTR_ERR(miss_rule);
		goto err_miss_rule;
	}

	prio_s->miss_group = miss_group;
	prio_s->miss_rule = miss_rule;
	prio_s->next_ft = next_ft;
	prio_s->chain = chain_s;
	prio_s->key.chain = chain;
	prio_s->key.prio = prio;
	prio_s->key.level = level;
	prio_s->ft = ft;

	err = rhashtable_insert_fast(&prios_ht(chains), &prio_s->node,
				     prio_params);
	if (err)
		goto err_insert;

	/* Insert before 'pos', keeping the list sorted by (prio, level) */
	list_add(&prio_s->list, pos->prev);

	/* Table is ready, connect it */
	err = mlx5_chains_update_prio_prevs(prio_s, ft);
	if (err)
		goto err_update;

	kvfree(flow_group_in);
	return prio_s;

err_update:
	list_del(&prio_s->list);
	rhashtable_remove_fast(&prios_ht(chains), &prio_s->node,
			       prio_params);
err_insert:
	mlx5_del_flow_rules(miss_rule);
err_miss_rule:
	mlx5_destroy_flow_group(miss_group);
err_group:
	mlx5_destroy_flow_table(ft);
err_create:
err_alloc:
	kvfree(prio_s);
	kvfree(flow_group_in);
	mlx5_chains_put_chain(chain_s);
	return ERR_PTR(err);
}
574
/* Tear down a prio whose refcount dropped to zero: repoint preceding
 * prios to this prio's own next_ft (bypassing it), then unpublish and
 * destroy its rule, group and table, and drop the chain reference.
 * Called under chains_lock.
 */
static void
mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
			 struct prio *prio)
{
	struct fs_chain *chain = prio->chain;

	/* Disconnect from the hierarchy before destroying anything */
	WARN_ON(mlx5_chains_update_prio_prevs(prio,
					      prio->next_ft));

	list_del(&prio->list);
	rhashtable_remove_fast(&prios_ht(chains), &prio->node,
			       prio_params);
	mlx5_del_flow_rules(prio->miss_rule);
	mlx5_destroy_flow_group(prio->miss_group);
	mlx5_destroy_flow_table(prio->ft);
	mlx5_chains_put_chain(chain);
	kvfree(prio);
}
593
/* Get (creating on first use) the flow table for (chain, prio, level) and
 * take a reference on it. Recursively creates/references all lower levels
 * of the same prio first, so fs_core's table connections stay consistent.
 * Returns the table or an ERR_PTR(); release with mlx5_chains_put_table()
 * using the same (chain, prio, level).
 */
struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
		      u32 level)
{
	struct mlx5_flow_table *prev_fts;
	struct prio *prio_s;
	struct prio_key key;
	int l = 0;

	/* Validate against the ranges this instance supports */
	if ((chain > mlx5_chains_get_chain_range(chains) &&
	     chain != mlx5_chains_get_nf_ft_chain(chains)) ||
	    prio > mlx5_chains_get_prio_range(chains) ||
	    level > mlx5_chains_get_level_range(chains))
		return ERR_PTR(-EOPNOTSUPP);

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables.
	 */
	for (l = 0; l < level; l++) {
		prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
		if (IS_ERR(prev_fts)) {
			prio_s = ERR_CAST(prev_fts);
			goto err_get_prevs;
		}
	}

	key.chain = chain;
	key.prio = prio;
	key.level = level;

	mutex_lock(&chains_lock(chains));
	prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
					prio_params);
	if (!prio_s) {
		prio_s = mlx5_chains_create_prio(chains, chain,
						 prio, level);
		if (IS_ERR(prio_s))
			goto err_create_prio;
	}

	++prio_s->ref;
	mutex_unlock(&chains_lock(chains));

	return prio_s->ft;

err_create_prio:
	mutex_unlock(&chains_lock(chains));
err_get_prevs:
	/* Drop the references taken on the lower levels above */
	while (--l >= 0)
		mlx5_chains_put_table(chains, chain, prio, l);
	return ERR_CAST(prio_s);
}
646
/* Release one reference on the (chain, prio, level) table, destroying it
 * on the last put, then release the lower levels that
 * mlx5_chains_get_table() implicitly referenced.
 * Warns if the table was never created/referenced.
 */
void
mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
		      u32 level)
{
	struct prio *prio_s;
	struct prio_key key;

	key.chain = chain;
	key.prio = prio;
	key.level = level;

	mutex_lock(&chains_lock(chains));
	prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
					prio_params);
	if (!prio_s)
		goto err_get_prio;

	if (--prio_s->ref == 0)
		mlx5_chains_destroy_prio(chains, prio_s);
	mutex_unlock(&chains_lock(chains));

	/* Mirror the recursive get of levels [0, level) */
	while (level-- > 0)
		mlx5_chains_put_table(chains, chain, prio, level);

	return;

err_get_prio:
	mutex_unlock(&chains_lock(chains));
	WARN_ONCE(1,
		  "Couldn't find table: (chain: %d prio: %d level: %d)",
		  chain, prio, level);
}
679
680 struct mlx5_flow_table *
mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains * chains)681 mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
682 {
683 return tc_end_ft(chains);
684 }
685
686 struct mlx5_flow_table *
mlx5_chains_create_global_table(struct mlx5_fs_chains * chains)687 mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
688 {
689 u32 chain, prio, level;
690 int err;
691
692 if (!mlx5_chains_ignore_flow_level_supported(chains)) {
693 err = -EOPNOTSUPP;
694
695 mlx5_core_warn(chains->dev,
696 "Couldn't create global flow table, ignore_flow_level not supported.");
697 goto err_ignore;
698 }
699
700 chain = mlx5_chains_get_chain_range(chains),
701 prio = mlx5_chains_get_prio_range(chains);
702 level = mlx5_chains_get_level_range(chains);
703
704 return mlx5_chains_create_table(chains, chain, prio, level);
705
706 err_ignore:
707 return ERR_PTR(err);
708 }
709
/* Destroy a table from mlx5_chains_create_global_table().
 * @chains is unused; kept for symmetry with the create call.
 */
void
mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
				 struct mlx5_flow_table *ft)
{
	mlx5_destroy_flow_table(ft);
}
716
/* Allocate and initialize a chains instance from @attr: copy config,
 * set both default and end tables to attr->default_ft, and set up the
 * chain/prio hashtables and their lock.
 * Returns the instance or an ERR_PTR().
 */
static struct mlx5_fs_chains *
mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
	struct mlx5_fs_chains *chains_priv;
	u32 max_flow_counter;
	int err;

	chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
	if (!chains_priv)
		return ERR_PTR(-ENOMEM);

	/* Counter capability is only reported in the debug print below */
	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	mlx5_core_dbg(dev,
		      "Init flow table chains, max counters(%d), groups(%d), max flow table size(%d)\n",
		      max_flow_counter, attr->max_grp_num, attr->max_ft_sz);

	chains_priv->dev = dev;
	chains_priv->flags = attr->flags;
	chains_priv->ns = attr->ns;
	chains_priv->group_num = attr->max_grp_num;
	chains_priv->chains_mapping = attr->mapping;
	tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft;

	mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
		       mlx5_chains_get_chain_range(chains_priv),
		       mlx5_chains_get_prio_range(chains_priv));

	err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
	if (err)
		goto init_chains_ht_err;

	err = rhashtable_init(&prios_ht(chains_priv), &prio_params);
	if (err)
		goto init_prios_ht_err;

	mutex_init(&chains_lock(chains_priv));

	return chains_priv;

init_prios_ht_err:
	rhashtable_destroy(&chains_ht(chains_priv));
init_chains_ht_err:
	kfree(chains_priv);
	return ERR_PTR(err);
}
764
/* Release everything mlx5_chains_init() set up. Assumes all chains and
 * prios have already been put (hashtables are expected to be empty).
 */
static void
mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
{
	mutex_destroy(&chains_lock(chains));
	rhashtable_destroy(&prios_ht(chains));
	rhashtable_destroy(&chains_ht(chains));

	kfree(chains);
}
774
/* Public constructor; thin wrapper around mlx5_chains_init(). */
struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
	return mlx5_chains_init(dev, attr);
}
784
/* Public destructor; thin wrapper around mlx5_chains_cleanup(). */
void
mlx5_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_cleanup(chains);
}
790
791 int
mlx5_chains_get_chain_mapping(struct mlx5_fs_chains * chains,u32 chain,u32 * chain_mapping)792 mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
793 u32 *chain_mapping)
794 {
795 struct mapping_ctx *ctx = chains->chains_mapping;
796 struct mlx5_mapped_obj mapped_obj = {};
797
798 mapped_obj.type = MLX5_MAPPED_OBJ_CHAIN;
799 mapped_obj.chain = chain;
800 return mapping_add(ctx, &mapped_obj, chain_mapping);
801 }
802
803 int
mlx5_chains_put_chain_mapping(struct mlx5_fs_chains * chains,u32 chain_mapping)804 mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
805 {
806 struct mapping_ctx *ctx = chains->chains_mapping;
807
808 return mapping_remove(ctx, chain_mapping);
809 }
810