// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/fs_ttc.h"

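/*
 * The TTC table is split into three flow groups, sized so that every rule
 * created below has a slot: group 1 holds the BIT(3) == 8 L4 rules
 * (TCP/UDP/IPsec AH/ESP over IPv4/IPv6) plus the tunnel rules, group 2
 * holds the two L3 (IPv4/IPv6, any protocol) rules, and group 3 holds the
 * single catch-all rule.
 */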
#define MLX5_TTC_NUM_GROUPS	3
#define MLX5_TTC_GROUP1_SIZE	(BIT(3) + MLX5_NUM_TUNNEL_TT)
#define MLX5_TTC_GROUP2_SIZE	BIT(1)
#define MLX5_TTC_GROUP3_SIZE	BIT(0)
#define MLX5_TTC_TABLE_SIZE	(MLX5_TTC_GROUP1_SIZE +\
				 MLX5_TTC_GROUP2_SIZE +\
				 MLX5_TTC_GROUP3_SIZE)

#define MLX5_INNER_TTC_NUM_GROUPS	3
#define MLX5_INNER_TTC_GROUP1_SIZE	BIT(3)
#define MLX5_INNER_TTC_GROUP2_SIZE	BIT(1)
#define MLX5_INNER_TTC_GROUP3_SIZE	BIT(0)
#define MLX5_INNER_TTC_TABLE_SIZE	(MLX5_INNER_TTC_GROUP1_SIZE +\
					 MLX5_INNER_TTC_GROUP2_SIZE +\
					 MLX5_INNER_TTC_GROUP3_SIZE)

/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
	int num_groups;
	struct mlx5_flow_table *t;
	struct mlx5_flow_group **g;
	struct mlx5_ttc_rule rules[MLX5_NUM_TT];
	struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT];
};

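/* Return the underlying flow table so callers can chain rules to it. */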
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc)
{
	return ttc->t;
}

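/*
 * Delete all classifier and tunnel rules that were successfully created;
 * entries that were never set up (NULL) or failed (ERR_PTR) are skipped.
 */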
static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
			mlx5_del_flow_rules(ttc->rules[i].rule);
			ttc->rules[i].rule = NULL;
		}
	}

	for (i = 0; i < MLX5_NUM_TUNNEL_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
			mlx5_del_flow_rules(ttc->tunnel_rules[i]);
			ttc->tunnel_rules[i] = NULL;
		}
	}
}

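/*
 * Each traffic type is classified by the (ethertype, IP protocol) pair it
 * must match; a zero in either field means "don't match on this field".
 */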
struct mlx5_etype_proto {
	u16 etype;
	u8 proto;
};

static struct mlx5_etype_proto ttc_rules[] = {
	[MLX5_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};

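/* Tunnel traffic types, matched on the outer headers of encapsulated packets. */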
static struct mlx5_etype_proto ttc_tunnel_rules[] = {
	[MLX5_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
	[MLX5_TT_IPV4_IPIP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPIP,
	},
	[MLX5_TT_IPV6_IPIP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPIP,
	},
	[MLX5_TT_IPV4_IPV6] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPV6,
	},
	[MLX5_TT_IPV6_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPV6,
	},
};

u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
	return ttc_tunnel_rules[tt].proto;
}

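/*
 * Check the device's Ethernet offload capabilities to see whether stateless
 * RX parsing of the given tunnel protocol is supported.
 */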
static bool mlx5_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev,
					   u8 proto_type)
{
	switch (proto_type) {
	case IPPROTO_GRE:
		return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
			MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
	default:
		return false;
	}
}

static bool mlx5_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
{
	int tt;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (mlx5_tunnel_proto_supported_rx(mdev,
						   ttc_tunnel_rules[tt].proto))
			return true;
	}
	return false;
}

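/*
 * An inner flow table makes sense only if the device can decapsulate at
 * least one tunnel protocol and can match on the inner IP version.
 */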
bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (mlx5_tunnel_any_rx_proto_supported(mdev) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version));
}

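/*
 * Map an ethertype to the value used by the ip_version match field;
 * returns 0 for non-IP ethertypes.
 */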
static u8 mlx5_etype_to_ipv(u16 ethertype)
{
	if (ethertype == ETH_P_IP)
		return 4;

	if (ethertype == ETH_P_IPV6)
		return 6;

	return 0;
}

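/*
 * Build one outer-header classifier rule.  Matching on ip_version is
 * preferred when the device supports it; otherwise fall back to matching
 * on the ethertype field.
 */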
static struct mlx5_flow_handle *
mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
		       struct mlx5_flow_destination *dest, u16 etype, u8 proto)
{
	int match_ipv_outer =
		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
					  ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value,
			 outer_headers.ip_protocol, proto);
	}

	ipv = mlx5_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value,
			 outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value,
			 outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(dev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

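/*
 * Populate the TTC table: one rule per traffic type (unless the caller
 * asked to skip it via ignore_dests) and, when inner classification is
 * enabled and supported, one rule per tunnel type.  On any failure all
 * rules created so far are torn down.
 */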
static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
					 struct ttc_params *params,
					 struct mlx5_ttc_table *ttc)
{
	struct mlx5_flow_handle **trules;
	struct mlx5_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ft = ttc->t;
	rules = ttc->rules;
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		struct mlx5_ttc_rule *rule = &rules[tt];

		if (test_bit(tt, params->ignore_dests))
			continue;
		rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = params->dests[tt];
	}

	if (!params->inner_ttc || !mlx5_tunnel_inner_ft_supported(dev))
		return 0;

	trules = ttc->tunnel_rules;
	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (!mlx5_tunnel_proto_supported_rx(dev,
						    ttc_tunnel_rules[tt].proto))
			continue;
		if (test_bit(tt, params->ignore_tunnel_dests))
			continue;
		trules[tt] = mlx5_generate_ttc_rule(dev, ft,
						    &params->tunnel_dests[tt],
						    ttc_tunnel_rules[tt].etype,
						    ttc_tunnel_rules[tt].proto);
		if (IS_ERR(trules[tt])) {
			err = PTR_ERR(trules[tt]);
			trules[tt] = NULL;
			goto del_rules;
		}
	}

	return 0;

del_rules:
	mlx5_cleanup_ttc_rules(ttc);
	return err;
}

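/*
 * Create the three flow groups that back the TTC table.  Group order
 * mirrors match precedence: the most specific (L4) group first, the
 * catch-all group last.
 */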
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
					bool use_ipv)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL);
	if (!ttc->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ttc->g);
		ttc->g = NULL;
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ttc->g[ttc->num_groups]);
	ttc->g[ttc->num_groups] = NULL;
	kvfree(in);

	return err;
}

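/*
 * Like mlx5_generate_ttc_rule(), but matches on the inner headers of
 * decapsulated tunnel traffic, keyed by inner IP version and protocol.
 */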
static struct mlx5_flow_handle *
mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *ft,
			     struct mlx5_flow_destination *dest,
			     u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value,
			 inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 inner_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value,
			 inner_headers.ip_protocol, proto);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(dev, "%s: add inner TTC rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

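/*
 * Populate the inner TTC table with one rule per traffic type; a partial
 * failure rolls back everything created so far.
 */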
static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
					       struct ttc_params *params,
					       struct mlx5_ttc_table *ttc)
{
	struct mlx5_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ft = ttc->t;
	rules = ttc->rules;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		struct mlx5_ttc_rule *rule = &rules[tt];

		rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
							  &params->dests[tt],
							  ttc_rules[tt].etype,
							  ttc_rules[tt].proto);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = params->dests[tt];
	}

	return 0;

del_rules:
	mlx5_cleanup_ttc_rules(ttc);
	return err;
}

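/*
 * Create the flow groups for the inner TTC table.  Same L4/L3/any layout
 * as the outer table, but matching on inner headers and without tunnel
 * entries.
 */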
static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g),
			 GFP_KERNEL);
	if (!ttc->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ttc->g);
		ttc->g = NULL;
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_INNER_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_INNER_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_INNER_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ttc->g[ttc->num_groups]);
	ttc->g[ttc->num_groups] = NULL;
	kvfree(in);

	return err;
}

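/*
 * Create an inner TTC table: allocate the flow table, its groups and its
 * classification rules.  Returns the new table or an ERR_PTR(); on error
 * nothing is left allocated.
 */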
struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
						   struct ttc_params *params)
{
	struct mlx5_ttc_table *ttc;
	int err;

	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
	if (!ttc)
		return ERR_PTR(-ENOMEM);

	WARN_ON_ONCE(params->ft_attr.max_fte);
	params->ft_attr.max_fte = MLX5_INNER_TTC_TABLE_SIZE;
	ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
	if (IS_ERR(ttc->t)) {
		err = PTR_ERR(ttc->t);
		kvfree(ttc);
		return ERR_PTR(err);
	}

	err = mlx5_create_inner_ttc_table_groups(ttc);
	if (err)
		goto destroy_ft;

	err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc);
	if (err)
		goto destroy_ft;

	return ttc;

destroy_ft:
	mlx5_destroy_ttc_table(ttc);
	return ERR_PTR(err);
}

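/*
 * Tear down a TTC table in reverse creation order: rules, then groups,
 * then the flow table itself.  Safe to call on a partially initialized
 * table, as in the create error paths above.
 */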
void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc)
{
	int i;

	mlx5_cleanup_ttc_rules(ttc);
	for (i = ttc->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ttc->g[i]))
			mlx5_destroy_flow_group(ttc->g[i]);
		ttc->g[i] = NULL;
	}

	kfree(ttc->g);
	mlx5_destroy_flow_table(ttc->t);
	kvfree(ttc);
}

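/*
 * Create the outer TTC table.  When the device can match on
 * outer_ip_version the L4/L3 rules key on IP version; otherwise they fall
 * back to the ethertype field.
 *
 * A minimal caller sketch (illustrative only, not part of this file); the
 * namespace lookup and the tirn[] array are assumptions for the example:
 *
 *	struct ttc_params params = {};
 *	int tt;
 *
 *	params.ns = mlx5_get_flow_namespace(mdev,
 *					    MLX5_FLOW_NAMESPACE_KERNEL);
 *	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
 *		params.dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 *		params.dests[tt].tir_num = tirn[tt]; // hypothetical TIRs
 *	}
 *	ttc = mlx5_create_ttc_table(mdev, &params);
 *	if (IS_ERR(ttc))
 *		return PTR_ERR(ttc);
 */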
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
					     struct ttc_params *params)
{
	bool match_ipv_outer =
		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
					  ft_field_support.outer_ip_version);
	struct mlx5_ttc_table *ttc;
	int err;

	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
	if (!ttc)
		return ERR_PTR(-ENOMEM);

	WARN_ON_ONCE(params->ft_attr.max_fte);
	params->ft_attr.max_fte = MLX5_TTC_TABLE_SIZE;
	ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
	if (IS_ERR(ttc->t)) {
		err = PTR_ERR(ttc->t);
		kvfree(ttc);
		return ERR_PTR(err);
	}

	err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto destroy_ft;

	err = mlx5_generate_ttc_table_rules(dev, params, ttc);
	if (err)
		goto destroy_ft;

	return ttc;

destroy_ft:
	mlx5_destroy_ttc_table(ttc);
	return ERR_PTR(err);
}

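/* Point the rule for a traffic type at a new destination. */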
int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type,
		      struct mlx5_flow_destination *new_dest)
{
	return mlx5_modify_rule_destination(ttc->rules[type].rule, new_dest,
					    NULL);
}

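/*
 * Return the destination a traffic type was originally created with, so a
 * temporary redirect can later be undone.
 */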
struct mlx5_flow_destination
mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc,
			  enum mlx5_traffic_types type)
{
	struct mlx5_flow_destination *dest = &ttc->rules[type].default_dest;

	WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
		  "TTC[%d] default dest is not setup yet", type);

	return *dest;
}

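/* Restore the rule for a traffic type to its original destination. */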
int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
			      enum mlx5_traffic_types type)
{
	struct mlx5_flow_destination dest = mlx5_ttc_get_default_dest(ttc, type);

	return mlx5_ttc_fwd_dest(ttc, type, &dest);
}