// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */

#include "rx_res.h"
#include "channels.h"
#include "params.h"

#define MLX5E_MAX_NUM_RSS 16

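/* RX steering resources owned by the netdev: the RSS contexts, the per-channel
 * direct and XSK RQTs/TIRs, and the PTP RQT/TIR. drop_rqn is the RQ number of
 * the drop RQ and is used whenever a table must stop delivering traffic
 * (deactivation, missing XSK RQ, error unwind).
 */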
struct mlx5e_rx_res {
	struct mlx5_core_dev *mdev;
	enum mlx5e_rx_res_features features;
	unsigned int max_nch;
	u32 drop_rqn;

	struct mlx5e_packet_merge_param pkt_merge_param;
	struct rw_semaphore pkt_merge_param_sem;

	struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
	bool rss_active;
	u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
	unsigned int rss_nch;

	struct {
		struct mlx5e_rqt direct_rqt;
		struct mlx5e_tir direct_tir;
		struct mlx5e_rqt xsk_rqt;
		struct mlx5e_tir xsk_tir;
	} *channels;

	struct {
		struct mlx5e_rqt rqt;
		struct mlx5e_tir tir;
	} ptp;
};

/* API for rx_res_rss_* */

static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
				     const struct mlx5e_packet_merge_param *init_pkt_merge_param,
				     unsigned int init_nch)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_rss *rss;
	int err;

	if (WARN_ON(res->rss[0]))
		return -EINVAL;

	rss = mlx5e_rss_alloc();
	if (!rss)
		return -ENOMEM;

	err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
			     init_pkt_merge_param);
	if (err)
		goto err_rss_free;

	mlx5e_rss_set_indir_uniform(rss, init_nch);

	res->rss[0] = rss;

	return 0;

err_rss_free:
	mlx5e_rss_free(rss);
	return err;
}

int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_rss *rss;
	int err, i;

	for (i = 1; i < MLX5E_MAX_NUM_RSS; i++)
		if (!res->rss[i])
			break;

	if (i == MLX5E_MAX_NUM_RSS)
		return -ENOSPC;

	rss = mlx5e_rss_alloc();
	if (!rss)
		return -ENOMEM;

	err = mlx5e_rss_init_no_tirs(rss, res->mdev, inner_ft_support, res->drop_rqn);
	if (err)
		goto err_rss_free;

	mlx5e_rss_set_indir_uniform(rss, init_nch);
	if (res->rss_active)
		mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);

	res->rss[i] = rss;
	*rss_idx = i;

	return 0;

err_rss_free:
	mlx5e_rss_free(rss);
	return err;
}
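
/* A minimal sketch of how a caller might create and later tear down an extra
 * RSS context with the helpers above; the surrounding plumbing (where res and
 * the channel count come from) is an assumption, not code from this file:
 *
 *	u32 rss_idx;
 *	int err;
 *
 *	err = mlx5e_rx_res_rss_init(res, &rss_idx, nch);
 *	if (err)
 *		return err;
 *	...
 *	err = mlx5e_rx_res_rss_destroy(res, rss_idx);
 */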

static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
{
	struct mlx5e_rss *rss = res->rss[rss_idx];
	int err;

	err = mlx5e_rss_cleanup(rss);
	if (err)
		return err;

	mlx5e_rss_free(rss);
	res->rss[rss_idx] = NULL;

	return 0;
}

int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -EINVAL;

	return __mlx5e_rx_res_rss_destroy(res, rss_idx);
}

static void mlx5e_rx_res_rss_destroy_all(struct mlx5e_rx_res *res)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];
		int err;

		if (!rss)
			continue;

		err = __mlx5e_rx_res_rss_destroy(res, i);
		if (err) {
			unsigned int refcount;

			refcount = mlx5e_rss_refcnt_read(rss);
			mlx5_core_warn(res->mdev,
				       "Failed to destroy RSS context %d, refcount = %u, err = %d\n",
				       i, refcount, err);
		}
	}
}

static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
{
	int i;

	res->rss_active = true;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];

		if (!rss)
			continue;
		mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
	}
}

static void mlx5e_rx_res_rss_disable(struct mlx5e_rx_res *res)
{
	int i;

	res->rss_active = false;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];

		if (!rss)
			continue;
		mlx5e_rss_disable(rss);
	}
}

/* Updates the indirection table SW shadow, does not update the HW resources yet */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch)
{
	WARN_ON_ONCE(res->rss_active);
	mlx5e_rss_set_indir_uniform(res->rss[0], nch);
}
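
/* A minimal sketch of the two-step pattern implied by the comment above: the
 * uniform indirection table is updated while RSS is inactive, and the HW RQTs
 * are only rewritten when the channels are (re)activated. The caller's channel
 * reconfiguration step is an assumption, not code from this file:
 *
 *	mlx5e_rx_res_channels_deactivate(res);
 *	... close and reopen the channels with the new channel count ...
 *	mlx5e_rx_res_rss_set_indir_uniform(res, new_nch);
 *	mlx5e_rx_res_channels_activate(res, new_chs);
 */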

int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
			      u32 *indir, u8 *key, u8 *hfunc)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_get_rxfh(rss, indir, key, hfunc);
}

int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
			      const u32 *indir, const u8 *key, const u8 *hfunc)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch);
}
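
/* A minimal sketch of how an ethtool-style RXFH handler might use these
 * wrappers on the default context (index 0); the key buffer size macro is
 * hypothetical and the error handling is an assumption, not code from this
 * file:
 *
 *	u32 indir[MLX5E_INDIR_RQT_SIZE];
 *	u8 key[MLX5E_RSS_KEY_SIZE];	// hypothetical size macro
 *	u8 hfunc;
 *	int err;
 *
 *	err = mlx5e_rx_res_rss_get_rxfh(res, 0, indir, key, &hfunc);
 *	if (!err)
 *		err = mlx5e_rx_res_rss_set_rxfh(res, 0, indir, key, &hfunc);
 */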

u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss = res->rss[0];

	return mlx5e_rss_get_hash_fields(rss, tt);
}

int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt,
				     u8 rx_hash_fields)
{
	struct mlx5e_rss *rss = res->rss[0];

	return mlx5e_rss_set_hash_fields(rss, tt, rx_hash_fields);
}

int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res)
{
	int i, cnt;

	cnt = 0;
	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
		if (res->rss[i])
			cnt++;

	return cnt;
}

int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss)
{
	int i;

	if (!rss)
		return -EINVAL;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
		if (rss == res->rss[i])
			return i;

	return -ENOENT;
}

struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
{
	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return NULL;

	return res->rss[rss_idx];
}

/* End of API rx_res_rss_* */

struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
{
	return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL);
}

static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
				      const struct mlx5e_packet_merge_param *init_pkt_merge_param)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	int err = 0;
	int ix;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	res->channels = kvcalloc(res->max_nch, sizeof(*res->channels), GFP_KERNEL);
	if (!res->channels) {
		err = -ENOMEM;
		goto out;
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt,
					    res->mdev, false, res->drop_rqn);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_rqts;
		}
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
					    mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
					    inner_ft_support);
		mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
		mlx5e_tir_builder_build_direct(builder);

		err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct TIR: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

	if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
		goto out;

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_init_direct(&res->channels[ix].xsk_rqt,
					    res->mdev, false, res->drop_rqn);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create an XSK RQT: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_xsk_rqts;
		}
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
					    mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
					    inner_ft_support);
		mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
		mlx5e_tir_builder_build_direct(builder);

		err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create an XSK TIR: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_xsk_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

	goto out;

err_destroy_xsk_tirs:
	while (--ix >= 0)
		mlx5e_tir_destroy(&res->channels[ix].xsk_tir);

	ix = res->max_nch;
err_destroy_xsk_rqts:
	while (--ix >= 0)
		mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);

	ix = res->max_nch;
err_destroy_direct_tirs:
	while (--ix >= 0)
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);

	ix = res->max_nch;
err_destroy_direct_rqts:
	while (--ix >= 0)
		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);

	kvfree(res->channels);

out:
	mlx5e_tir_builder_free(builder);

	return err;
}

static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn);
	if (err)
		goto out;

	/* Separated from the channels RQs, does not share pkt_merge state with them */
	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
				    mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				    inner_ft_support);
	mlx5e_tir_builder_build_direct(builder);

	err = mlx5e_tir_init(&res->ptp.tir, builder, res->mdev, true);
	if (err)
		goto err_destroy_ptp_rqt;

	goto out;

err_destroy_ptp_rqt:
	mlx5e_rqt_destroy(&res->ptp.rqt);

out:
	mlx5e_tir_builder_free(builder);
	return err;
}

static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res)
{
	unsigned int ix;

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);
		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);

		if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
			continue;

		mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
		mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
	}

	kvfree(res->channels);
}

static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_tir_destroy(&res->ptp.tir);
	mlx5e_rqt_destroy(&res->ptp.rqt);
}

int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
		      enum mlx5e_rx_res_features features, unsigned int max_nch,
		      u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
		      unsigned int init_nch)
{
	int err;

	res->mdev = mdev;
	res->features = features;
	res->max_nch = max_nch;
	res->drop_rqn = drop_rqn;

	res->pkt_merge_param = *init_pkt_merge_param;
	init_rwsem(&res->pkt_merge_param_sem);

	err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
	if (err)
		goto err_out;

	err = mlx5e_rx_res_channels_init(res, init_pkt_merge_param);
	if (err)
		goto err_rss_destroy;

	err = mlx5e_rx_res_ptp_init(res);
	if (err)
		goto err_channels_destroy;

	return 0;

err_channels_destroy:
	mlx5e_rx_res_channels_destroy(res);
err_rss_destroy:
	__mlx5e_rx_res_rss_destroy(res, 0);
err_out:
	return err;
}
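
/* A minimal sketch of the intended lifecycle of this object, built only from
 * the functions in this file; where mdev, drop_rqn, the feature flags and the
 * packet merge parameters come from is an assumption, not code from this file:
 *
 *	res = mlx5e_rx_res_alloc();
 *	if (!res)
 *		return -ENOMEM;
 *	err = mlx5e_rx_res_init(res, mdev, features, max_nch, drop_rqn,
 *				&pkt_merge_param, init_nch);
 *	...
 *	mlx5e_rx_res_channels_activate(res, chs);
 *	...
 *	mlx5e_rx_res_channels_deactivate(res);
 *	mlx5e_rx_res_destroy(res);
 *	mlx5e_rx_res_free(res);
 */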

void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_rx_res_ptp_destroy(res);
	mlx5e_rx_res_channels_destroy(res);
	mlx5e_rx_res_rss_destroy_all(res);
}

void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
	kvfree(res);
}

u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
}

u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix)
{
	WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_XSK));

	return mlx5e_tir_get_tirn(&res->channels[ix].xsk_tir);
}

u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss = res->rss[0];

	return mlx5e_rss_get_tirn(rss, tt, false);
}

u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss = res->rss[0];

	return mlx5e_rss_get_tirn(rss, tt, true);
}

u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
{
	WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_PTP));
	return mlx5e_tir_get_tirn(&res->ptp.tir);
}

static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}

void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
{
	unsigned int nch, ix;
	int err;

	nch = mlx5e_channels_get_num(chs);

	for (ix = 0; ix < chs->num; ix++)
		mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
	res->rss_nch = chs->num;

	mlx5e_rx_res_rss_enable(res);

	for (ix = 0; ix < nch; ix++) {
		u32 rqn;

		mlx5e_channels_get_regular_rqn(chs, ix, &rqn);
		err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
				       rqn, ix, err);

		if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
			continue;

		if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
			rqn = res->drop_rqn;
		err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
				       rqn, ix, err);
	}
	for (ix = nch; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
				       res->drop_rqn, ix, err);

		if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
			continue;

		err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
				       res->drop_rqn, ix, err);
	}

	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
		u32 rqn;

		if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
			rqn = res->drop_rqn;

		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				       rqn, err);
	}
}

void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
{
	unsigned int ix;
	int err;

	mlx5e_rx_res_rss_disable(res);

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
				       res->drop_rqn, ix, err);

		if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
			continue;

		err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
				       res->drop_rqn, ix, err);
	}

	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				       res->drop_rqn, err);
	}
}

int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
			      unsigned int ix)
{
	u32 rqn;
	int err;

	if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
		return -EINVAL;

	err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
	if (err)
		mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to XSK RQ %#x (channel %u): err = %d\n",
			       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
			       rqn, ix, err);
	return err;
}

int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix)
{
	int err;

	err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
	if (err)
		mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
			       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
			       res->drop_rqn, ix, err);
	return err;
}

int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
					struct mlx5e_packet_merge_param *pkt_merge_param)
{
	struct mlx5e_tir_builder *builder;
	int err, final_err;
	unsigned int ix;

	builder = mlx5e_tir_builder_alloc(true);
	if (!builder)
		return -ENOMEM;

	down_write(&res->pkt_merge_param_sem);
	res->pkt_merge_param = *pkt_merge_param;

	mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);

	final_err = 0;

	for (ix = 0; ix < MLX5E_MAX_NUM_RSS; ix++) {
		struct mlx5e_rss *rss = res->rss[ix];

		if (!rss)
			continue;

		err = mlx5e_rss_packet_merge_set_param(rss, pkt_merge_param);
		if (err)
			final_err = final_err ? : err;
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to update packet merge state of direct TIR %#x for channel %u: err = %d\n",
				       mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
			if (!final_err)
				final_err = err;
		}
	}

	up_write(&res->pkt_merge_param_sem);
	mlx5e_tir_builder_free(builder);
	return final_err;
}
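
/* A minimal sketch of pushing a new packet merge (LRO / HW GRO) configuration
 * to all existing RSS contexts and direct TIRs via the helper above; how the
 * new mlx5e_packet_merge_param is built from netdev features is an assumption,
 * not code from this file:
 *
 *	struct mlx5e_packet_merge_param new_param = { ... };
 *	int err;
 *
 *	err = mlx5e_rx_res_packet_merge_set_param(res, &new_param);
 */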

struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res)
{
	return mlx5e_rss_get_hash(res->rss[0]);
}

int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
				struct mlx5e_tir *tir)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	u32 rqtn;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);

	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
				    inner_ft_support);
	mlx5e_tir_builder_build_direct(builder);
	mlx5e_tir_builder_build_tls(builder);
	down_read(&res->pkt_merge_param_sem);
	mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
	err = mlx5e_tir_init(tir, builder, res->mdev, false);
	up_read(&res->pkt_merge_param_sem);

	mlx5e_tir_builder_free(builder);

	return err;
}
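
/* A minimal sketch of how a TLS RX offload path might use the helper above to
 * obtain a dedicated TIR bound to the direct RQT of one RX queue; where the
 * tir storage lives and when it is torn down are assumptions, not code from
 * this file:
 *
 *	struct mlx5e_tir tir;
 *	int err;
 *
 *	err = mlx5e_rx_res_tls_tir_create(res, rxq, &tir);
 *	if (err)
 *		return err;
 *	...
 *	mlx5e_tir_destroy(&tir);
 */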