1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/etherdevice.h>
5
6 #include "hclge_cmd.h"
7 #include "hclge_main.h"
8 #include "hclge_tm.h"
9
10 enum hclge_shaper_level {
11 HCLGE_SHAPER_LVL_PRI = 0,
12 HCLGE_SHAPER_LVL_PG = 1,
13 HCLGE_SHAPER_LVL_PORT = 2,
14 HCLGE_SHAPER_LVL_QSET = 3,
15 HCLGE_SHAPER_LVL_CNT = 4,
16 HCLGE_SHAPER_LVL_VF = 0,
17 HCLGE_SHAPER_LVL_PF = 1,
18 };
19
20 #define HCLGE_TM_PFC_PKT_GET_CMD_NUM 3
21 #define HCLGE_TM_PFC_NUM_GET_PER_CMD 3
22
23 #define HCLGE_SHAPER_BS_U_DEF 5
24 #define HCLGE_SHAPER_BS_S_DEF 20
25
26 /* hclge_shaper_para_calc: calculate the IR parameters for the shaper
27 * @ir: rate to be configured, in Mbps
28 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
29 * @ir_para: parameters of the IR shaper
30 * @max_tm_rate: the maximum TM rate that can be configured
31 *
32 * the formula:
33 *
34 * IR_b * (2 ^ IR_u) * 8
35 * IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
36 * Tick * (2 ^ IR_s)
37 *
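 * As an illustrative worked example (added for clarity, not part of the
 * original description): at port level Tick = 6 * 8 = 48, so with
 * IR_b = 126, IR_u = 0 and IR_s = 0 the formula gives
 * IR = 126 * 1 * 8 / 48 * 1000 = 21000 Mbps, matching the ir_calc value
 * computed below for the port level.
 *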
38 * @return: 0: calculation successful, negative: failure
39 */
40 static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
41 struct hclge_shaper_ir_para *ir_para,
42 u32 max_tm_rate)
43 {
44 #define DEFAULT_SHAPER_IR_B 126
45 #define DIVISOR_CLK (1000 * 8)
46 #define DEFAULT_DIVISOR_IR_B (DEFAULT_SHAPER_IR_B * DIVISOR_CLK)
47
48 static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
49 6 * 256, /* Priority level */
50 6 * 32, /* Priority group level */
51 6 * 8, /* Port level */
52 6 * 256 /* Qset level */
53 };
54 u8 ir_u_calc = 0;
55 u8 ir_s_calc = 0;
56 u32 ir_calc;
57 u32 tick;
58
59 /* Calc tick */
60 if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
61 ir > max_tm_rate)
62 return -EINVAL;
63
64 tick = tick_array[shaper_level];
65
66 /*
67 * Calculate the speed when ir_b = 126, ir_u = 0 and ir_s = 0;
68 * the formula then reduces to:
69 * 126 * 1 * 8
70 * ir_calc = ---------------- * 1000
71 * tick * 1
72 */
73 ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;
74
75 if (ir_calc == ir) {
76 ir_para->ir_b = DEFAULT_SHAPER_IR_B;
77 ir_para->ir_u = 0;
78 ir_para->ir_s = 0;
79
80 return 0;
81 } else if (ir_calc > ir) {
82 /* Increasing the denominator to select ir_s value */
83 while (ir_calc >= ir && ir) {
84 ir_s_calc++;
85 ir_calc = DEFAULT_DIVISOR_IR_B /
86 (tick * (1 << ir_s_calc));
87 }
88
89 ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
90 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
91 } else {
92 /* Increasing the numerator to select ir_u value */
93 u32 numerator;
94
95 while (ir_calc < ir) {
96 ir_u_calc++;
97 numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
98 ir_calc = (numerator + (tick >> 1)) / tick;
99 }
100
101 if (ir_calc == ir) {
102 ir_para->ir_b = DEFAULT_SHAPER_IR_B;
103 } else {
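/* ir_u_calc overshot by one in the loop above: step back one
 * level and recompute ir_b against the smaller denominator,
 * rounding to the nearest value.
 */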
104 u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
105 ir_para->ir_b = (ir * tick + (denominator >> 1)) /
106 denominator;
107 }
108 }
109
110 ir_para->ir_u = ir_u_calc;
111 ir_para->ir_s = ir_s_calc;
112
113 return 0;
114 }
115
116 static const u16 hclge_pfc_tx_stats_offset[] = {
117 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
118 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
119 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
120 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
121 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
122 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
123 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
124 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
125 };
126
127 static const u16 hclge_pfc_rx_stats_offset[] = {
128 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
129 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
130 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
131 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
132 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
133 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
134 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
135 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
136 };
137
138 static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
139 {
140 const u16 *offset;
141 int i;
142
143 if (tx)
144 offset = hclge_pfc_tx_stats_offset;
145 else
146 offset = hclge_pfc_rx_stats_offset;
147
148 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
149 stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
150 }
151
152 void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
153 {
154 hclge_pfc_stats_get(hdev, false, stats);
155 }
156
157 void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
158 {
159 hclge_pfc_stats_get(hdev, true, stats);
160 }
161
162 int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
163 {
164 struct hclge_desc desc;
165
166 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
167
168 desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
169 (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
170
171 return hclge_cmd_send(&hdev->hw, &desc, 1);
172 }
173
174 static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
175 u8 pfc_bitmap)
176 {
177 struct hclge_desc desc;
178 struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
179
180 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
181
182 pfc->tx_rx_en_bitmap = tx_rx_bitmap;
183 pfc->pri_en_bitmap = pfc_bitmap;
184
185 return hclge_cmd_send(&hdev->hw, &desc, 1);
186 }
187
188 static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
189 u8 pause_trans_gap, u16 pause_trans_time)
190 {
191 struct hclge_cfg_pause_param_cmd *pause_param;
192 struct hclge_desc desc;
193
194 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
195
196 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
197
198 ether_addr_copy(pause_param->mac_addr, addr);
199 ether_addr_copy(pause_param->mac_addr_extra, addr);
200 pause_param->pause_trans_gap = pause_trans_gap;
201 pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
202
203 return hclge_cmd_send(&hdev->hw, &desc, 1);
204 }
205
206 int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
207 {
208 struct hclge_cfg_pause_param_cmd *pause_param;
209 struct hclge_desc desc;
210 u16 trans_time;
211 u8 trans_gap;
212 int ret;
213
214 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
215
216 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
217
218 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
219 if (ret)
220 return ret;
221
222 trans_gap = pause_param->pause_trans_gap;
223 trans_time = le16_to_cpu(pause_param->pause_trans_time);
224
225 return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
226 }
227
228 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
229 {
230 u8 tc;
231
232 tc = hdev->tm_info.prio_tc[pri_id];
233
234 if (tc >= hdev->tm_info.num_tc)
235 return -EINVAL;
236
237 /*
238 * the register for priority has four bytes, the first byte includes
239 * priority 0 and priority 1; the higher 4 bits stand for priority 1
240 * while the lower 4 bits stand for priority 0, as below:
241 * first byte: | pri_1 | pri_0 |
242 * second byte: | pri_3 | pri_2 |
243 * third byte: | pri_5 | pri_4 |
244 * fourth byte: | pri_7 | pri_6 |
245 */
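/* For example (illustrative): pri_id = 5 with tc = 3 writes the value 3
 * into the high nibble of pri[2], i.e. the pri_5 field above.
 */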
246 pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
247
248 return 0;
249 }
250
251 static int hclge_up_to_tc_map(struct hclge_dev *hdev)
252 {
253 struct hclge_desc desc;
254 u8 *pri = (u8 *)desc.data;
255 u8 pri_id;
256 int ret;
257
258 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
259
260 for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
261 ret = hclge_fill_pri_array(hdev, pri, pri_id);
262 if (ret)
263 return ret;
264 }
265
266 return hclge_cmd_send(&hdev->hw, &desc, 1);
267 }
268
269 static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
270 u8 pg_id, u8 pri_bit_map)
271 {
272 struct hclge_pg_to_pri_link_cmd *map;
273 struct hclge_desc desc;
274
275 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
276
277 map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
278
279 map->pg_id = pg_id;
280 map->pri_bit_map = pri_bit_map;
281
282 return hclge_cmd_send(&hdev->hw, &desc, 1);
283 }
284
285 static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
286 u16 qs_id, u8 pri)
287 {
288 struct hclge_qs_to_pri_link_cmd *map;
289 struct hclge_desc desc;
290
291 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);
292
293 map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
294
295 map->qs_id = cpu_to_le16(qs_id);
296 map->priority = pri;
297 map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
298
299 return hclge_cmd_send(&hdev->hw, &desc, 1);
300 }
301
302 static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
303 u16 q_id, u16 qs_id)
304 {
305 struct hclge_nq_to_qs_link_cmd *map;
306 struct hclge_desc desc;
307 u16 qs_id_l;
308 u16 qs_id_h;
309
310 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
311
312 map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
313
314 map->nq_id = cpu_to_le16(q_id);
315
316 /* convert qs_id to the following format to support qset_id >= 1024
317 * qs_id: | 15 | 14 ~ 10 | 9 ~ 0 |
318 * / / \ \
319 * / / \ \
320 * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
321 * | qs_id_h | vld | qs_id_l |
322 */
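/* Illustrative example (assuming the masks follow the layout above):
 * qs_id = 0x400 has qs_id_l = 0 and qs_id_h = 1, so after conversion
 * bit 11 is set and, with the valid bit at bit 10, the qset_id field
 * becomes 0xc00.
 */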
323 qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
324 HCLGE_TM_QS_ID_L_S);
325 qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
326 HCLGE_TM_QS_ID_H_S);
327 hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
328 qs_id_l);
329 hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
330 qs_id_h);
331 map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
332
333 return hclge_cmd_send(&hdev->hw, &desc, 1);
334 }
335
336 static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
337 u8 dwrr)
338 {
339 struct hclge_pg_weight_cmd *weight;
340 struct hclge_desc desc;
341
342 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
343
344 weight = (struct hclge_pg_weight_cmd *)desc.data;
345
346 weight->pg_id = pg_id;
347 weight->dwrr = dwrr;
348
349 return hclge_cmd_send(&hdev->hw, &desc, 1);
350 }
351
352 static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
353 u8 dwrr)
354 {
355 struct hclge_priority_weight_cmd *weight;
356 struct hclge_desc desc;
357
358 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);
359
360 weight = (struct hclge_priority_weight_cmd *)desc.data;
361
362 weight->pri_id = pri_id;
363 weight->dwrr = dwrr;
364
365 return hclge_cmd_send(&hdev->hw, &desc, 1);
366 }
367
368 static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
369 u8 dwrr)
370 {
371 struct hclge_qs_weight_cmd *weight;
372 struct hclge_desc desc;
373
374 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);
375
376 weight = (struct hclge_qs_weight_cmd *)desc.data;
377
378 weight->qs_id = cpu_to_le16(qs_id);
379 weight->dwrr = dwrr;
380
381 return hclge_cmd_send(&hdev->hw, &desc, 1);
382 }
383
384 static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
385 u8 bs_b, u8 bs_s)
386 {
387 u32 shapping_para = 0;
388
389 hclge_tm_set_field(shapping_para, IR_B, ir_b);
390 hclge_tm_set_field(shapping_para, IR_U, ir_u);
391 hclge_tm_set_field(shapping_para, IR_S, ir_s);
392 hclge_tm_set_field(shapping_para, BS_B, bs_b);
393 hclge_tm_set_field(shapping_para, BS_S, bs_s);
394
395 return shapping_para;
396 }
397
398 static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
399 enum hclge_shap_bucket bucket, u8 pg_id,
400 u32 shapping_para, u32 rate)
401 {
402 struct hclge_pg_shapping_cmd *shap_cfg_cmd;
403 enum hclge_opcode_type opcode;
404 struct hclge_desc desc;
405
406 opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
407 HCLGE_OPC_TM_PG_C_SHAPPING;
408 hclge_cmd_setup_basic_desc(&desc, opcode, false);
409
410 shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
411
412 shap_cfg_cmd->pg_id = pg_id;
413
414 shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
415
416 hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
417
418 shap_cfg_cmd->pg_rate = cpu_to_le32(rate);
419
420 return hclge_cmd_send(&hdev->hw, &desc, 1);
421 }
422
423 static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
424 {
425 struct hclge_port_shapping_cmd *shap_cfg_cmd;
426 struct hclge_shaper_ir_para ir_para;
427 struct hclge_desc desc;
428 u32 shapping_para;
429 int ret;
430
431 ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
432 &ir_para,
433 hdev->ae_dev->dev_specs.max_tm_rate);
434 if (ret)
435 return ret;
436
437 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
438 shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
439
440 shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
441 ir_para.ir_s,
442 HCLGE_SHAPER_BS_U_DEF,
443 HCLGE_SHAPER_BS_S_DEF);
444
445 shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
446
447 hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
448
449 shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);
450
451 return hclge_cmd_send(&hdev->hw, &desc, 1);
452 }
453
454 static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
455 enum hclge_shap_bucket bucket, u8 pri_id,
456 u32 shapping_para, u32 rate)
457 {
458 struct hclge_pri_shapping_cmd *shap_cfg_cmd;
459 enum hclge_opcode_type opcode;
460 struct hclge_desc desc;
461
462 opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
463 HCLGE_OPC_TM_PRI_C_SHAPPING;
464
465 hclge_cmd_setup_basic_desc(&desc, opcode, false);
466
467 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
468
469 shap_cfg_cmd->pri_id = pri_id;
470
471 shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
472
473 hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
474
475 shap_cfg_cmd->pri_rate = cpu_to_le32(rate);
476
477 return hclge_cmd_send(&hdev->hw, &desc, 1);
478 }
479
480 static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
481 {
482 struct hclge_desc desc;
483
484 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
485
486 if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
487 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
488 else
489 desc.data[1] = 0;
490
491 desc.data[0] = cpu_to_le32(pg_id);
492
493 return hclge_cmd_send(&hdev->hw, &desc, 1);
494 }
495
496 static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
497 {
498 struct hclge_desc desc;
499
500 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
501
502 if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
503 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
504 else
505 desc.data[1] = 0;
506
507 desc.data[0] = cpu_to_le32(pri_id);
508
509 return hclge_cmd_send(&hdev->hw, &desc, 1);
510 }
511
512 static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
513 {
514 struct hclge_desc desc;
515
516 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
517
518 if (mode == HCLGE_SCH_MODE_DWRR)
519 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
520 else
521 desc.data[1] = 0;
522
523 desc.data[0] = cpu_to_le32(qs_id);
524
525 return hclge_cmd_send(&hdev->hw, &desc, 1);
526 }
527
528 static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
529 u32 bit_map)
530 {
531 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
532 struct hclge_desc desc;
533
534 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
535 false);
536
537 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
538
539 bp_to_qs_map_cmd->tc_id = tc;
540 bp_to_qs_map_cmd->qs_group_id = grp_id;
541 bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
542
543 return hclge_cmd_send(&hdev->hw, &desc, 1);
544 }
545
546 int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
547 {
548 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
549 struct hclge_qs_shapping_cmd *shap_cfg_cmd;
550 struct hclge_shaper_ir_para ir_para;
551 struct hclge_dev *hdev = vport->back;
552 struct hclge_desc desc;
553 u32 shaper_para;
554 int ret, i;
555
556 if (!max_tx_rate)
557 max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;
558
559 ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
560 &ir_para,
561 hdev->ae_dev->dev_specs.max_tm_rate);
562 if (ret)
563 return ret;
564
565 shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
566 ir_para.ir_s,
567 HCLGE_SHAPER_BS_U_DEF,
568 HCLGE_SHAPER_BS_S_DEF);
569
570 for (i = 0; i < kinfo->tc_info.num_tc; i++) {
571 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
572 false);
573
574 shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
575 shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
576 shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
577
578 hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
579 shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);
580
581 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
582 if (ret) {
583 dev_err(&hdev->pdev->dev,
584 "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
585 vport->vport_id, shap_cfg_cmd->qs_id,
586 max_tx_rate, ret);
587 return ret;
588 }
589 }
590
591 return 0;
592 }
593
594 static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
595 {
596 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
597 struct hnae3_tc_info *tc_info = &kinfo->tc_info;
598 struct hclge_dev *hdev = vport->back;
599 u16 max_rss_size = 0;
600 int i;
601
602 if (!tc_info->mqprio_active)
603 return vport->alloc_tqps / tc_info->num_tc;
604
605 for (i = 0; i < HNAE3_MAX_TC; i++) {
606 if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
607 continue;
608 if (max_rss_size < tc_info->tqp_count[i])
609 max_rss_size = tc_info->tqp_count[i];
610 }
611
612 return max_rss_size;
613 }
614
615 static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
616 {
617 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
618 struct hnae3_tc_info *tc_info = &kinfo->tc_info;
619 struct hclge_dev *hdev = vport->back;
620 int sum = 0;
621 int i;
622
623 if (!tc_info->mqprio_active)
624 return kinfo->rss_size * tc_info->num_tc;
625
626 for (i = 0; i < HNAE3_MAX_TC; i++) {
627 if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
628 sum += tc_info->tqp_count[i];
629 }
630
631 return sum;
632 }
633
634 static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
635 {
636 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
637 struct hclge_dev *hdev = vport->back;
638 u16 vport_max_rss_size;
639 u16 max_rss_size;
640
641 /* TC configuration is shared by the PF/VFs on one port; only allow
642 * one TC per VF for simplicity. A VF's vport_id is non-zero.
643 */
644 if (vport->vport_id) {
645 kinfo->tc_info.num_tc = 1;
646 vport->qs_offset = HNAE3_MAX_TC +
647 vport->vport_id - HCLGE_VF_VPORT_START_NUM;
648 vport_max_rss_size = hdev->vf_rss_size_max;
649 } else {
650 kinfo->tc_info.num_tc =
651 min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
652 vport->qs_offset = 0;
653 vport_max_rss_size = hdev->pf_rss_size_max;
654 }
655
656 max_rss_size = min_t(u16, vport_max_rss_size,
657 hclge_vport_get_max_rss_size(vport));
658
659 /* Set to user value, no larger than max_rss_size. */
660 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
661 kinfo->req_rss_size <= max_rss_size) {
662 dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
663 kinfo->rss_size, kinfo->req_rss_size);
664 kinfo->rss_size = kinfo->req_rss_size;
665 } else if (kinfo->rss_size > max_rss_size ||
666 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
667 /* Set to the maximum specification value (max_rss_size). */
668 kinfo->rss_size = max_rss_size;
669 }
670 }
671
672 static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
673 {
674 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
675 struct hclge_dev *hdev = vport->back;
676 u8 i;
677
678 hclge_tm_update_kinfo_rss_size(vport);
679 kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
680 vport->dwrr = 100; /* 100 percent as init */
681 vport->alloc_rss_size = kinfo->rss_size;
682 vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
683
684 /* when mqprio is enabled, tc_info has already been updated. */
685 if (kinfo->tc_info.mqprio_active)
686 return;
687
688 for (i = 0; i < HNAE3_MAX_TC; i++) {
689 if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
690 kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
691 kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
692 } else {
693 /* Set to default queue if TC is disabled */
694 kinfo->tc_info.tqp_offset[i] = 0;
695 kinfo->tc_info.tqp_count[i] = 1;
696 }
697 }
698
699 memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
700 sizeof_field(struct hnae3_tc_info, prio_tc));
701 }
702
703 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
704 {
705 struct hclge_vport *vport = hdev->vport;
706 u32 i;
707
708 for (i = 0; i < hdev->num_alloc_vport; i++) {
709 hclge_tm_vport_tc_info_update(vport);
710
711 vport++;
712 }
713 }
714
715 static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
716 {
717 u8 i;
718
719 for (i = 0; i < hdev->tm_info.num_tc; i++) {
720 hdev->tm_info.tc_info[i].tc_id = i;
721 hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
722 hdev->tm_info.tc_info[i].pgid = 0;
723 hdev->tm_info.tc_info[i].bw_limit =
724 hdev->tm_info.pg_info[0].bw_limit;
725 }
726
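/* By default map each user priority to the TC with the same index;
 * priorities beyond the enabled TC count fall back to TC 0.
 */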
727 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
728 hdev->tm_info.prio_tc[i] =
729 (i >= hdev->tm_info.num_tc) ? 0 : i;
730 }
731
732 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
733 {
734 #define BW_PERCENT 100
735
736 u8 i;
737
738 for (i = 0; i < hdev->tm_info.num_pg; i++) {
739 int k;
740
741 hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
742
743 hdev->tm_info.pg_info[i].pg_id = i;
744 hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
745
746 hdev->tm_info.pg_info[i].bw_limit =
747 hdev->ae_dev->dev_specs.max_tm_rate;
748
749 if (i != 0)
750 continue;
751
752 hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
753 for (k = 0; k < hdev->tm_info.num_tc; k++)
754 hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
755 for (; k < HNAE3_MAX_TC; k++)
756 hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
757 }
758 }
759
760 static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
761 {
762 if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
763 if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
764 dev_warn(&hdev->pdev->dev,
765 "Only 1 tc used, but last mode is FC_PFC\n");
766
767 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
768 } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
769 /* fc_mode_last_time records the last fc_mode when
770 * DCB is enabled, so that fc_mode can be set to
771 * the correct value when DCB is disabled.
772 */
773 hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
774 hdev->tm_info.fc_mode = HCLGE_FC_PFC;
775 }
776 }
777
778 static void hclge_update_fc_mode(struct hclge_dev *hdev)
779 {
780 if (!hdev->tm_info.pfc_en) {
781 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
782 return;
783 }
784
785 if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
786 hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
787 hdev->tm_info.fc_mode = HCLGE_FC_PFC;
788 }
789 }
790
791 void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
792 {
793 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
794 hclge_update_fc_mode(hdev);
795 else
796 hclge_update_fc_mode_by_dcb_flag(hdev);
797 }
798
799 static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
800 {
801 hclge_tm_pg_info_init(hdev);
802
803 hclge_tm_tc_info_init(hdev);
804
805 hclge_tm_vport_info_update(hdev);
806
807 hclge_tm_pfc_info_update(hdev);
808 }
809
810 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
811 {
812 int ret;
813 u32 i;
814
815 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
816 return 0;
817
818 for (i = 0; i < hdev->tm_info.num_pg; i++) {
819 /* Cfg mapping */
820 ret = hclge_tm_pg_to_pri_map_cfg(
821 hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
822 if (ret)
823 return ret;
824 }
825
826 return 0;
827 }
828
829 static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
830 {
831 u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
832 struct hclge_shaper_ir_para ir_para;
833 u32 shaper_para;
834 int ret;
835 u32 i;
836
837 /* Cfg pg schd */
838 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
839 return 0;
840
841 /* Pg to pri */
842 for (i = 0; i < hdev->tm_info.num_pg; i++) {
843 u32 rate = hdev->tm_info.pg_info[i].bw_limit;
844
845 /* Calc shaper para */
846 ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
847 &ir_para, max_tm_rate);
848 if (ret)
849 return ret;
850
851 shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
852 HCLGE_SHAPER_BS_U_DEF,
853 HCLGE_SHAPER_BS_S_DEF);
854 ret = hclge_tm_pg_shapping_cfg(hdev,
855 HCLGE_TM_SHAP_C_BUCKET, i,
856 shaper_para, rate);
857 if (ret)
858 return ret;
859
860 shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
861 ir_para.ir_u,
862 ir_para.ir_s,
863 HCLGE_SHAPER_BS_U_DEF,
864 HCLGE_SHAPER_BS_S_DEF);
865 ret = hclge_tm_pg_shapping_cfg(hdev,
866 HCLGE_TM_SHAP_P_BUCKET, i,
867 shaper_para, rate);
868 if (ret)
869 return ret;
870 }
871
872 return 0;
873 }
874
875 static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
876 {
877 int ret;
878 u32 i;
879
880 /* cfg pg schd */
881 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
882 return 0;
883
884 /* pg to prio */
885 for (i = 0; i < hdev->tm_info.num_pg; i++) {
886 /* Cfg dwrr */
887 ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
888 if (ret)
889 return ret;
890 }
891
892 return 0;
893 }
894
895 static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
896 struct hclge_vport *vport)
897 {
898 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
899 struct hnae3_tc_info *tc_info = &kinfo->tc_info;
900 struct hnae3_queue **tqp = kinfo->tqp;
901 u32 i, j;
902 int ret;
903
904 for (i = 0; i < tc_info->num_tc; i++) {
905 for (j = 0; j < tc_info->tqp_count[i]; j++) {
906 struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
907
908 ret = hclge_tm_q_to_qs_map_cfg(hdev,
909 hclge_get_queue_id(q),
910 vport->qs_offset + i);
911 if (ret)
912 return ret;
913 }
914 }
915
916 return 0;
917 }
918
919 static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
920 {
921 struct hclge_vport *vport = hdev->vport;
922 int ret;
923 u32 i, k;
924
925 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
926 /* Cfg qs -> pri mapping, one by one mapping */
927 for (k = 0; k < hdev->num_alloc_vport; k++) {
928 struct hnae3_knic_private_info *kinfo =
929 &vport[k].nic.kinfo;
930
931 for (i = 0; i < kinfo->tc_info.num_tc; i++) {
932 ret = hclge_tm_qs_to_pri_map_cfg(
933 hdev, vport[k].qs_offset + i, i);
934 if (ret)
935 return ret;
936 }
937 }
938 } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
939 /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
940 for (k = 0; k < hdev->num_alloc_vport; k++)
941 for (i = 0; i < HNAE3_MAX_TC; i++) {
942 ret = hclge_tm_qs_to_pri_map_cfg(
943 hdev, vport[k].qs_offset + i, k);
944 if (ret)
945 return ret;
946 }
947 } else {
948 return -EINVAL;
949 }
950
951 /* Cfg q -> qs mapping */
952 for (i = 0; i < hdev->num_alloc_vport; i++) {
953 ret = hclge_vport_q_to_qs_map(hdev, vport);
954 if (ret)
955 return ret;
956
957 vport++;
958 }
959
960 return 0;
961 }
962
963 static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
964 {
965 u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
966 struct hclge_shaper_ir_para ir_para;
967 u32 shaper_para;
968 int ret;
969 u32 i;
970
971 for (i = 0; i < hdev->tm_info.num_tc; i++) {
972 u32 rate = hdev->tm_info.tc_info[i].bw_limit;
973
974 ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
975 &ir_para, max_tm_rate);
976 if (ret)
977 return ret;
978
979 shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
980 HCLGE_SHAPER_BS_U_DEF,
981 HCLGE_SHAPER_BS_S_DEF);
982 ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
983 shaper_para, rate);
984 if (ret)
985 return ret;
986
987 shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
988 ir_para.ir_u,
989 ir_para.ir_s,
990 HCLGE_SHAPER_BS_U_DEF,
991 HCLGE_SHAPER_BS_S_DEF);
992 ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
993 shaper_para, rate);
994 if (ret)
995 return ret;
996 }
997
998 return 0;
999 }
1000
1001 static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
1002 {
1003 struct hclge_dev *hdev = vport->back;
1004 struct hclge_shaper_ir_para ir_para;
1005 u32 shaper_para;
1006 int ret;
1007
1008 ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
1009 &ir_para,
1010 hdev->ae_dev->dev_specs.max_tm_rate);
1011 if (ret)
1012 return ret;
1013
1014 shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
1015 HCLGE_SHAPER_BS_U_DEF,
1016 HCLGE_SHAPER_BS_S_DEF);
1017 ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
1018 vport->vport_id, shaper_para,
1019 vport->bw_limit);
1020 if (ret)
1021 return ret;
1022
1023 shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
1024 ir_para.ir_s,
1025 HCLGE_SHAPER_BS_U_DEF,
1026 HCLGE_SHAPER_BS_S_DEF);
1027 ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
1028 vport->vport_id, shaper_para,
1029 vport->bw_limit);
1030 if (ret)
1031 return ret;
1032
1033 return 0;
1034 }
1035
1036 static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
1037 {
1038 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1039 struct hclge_dev *hdev = vport->back;
1040 u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
1041 struct hclge_shaper_ir_para ir_para;
1042 u32 i;
1043 int ret;
1044
1045 for (i = 0; i < kinfo->tc_info.num_tc; i++) {
1046 ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
1047 HCLGE_SHAPER_LVL_QSET,
1048 &ir_para, max_tm_rate);
1049 if (ret)
1050 return ret;
1051 }
1052
1053 return 0;
1054 }
1055
1056 static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
1057 {
1058 struct hclge_vport *vport = hdev->vport;
1059 int ret;
1060 u32 i;
1061
1062 /* Need to configure the vport shaper */
1063 for (i = 0; i < hdev->num_alloc_vport; i++) {
1064 ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
1065 if (ret)
1066 return ret;
1067
1068 ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
1069 if (ret)
1070 return ret;
1071
1072 vport++;
1073 }
1074
1075 return 0;
1076 }
1077
1078 static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
1079 {
1080 int ret;
1081
1082 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1083 ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
1084 if (ret)
1085 return ret;
1086 } else {
1087 ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
1088 if (ret)
1089 return ret;
1090 }
1091
1092 return 0;
1093 }
1094
1095 static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
1096 {
1097 struct hclge_vport *vport = hdev->vport;
1098 struct hclge_pg_info *pg_info;
1099 u8 dwrr;
1100 int ret;
1101 u32 i, k;
1102
1103 for (i = 0; i < hdev->tm_info.num_tc; i++) {
1104 pg_info =
1105 &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
1106 dwrr = pg_info->tc_dwrr[i];
1107
1108 ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
1109 if (ret)
1110 return ret;
1111
1112 for (k = 0; k < hdev->num_alloc_vport; k++) {
1113 ret = hclge_tm_qs_weight_cfg(
1114 hdev, vport[k].qs_offset + i,
1115 vport[k].dwrr);
1116 if (ret)
1117 return ret;
1118 }
1119 }
1120
1121 return 0;
1122 }
1123
1124 static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
1125 {
1126 #define DEFAULT_TC_OFFSET 14
1127
1128 struct hclge_ets_tc_weight_cmd *ets_weight;
1129 struct hclge_desc desc;
1130 unsigned int i;
1131
1132 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
1133 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
1134
1135 for (i = 0; i < HNAE3_MAX_TC; i++) {
1136 struct hclge_pg_info *pg_info;
1137
1138 pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
1139 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
1140 }
1141
1142 ets_weight->weight_offset = DEFAULT_TC_OFFSET;
1143
1144 return hclge_cmd_send(&hdev->hw, &desc, 1);
1145 }
1146
1147 static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
1148 {
1149 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1150 struct hclge_dev *hdev = vport->back;
1151 int ret;
1152 u8 i;
1153
1154 /* Vf dwrr */
1155 ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
1156 if (ret)
1157 return ret;
1158
1159 /* Qset dwrr */
1160 for (i = 0; i < kinfo->tc_info.num_tc; i++) {
1161 ret = hclge_tm_qs_weight_cfg(
1162 hdev, vport->qs_offset + i,
1163 hdev->tm_info.pg_info[0].tc_dwrr[i]);
1164 if (ret)
1165 return ret;
1166 }
1167
1168 return 0;
1169 }
1170
1171 static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
1172 {
1173 struct hclge_vport *vport = hdev->vport;
1174 int ret;
1175 u32 i;
1176
1177 for (i = 0; i < hdev->num_alloc_vport; i++) {
1178 ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
1179 if (ret)
1180 return ret;
1181
1182 vport++;
1183 }
1184
1185 return 0;
1186 }
1187
1188 static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
1189 {
1190 int ret;
1191
1192 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1193 ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
1194 if (ret)
1195 return ret;
1196
1197 if (!hnae3_dev_dcb_supported(hdev))
1198 return 0;
1199
1200 ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
1201 if (ret == -EOPNOTSUPP) {
1202 dev_warn(&hdev->pdev->dev,
1203 "fw %08x does't support ets tc weight cmd\n",
1204 hdev->fw_version);
1205 ret = 0;
1206 }
1207
1208 return ret;
1209 } else {
1210 ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
1211 if (ret)
1212 return ret;
1213 }
1214
1215 return 0;
1216 }
1217
1218 static int hclge_tm_map_cfg(struct hclge_dev *hdev)
1219 {
1220 int ret;
1221
1222 ret = hclge_up_to_tc_map(hdev);
1223 if (ret)
1224 return ret;
1225
1226 ret = hclge_tm_pg_to_pri_map(hdev);
1227 if (ret)
1228 return ret;
1229
1230 return hclge_tm_pri_q_qs_cfg(hdev);
1231 }
1232
1233 static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
1234 {
1235 int ret;
1236
1237 ret = hclge_tm_port_shaper_cfg(hdev);
1238 if (ret)
1239 return ret;
1240
1241 ret = hclge_tm_pg_shaper_cfg(hdev);
1242 if (ret)
1243 return ret;
1244
1245 return hclge_tm_pri_shaper_cfg(hdev);
1246 }
1247
1248 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
1249 {
1250 int ret;
1251
1252 ret = hclge_tm_pg_dwrr_cfg(hdev);
1253 if (ret)
1254 return ret;
1255
1256 return hclge_tm_pri_dwrr_cfg(hdev);
1257 }
1258
1259 static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
1260 {
1261 int ret;
1262 u8 i;
1263
1264 /* Only configured in TC-based scheduler mode */
1265 if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
1266 return 0;
1267
1268 for (i = 0; i < hdev->tm_info.num_pg; i++) {
1269 ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
1270 if (ret)
1271 return ret;
1272 }
1273
1274 return 0;
1275 }
1276
1277 static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
1278 {
1279 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1280 struct hclge_dev *hdev = vport->back;
1281 int ret;
1282 u8 i;
1283
1284 if (vport->vport_id >= HNAE3_MAX_TC)
1285 return -EINVAL;
1286
1287 ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
1288 if (ret)
1289 return ret;
1290
1291 for (i = 0; i < kinfo->tc_info.num_tc; i++) {
1292 u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
1293
1294 ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
1295 sch_mode);
1296 if (ret)
1297 return ret;
1298 }
1299
1300 return 0;
1301 }
1302
1303 static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
1304 {
1305 struct hclge_vport *vport = hdev->vport;
1306 int ret;
1307 u8 i, k;
1308
1309 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1310 for (i = 0; i < hdev->tm_info.num_tc; i++) {
1311 ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
1312 if (ret)
1313 return ret;
1314
1315 for (k = 0; k < hdev->num_alloc_vport; k++) {
1316 ret = hclge_tm_qs_schd_mode_cfg(
1317 hdev, vport[k].qs_offset + i,
1318 HCLGE_SCH_MODE_DWRR);
1319 if (ret)
1320 return ret;
1321 }
1322 }
1323 } else {
1324 for (i = 0; i < hdev->num_alloc_vport; i++) {
1325 ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
1326 if (ret)
1327 return ret;
1328
1329 vport++;
1330 }
1331 }
1332
1333 return 0;
1334 }
1335
1336 static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
1337 {
1338 int ret;
1339
1340 ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
1341 if (ret)
1342 return ret;
1343
1344 return hclge_tm_lvl34_schd_mode_cfg(hdev);
1345 }
1346
1347 int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
1348 {
1349 int ret;
1350
1351 /* Cfg tm mapping */
1352 ret = hclge_tm_map_cfg(hdev);
1353 if (ret)
1354 return ret;
1355
1356 /* Cfg tm shaper */
1357 ret = hclge_tm_shaper_cfg(hdev);
1358 if (ret)
1359 return ret;
1360
1361 /* Cfg dwrr */
1362 ret = hclge_tm_dwrr_cfg(hdev);
1363 if (ret)
1364 return ret;
1365
1366 /* Cfg schd mode for each level schd */
1367 return hclge_tm_schd_mode_hw(hdev);
1368 }
1369
1370 static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
1371 {
1372 struct hclge_mac *mac = &hdev->hw.mac;
1373
1374 return hclge_pause_param_cfg(hdev, mac->mac_addr,
1375 HCLGE_DEFAULT_PAUSE_TRANS_GAP,
1376 HCLGE_DEFAULT_PAUSE_TRANS_TIME);
1377 }
1378
1379 static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
1380 {
1381 u8 enable_bitmap = 0;
1382
1383 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
1384 enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
1385 HCLGE_RX_MAC_PAUSE_EN_MSK;
1386
1387 return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
1388 hdev->tm_info.pfc_en);
1389 }
1390
1391 /* The queue sets used for backpressure are divided into several groups;
1392 * each group contains 32 queue sets, which can be represented by a u32 bitmap.
1393 */
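/* Illustrative example (assuming the sub-group field is the low 5 bits of
 * the qset id): qset 70 belongs to group 2 and sets bit 6 of that group's
 * bitmap, since 70 = 2 * 32 + 6.
 */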
1394 static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
1395 {
1396 u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
1397 u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
1398 u8 grp_num = HCLGE_BP_GRP_NUM;
1399 int i;
1400
1401 if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
1402 grp_num = HCLGE_BP_EXT_GRP_NUM;
1403 grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
1404 grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
1405 }
1406
1407 for (i = 0; i < grp_num; i++) {
1408 u32 qs_bitmap = 0;
1409 int k, ret;
1410
1411 for (k = 0; k < hdev->num_alloc_vport; k++) {
1412 struct hclge_vport *vport = &hdev->vport[k];
1413 u16 qs_id = vport->qs_offset + tc;
1414 u8 grp, sub_grp;
1415
1416 grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
1417 sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
1418 HCLGE_BP_SUB_GRP_ID_S);
1419 if (i == grp)
1420 qs_bitmap |= (1 << sub_grp);
1421 }
1422
1423 ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
1424 if (ret)
1425 return ret;
1426 }
1427
1428 return 0;
1429 }
1430
1431 static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
1432 {
1433 bool tx_en, rx_en;
1434
1435 switch (hdev->tm_info.fc_mode) {
1436 case HCLGE_FC_NONE:
1437 tx_en = false;
1438 rx_en = false;
1439 break;
1440 case HCLGE_FC_RX_PAUSE:
1441 tx_en = false;
1442 rx_en = true;
1443 break;
1444 case HCLGE_FC_TX_PAUSE:
1445 tx_en = true;
1446 rx_en = false;
1447 break;
1448 case HCLGE_FC_FULL:
1449 tx_en = true;
1450 rx_en = true;
1451 break;
1452 case HCLGE_FC_PFC:
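/* PFC provides per-priority flow control, so leave the
 * link-level MAC pause disabled here.
 */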
1453 tx_en = false;
1454 rx_en = false;
1455 break;
1456 default:
1457 tx_en = true;
1458 rx_en = true;
1459 }
1460
1461 return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
1462 }
1463
1464 static int hclge_tm_bp_setup(struct hclge_dev *hdev)
1465 {
1466 int ret;
1467 int i;
1468
1469 for (i = 0; i < hdev->tm_info.num_tc; i++) {
1470 ret = hclge_bp_setup_hw(hdev, i);
1471 if (ret)
1472 return ret;
1473 }
1474
1475 return 0;
1476 }
1477
1478 int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
1479 {
1480 int ret;
1481
1482 ret = hclge_pause_param_setup_hw(hdev);
1483 if (ret)
1484 return ret;
1485
1486 ret = hclge_mac_pause_setup_hw(hdev);
1487 if (ret)
1488 return ret;
1489
1490 /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1491 if (!hnae3_dev_dcb_supported(hdev))
1492 return 0;
1493
1494 /* GE MAC does not support PFC. When the driver is initializing and
1495 * the MAC is in GE mode, ignore the error here; otherwise
1496 * initialization will fail.
1497 */
1498 ret = hclge_pfc_setup_hw(hdev);
1499 if (init && ret == -EOPNOTSUPP)
1500 dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
1501 else if (ret) {
1502 dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
1503 ret);
1504 return ret;
1505 }
1506
1507 return hclge_tm_bp_setup(hdev);
1508 }
1509
1510 void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
1511 {
1512 struct hclge_vport *vport = hdev->vport;
1513 struct hnae3_knic_private_info *kinfo;
1514 u32 i, k;
1515
1516 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
1517 hdev->tm_info.prio_tc[i] = prio_tc[i];
1518
1519 for (k = 0; k < hdev->num_alloc_vport; k++) {
1520 kinfo = &vport[k].nic.kinfo;
1521 kinfo->tc_info.prio_tc[i] = prio_tc[i];
1522 }
1523 }
1524 }
1525
1526 void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
1527 {
1528 u8 bit_map = 0;
1529 u8 i;
1530
1531 hdev->tm_info.num_tc = num_tc;
1532
1533 for (i = 0; i < hdev->tm_info.num_tc; i++)
1534 bit_map |= BIT(i);
1535
1536 if (!bit_map) {
1537 bit_map = 1;
1538 hdev->tm_info.num_tc = 1;
1539 }
1540
1541 hdev->hw_tc_map = bit_map;
1542
1543 hclge_tm_schd_info_init(hdev);
1544 }
1545
1546 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
1547 {
1548 int ret;
1549
1550 if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
1551 (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
1552 return -ENOTSUPP;
1553
1554 ret = hclge_tm_schd_setup_hw(hdev);
1555 if (ret)
1556 return ret;
1557
1558 ret = hclge_pause_setup_hw(hdev, init);
1559 if (ret)
1560 return ret;
1561
1562 return 0;
1563 }
1564
1565 int hclge_tm_schd_init(struct hclge_dev *hdev)
1566 {
1567 /* fc_mode is HCLGE_FC_FULL on reset */
1568 hdev->tm_info.fc_mode = HCLGE_FC_FULL;
1569 hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
1570
1571 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
1572 hdev->tm_info.num_pg != 1)
1573 return -EINVAL;
1574
1575 hclge_tm_schd_info_init(hdev);
1576
1577 return hclge_tm_init_hw(hdev, true);
1578 }
1579
1580 int hclge_tm_vport_map_update(struct hclge_dev *hdev)
1581 {
1582 struct hclge_vport *vport = hdev->vport;
1583 int ret;
1584
1585 hclge_tm_vport_tc_info_update(vport);
1586
1587 ret = hclge_vport_q_to_qs_map(hdev, vport);
1588 if (ret)
1589 return ret;
1590
1591 if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
1592 return 0;
1593
1594 return hclge_tm_bp_setup(hdev);
1595 }
1596
1597 int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
1598 {
1599 struct hclge_tm_nodes_cmd *nodes;
1600 struct hclge_desc desc;
1601 int ret;
1602
1603 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
1604 /* Each PF has 8 qsets and each VF has 1 qset */
1605 *qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
1606 return 0;
1607 }
1608
1609 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
1610 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1611 if (ret) {
1612 dev_err(&hdev->pdev->dev,
1613 "failed to get qset num, ret = %d\n", ret);
1614 return ret;
1615 }
1616
1617 nodes = (struct hclge_tm_nodes_cmd *)desc.data;
1618 *qset_num = le16_to_cpu(nodes->qset_num);
1619 return 0;
1620 }
1621
1622 int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
1623 {
1624 struct hclge_tm_nodes_cmd *nodes;
1625 struct hclge_desc desc;
1626 int ret;
1627
1628 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
1629 *pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
1630 return 0;
1631 }
1632
1633 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
1634 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1635 if (ret) {
1636 dev_err(&hdev->pdev->dev,
1637 "failed to get pri num, ret = %d\n", ret);
1638 return ret;
1639 }
1640
1641 nodes = (struct hclge_tm_nodes_cmd *)desc.data;
1642 *pri_num = nodes->pri_num;
1643 return 0;
1644 }
1645
1646 int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
1647 u8 *link_vld)
1648 {
1649 struct hclge_qs_to_pri_link_cmd *map;
1650 struct hclge_desc desc;
1651 int ret;
1652
1653 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
1654 map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
1655 map->qs_id = cpu_to_le16(qset_id);
1656 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1657 if (ret) {
1658 dev_err(&hdev->pdev->dev,
1659 "failed to get qset map priority, ret = %d\n", ret);
1660 return ret;
1661 }
1662
1663 *priority = map->priority;
1664 *link_vld = map->link_vld;
1665 return 0;
1666 }
1667
1668 int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
1669 {
1670 struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
1671 struct hclge_desc desc;
1672 int ret;
1673
1674 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
1675 qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
1676 qs_sch_mode->qs_id = cpu_to_le16(qset_id);
1677 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1678 if (ret) {
1679 dev_err(&hdev->pdev->dev,
1680 "failed to get qset sch mode, ret = %d\n", ret);
1681 return ret;
1682 }
1683
1684 *mode = qs_sch_mode->sch_mode;
1685 return 0;
1686 }
1687
1688 int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
1689 {
1690 struct hclge_qs_weight_cmd *qs_weight;
1691 struct hclge_desc desc;
1692 int ret;
1693
1694 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
1695 qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
1696 qs_weight->qs_id = cpu_to_le16(qset_id);
1697 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1698 if (ret) {
1699 dev_err(&hdev->pdev->dev,
1700 "failed to get qset weight, ret = %d\n", ret);
1701 return ret;
1702 }
1703
1704 *weight = qs_weight->dwrr;
1705 return 0;
1706 }
1707
1708 int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
1709 struct hclge_tm_shaper_para *para)
1710 {
1711 struct hclge_qs_shapping_cmd *shap_cfg_cmd;
1712 struct hclge_desc desc;
1713 u32 shapping_para;
1714 int ret;
1715
1716 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
1717 shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
1718 shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
1719 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1720 if (ret) {
1721 dev_err(&hdev->pdev->dev,
1722 "failed to get qset %u shaper, ret = %d\n", qset_id,
1723 ret);
1724 return ret;
1725 }
1726
1727 shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
1728 para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
1729 para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
1730 para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
1731 para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
1732 para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
1733 para->flag = shap_cfg_cmd->flag;
1734 para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
1735 return 0;
1736 }
1737
1738 int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
1739 {
1740 struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
1741 struct hclge_desc desc;
1742 int ret;
1743
1744 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
1745 pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
1746 pri_sch_mode->pri_id = pri_id;
1747 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1748 if (ret) {
1749 dev_err(&hdev->pdev->dev,
1750 "failed to get priority sch mode, ret = %d\n", ret);
1751 return ret;
1752 }
1753
1754 *mode = pri_sch_mode->sch_mode;
1755 return 0;
1756 }
1757
1758 int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
1759 {
1760 struct hclge_priority_weight_cmd *priority_weight;
1761 struct hclge_desc desc;
1762 int ret;
1763
1764 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
1765 priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
1766 priority_weight->pri_id = pri_id;
1767 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1768 if (ret) {
1769 dev_err(&hdev->pdev->dev,
1770 "failed to get priority weight, ret = %d\n", ret);
1771 return ret;
1772 }
1773
1774 *weight = priority_weight->dwrr;
1775 return 0;
1776 }
1777
1778 int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
1779 enum hclge_opcode_type cmd,
1780 struct hclge_tm_shaper_para *para)
1781 {
1782 struct hclge_pri_shapping_cmd *shap_cfg_cmd;
1783 struct hclge_desc desc;
1784 u32 shapping_para;
1785 int ret;
1786
1787 if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
1788 cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
1789 return -EINVAL;
1790
1791 hclge_cmd_setup_basic_desc(&desc, cmd, true);
1792 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
1793 shap_cfg_cmd->pri_id = pri_id;
1794 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1795 if (ret) {
1796 dev_err(&hdev->pdev->dev,
1797 "failed to get priority shaper(%#x), ret = %d\n",
1798 cmd, ret);
1799 return ret;
1800 }
1801
1802 shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
1803 para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
1804 para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
1805 para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
1806 para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
1807 para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
1808 para->flag = shap_cfg_cmd->flag;
1809 para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
1810 return 0;
1811 }
1812
1813 int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
1814 {
1815 struct hclge_nq_to_qs_link_cmd *map;
1816 struct hclge_desc desc;
1817 u16 qs_id_l;
1818 u16 qs_id_h;
1819 int ret;
1820
1821 map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
1822 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
1823 map->nq_id = cpu_to_le16(q_id);
1824 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1825 if (ret) {
1826 dev_err(&hdev->pdev->dev,
1827 "failed to get queue to qset map, ret = %d\n", ret);
1828 return ret;
1829 }
1830 *qset_id = le16_to_cpu(map->qset_id);
1831
1832 /* convert qset_id to the following format, drop the vld bit
1833 * | qs_id_h | vld | qs_id_l |
1834 * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
1835 * \ \ / /
1836 * \ \ / /
1837 * qset_id: | 15 | 14 ~ 10 | 9 ~ 0 |
1838 */
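/* Continuing the illustrative example from the map_cfg path above:
 * a raw value of 0xc00 (valid bit set) converts back to qset_id 0x400.
 */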
1839 qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
1840 HCLGE_TM_QS_ID_L_S);
1841 qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
1842 HCLGE_TM_QS_ID_H_EXT_S);
1843 *qset_id = 0;
1844 hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
1845 qs_id_l);
1846 hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
1847 qs_id_h);
1848 return 0;
1849 }
1850
1851 int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
1852 {
1853 #define HCLGE_TM_TC_MASK 0x7
1854
1855 struct hclge_tqp_tx_queue_tc_cmd *tc;
1856 struct hclge_desc desc;
1857 int ret;
1858
1859 tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
1860 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
1861 tc->queue_id = cpu_to_le16(q_id);
1862 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1863 if (ret) {
1864 dev_err(&hdev->pdev->dev,
1865 "failed to get queue to tc map, ret = %d\n", ret);
1866 return ret;
1867 }
1868
1869 *tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
1870 return 0;
1871 }
1872
1873 int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
1874 u8 *pri_bit_map)
1875 {
1876 struct hclge_pg_to_pri_link_cmd *map;
1877 struct hclge_desc desc;
1878 int ret;
1879
1880 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
1881 map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
1882 map->pg_id = pg_id;
1883 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1884 if (ret) {
1885 dev_err(&hdev->pdev->dev,
1886 "failed to get pg to pri map, ret = %d\n", ret);
1887 return ret;
1888 }
1889
1890 *pri_bit_map = map->pri_bit_map;
1891 return 0;
1892 }
1893
1894 int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
1895 {
1896 struct hclge_pg_weight_cmd *pg_weight_cmd;
1897 struct hclge_desc desc;
1898 int ret;
1899
1900 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
1901 pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
1902 pg_weight_cmd->pg_id = pg_id;
1903 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1904 if (ret) {
1905 dev_err(&hdev->pdev->dev,
1906 "failed to get pg weight, ret = %d\n", ret);
1907 return ret;
1908 }
1909
1910 *weight = pg_weight_cmd->dwrr;
1911 return 0;
1912 }
1913
1914 int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
1915 {
1916 struct hclge_desc desc;
1917 int ret;
1918
1919 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
1920 desc.data[0] = cpu_to_le32(pg_id);
1921 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1922 if (ret) {
1923 dev_err(&hdev->pdev->dev,
1924 "failed to get pg sch mode, ret = %d\n", ret);
1925 return ret;
1926 }
1927
1928 *mode = (u8)le32_to_cpu(desc.data[1]);
1929 return 0;
1930 }
1931
1932 int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
1933 enum hclge_opcode_type cmd,
1934 struct hclge_tm_shaper_para *para)
1935 {
1936 struct hclge_pg_shapping_cmd *shap_cfg_cmd;
1937 struct hclge_desc desc;
1938 u32 shapping_para;
1939 int ret;
1940
1941 if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
1942 cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
1943 return -EINVAL;
1944
1945 hclge_cmd_setup_basic_desc(&desc, cmd, true);
1946 shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
1947 shap_cfg_cmd->pg_id = pg_id;
1948 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1949 if (ret) {
1950 dev_err(&hdev->pdev->dev,
1951 "failed to get pg shaper(%#x), ret = %d\n",
1952 cmd, ret);
1953 return ret;
1954 }
1955
1956 shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
1957 para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
1958 para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
1959 para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
1960 para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
1961 para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
1962 para->flag = shap_cfg_cmd->flag;
1963 para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
1964 return 0;
1965 }
1966
1967 int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
1968 struct hclge_tm_shaper_para *para)
1969 {
1970 struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
1971 struct hclge_desc desc;
1972 u32 shapping_para;
1973 int ret;
1974
1975 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
1976 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1977 if (ret) {
1978 dev_err(&hdev->pdev->dev,
1979 "failed to get port shaper, ret = %d\n", ret);
1980 return ret;
1981 }
1982
1983 port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
1984 shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
1985 para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
1986 para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
1987 para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
1988 para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
1989 para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
1990 para->flag = port_shap_cfg_cmd->flag;
1991 para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);
1992
1993 return 0;
1994 }
1995