// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/string.h>
#include <linux/of_platform.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/ptp_qoriq.h>

#include "dpaa_eth.h"
#include "mac.h"

static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
	"interrupts",
	"rx packets",
	"tx packets",
	"tx confirm",
	"tx S/G",
	"tx error",
	"rx error",
	"rx dropped",
	"tx dropped",
};

static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
	/* dpa rx errors */
	"rx dma error",
	"rx frame physical error",
	"rx frame size error",
	"rx header error",

	/* demultiplexing errors */
	"qman cg_tdrop",
	"qman wred",
	"qman error cond",
	"qman early window",
	"qman late window",
	"qman fq tdrop",
	"qman fq retired",
	"qman orp disabled",

	/* congestion related stats */
	"congestion time (ms)",
	"entered congestion",
	"congested (0/1)"
};

#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)

static int dpaa_get_link_ksettings(struct net_device *net_dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct mac_device *mac_dev = priv->mac_dev;

	return phylink_ethtool_ksettings_get(mac_dev->phylink, cmd);
}

static int dpaa_set_link_ksettings(struct net_device *net_dev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct mac_device *mac_dev = priv->mac_dev;

	return phylink_ethtool_ksettings_set(mac_dev->phylink, cmd);
}

static void dpaa_get_drvinfo(struct net_device *net_dev,
			     struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, KBUILD_MODNAME,
		sizeof(drvinfo->driver));
	strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static u32 dpaa_get_msglevel(struct net_device *net_dev)
{
	return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
}

static void dpaa_set_msglevel(struct net_device *net_dev,
			      u32 msg_enable)
{
	((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
}

static int dpaa_nway_reset(struct net_device *net_dev)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct mac_device *mac_dev = priv->mac_dev;

	return phylink_ethtool_nway_reset(mac_dev->phylink);
}

static void dpaa_get_pauseparam(struct net_device *net_dev,
				struct ethtool_pauseparam *epause)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct mac_device *mac_dev = priv->mac_dev;

	phylink_ethtool_get_pauseparam(mac_dev->phylink, epause);
}

static int dpaa_set_pauseparam(struct net_device *net_dev,
			       struct ethtool_pauseparam *epause)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct mac_device *mac_dev = priv->mac_dev;

	return phylink_ethtool_set_pauseparam(mac_dev->phylink, epause);
}

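/* Each per-CPU statistic (and the buffer pool count) is reported once per
 * online CPU plus a TOTAL column, hence the "+ 1" factors below; the global
 * statistics are reported only once.
 */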
static int dpaa_get_sset_count(struct net_device *net_dev, int type)
{
	unsigned int total_stats, num_stats;

	num_stats = num_online_cpus() + 1;
	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
		      DPAA_STATS_GLOBAL_LEN;

	switch (type) {
	case ETH_SS_STATS:
		return total_stats;
	default:
		return -EOPNOTSUPP;
	}
}

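/* Lay out one statistic per row of (num_cpus + 1) u64 values: column
 * crr_cpu holds this CPU's value and the last column accumulates the
 * running total across CPUs.
 */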
static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
		       int crr_cpu, u64 bp_count, u64 *data)
{
	int num_values = num_cpus + 1;
	int crr = 0;

	/* update current CPU's stats and also add them to the total values */
	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
	data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;

	data[crr * num_values + crr_cpu] = bp_count;
	data[crr++ * num_values + num_cpus] += bp_count;
}

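/* Fill the ethtool stats buffer: the per-CPU blocks built by copy_stats()
 * come first, followed by the aggregated Rx error counters, the ERN counters
 * and the congestion group stats, in the same order as dpaa_stats_global.
 */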
static void dpaa_get_ethtool_stats(struct net_device *net_dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_rx_errors rx_errors;
	unsigned int num_cpus, offset;
	u64 bp_count, cg_time, cg_num;
	struct dpaa_ern_cnt ern_cnt;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	int total_stats, i;
	bool cg_status;

	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
	priv = netdev_priv(net_dev);
	num_cpus = num_online_cpus();

	memset(&bp_count, 0, sizeof(bp_count));
	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
	memset(data, 0, total_stats * sizeof(u64));

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		dpaa_bp = priv->dpaa_bp;
		if (!dpaa_bp->percpu_count)
			continue;
		bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
		rx_errors.phe += percpu_priv->rx_errors.phe;

		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
		ern_cnt.wred += percpu_priv->ern_cnt.wred;
		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;

		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
	}

	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));

	/* gather congestion related counters */
	cg_num = 0;
	cg_status = false;
	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
		cg_num = priv->cgr_data.cgr_congested_count;

		/* reset congestion stats (like the QMan API does) */
		priv->cgr_data.congested_jiffies = 0;
		priv->cgr_data.cgr_congested_count = 0;
	}

	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
	data[offset++] = cg_time;
	data[offset++] = cg_num;
	data[offset++] = cg_status;
}

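/* Emit the strings in the same order as the values filled in by
 * dpaa_get_ethtool_stats(): "<stat> [CPU n]" for each online CPU plus a
 * "[TOTAL]" column, then the bpool counters, then the global strings.
 */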
static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
			     u8 *data)
{
	unsigned int i, j, num_cpus, size;
	char string_cpu[ETH_GSTRING_LEN];
	u8 *strings;

	memset(string_cpu, 0, sizeof(string_cpu));
	strings = data;
	num_cpus = num_online_cpus();
	size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;

	for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
				 dpaa_stats_percpu[i], j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
			 dpaa_stats_percpu[i]);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	for (j = 0; j < num_cpus; j++) {
		snprintf(string_cpu, ETH_GSTRING_LEN,
			 "bpool [CPU %d]", j);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
	memcpy(strings, string_cpu, ETH_GSTRING_LEN);
	strings += ETH_GSTRING_LEN;

	memcpy(strings, dpaa_stats_global, size);
}

static int dpaa_get_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *cmd)
{
	struct dpaa_priv *priv = netdev_priv(dev);

	cmd->data = 0;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		cmd->data = 0;
		break;
	}

	return 0;
}

static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *unused)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXFH:
		ret = dpaa_get_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
	struct mac_device *mac_dev;
	struct fman_port *rxport;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;
	rxport = mac_dev->port[0];

	fman_port_use_kg_hash(rxport, enable);
	priv->keygen_in_use = enable;
}

static int dpaa_set_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *nfc)
{
	int ret = -EINVAL;

	/* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		dpaa_set_hash(dev, !!nfc->data);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = dpaa_set_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

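/* Resolve the PTP hardware clock by walking from the MAC node to its FMan
 * parent and following the "ptimer-handle" phandle to the ptp_qoriq device;
 * phc_index stays -1 if no PTP clock is found.
 */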
static int dpaa_get_ts_info(struct net_device *net_dev,
			    struct ethtool_ts_info *info)
{
	struct device *dev = net_dev->dev.parent;
	struct device_node *mac_node = dev->of_node;
	struct device_node *fman_node = NULL, *ptp_node = NULL;
	struct platform_device *ptp_dev = NULL;
	struct ptp_qoriq *ptp = NULL;

	info->phc_index = -1;

	fman_node = of_get_parent(mac_node);
	if (fman_node) {
		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
		of_node_put(fman_node);
	}

	if (ptp_node) {
		ptp_dev = of_find_device_by_node(ptp_node);
		of_node_put(ptp_node);
	}

	if (ptp_dev)
		ptp = platform_get_drvdata(ptp_dev);

	if (ptp)
		info->phc_index = ptp->phc_index;

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}

static int dpaa_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *c,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct qman_portal *portal;
	u32 period;
	u8 thresh;

	portal = qman_get_affine_portal(smp_processor_id());
	qman_portal_get_iperiod(portal, &period);
	qman_dqrr_get_ithresh(portal, &thresh);

	c->rx_coalesce_usecs = period;
	c->rx_max_coalesced_frames = thresh;

	return 0;
}

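/* Apply the new interrupt period and DQRR threshold to every online affine
 * portal; if any portal rejects the settings, roll back the portals that
 * were already updated.
 */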
static int dpaa_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *c,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	const cpumask_t *cpus = qman_affine_cpus();
	bool needs_revert[NR_CPUS] = {false};
	struct qman_portal *portal;
	u32 period, prev_period;
	u8 thresh, prev_thresh;
	int cpu, res;

	period = c->rx_coalesce_usecs;
	thresh = c->rx_max_coalesced_frames;

	/* save previous values */
	portal = qman_get_affine_portal(smp_processor_id());
	qman_portal_get_iperiod(portal, &prev_period);
	qman_dqrr_get_ithresh(portal, &prev_thresh);

	/* set new values */
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		portal = qman_get_affine_portal(cpu);
		res = qman_portal_set_iperiod(portal, period);
		if (res)
			goto revert_values;
		res = qman_dqrr_set_ithresh(portal, thresh);
		if (res) {
			qman_portal_set_iperiod(portal, prev_period);
			goto revert_values;
		}
		needs_revert[cpu] = true;
	}

	return 0;

revert_values:
	/* restore previous values */
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		if (!needs_revert[cpu])
			continue;
		portal = qman_get_affine_portal(cpu);
		/* previous values will not fail, ignore return value */
		qman_portal_set_iperiod(portal, prev_period);
		qman_dqrr_set_ithresh(portal, prev_thresh);
	}

	return res;
}

const struct ethtool_ops dpaa_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.get_drvinfo = dpaa_get_drvinfo,
	.get_msglevel = dpaa_get_msglevel,
	.set_msglevel = dpaa_set_msglevel,
	.nway_reset = dpaa_nway_reset,
	.get_pauseparam = dpaa_get_pauseparam,
	.set_pauseparam = dpaa_set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_sset_count = dpaa_get_sset_count,
	.get_ethtool_stats = dpaa_get_ethtool_stats,
	.get_strings = dpaa_get_strings,
	.get_link_ksettings = dpaa_get_link_ksettings,
	.set_link_ksettings = dpaa_set_link_ksettings,
	.get_rxnfc = dpaa_get_rxnfc,
	.set_rxnfc = dpaa_set_rxnfc,
	.get_ts_info = dpaa_get_ts_info,
	.get_coalesce = dpaa_get_coalesce,
	.set_coalesce = dpaa_set_coalesce,
};