/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <linux/kern_levels.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
#include "lib/pci_vsc.h"
#include "lib/tout.h"
#include "diag/fw_tracer.h"

enum {
	MAX_MISSES = 3,
};

enum {
	MLX5_HEALTH_SYNDR_FW_ERR = 0x1,
	MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7,
	MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR = 0x8,
	MLX5_HEALTH_SYNDR_CRC_ERR = 0x9,
	MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa,
	MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb,
	MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc,
	MLX5_HEALTH_SYNDR_EQ_ERR = 0xd,
	MLX5_HEALTH_SYNDR_EQ_INV = 0xe,
	MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf,
	MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10
};

enum {
	MLX5_DROP_NEW_HEALTH_WORK,
};

enum {
	MLX5_SENSOR_NO_ERR = 0,
	MLX5_SENSOR_PCI_COMM_ERR = 1,
	MLX5_SENSOR_PCI_ERR = 2,
	MLX5_SENSOR_NIC_DISABLED = 3,
	MLX5_SENSOR_NIC_SW_RESET = 4,
	MLX5_SENSOR_FW_SYND_RFR = 5,
};

enum {
	MLX5_SEVERITY_MASK = 0x7,
	MLX5_SEVERITY_VALID_MASK = 0x8,
};

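/* The 3-bit NIC interface (IFC) state is embedded in the cmdq_addr_l_sz word
 * of the initialization segment, so it is read with a shift/mask and written
 * back while preserving the command interface address bits that share the
 * word.
 */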
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
{
	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7;
}

void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
{
	u32 cur_cmdq_addr_l_sz;

	cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz);
	iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) |
		    state << MLX5_NIC_IFC_OFFSET,
		    &dev->iseg->cmdq_addr_l_sz);
}

static bool sensor_pci_not_working(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;

	/* Offline PCI reads return 0xffffffff */
	return (ioread32be(&h->fw_ver) == 0xffffffff);
}

static int mlx5_health_get_rfr(u8 rfr_severity)
{
	return rfr_severity >> MLX5_RFR_BIT_OFFSET;
}

static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u8 synd = ioread8(&h->synd);
	u8 rfr;

	rfr = mlx5_health_get_rfr(ioread8(&h->rfr_severity));

	if (rfr && synd)
		mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd);
	return rfr && synd;
}

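/* Check the fatal sensors in order of severity: a dead PCI link is reported
 * before a disabled or resetting NIC interface, which in turn is reported
 * before a FW syndrome that merely requests a reset.
 */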
u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev)
{
	if (sensor_pci_not_working(dev))
		return MLX5_SENSOR_PCI_COMM_ERR;
	if (pci_channel_offline(dev->pdev))
		return MLX5_SENSOR_PCI_ERR;
	if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
		return MLX5_SENSOR_NIC_DISABLED;
	if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET)
		return MLX5_SENSOR_NIC_SW_RESET;
	if (sensor_fw_synd_rfr(dev))
		return MLX5_SENSOR_FW_SYND_RFR;

	return MLX5_SENSOR_NO_ERR;
}

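/* Serialize the SW reset flow between PFs: take the VSC gateway, then acquire
 * (or release) the dedicated SW reset semaphore space. -EBUSY from the
 * semaphore means another PF already owns the reset.
 */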
static int lock_sem_sw_reset(struct mlx5_core_dev *dev, bool lock)
{
	enum mlx5_vsc_state state;
	int ret;

	if (!mlx5_core_is_pf(dev))
		return -EBUSY;

	/* Try to lock GW access; this stage does not return EBUSY,
	 * because a locked GW does not mean that another PF has
	 * already started the reset.
	 */
	ret = mlx5_vsc_gw_lock(dev);
	if (ret == -EBUSY)
		return -EINVAL;
	if (ret)
		return ret;

	state = lock ? MLX5_VSC_LOCK : MLX5_VSC_UNLOCK;
	/* At this stage, if the return status is EBUSY, then we know
	 * for sure that another PF started the reset, so don't allow
	 * another reset.
	 */
	ret = mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET, state);
	if (ret)
		mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n");

	/* Unlock GW access */
	mlx5_vsc_gw_unlock(dev);

	return ret;
}

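/* SW reset support is advertised by a bit in the "initializing" word of the
 * initialization segment; the reset itself is triggered by writing
 * MLX5_NIC_IFC_SW_RESET to the NIC interface state field.
 */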
static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
{
	bool supported = (ioread32be(&dev->iseg->initializing) >>
			  MLX5_FW_RESET_SUPPORTED_OFFSET) & 1;
	u32 fatal_error;

	if (!supported)
		return false;

	/* The reset only needs to be issued by one PF. The health buffer is
	 * shared between all functions and is cleared during a reset. Check
	 * again to avoid a redundant second reset. If the fatal error was PCI
	 * related, a reset won't help.
	 */
	fatal_error = mlx5_health_check_fatal_sensors(dev);
	if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
	    fatal_error == MLX5_SENSOR_NIC_DISABLED ||
	    fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
		mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help.\n");
		return false;
	}

	mlx5_core_warn(dev, "Issuing FW Reset\n");
	/* Write the NIC interface field to initiate the reset; the command
	 * interface address also resides here, so don't overwrite it.
	 */
	mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);

	return true;
}

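/* Moving the device to MLX5_DEVICE_STATE_INTERNAL_ERROR makes the command
 * interface fail fast: pending commands are flushed instead of being left to
 * time out against dead hardware. Listeners are then notified through
 * MLX5_DEV_EVENT_SYS_ERROR.
 */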
static void enter_error_state(struct mlx5_core_dev *dev, bool force)
{
	if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		mlx5_cmd_flush(dev);
	}

	mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
}

void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
	bool err_detected = false;

	/* Mark the device as fatal in order to abort FW commands */
	if ((mlx5_health_check_fatal_sensors(dev) || force) &&
	    dev->state == MLX5_DEVICE_STATE_UP) {
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		err_detected = true;
	}
	mutex_lock(&dev->intf_state_mutex);
	if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto unlock; /* a previous error is still being handled */

	enter_error_state(dev, force);
unlock:
	mutex_unlock(&dev->intf_state_mutex);
}

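/* Best-effort SW reset after a fatal FW syndrome: take the cross-function
 * reset semaphore, issue the reset, then wait (PCI_TOGGLE timeout, or
 * FULL_CRDUMP when another PF owns the reset and is collecting a dump) for
 * the NIC interface to report DISABLED.
 */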
void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
{
	unsigned long end, delay_ms = mlx5_tout_ms(dev, PCI_TOGGLE);
	int lock = -EBUSY;

	mutex_lock(&dev->intf_state_mutex);
	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto unlock;

	mlx5_core_err(dev, "start\n");

	if (mlx5_health_check_fatal_sensors(dev) == MLX5_SENSOR_FW_SYND_RFR) {
		/* Get cr-dump and reset FW semaphore */
		lock = lock_sem_sw_reset(dev, true);

		if (lock == -EBUSY) {
			delay_ms = mlx5_tout_ms(dev, FULL_CRDUMP);
			goto recover_from_sw_reset;
		}
		/* Execute SW reset */
		reset_fw_if_needed(dev);
	}

recover_from_sw_reset:
	/* Recover from SW reset */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
			break;

		msleep(20);
	} while (!time_after(jiffies, end));

	if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
		dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
			mlx5_get_nic_state(dev), delay_ms);
	}

	/* Release FW semaphore if you are the lock owner */
	if (!lock)
		lock_sem_sw_reset(dev, false);

	mlx5_core_err(dev, "end\n");

unlock:
	mutex_unlock(&dev->intf_state_mutex);
}

static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
	u8 nic_interface = mlx5_get_nic_state(dev);

	switch (nic_interface) {
	case MLX5_NIC_IFC_FULL:
		mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
		break;

	case MLX5_NIC_IFC_DISABLED:
		mlx5_core_warn(dev, "starting teardown\n");
		break;

	case MLX5_NIC_IFC_NO_DRAM_NIC:
		mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
		break;

	case MLX5_NIC_IFC_SW_RESET:
		/* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases:
		 * 1. PCI has been disabled (i.e. PCI-AER, PF driver unloaded
		 *    and this is a VF); this is not recoverable by SW reset.
		 *    Logging of this is handled elsewhere.
		 * 2. FW reset has been issued by another function; the driver
		 *    can be reloaded to recover after the mode switches to
		 *    MLX5_NIC_IFC_DISABLED.
		 */
		if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
			mlx5_core_warn(dev, "NIC SW reset in progress\n");
		break;

	default:
		mlx5_core_warn(dev, "Expected to see disabled NIC but it has invalid value %d\n",
			       nic_interface);
	}

	mlx5_disable_device(dev);
}

int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
{
	unsigned long end;

	end = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FW_RESET));
	while (sensor_pci_not_working(dev)) {
		if (time_after(jiffies, end))
			return -ETIMEDOUT;
		msleep(100);
	}
	return 0;
}

static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
{
	mlx5_core_warn(dev, "handling bad device here\n");
	mlx5_handle_bad_state(dev);
	if (mlx5_health_wait_pci_up(dev)) {
		mlx5_core_err(dev, "health recovery flow aborted, PCI reads still not working\n");
		return -EIO;
	}
	mlx5_core_err(dev, "starting health recovery flow\n");
	if (mlx5_recover_device(dev) || mlx5_health_check_fatal_sensors(dev)) {
		mlx5_core_err(dev, "health recovery failed\n");
		return -EIO;
	}

	mlx5_core_info(dev, "health recovery succeeded\n");
	return 0;
}

static const char *hsynd_str(u8 synd)
{
	switch (synd) {
	case MLX5_HEALTH_SYNDR_FW_ERR:
		return "firmware internal error";
	case MLX5_HEALTH_SYNDR_IRISC_ERR:
		return "irisc not responding";
	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
		return "unrecoverable hardware error";
	case MLX5_HEALTH_SYNDR_CRC_ERR:
		return "firmware CRC error";
	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
		return "ICM fetch PCI error";
	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
		return "HW fatal error";
	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
		return "async EQ buffer overrun";
	case MLX5_HEALTH_SYNDR_EQ_ERR:
		return "EQ error";
	case MLX5_HEALTH_SYNDR_EQ_INV:
		return "Invalid EQ referenced";
	case MLX5_HEALTH_SYNDR_FFSER_ERR:
		return "FFSER error";
	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
		return "High temperature";
	default:
		return "unrecognized error";
	}
}

static const char *mlx5_loglevel_str(int level)
{
	switch (level) {
	case LOGLEVEL_EMERG:
		return "EMERGENCY";
	case LOGLEVEL_ALERT:
		return "ALERT";
	case LOGLEVEL_CRIT:
		return "CRITICAL";
	case LOGLEVEL_ERR:
		return "ERROR";
	case LOGLEVEL_WARNING:
		return "WARNING";
	case LOGLEVEL_NOTICE:
		return "NOTICE";
	case LOGLEVEL_INFO:
		return "INFO";
	case LOGLEVEL_DEBUG:
		return "DEBUG";
	}
	return "Unknown log level";
}

static int mlx5_health_get_severity(u8 rfr_severity)
{
	return rfr_severity & MLX5_SEVERITY_VALID_MASK ?
	       rfr_severity & MLX5_SEVERITY_MASK : LOGLEVEL_ERR;
}

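/* Dump the health buffer to the kernel log. The buffer lives in device memory
 * (the initialization segment), hence the ioread*() accessors; the log level
 * is taken from the FW-reported severity, defaulting to ERROR when no valid
 * severity is present.
 */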
static void print_health_info(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u8 rfr_severity;
	int severity;
	int i;

	/* If the syndrome is 0, the device is OK, so there is no need to
	 * print the health buffer.
	 */
	if (!ioread8(&h->synd))
		return;

	rfr_severity = ioread8(&h->rfr_severity);
	severity = mlx5_health_get_severity(rfr_severity);
	mlx5_log(dev, severity, "Health issue observed, %s, severity(%d) %s:\n",
		 hsynd_str(ioread8(&h->synd)), severity, mlx5_loglevel_str(severity));

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
		mlx5_log(dev, severity, "assert_var[%d] 0x%08x\n", i,
			 ioread32be(h->assert_var + i));

	mlx5_log(dev, severity, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
	mlx5_log(dev, severity, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
	mlx5_log(dev, severity, "fw_ver %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev),
		 fw_rev_sub(dev));
	mlx5_log(dev, severity, "time %u\n", ioread32be(&h->time));
	mlx5_log(dev, severity, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
	mlx5_log(dev, severity, "rfr %d\n", mlx5_health_get_rfr(rfr_severity));
	mlx5_log(dev, severity, "severity %d (%s)\n", severity, mlx5_loglevel_str(severity));
	mlx5_log(dev, severity, "irisc_index %d\n", ioread8(&h->irisc_index));
	mlx5_log(dev, severity, "synd 0x%x: %s\n", ioread8(&h->synd),
		 hsynd_str(ioread8(&h->synd)));
	mlx5_log(dev, severity, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
	mlx5_log(dev, severity, "raw fw_ver 0x%08x\n", ioread32be(&h->fw_ver));
}

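/* devlink "fw" reporter: .diagnose reports the current health syndrome and its
 * description; .dump triggers a FW core dump and then emits the health buffer
 * contents and any saved FW traces.
 */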
static int
mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
			  struct devlink_fmsg *fmsg,
			  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u8 synd;
	int err;

	synd = ioread8(&h->synd);
	err = devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
	if (err || !synd)
		return err;
	return devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
}

struct mlx5_fw_reporter_ctx {
	u8 err_synd;
	int miss_counter;
};

static int
mlx5_fw_reporter_ctx_pairs_put(struct devlink_fmsg *fmsg,
			       struct mlx5_fw_reporter_ctx *fw_reporter_ctx)
{
	int err;

	err = devlink_fmsg_u8_pair_put(fmsg, "syndrome",
				       fw_reporter_ctx->err_synd);
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter",
					fw_reporter_ctx->miss_counter);
	if (err)
		return err;
	return 0;
}

static int
mlx5_fw_reporter_health_buffer_data_put(struct mlx5_core_dev *dev,
					struct devlink_fmsg *fmsg)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u8 rfr_severity;
	int err;
	int i;

	if (!ioread8(&h->synd))
		return 0;

	err = devlink_fmsg_pair_nest_start(fmsg, "health buffer");
	if (err)
		return err;
	err = devlink_fmsg_obj_nest_start(fmsg);
	if (err)
		return err;
	err = devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var");
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) {
		err = devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i));
		if (err)
			return err;
	}
	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr",
					ioread32be(&h->assert_exit_ptr));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "assert_callra",
					ioread32be(&h->assert_callra));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "time", ioread32be(&h->time));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id));
	if (err)
		return err;
	rfr_severity = ioread8(&h->rfr_severity);
	err = devlink_fmsg_u8_pair_put(fmsg, "rfr", mlx5_health_get_rfr(rfr_severity));
	if (err)
		return err;
	err = devlink_fmsg_u8_pair_put(fmsg, "severity", mlx5_health_get_severity(rfr_severity));
	if (err)
		return err;
	err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index",
				       ioread8(&h->irisc_index));
	if (err)
		return err;
	err = devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd",
					ioread16be(&h->ext_synd));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver",
					ioread32be(&h->fw_ver));
	if (err)
		return err;
	err = devlink_fmsg_obj_nest_end(fmsg);
	if (err)
		return err;
	return devlink_fmsg_pair_nest_end(fmsg);
}

static int
mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter,
		      struct devlink_fmsg *fmsg, void *priv_ctx,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
	int err;

	err = mlx5_fw_tracer_trigger_core_dump_general(dev);
	if (err)
		return err;

	if (priv_ctx) {
		struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;

		err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
		if (err)
			return err;
	}

	err = mlx5_fw_reporter_health_buffer_data_put(dev, fmsg);
	if (err)
		return err;
	return mlx5_fw_tracer_get_saved_traces_objects(dev->tracer, fmsg);
}

static void mlx5_fw_reporter_err_work(struct work_struct *work)
{
	struct mlx5_fw_reporter_ctx fw_reporter_ctx;
	struct mlx5_core_health *health;

	health = container_of(work, struct mlx5_core_health, report_work);

	if (IS_ERR_OR_NULL(health->fw_reporter))
		return;

	fw_reporter_ctx.err_synd = health->synd;
	fw_reporter_ctx.miss_counter = health->miss_counter;
	if (fw_reporter_ctx.err_synd) {
		devlink_health_report(health->fw_reporter,
				      "FW syndrome reported", &fw_reporter_ctx);
		return;
	}
	if (fw_reporter_ctx.miss_counter)
		devlink_health_report(health->fw_reporter,
				      "FW miss counter reported",
				      &fw_reporter_ctx);
}

static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
	.name = "fw",
	.diagnose = mlx5_fw_reporter_diagnose,
	.dump = mlx5_fw_reporter_dump,
};

static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
			       void *priv_ctx,
			       struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);

	return mlx5_health_try_recover(dev);
}

static int
mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
			    struct devlink_fmsg *fmsg, void *priv_ctx,
			    struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
	u32 crdump_size = dev->priv.health.crdump_size;
	u32 *cr_data;
	int err;

	if (!mlx5_core_is_pf(dev))
		return -EPERM;

	cr_data = kvmalloc(crdump_size, GFP_KERNEL);
	if (!cr_data)
		return -ENOMEM;
	err = mlx5_crdump_collect(dev, cr_data);
	if (err)
		goto free_data;

	if (priv_ctx) {
		struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;

		err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
		if (err)
			goto free_data;
	}

	err = devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size);

free_data:
	kvfree(cr_data);
	return err;
}

static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
{
	struct mlx5_fw_reporter_ctx fw_reporter_ctx;
	struct mlx5_core_health *health;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;

	health = container_of(work, struct mlx5_core_health, fatal_report_work);
	priv = container_of(health, struct mlx5_priv, health);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	enter_error_state(dev, false);
	if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
		if (mlx5_health_try_recover(dev))
			mlx5_core_err(dev, "health recovery failed\n");
		return;
	}
	fw_reporter_ctx.err_synd = health->synd;
	fw_reporter_ctx.miss_counter = health->miss_counter;
	if (devlink_health_report(health->fw_fatal_reporter,
				  "FW fatal error reported", &fw_reporter_ctx) == -ECANCELED) {
		/* If recovery wasn't performed due to the grace period,
		 * unload the driver. This ensures that the driver closes all
		 * its resources and is not subjected to further requests from
		 * the kernel.
		 */
		mlx5_core_err(dev, "Driver is in error state. Unloading\n");
		mlx5_unload_one(dev);
	}
}

static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
	.name = "fw_fatal",
	.recover = mlx5_fw_fatal_reporter_recover,
	.dump = mlx5_fw_fatal_reporter_dump,
};

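/* The fatal reporter is created with a grace period of
 * MLX5_REPORTER_FW_GRACEFUL_PERIOD ms (1200000 ms = 20 minutes); within it,
 * devlink does not auto-recover repeated fatal reports
 * (devlink_health_report() returns -ECANCELED) and
 * mlx5_fw_fatal_reporter_err_work() unloads the driver instead.
 *
 * As a rough usage sketch (the device name below is only an example), the
 * reporters are driven from userspace with the devlink tool:
 *
 *   devlink health show pci/0000:03:00.0
 *   devlink health diagnose pci/0000:03:00.0 reporter fw
 *   devlink health dump show pci/0000:03:00.0 reporter fw_fatal
 *   devlink health recover pci/0000:03:00.0 reporter fw_fatal
 */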
#define MLX5_REPORTER_FW_GRACEFUL_PERIOD 1200000
static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct devlink *devlink = priv_to_devlink(dev);

	health->fw_reporter =
		devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
					       0, dev);
	if (IS_ERR(health->fw_reporter))
		mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
			       PTR_ERR(health->fw_reporter));

	health->fw_fatal_reporter =
		devlink_health_reporter_create(devlink,
					       &mlx5_fw_fatal_reporter_ops,
					       MLX5_REPORTER_FW_GRACEFUL_PERIOD,
					       dev);
	if (IS_ERR(health->fw_fatal_reporter))
		mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
			       PTR_ERR(health->fw_fatal_reporter));
}

static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	if (!IS_ERR_OR_NULL(health->fw_reporter))
		devlink_health_reporter_destroy(health->fw_reporter);

	if (!IS_ERR_OR_NULL(health->fw_fatal_reporter))
		devlink_health_reporter_destroy(health->fw_fatal_reporter);
}

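/* Add up to one second of random jitter on top of the configured poll
 * interval, presumably so that multiple devices do not poll in lockstep.
 */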
static unsigned long get_next_poll_jiffies(struct mlx5_core_dev *dev)
{
	unsigned long next;

	get_random_bytes(&next, sizeof(next));
	next %= HZ;
	next += jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, HEALTH_POLL_INTERVAL));

	return next;
}

void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
		queue_work(health->wq, &health->fatal_report_work);
	else
		mlx5_core_err(dev, "new health work is not permitted at this stage\n");
	spin_unlock_irqrestore(&health->wq_lock, flags);
}

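/* Once an hour, push the host wall-clock time to FW through the MRTC register
 * so that FW log timestamps can be correlated with host time.
 */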
#define MLX5_MSEC_PER_HOUR (MSEC_PER_SEC * 60 * 60)
static void mlx5_health_log_ts_update(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	u32 out[MLX5_ST_SZ_DW(mrtc_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(mrtc_reg)] = {};
	struct mlx5_core_health *health;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	u64 now_us;

	health = container_of(dwork, struct mlx5_core_health, update_fw_log_ts_work);
	priv = container_of(health, struct mlx5_priv, health);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	now_us = ktime_to_us(ktime_get_real());

	MLX5_SET(mrtc_reg, in, time_h, now_us >> 32);
	MLX5_SET(mrtc_reg, in, time_l, now_us & 0xFFFFFFFF);
	mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MRTC, 0, 1);

	queue_delayed_work(health->wq, &health->update_fw_log_ts_work,
			   msecs_to_jiffies(MLX5_MSEC_PER_HOUR));
}

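/* Timer callback, re-armed roughly every HEALTH_POLL_INTERVAL: latch a fatal
 * sensor into an internal-error transition, count polls during which the FW
 * health counter has not advanced (MAX_MISSES in a row triggers a report),
 * and report any newly observed syndrome.
 */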
static void poll_health(struct timer_list *t)
{
	struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u32 fatal_error;
	u8 prev_synd;
	u32 count;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	fatal_error = mlx5_health_check_fatal_sensors(dev);

	if (fatal_error && !health->fatal_error) {
		mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
		dev->priv.health.fatal_error = fatal_error;
		print_health_info(dev);
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		mlx5_trigger_health_work(dev);
		return;
	}

	count = ioread32be(health->health_counter);
	if (count == health->prev)
		++health->miss_counter;
	else
		health->miss_counter = 0;

	health->prev = count;
	if (health->miss_counter == MAX_MISSES) {
		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
		print_health_info(dev);
		queue_work(health->wq, &health->report_work);
	}

	prev_synd = health->synd;
	health->synd = ioread8(&h->synd);
	if (health->synd && health->synd != prev_synd)
		queue_work(health->wq, &health->report_work);

out:
	mod_timer(&health->timer, get_next_poll_jiffies(dev));
}

void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
	u64 poll_interval_ms = mlx5_tout_ms(dev, HEALTH_POLL_INTERVAL);
	struct mlx5_core_health *health = &dev->priv.health;

	timer_setup(&health->timer, poll_health, 0);
	health->fatal_error = MLX5_SENSOR_NO_ERR;
	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	health->health = &dev->iseg->health;
	health->health_counter = &dev->iseg->health_counter;

	health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
	add_timer(&health->timer);

	if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
		queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
}

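/* Stop the poll timer. When disable_health is set, new health work is also
 * blocked (MLX5_DROP_NEW_HEALTH_WORK) before the timer is deleted.
 */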
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	if (disable_health) {
		spin_lock_irqsave(&health->wq_lock, flags);
		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
		spin_unlock_irqrestore(&health->wq_lock, flags);
	}

	del_timer_sync(&health->timer);
}

void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	spin_unlock_irqrestore(&health->wq_lock, flags);
	cancel_delayed_work_sync(&health->update_fw_log_ts_work);
	cancel_work_sync(&health->report_work);
	cancel_work_sync(&health->fatal_report_work);
}

void mlx5_health_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	flush_workqueue(health->wq);
}

void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	cancel_delayed_work_sync(&health->update_fw_log_ts_work);
	destroy_workqueue(health->wq);
	mlx5_fw_reporters_destroy(dev);
}

int mlx5_health_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health;
	char *name;

	mlx5_fw_reporters_create(dev);

	health = &dev->priv.health;
	name = kmalloc(64, GFP_KERNEL);
	if (!name)
		goto out_err;

	strcpy(name, "mlx5_health");
	strcat(name, dev_name(dev->device));
	health->wq = create_singlethread_workqueue(name);
	kfree(name);
	if (!health->wq)
		goto out_err;
	spin_lock_init(&health->wq_lock);
	INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
	INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
	INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);

	return 0;

out_err:
	mlx5_fw_reporters_destroy(dev);
	return -ENOMEM;
}