1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/device.h>
7 #include <linux/export.h>
8 #include <linux/err.h>
9 #include <linux/if_link.h>
10 #include <linux/netdevice.h>
11 #include <linux/completion.h>
12 #include <linux/skbuff.h>
13 #include <linux/etherdevice.h>
14 #include <linux/types.h>
15 #include <linux/string.h>
16 #include <linux/gfp.h>
17 #include <linux/random.h>
18 #include <linux/jiffies.h>
19 #include <linux/mutex.h>
20 #include <linux/rcupdate.h>
21 #include <linux/slab.h>
22 #include <linux/workqueue.h>
23 #include <linux/firmware.h>
24 #include <asm/byteorder.h>
25 #include <net/devlink.h>
26 #include <trace/events/devlink.h>
27
28 #include "core.h"
29 #include "core_env.h"
30 #include "item.h"
31 #include "cmd.h"
32 #include "port.h"
33 #include "trap.h"
34 #include "emad.h"
35 #include "reg.h"
36 #include "resources.h"
37 #include "../mlxfw/mlxfw.h"
38
/* Registry of mlxsw bus drivers (e.g. spectrum, switchx2); writes are
 * serialized by mlxsw_core_driver_list_lock.
 */
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

/* Module-wide workqueues: mlxsw_wq for regular work, mlxsw_owq is an
 * ordered workqueue for work that must not run concurrently.
 */
static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;
46
/* Core-level representation of a single switch port. One entry per
 * possible local port lives in mlxsw_core->ports; a slot is considered
 * in use while port_driver_priv is non-NULL.
 */
struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;	/* driver-private per-port state; NULL when free */
	u8 local_port;		/* device-local port number */
};

/* Return the driver-private state attached to a core port. */
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

/* True when the port slot is in use (driver state attached). */
static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}
63
/* Per-device core state. One instance is allocated per bus device; the
 * driver-private area is carved out of the same allocation (see
 * driver_priv[] below).
 */
struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;	/* EMAD timeout/retry work */
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;			/* next EMAD transaction ID */
		struct list_head trans_list;	/* in-flight transactions */
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;			/* EMAD channel is operational */
		bool enable_string_tlv;		/* FW supports string TLV */
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;			/* queried device resources */
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;		/* array of max_ports entries */
	unsigned int max_ports;
	atomic_t active_ports_count;		/* devlink "ports" occupancy */
	bool fw_flash_in_progress;		/* lengthens EMAD timeouts */
	struct {
		struct devlink_health_reporter *fw_fatal;
	} health;
	struct mlxsw_env *env;
	unsigned long driver_priv[];
	/* driver_priv has to be always the last item */
};
96
97 #define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
98
mlxsw_ports_occ_get(void * priv)99 static u64 mlxsw_ports_occ_get(void *priv)
100 {
101 struct mlxsw_core *mlxsw_core = priv;
102
103 return atomic_read(&mlxsw_core->active_ports_count);
104 }
105
mlxsw_core_resources_ports_register(struct mlxsw_core * mlxsw_core)106 static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
107 {
108 struct devlink *devlink = priv_to_devlink(mlxsw_core);
109 struct devlink_resource_size_params ports_num_params;
110 u32 max_ports;
111
112 max_ports = mlxsw_core->max_ports - 1;
113 devlink_resource_size_params_init(&ports_num_params, max_ports,
114 max_ports, 1,
115 DEVLINK_RESOURCE_UNIT_ENTRY);
116
117 return devlink_resource_register(devlink,
118 DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
119 max_ports, MLXSW_CORE_RESOURCE_PORTS,
120 DEVLINK_RESOURCE_ID_PARENT_TOP,
121 &ports_num_params);
122 }
123
mlxsw_ports_init(struct mlxsw_core * mlxsw_core,bool reload)124 static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
125 {
126 struct devlink *devlink = priv_to_devlink(mlxsw_core);
127 int err;
128
129 /* Switch ports are numbered from 1 to queried value */
130 if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
131 mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
132 MAX_SYSTEM_PORT) + 1;
133 else
134 mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;
135
136 mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
137 sizeof(struct mlxsw_core_port), GFP_KERNEL);
138 if (!mlxsw_core->ports)
139 return -ENOMEM;
140
141 if (!reload) {
142 err = mlxsw_core_resources_ports_register(mlxsw_core);
143 if (err)
144 goto err_resources_ports_register;
145 }
146 atomic_set(&mlxsw_core->active_ports_count, 0);
147 devlink_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
148 mlxsw_ports_occ_get, mlxsw_core);
149
150 return 0;
151
152 err_resources_ports_register:
153 kfree(mlxsw_core->ports);
154 return err;
155 }
156
mlxsw_ports_fini(struct mlxsw_core * mlxsw_core,bool reload)157 static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
158 {
159 struct devlink *devlink = priv_to_devlink(mlxsw_core);
160
161 devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
162 if (!reload)
163 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
164
165 kfree(mlxsw_core->ports);
166 }
167
mlxsw_core_max_ports(const struct mlxsw_core * mlxsw_core)168 unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
169 {
170 return mlxsw_core->max_ports;
171 }
172 EXPORT_SYMBOL(mlxsw_core_max_ports);
173
mlxsw_core_driver_priv(struct mlxsw_core * mlxsw_core)174 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
175 {
176 return mlxsw_core->driver_priv;
177 }
178 EXPORT_SYMBOL(mlxsw_core_driver_priv);
179
mlxsw_core_res_query_enabled(const struct mlxsw_core * mlxsw_core)180 bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
181 {
182 return mlxsw_core->driver->res_query_enabled;
183 }
184 EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
185
mlxsw_core_temp_warn_enabled(const struct mlxsw_core * mlxsw_core)186 bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core)
187 {
188 return mlxsw_core->driver->temp_warn_enabled;
189 }
190
191 bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev * rev,const struct mlxsw_fw_rev * req_rev)192 mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
193 const struct mlxsw_fw_rev *req_rev)
194 {
195 return rev->minor > req_rev->minor ||
196 (rev->minor == req_rev->minor &&
197 rev->subminor >= req_rev->subminor);
198 }
199 EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);
200
/* Node on mlxsw_core->rx_listener_list: one registered RX (trap)
 * listener plus its private context.
 */
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
	bool enabled;	/* delivery can be toggled without unregistering */
};

/* Node on mlxsw_core->event_listener_list: one registered event
 * listener plus its private context.
 */
struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_event_listener el;
	void *priv;
};
214
/******************
 * EMAD processing
 ******************/

/* The MLXSW_ITEM* macros below generate typed getters/setters
 * (mlxsw_emad_<grp>_<field>_get/_set/_memcpy_to/...) for the fields of
 * the EMAD Ethernet header and its TLVs, given byte offset, bit offset
 * and width.
 */

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_string_tlv_type
 * Type of the TLV.
 * Must be set to 0x2 (string TLV).
 */
MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5);

/* emad_string_tlv_len
 * Length of the string TLV in u32.
 */
MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);

#define MLXSW_EMAD_STRING_TLV_STRING_LEN 128

/* emad_string_tlv_string
 * String provided by the device's firmware in case of erroneous register access
 */
MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
	       MLXSW_EMAD_STRING_TLV_STRING_LEN);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the operation TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

/* Direction of a register access performed over EMAD or command interface. */
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};
361
362 static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)363 mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
364 {
365 switch (type) {
366 case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
367 return "query";
368 case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
369 return "write";
370 }
371 BUG();
372 }
373
mlxsw_emad_pack_end_tlv(char * end_tlv)374 static void mlxsw_emad_pack_end_tlv(char *end_tlv)
375 {
376 mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
377 mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
378 }
379
mlxsw_emad_pack_reg_tlv(char * reg_tlv,const struct mlxsw_reg_info * reg,char * payload)380 static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
381 const struct mlxsw_reg_info *reg,
382 char *payload)
383 {
384 mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
385 mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
386 memcpy(reg_tlv + sizeof(u32), payload, reg->len);
387 }
388
mlxsw_emad_pack_string_tlv(char * string_tlv)389 static void mlxsw_emad_pack_string_tlv(char *string_tlv)
390 {
391 mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING);
392 mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN);
393 }
394
mlxsw_emad_pack_op_tlv(char * op_tlv,const struct mlxsw_reg_info * reg,enum mlxsw_core_reg_access_type type,u64 tid)395 static void mlxsw_emad_pack_op_tlv(char *op_tlv,
396 const struct mlxsw_reg_info *reg,
397 enum mlxsw_core_reg_access_type type,
398 u64 tid)
399 {
400 mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
401 mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
402 mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
403 mlxsw_emad_op_tlv_status_set(op_tlv, 0);
404 mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
405 mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
406 if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
407 mlxsw_emad_op_tlv_method_set(op_tlv,
408 MLXSW_EMAD_OP_TLV_METHOD_QUERY);
409 else
410 mlxsw_emad_op_tlv_method_set(op_tlv,
411 MLXSW_EMAD_OP_TLV_METHOD_WRITE);
412 mlxsw_emad_op_tlv_class_set(op_tlv,
413 MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
414 mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
415 }
416
mlxsw_emad_construct_eth_hdr(struct sk_buff * skb)417 static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
418 {
419 char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
420
421 mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
422 mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
423 mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
424 mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
425 mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
426
427 skb_reset_mac_header(skb);
428
429 return 0;
430 }
431
mlxsw_emad_construct(struct sk_buff * skb,const struct mlxsw_reg_info * reg,char * payload,enum mlxsw_core_reg_access_type type,u64 tid,bool enable_string_tlv)432 static void mlxsw_emad_construct(struct sk_buff *skb,
433 const struct mlxsw_reg_info *reg,
434 char *payload,
435 enum mlxsw_core_reg_access_type type,
436 u64 tid, bool enable_string_tlv)
437 {
438 char *buf;
439
440 buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
441 mlxsw_emad_pack_end_tlv(buf);
442
443 buf = skb_push(skb, reg->len + sizeof(u32));
444 mlxsw_emad_pack_reg_tlv(buf, reg, payload);
445
446 if (enable_string_tlv) {
447 buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
448 mlxsw_emad_pack_string_tlv(buf);
449 }
450
451 buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
452 mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
453
454 mlxsw_emad_construct_eth_hdr(skb);
455 }
456
/* Byte offsets of the TLVs inside a received EMAD, cached in skb->cb by
 * mlxsw_emad_tlv_parse(). string_tlv is 0 when no string TLV is present.
 */
struct mlxsw_emad_tlv_offsets {
	u16 op_tlv;
	u16 string_tlv;
	u16 reg_tlv;
};

/* Does @tlv start with a string TLV type field? */
static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
{
	u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv);

	return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
}

/* Locate the TLVs of a received EMAD and record their offsets in
 * skb->cb for the accessors below. Must be called before any of
 * mlxsw_emad_{op,string,reg}_tlv().
 */
static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
	offsets->string_tlv = 0;
	offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
			   MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);

	/* If string TLV is present, it must come after the operation TLV. */
	if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) {
		offsets->string_tlv = offsets->reg_tlv;
		offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
	}
}

/* Pointer to the operation TLV of a parsed EMAD. */
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	return ((char *) (skb->data + offsets->op_tlv));
}

/* Pointer to the string TLV of a parsed EMAD, or NULL if absent. */
static char *mlxsw_emad_string_tlv(const struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	if (!offsets->string_tlv)
		return NULL;

	return ((char *) (skb->data + offsets->string_tlv));
}

/* Pointer to the register TLV of a parsed EMAD. */
static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	return ((char *) (skb->data + offsets->reg_tlv));
}

/* Register payload starts one u32 (the TLV header) past the reg TLV. */
static char *mlxsw_emad_reg_payload(const char *reg_tlv)
{
	return ((char *) (reg_tlv + sizeof(u32)));
}

/* Register payload inside a command-interface mailbox: skip the op TLV
 * and the reg TLV header word.
 */
static char *mlxsw_emad_reg_payload_cmd(const char *mbox)
{
	return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}
523
mlxsw_emad_get_tid(const struct sk_buff * skb)524 static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
525 {
526 char *op_tlv;
527
528 op_tlv = mlxsw_emad_op_tlv(skb);
529 return mlxsw_emad_op_tlv_tid_get(op_tlv);
530 }
531
mlxsw_emad_is_resp(const struct sk_buff * skb)532 static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
533 {
534 char *op_tlv;
535
536 op_tlv = mlxsw_emad_op_tlv(skb);
537 return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
538 }
539
mlxsw_emad_process_status(char * op_tlv,enum mlxsw_emad_op_tlv_status * p_status)540 static int mlxsw_emad_process_status(char *op_tlv,
541 enum mlxsw_emad_op_tlv_status *p_status)
542 {
543 *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
544
545 switch (*p_status) {
546 case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
547 return 0;
548 case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
549 case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
550 return -EAGAIN;
551 case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
552 case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
553 case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
554 case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
555 case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
556 case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
557 case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
558 case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
559 default:
560 return -EIO;
561 }
562 }
563
564 static int
mlxsw_emad_process_status_skb(struct sk_buff * skb,enum mlxsw_emad_op_tlv_status * p_status)565 mlxsw_emad_process_status_skb(struct sk_buff *skb,
566 enum mlxsw_emad_op_tlv_status *p_status)
567 {
568 return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
569 }
570
/* One in-flight EMAD register transaction. Linked on the core's
 * emad.trans_list (RCU-read on the RX path) and, for bulk operations,
 * on a caller-owned bulk_list.
 */
struct mlxsw_reg_trans {
	struct list_head list;		/* node on emad.trans_list */
	struct list_head bulk_list;	/* node on caller's bulk list */
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;		/* original skb, cloned per (re)send */
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;	/* retransmit/fail on timeout */
	unsigned int retries;		/* retransmissions so far */
	u64 tid;			/* transaction ID for RX matching */
	struct completion completion;	/* signalled when trans finishes */
	atomic_t active;		/* 1 while a send awaits resp/timeout */
	mlxsw_reg_trans_cb_t *cb;	/* optional payload callback */
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;			/* final result of the transaction */
	char *emad_err_string;		/* FW error string, kzalloc'ed */
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};
591
mlxsw_emad_process_string_tlv(const struct sk_buff * skb,struct mlxsw_reg_trans * trans)592 static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
593 struct mlxsw_reg_trans *trans)
594 {
595 char *string_tlv;
596 char *string;
597
598 string_tlv = mlxsw_emad_string_tlv(skb);
599 if (!string_tlv)
600 return;
601
602 trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN,
603 GFP_ATOMIC);
604 if (!trans->emad_err_string)
605 return;
606
607 string = mlxsw_emad_string_tlv_string_data(string_tlv);
608 strlcpy(trans->emad_err_string, string,
609 MLXSW_EMAD_STRING_TLV_STRING_LEN);
610 }
611
/* EMAD response timeouts: firmware is slow to answer while flashing,
 * so a much longer timeout is used during a firmware update.
 */
#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS	3000
#define MLXSW_EMAD_TIMEOUT_MS			200

/* Arm the transaction's timeout work. The timeout doubles with each
 * retry (base << retries), giving exponential backoff.
 */
static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	if (trans->core->fw_flash_in_progress)
		timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
			   timeout << trans->retries);
}
625
mlxsw_emad_transmit(struct mlxsw_core * mlxsw_core,struct mlxsw_reg_trans * trans)626 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
627 struct mlxsw_reg_trans *trans)
628 {
629 struct sk_buff *skb;
630 int err;
631
632 skb = skb_clone(trans->tx_skb, GFP_KERNEL);
633 if (!skb)
634 return -ENOMEM;
635
636 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
637 skb->data + mlxsw_core->driver->txhdr_len,
638 skb->len - mlxsw_core->driver->txhdr_len);
639
640 atomic_set(&trans->active, 1);
641 err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
642 if (err) {
643 dev_kfree_skb(skb);
644 return err;
645 }
646 mlxsw_emad_trans_timeout_schedule(trans);
647 return 0;
648 }
649
mlxsw_emad_trans_finish(struct mlxsw_reg_trans * trans,int err)650 static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
651 {
652 struct mlxsw_core *mlxsw_core = trans->core;
653
654 dev_kfree_skb(trans->tx_skb);
655 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
656 list_del_rcu(&trans->list);
657 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
658 trans->err = err;
659 complete(&trans->completion);
660 }
661
mlxsw_emad_transmit_retry(struct mlxsw_core * mlxsw_core,struct mlxsw_reg_trans * trans)662 static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
663 struct mlxsw_reg_trans *trans)
664 {
665 int err;
666
667 if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
668 trans->retries++;
669 err = mlxsw_emad_transmit(trans->core, trans);
670 if (err == 0)
671 return;
672
673 if (!atomic_dec_and_test(&trans->active))
674 return;
675 } else {
676 err = -EIO;
677 }
678 mlxsw_emad_trans_finish(trans, err);
679 }
680
mlxsw_emad_trans_timeout_work(struct work_struct * work)681 static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
682 {
683 struct mlxsw_reg_trans *trans = container_of(work,
684 struct mlxsw_reg_trans,
685 timeout_dw.work);
686
687 if (!atomic_dec_and_test(&trans->active))
688 return;
689
690 mlxsw_emad_transmit_retry(trans->core, trans);
691 }
692
mlxsw_emad_process_response(struct mlxsw_core * mlxsw_core,struct mlxsw_reg_trans * trans,struct sk_buff * skb)693 static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
694 struct mlxsw_reg_trans *trans,
695 struct sk_buff *skb)
696 {
697 int err;
698
699 if (!atomic_dec_and_test(&trans->active))
700 return;
701
702 err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
703 if (err == -EAGAIN) {
704 mlxsw_emad_transmit_retry(mlxsw_core, trans);
705 } else {
706 if (err == 0) {
707 char *reg_tlv = mlxsw_emad_reg_tlv(skb);
708
709 if (trans->cb)
710 trans->cb(mlxsw_core,
711 mlxsw_emad_reg_payload(reg_tlv),
712 trans->reg->len, trans->cb_priv);
713 } else {
714 mlxsw_emad_process_string_tlv(skb, trans);
715 }
716 mlxsw_emad_trans_finish(trans, err);
717 }
718 }
719
/* Trap handler for incoming EMAD frames: parse the TLVs, drop anything
 * that is not a response, and dispatch responses to the in-flight
 * transaction with a matching TID. Consumes the skb.
 * called with rcu read lock held
 */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	mlxsw_emad_tlv_parse(skb);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

/* Trap EMAD (ethertype 0x8932) frames to the CPU for the handler above. */
static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);
749
mlxsw_emad_init(struct mlxsw_core * mlxsw_core)750 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
751 {
752 struct workqueue_struct *emad_wq;
753 u64 tid;
754 int err;
755
756 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
757 return 0;
758
759 emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
760 if (!emad_wq)
761 return -ENOMEM;
762 mlxsw_core->emad_wq = emad_wq;
763
764 /* Set the upper 32 bits of the transaction ID field to a random
765 * number. This allows us to discard EMADs addressed to other
766 * devices.
767 */
768 get_random_bytes(&tid, 4);
769 tid <<= 32;
770 atomic64_set(&mlxsw_core->emad.tid, tid);
771
772 INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
773 spin_lock_init(&mlxsw_core->emad.trans_list_lock);
774
775 err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
776 mlxsw_core);
777 if (err)
778 goto err_trap_register;
779
780 err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
781 if (err)
782 goto err_emad_trap_set;
783 mlxsw_core->emad.use_emad = true;
784
785 return 0;
786
787 err_emad_trap_set:
788 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
789 mlxsw_core);
790 err_trap_register:
791 destroy_workqueue(mlxsw_core->emad_wq);
792 return err;
793 }
794
mlxsw_emad_fini(struct mlxsw_core * mlxsw_core)795 static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
796 {
797
798 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
799 return;
800
801 mlxsw_core->emad.use_emad = false;
802 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
803 mlxsw_core);
804 destroy_workqueue(mlxsw_core->emad_wq);
805 }
806
mlxsw_emad_alloc(const struct mlxsw_core * mlxsw_core,u16 reg_len,bool enable_string_tlv)807 static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
808 u16 reg_len, bool enable_string_tlv)
809 {
810 struct sk_buff *skb;
811 u16 emad_len;
812
813 emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
814 (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
815 sizeof(u32) + mlxsw_core->driver->txhdr_len);
816 if (enable_string_tlv)
817 emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
818 if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
819 return NULL;
820
821 skb = netdev_alloc_skb(NULL, emad_len);
822 if (!skb)
823 return NULL;
824 memset(skb->data, 0, emad_len);
825 skb_reserve(skb, emad_len);
826
827 return skb;
828 }
829
mlxsw_emad_reg_access(struct mlxsw_core * mlxsw_core,const struct mlxsw_reg_info * reg,char * payload,enum mlxsw_core_reg_access_type type,struct mlxsw_reg_trans * trans,struct list_head * bulk_list,mlxsw_reg_trans_cb_t * cb,unsigned long cb_priv,u64 tid)830 static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
831 const struct mlxsw_reg_info *reg,
832 char *payload,
833 enum mlxsw_core_reg_access_type type,
834 struct mlxsw_reg_trans *trans,
835 struct list_head *bulk_list,
836 mlxsw_reg_trans_cb_t *cb,
837 unsigned long cb_priv, u64 tid)
838 {
839 bool enable_string_tlv;
840 struct sk_buff *skb;
841 int err;
842
843 dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
844 tid, reg->id, mlxsw_reg_id_str(reg->id),
845 mlxsw_core_reg_access_type_str(type));
846
847 /* Since this can be changed during emad_reg_access, read it once and
848 * use the value all the way.
849 */
850 enable_string_tlv = mlxsw_core->emad.enable_string_tlv;
851
852 skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv);
853 if (!skb)
854 return -ENOMEM;
855
856 list_add_tail(&trans->bulk_list, bulk_list);
857 trans->core = mlxsw_core;
858 trans->tx_skb = skb;
859 trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
860 trans->tx_info.is_emad = true;
861 INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
862 trans->tid = tid;
863 init_completion(&trans->completion);
864 trans->cb = cb;
865 trans->cb_priv = cb_priv;
866 trans->reg = reg;
867 trans->type = type;
868
869 mlxsw_emad_construct(skb, reg, payload, type, trans->tid,
870 enable_string_tlv);
871 mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
872
873 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
874 list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
875 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
876 err = mlxsw_emad_transmit(mlxsw_core, trans);
877 if (err)
878 goto err_out;
879 return 0;
880
881 err_out:
882 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
883 list_del_rcu(&trans->list);
884 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
885 list_del(&trans->bulk_list);
886 dev_kfree_skb(trans->tx_skb);
887 return err;
888 }
889
890 /*****************
891 * Core functions
892 *****************/
893
mlxsw_core_driver_register(struct mlxsw_driver * mlxsw_driver)894 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
895 {
896 spin_lock(&mlxsw_core_driver_list_lock);
897 list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
898 spin_unlock(&mlxsw_core_driver_list_lock);
899 return 0;
900 }
901 EXPORT_SYMBOL(mlxsw_core_driver_register);
902
mlxsw_core_driver_unregister(struct mlxsw_driver * mlxsw_driver)903 void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
904 {
905 spin_lock(&mlxsw_core_driver_list_lock);
906 list_del(&mlxsw_driver->list);
907 spin_unlock(&mlxsw_core_driver_list_lock);
908 }
909 EXPORT_SYMBOL(mlxsw_core_driver_unregister);
910
__driver_find(const char * kind)911 static struct mlxsw_driver *__driver_find(const char *kind)
912 {
913 struct mlxsw_driver *mlxsw_driver;
914
915 list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
916 if (strcmp(mlxsw_driver->kind, kind) == 0)
917 return mlxsw_driver;
918 }
919 return NULL;
920 }
921
mlxsw_core_driver_get(const char * kind)922 static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
923 {
924 struct mlxsw_driver *mlxsw_driver;
925
926 spin_lock(&mlxsw_core_driver_list_lock);
927 mlxsw_driver = __driver_find(kind);
928 spin_unlock(&mlxsw_core_driver_list_lock);
929 return mlxsw_driver;
930 }
931
/* Glue between the generic mlxfw flashing framework and this core:
 * mlxfw callbacks recover the core pointer via container_of.
 */
struct mlxsw_core_fw_info {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_core *mlxsw_core;
};

/* mlxfw callback: query a firmware component's size/alignment limits
 * via the MCQI register, then clamp them to what the MCDA data
 * register can actually carry per write.
 */
static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
					 u16 component_index, u32 *p_max_size,
					 u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, p_max_write_size);

	/* Enforce at least 4-byte alignment and the MCDA payload cap. */
	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}
957
mlxsw_core_fw_fsm_lock(struct mlxfw_dev * mlxfw_dev,u32 * fwhandle)958 static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
959 {
960 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
961 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
962 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
963 char mcc_pl[MLXSW_REG_MCC_LEN];
964 u8 control_state;
965 int err;
966
967 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
968 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
969 if (err)
970 return err;
971
972 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
973 if (control_state != MLXFW_FSM_STATE_IDLE)
974 return -EBUSY;
975
976 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 0, *fwhandle, 0);
977 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
978 }
979
/* mlxfw op: announce the component (and its size) about to be downloaded. */
static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					      u32 fwhandle, u16 component_index,
					      u32 component_size)
{
	struct mlxsw_core_fw_info *fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(fw_info->mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
992
/* mlxfw op: push one block of firmware data at @offset via MCDA. */
static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
					    u32 fwhandle, u8 *data, u16 size,
					    u32 offset)
{
	struct mlxsw_core_fw_info *fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(fw_info->mlxsw_core, MLXSW_REG(mcda), mcda_pl);
}
1004
/* mlxfw op: ask the device to verify the downloaded component. */
static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					      u32 fwhandle, u16 component_index)
{
	struct mlxsw_core_fw_info *fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(fw_info->mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
1017
/* mlxfw op: activate the newly flashed firmware image. */
static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(fw_info->mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
1028
/* mlxfw op: read back the FSM state and the latest error code. */
static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle,
					 enum mlxfw_fsm_state *fsm_state,
					 enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_core_fw_info *fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *core = fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 state;
	u8 error;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error, &state);
	*fsm_state = state;
	/* Clamp unknown device error codes to the catch-all maximum. */
	*fsm_state_err = error < MLXFW_FSM_STATE_ERR_MAX ?
			 error : MLXFW_FSM_STATE_ERR_MAX;
	return 0;
}
1051
/* mlxfw op: abort the in-flight update; best effort, result is ignored. */
static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(fw_info->mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
1062
/* mlxfw op: drop ownership of the update handle; result is ignored. */
static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(fw_info->mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
1073
/* Glue between the generic mlxfw flashing state machine and mlxsw register
 * access: every callback is implemented in terms of MCQI/MCC/MCDA registers.
 */
static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
	.component_query = mlxsw_core_fw_component_query,
	.fsm_lock = mlxsw_core_fw_fsm_lock,
	.fsm_component_update = mlxsw_core_fw_fsm_component_update,
	.fsm_block_download = mlxsw_core_fw_fsm_block_download,
	.fsm_component_verify = mlxsw_core_fw_fsm_component_verify,
	.fsm_activate = mlxsw_core_fw_fsm_activate,
	.fsm_query_state = mlxsw_core_fw_fsm_query_state,
	.fsm_cancel = mlxsw_core_fw_fsm_cancel,
	.fsm_release = mlxsw_core_fw_fsm_release,
};
1085
mlxsw_core_fw_flash(struct mlxsw_core * mlxsw_core,const struct firmware * firmware,struct netlink_ext_ack * extack)1086 static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmware *firmware,
1087 struct netlink_ext_ack *extack)
1088 {
1089 struct mlxsw_core_fw_info mlxsw_core_fw_info = {
1090 .mlxfw_dev = {
1091 .ops = &mlxsw_core_fw_mlxsw_dev_ops,
1092 .psid = mlxsw_core->bus_info->psid,
1093 .psid_size = strlen(mlxsw_core->bus_info->psid),
1094 .devlink = priv_to_devlink(mlxsw_core),
1095 },
1096 .mlxsw_core = mlxsw_core
1097 };
1098 int err;
1099
1100 mlxsw_core->fw_flash_in_progress = true;
1101 err = mlxfw_firmware_flash(&mlxsw_core_fw_info.mlxfw_dev, firmware, extack);
1102 mlxsw_core->fw_flash_in_progress = false;
1103
1104 return err;
1105 }
1106
/* Validate that the running FW is compatible with the driver and, when it is
 * too old, flash @filename.
 *
 * Returns 0 when nothing needs doing (no requirement, policy is 'flash', or
 * FW already new enough), -EINVAL on a major-version mismatch, -EAGAIN after
 * a successful flash when the currently running FW supports reset (caller is
 * expected to retry/reset), or a negative error from param lookup, firmware
 * request or flashing.
 */
static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_bus_info *mlxsw_bus_info,
				      const struct mlxsw_fw_rev *req_rev,
				      const char *filename)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	/* Minor/subminor check is done by a helper defined earlier in this
	 * file; non-zero means the running FW already satisfies the driver.
	 */
	if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
		return 0;

	dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
		rev->major, rev->minor, rev->subminor, req_rev->major,
		req_rev->minor, req_rev->subminor);
	dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);

	err = request_firmware_direct(&firmware, filename, mlxsw_bus_info->dev);
	if (err) {
		dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
		return err;
	}

	err = mlxsw_core_fw_flash(mlxsw_core, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}
1163
/* Handle a devlink flash-update request by flashing the firmware image
 * carried in @params.
 */
static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
				      struct devlink_flash_update_params *params,
				      struct netlink_ext_ack *extack)
{
	return mlxsw_core_fw_flash(mlxsw_core, params->fw, extack);
}
1170
/* Devlink param validator: 'fw_load_policy' accepts only driver/flash. */
static int
mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink,
						 u32 id,
						 union devlink_param_value val,
						 struct netlink_ext_ack *extack)
{
	switch (val.vu8) {
	case DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER:
	case DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH:
		return 0;
	default:
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}
}
1183
/* Core-owned devlink params; currently only the generic 'fw_load_policy'
 * (driverinit cmode), range-checked by the validator above it in this file.
 */
static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			      mlxsw_core_devlink_param_fw_load_policy_validate),
};
1188
mlxsw_core_fw_params_register(struct mlxsw_core * mlxsw_core)1189 static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
1190 {
1191 struct devlink *devlink = priv_to_devlink(mlxsw_core);
1192 union devlink_param_value value;
1193 int err;
1194
1195 err = devlink_params_register(devlink, mlxsw_core_fw_devlink_params,
1196 ARRAY_SIZE(mlxsw_core_fw_devlink_params));
1197 if (err)
1198 return err;
1199
1200 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
1201 devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, value);
1202 return 0;
1203 }
1204
mlxsw_core_fw_params_unregister(struct mlxsw_core * mlxsw_core)1205 static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
1206 {
1207 devlink_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
1208 ARRAY_SIZE(mlxsw_core_fw_devlink_params));
1209 }
1210
mlxsw_devlink_port_split(struct devlink * devlink,unsigned int port_index,unsigned int count,struct netlink_ext_ack * extack)1211 static int mlxsw_devlink_port_split(struct devlink *devlink,
1212 unsigned int port_index,
1213 unsigned int count,
1214 struct netlink_ext_ack *extack)
1215 {
1216 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1217
1218 if (port_index >= mlxsw_core->max_ports) {
1219 NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
1220 return -EINVAL;
1221 }
1222 if (!mlxsw_core->driver->port_split)
1223 return -EOPNOTSUPP;
1224 return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
1225 extack);
1226 }
1227
mlxsw_devlink_port_unsplit(struct devlink * devlink,unsigned int port_index,struct netlink_ext_ack * extack)1228 static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
1229 unsigned int port_index,
1230 struct netlink_ext_ack *extack)
1231 {
1232 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1233
1234 if (port_index >= mlxsw_core->max_ports) {
1235 NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
1236 return -EINVAL;
1237 }
1238 if (!mlxsw_core->driver->port_unsplit)
1239 return -EOPNOTSUPP;
1240 return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
1241 extack);
1242 }
1243
1244 static int
mlxsw_devlink_sb_pool_get(struct devlink * devlink,unsigned int sb_index,u16 pool_index,struct devlink_sb_pool_info * pool_info)1245 mlxsw_devlink_sb_pool_get(struct devlink *devlink,
1246 unsigned int sb_index, u16 pool_index,
1247 struct devlink_sb_pool_info *pool_info)
1248 {
1249 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1250 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1251
1252 if (!mlxsw_driver->sb_pool_get)
1253 return -EOPNOTSUPP;
1254 return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
1255 pool_index, pool_info);
1256 }
1257
1258 static int
mlxsw_devlink_sb_pool_set(struct devlink * devlink,unsigned int sb_index,u16 pool_index,u32 size,enum devlink_sb_threshold_type threshold_type,struct netlink_ext_ack * extack)1259 mlxsw_devlink_sb_pool_set(struct devlink *devlink,
1260 unsigned int sb_index, u16 pool_index, u32 size,
1261 enum devlink_sb_threshold_type threshold_type,
1262 struct netlink_ext_ack *extack)
1263 {
1264 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1265 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1266
1267 if (!mlxsw_driver->sb_pool_set)
1268 return -EOPNOTSUPP;
1269 return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
1270 pool_index, size, threshold_type,
1271 extack);
1272 }
1273
/* Map an embedded devlink_port back to its containing mlxsw_core_port. */
static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
1278
mlxsw_devlink_port_type_set(struct devlink_port * devlink_port,enum devlink_port_type port_type)1279 static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
1280 enum devlink_port_type port_type)
1281 {
1282 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1283 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1284 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1285
1286 if (!mlxsw_driver->port_type_set)
1287 return -EOPNOTSUPP;
1288
1289 return mlxsw_driver->port_type_set(mlxsw_core,
1290 mlxsw_core_port->local_port,
1291 port_type);
1292 }
1293
/* Devlink op: read a port's shared-buffer pool threshold via the driver. */
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core_port *core_port = __dl_port(devlink_port);
	struct mlxsw_core *core = devlink_priv(devlink_port->devlink);

	if (!core->driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(core_port))
		return -EOPNOTSUPP;
	return core->driver->sb_port_pool_get(core_port, sb_index, pool_index,
					      p_threshold);
}
1308
/* Devlink op: set a port's shared-buffer pool threshold via the driver. */
static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_core_port *core_port = __dl_port(devlink_port);
	struct mlxsw_core *core = devlink_priv(devlink_port->devlink);

	if (!core->driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(core_port))
		return -EOPNOTSUPP;
	return core->driver->sb_port_pool_set(core_port, sb_index, pool_index,
					      threshold, extack);
}
1324
1325 static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port * devlink_port,unsigned int sb_index,u16 tc_index,enum devlink_sb_pool_type pool_type,u16 * p_pool_index,u32 * p_threshold)1326 mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
1327 unsigned int sb_index, u16 tc_index,
1328 enum devlink_sb_pool_type pool_type,
1329 u16 *p_pool_index, u32 *p_threshold)
1330 {
1331 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1332 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1333 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1334
1335 if (!mlxsw_driver->sb_tc_pool_bind_get ||
1336 !mlxsw_core_port_check(mlxsw_core_port))
1337 return -EOPNOTSUPP;
1338 return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
1339 tc_index, pool_type,
1340 p_pool_index, p_threshold);
1341 }
1342
1343 static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port * devlink_port,unsigned int sb_index,u16 tc_index,enum devlink_sb_pool_type pool_type,u16 pool_index,u32 threshold,struct netlink_ext_ack * extack)1344 mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
1345 unsigned int sb_index, u16 tc_index,
1346 enum devlink_sb_pool_type pool_type,
1347 u16 pool_index, u32 threshold,
1348 struct netlink_ext_ack *extack)
1349 {
1350 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1351 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1352 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1353
1354 if (!mlxsw_driver->sb_tc_pool_bind_set ||
1355 !mlxsw_core_port_check(mlxsw_core_port))
1356 return -EOPNOTSUPP;
1357 return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
1358 tc_index, pool_type,
1359 pool_index, threshold, extack);
1360 }
1361
mlxsw_devlink_sb_occ_snapshot(struct devlink * devlink,unsigned int sb_index)1362 static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
1363 unsigned int sb_index)
1364 {
1365 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1366 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1367
1368 if (!mlxsw_driver->sb_occ_snapshot)
1369 return -EOPNOTSUPP;
1370 return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
1371 }
1372
mlxsw_devlink_sb_occ_max_clear(struct devlink * devlink,unsigned int sb_index)1373 static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
1374 unsigned int sb_index)
1375 {
1376 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1377 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1378
1379 if (!mlxsw_driver->sb_occ_max_clear)
1380 return -EOPNOTSUPP;
1381 return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
1382 }
1383
1384 static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port * devlink_port,unsigned int sb_index,u16 pool_index,u32 * p_cur,u32 * p_max)1385 mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
1386 unsigned int sb_index, u16 pool_index,
1387 u32 *p_cur, u32 *p_max)
1388 {
1389 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1390 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1391 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1392
1393 if (!mlxsw_driver->sb_occ_port_pool_get ||
1394 !mlxsw_core_port_check(mlxsw_core_port))
1395 return -EOPNOTSUPP;
1396 return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
1397 pool_index, p_cur, p_max);
1398 }
1399
1400 static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port * devlink_port,unsigned int sb_index,u16 tc_index,enum devlink_sb_pool_type pool_type,u32 * p_cur,u32 * p_max)1401 mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
1402 unsigned int sb_index, u16 tc_index,
1403 enum devlink_sb_pool_type pool_type,
1404 u32 *p_cur, u32 *p_max)
1405 {
1406 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1407 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1408 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1409
1410 if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
1411 !mlxsw_core_port_check(mlxsw_core_port))
1412 return -EOPNOTSUPP;
1413 return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
1414 sb_index, tc_index,
1415 pool_type, p_cur, p_max);
1416 }
1417
1418 static int
mlxsw_devlink_info_get(struct devlink * devlink,struct devlink_info_req * req,struct netlink_ext_ack * extack)1419 mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
1420 struct netlink_ext_ack *extack)
1421 {
1422 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1423 char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
1424 u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
1425 char mgir_pl[MLXSW_REG_MGIR_LEN];
1426 char buf[32];
1427 int err;
1428
1429 err = devlink_info_driver_name_put(req,
1430 mlxsw_core->bus_info->device_kind);
1431 if (err)
1432 return err;
1433
1434 mlxsw_reg_mgir_pack(mgir_pl);
1435 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
1436 if (err)
1437 return err;
1438 mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
1439 &fw_minor, &fw_sub_minor);
1440
1441 sprintf(buf, "%X", hw_rev);
1442 err = devlink_info_version_fixed_put(req, "hw.revision", buf);
1443 if (err)
1444 return err;
1445
1446 err = devlink_info_version_fixed_put(req,
1447 DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
1448 fw_info_psid);
1449 if (err)
1450 return err;
1451
1452 sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
1453 err = devlink_info_version_running_put(req, "fw.version", buf);
1454 if (err)
1455 return err;
1456
1457 return devlink_info_version_running_put(req,
1458 DEVLINK_INFO_VERSION_GENERIC_FW,
1459 buf);
1460 }
1461
1462 static int
mlxsw_devlink_core_bus_device_reload_down(struct devlink * devlink,bool netns_change,enum devlink_reload_action action,enum devlink_reload_limit limit,struct netlink_ext_ack * extack)1463 mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
1464 bool netns_change, enum devlink_reload_action action,
1465 enum devlink_reload_limit limit,
1466 struct netlink_ext_ack *extack)
1467 {
1468 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1469
1470 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
1471 return -EOPNOTSUPP;
1472
1473 mlxsw_core_bus_device_unregister(mlxsw_core, true);
1474 return 0;
1475 }
1476
1477 static int
mlxsw_devlink_core_bus_device_reload_up(struct devlink * devlink,enum devlink_reload_action action,enum devlink_reload_limit limit,u32 * actions_performed,struct netlink_ext_ack * extack)1478 mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action,
1479 enum devlink_reload_limit limit, u32 *actions_performed,
1480 struct netlink_ext_ack *extack)
1481 {
1482 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1483
1484 *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
1485 BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
1486 return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
1487 mlxsw_core->bus,
1488 mlxsw_core->bus_priv, true,
1489 devlink, extack);
1490 }
1491
/* Devlink op: forward a flash-update request to the core FW-flash path. */
static int mlxsw_devlink_flash_update(struct devlink *devlink,
				      struct devlink_flash_update_params *params,
				      struct netlink_ext_ack *extack)
{
	return mlxsw_core_fw_flash_update(devlink_priv(devlink), params,
					  extack);
}
1500
mlxsw_devlink_trap_init(struct devlink * devlink,const struct devlink_trap * trap,void * trap_ctx)1501 static int mlxsw_devlink_trap_init(struct devlink *devlink,
1502 const struct devlink_trap *trap,
1503 void *trap_ctx)
1504 {
1505 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1506 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1507
1508 if (!mlxsw_driver->trap_init)
1509 return -EOPNOTSUPP;
1510 return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
1511 }
1512
mlxsw_devlink_trap_fini(struct devlink * devlink,const struct devlink_trap * trap,void * trap_ctx)1513 static void mlxsw_devlink_trap_fini(struct devlink *devlink,
1514 const struct devlink_trap *trap,
1515 void *trap_ctx)
1516 {
1517 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1518 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1519
1520 if (!mlxsw_driver->trap_fini)
1521 return;
1522 mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
1523 }
1524
mlxsw_devlink_trap_action_set(struct devlink * devlink,const struct devlink_trap * trap,enum devlink_trap_action action,struct netlink_ext_ack * extack)1525 static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
1526 const struct devlink_trap *trap,
1527 enum devlink_trap_action action,
1528 struct netlink_ext_ack *extack)
1529 {
1530 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1531 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1532
1533 if (!mlxsw_driver->trap_action_set)
1534 return -EOPNOTSUPP;
1535 return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack);
1536 }
1537
1538 static int
mlxsw_devlink_trap_group_init(struct devlink * devlink,const struct devlink_trap_group * group)1539 mlxsw_devlink_trap_group_init(struct devlink *devlink,
1540 const struct devlink_trap_group *group)
1541 {
1542 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1543 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1544
1545 if (!mlxsw_driver->trap_group_init)
1546 return -EOPNOTSUPP;
1547 return mlxsw_driver->trap_group_init(mlxsw_core, group);
1548 }
1549
1550 static int
mlxsw_devlink_trap_group_set(struct devlink * devlink,const struct devlink_trap_group * group,const struct devlink_trap_policer * policer,struct netlink_ext_ack * extack)1551 mlxsw_devlink_trap_group_set(struct devlink *devlink,
1552 const struct devlink_trap_group *group,
1553 const struct devlink_trap_policer *policer,
1554 struct netlink_ext_ack *extack)
1555 {
1556 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1557 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1558
1559 if (!mlxsw_driver->trap_group_set)
1560 return -EOPNOTSUPP;
1561 return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack);
1562 }
1563
1564 static int
mlxsw_devlink_trap_policer_init(struct devlink * devlink,const struct devlink_trap_policer * policer)1565 mlxsw_devlink_trap_policer_init(struct devlink *devlink,
1566 const struct devlink_trap_policer *policer)
1567 {
1568 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1569 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1570
1571 if (!mlxsw_driver->trap_policer_init)
1572 return -EOPNOTSUPP;
1573 return mlxsw_driver->trap_policer_init(mlxsw_core, policer);
1574 }
1575
1576 static void
mlxsw_devlink_trap_policer_fini(struct devlink * devlink,const struct devlink_trap_policer * policer)1577 mlxsw_devlink_trap_policer_fini(struct devlink *devlink,
1578 const struct devlink_trap_policer *policer)
1579 {
1580 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1581 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1582
1583 if (!mlxsw_driver->trap_policer_fini)
1584 return;
1585 mlxsw_driver->trap_policer_fini(mlxsw_core, policer);
1586 }
1587
1588 static int
mlxsw_devlink_trap_policer_set(struct devlink * devlink,const struct devlink_trap_policer * policer,u64 rate,u64 burst,struct netlink_ext_ack * extack)1589 mlxsw_devlink_trap_policer_set(struct devlink *devlink,
1590 const struct devlink_trap_policer *policer,
1591 u64 rate, u64 burst,
1592 struct netlink_ext_ack *extack)
1593 {
1594 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1595 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1596
1597 if (!mlxsw_driver->trap_policer_set)
1598 return -EOPNOTSUPP;
1599 return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst,
1600 extack);
1601 }
1602
1603 static int
mlxsw_devlink_trap_policer_counter_get(struct devlink * devlink,const struct devlink_trap_policer * policer,u64 * p_drops)1604 mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
1605 const struct devlink_trap_policer *policer,
1606 u64 *p_drops)
1607 {
1608 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1609 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1610
1611 if (!mlxsw_driver->trap_policer_counter_get)
1612 return -EOPNOTSUPP;
1613 return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer,
1614 p_drops);
1615 }
1616
/* Devlink operations implemented by mlxsw core. Each entry is a thin
 * wrapper that checks for driver support and delegates to the bound
 * driver's callback of the same name.
 */
static const struct devlink_ops mlxsw_devlink_ops = {
	.reload_actions		= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
				  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_down		= mlxsw_devlink_core_bus_device_reload_down,
	.reload_up		= mlxsw_devlink_core_bus_device_reload_up,
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
	.info_get			= mlxsw_devlink_info_get,
	.flash_update			= mlxsw_devlink_flash_update,
	.trap_init			= mlxsw_devlink_trap_init,
	.trap_fini			= mlxsw_devlink_trap_fini,
	.trap_action_set		= mlxsw_devlink_trap_action_set,
	.trap_group_init		= mlxsw_devlink_trap_group_init,
	.trap_group_set			= mlxsw_devlink_trap_group_set,
	.trap_policer_init		= mlxsw_devlink_trap_policer_init,
	.trap_policer_fini		= mlxsw_devlink_trap_policer_fini,
	.trap_policer_set		= mlxsw_devlink_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_devlink_trap_policer_counter_get,
};
1647
mlxsw_core_params_register(struct mlxsw_core * mlxsw_core)1648 static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
1649 {
1650 int err;
1651
1652 err = mlxsw_core_fw_params_register(mlxsw_core);
1653 if (err)
1654 return err;
1655
1656 if (mlxsw_core->driver->params_register) {
1657 err = mlxsw_core->driver->params_register(mlxsw_core);
1658 if (err)
1659 goto err_params_register;
1660 }
1661 return 0;
1662
1663 err_params_register:
1664 mlxsw_core_fw_params_unregister(mlxsw_core);
1665 return err;
1666 }
1667
mlxsw_core_params_unregister(struct mlxsw_core * mlxsw_core)1668 static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
1669 {
1670 mlxsw_core_fw_params_unregister(mlxsw_core);
1671 if (mlxsw_core->driver->params_register)
1672 mlxsw_core->driver->params_unregister(mlxsw_core);
1673 }
1674
/* Context carried from the MFDE trap handler to the deferred work that
 * reports the event to the devlink health infrastructure.
 */
struct mlxsw_core_health_event {
	struct mlxsw_core *mlxsw_core;
	/* Copy of the MFDE register payload describing the fatal event. */
	char mfde_pl[MLXSW_REG_MFDE_LEN];
	struct work_struct work;
};
1680
mlxsw_core_health_event_work(struct work_struct * work)1681 static void mlxsw_core_health_event_work(struct work_struct *work)
1682 {
1683 struct mlxsw_core_health_event *event;
1684 struct mlxsw_core *mlxsw_core;
1685
1686 event = container_of(work, struct mlxsw_core_health_event, work);
1687 mlxsw_core = event->mlxsw_core;
1688 devlink_health_report(mlxsw_core->health.fw_fatal, "FW fatal event occurred",
1689 event->mfde_pl);
1690 kfree(event);
1691 }
1692
mlxsw_core_health_listener_func(const struct mlxsw_reg_info * reg,char * mfde_pl,void * priv)1693 static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
1694 char *mfde_pl, void *priv)
1695 {
1696 struct mlxsw_core_health_event *event;
1697 struct mlxsw_core *mlxsw_core = priv;
1698
1699 event = kmalloc(sizeof(*event), GFP_ATOMIC);
1700 if (!event)
1701 return;
1702 event->mlxsw_core = mlxsw_core;
1703 memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl));
1704 INIT_WORK(&event->work, mlxsw_core_health_event_work);
1705 mlxsw_core_schedule_work(&event->work);
1706 }
1707
/* Listener for MFDE (fatal event) traps, routed to
 * mlxsw_core_health_listener_func().
 */
static const struct mlxsw_listener mlxsw_core_health_listener =
	MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE);
1710
/* Devlink health dump callback for the fw_fatal reporter. Decodes the MFDE
 * register payload saved when the event fired (priv_ctx) into the devlink
 * fmsg. A NULL priv_ctx means a user-triggered dump, which is rejected
 * since no saved event context exists.
 */
static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
					   struct devlink_fmsg *fmsg, void *priv_ctx,
					   struct netlink_ext_ack *extack)
{
	char *mfde_pl = priv_ctx;
	char *val_str;
	u8 event_id;
	u32 val;
	int err;

	if (!priv_ctx)
		/* User-triggered dumps are not possible */
		return -EOPNOTSUPP;

	val = mlxsw_reg_mfde_irisc_id_get(mfde_pl);
	err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
	if (err)
		return err;
	err = devlink_fmsg_arr_pair_nest_start(fmsg, "event");
	if (err)
		return err;

	event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id);
	if (err)
		return err;
	/* Attach a human-readable description for known event IDs;
	 * unknown IDs are dumped with the numeric "id" only.
	 */
	switch (event_id) {
	case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
		val_str = "CR space timeout";
		break;
	case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
		val_str = "KVD insertion machine stopped";
		break;
	default:
		val_str = NULL;
	}
	if (val_str) {
		err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
		if (err)
			return err;
	}
	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		return err;

	val = mlxsw_reg_mfde_method_get(mfde_pl);
	switch (val) {
	case MLXSW_REG_MFDE_METHOD_QUERY:
		val_str = "query";
		break;
	case MLXSW_REG_MFDE_METHOD_WRITE:
		val_str = "write";
		break;
	default:
		val_str = NULL;
	}
	if (val_str) {
		err = devlink_fmsg_string_pair_put(fmsg, "method", val_str);
		if (err)
			return err;
	}

	val = mlxsw_reg_mfde_long_process_get(mfde_pl);
	err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val);
	if (err)
		return err;

	val = mlxsw_reg_mfde_command_type_get(mfde_pl);
	switch (val) {
	case MLXSW_REG_MFDE_COMMAND_TYPE_MAD:
		val_str = "mad";
		break;
	case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD:
		val_str = "emad";
		break;
	case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF:
		val_str = "cmdif";
		break;
	default:
		val_str = NULL;
	}
	if (val_str) {
		err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str);
		if (err)
			return err;
	}

	val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val);
	if (err)
		return err;

	/* Event-specific fields follow the common ones. */
	if (event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO) {
		val = mlxsw_reg_mfde_log_address_get(mfde_pl);
		err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
		if (err)
			return err;
		val = mlxsw_reg_mfde_log_id_get(mfde_pl);
		err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
		if (err)
			return err;
		val = mlxsw_reg_mfde_log_ip_get(mfde_pl);
		err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
		if (err)
			return err;
	} else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) {
		val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl);
		err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
		if (err)
			return err;
	}

	return 0;
}
1825
1826 static int
mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter * reporter,struct netlink_ext_ack * extack)1827 mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter,
1828 struct netlink_ext_ack *extack)
1829 {
1830 struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter);
1831 char mfgd_pl[MLXSW_REG_MFGD_LEN];
1832 int err;
1833
1834 /* Read the register first to make sure no other bits are changed. */
1835 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
1836 if (err)
1837 return err;
1838 mlxsw_reg_mfgd_trigger_test_set(mfgd_pl, true);
1839 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
1840 }
1841
/* fw_fatal reporter operations. No recover callback is provided; recovery
 * is expected to happen via other means (e.g. devlink reload).
 * NOTE(review): absence of .recover is inferred from this table only —
 * confirm intended recovery flow against the rest of the driver.
 */
static const struct devlink_health_reporter_ops
mlxsw_core_health_fw_fatal_ops = {
	.name = "fw_fatal",
	.dump = mlxsw_core_health_fw_fatal_dump,
	.test = mlxsw_core_health_fw_fatal_test,
};
1848
/* Enable or disable FW fatal event generation via the MFGD register. */
static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core,
					     bool enable)
{
	char mfgd_pl[MLXSW_REG_MFGD_LEN];
	int ret;

	/* Read-modify-write so that only the event mode bit changes. */
	ret = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
	if (ret)
		return ret;

	mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, enable);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
}
1862
/* Set up FW fatal health reporting: create the devlink health reporter,
 * register the MFDE event listener, then enable fatal event generation in
 * the device. A no-op for drivers that did not set fw_fatal_enabled.
 * On failure, previously completed steps are unwound in reverse order.
 */
static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_health_reporter *fw_fatal;
	int err;

	if (!mlxsw_core->driver->fw_fatal_enabled)
		return 0;

	fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
						  0, mlxsw_core);
	if (IS_ERR(fw_fatal)) {
		dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
		return PTR_ERR(fw_fatal);
	}
	mlxsw_core->health.fw_fatal = fw_fatal;

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
	if (err)
		goto err_trap_register;

	err = mlxsw_core_health_fw_fatal_config(mlxsw_core, true);
	if (err)
		goto err_fw_fatal_config;

	return 0;

err_fw_fatal_config:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
err_trap_register:
	devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
	return err;
}
1896
/* Tear down FW fatal health reporting in reverse order of
 * mlxsw_core_health_init(). No-op when the driver did not enable it.
 */
static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
{
	if (!mlxsw_core->driver->fw_fatal_enabled)
		return;

	mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
	/* Make sure there is no more event work scheduled */
	mlxsw_core_flush_owq();
	devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
}
1908
/* Core device bring-up, shared by initial probe and devlink reload.
 * When reload is true, the existing devlink instance (and its resources and
 * params) is reused; otherwise a new one is allocated here. Failures unwind
 * completed steps in reverse order via the goto chain. The caller handles
 * the -EAGAIN that FW revision validation may return after flashing new FW.
 */
static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				 const struct mlxsw_bus *mlxsw_bus,
				 void *bus_priv, bool reload,
				 struct devlink *devlink,
				 struct netlink_ext_ack *extack)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct mlxsw_res *res;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		/* Driver private area lives directly behind the core struct. */
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size,
					mlxsw_bus_info->dev);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	/* Resources are only queried from FW if the driver opted in. */
	res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core, reload);
	if (err)
		goto err_ports_init;

	/* LAG mapping table: one local-port byte per (lag, member) pair. */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = mlxsw_core_params_register(mlxsw_core);
		if (err)
			goto err_register_params;
	}

	err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, mlxsw_driver->fw_req_rev,
					 mlxsw_driver->fw_filename);
	if (err)
		goto err_fw_rev_validate;

	err = mlxsw_core_health_init(mlxsw_core);
	if (err)
		goto err_health_init;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	err = mlxsw_env_init(mlxsw_core, &mlxsw_core->env);
	if (err)
		goto err_env_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
		if (err)
			goto err_driver_init;
	}

	if (!reload) {
		devlink_set_features(devlink, DEVLINK_F_RELOAD);
		devlink_register(devlink);
	}
	return 0;

err_driver_init:
	mlxsw_env_fini(mlxsw_core->env);
err_env_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
	mlxsw_core_health_fini(mlxsw_core);
err_health_init:
err_fw_rev_validate:
	if (!reload)
		mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
err_register_resources:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	if (!reload)
		devlink_free(devlink);
err_devlink_alloc:
	return err;
}
2045
/* Register a device on a bus, retrying once after an in-band FW update.
 * __mlxsw_core_bus_device_register() returns -EAGAIN when the FW was
 * updated and the device reset, so a single extra attempt is made.
 */
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv, bool reload,
				   struct devlink *devlink,
				   struct netlink_ext_ack *extack)
{
	int attempt;
	int err;

	for (attempt = 0; attempt < 2; attempt++) {
		err = __mlxsw_core_bus_device_register(mlxsw_bus_info,
						       mlxsw_bus, bus_priv,
						       reload, devlink,
						       extack);
		if (err != -EAGAIN)
			break;
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
2071
/* Unregister a device, either on removal (!reload) or as the down phase of
 * a devlink reload. If a previous reload attempt failed mid-way, only the
 * parts that survived the failed attempt are torn down, and only on final
 * removal (the reload_fail_deinit path).
 */
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
				      bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (!reload)
		devlink_unregister(devlink);

	if (devlink_is_reload_failed(devlink)) {
		if (!reload)
			/* Only the parts that were not de-initialized in the
			 * failed reload attempt need to be de-initialized.
			 */
			goto reload_fail_deinit;
		else
			return;
	}

	/* Full teardown, reverse order of __mlxsw_core_bus_device_register(). */
	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_env_fini(mlxsw_core->env);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
	mlxsw_core_health_fini(mlxsw_core);
	if (!reload)
		mlxsw_core_params_unregister(mlxsw_core);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core, reload);
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	if (!reload)
		devlink_free(devlink);

	return;

reload_fail_deinit:
	mlxsw_core_params_unregister(mlxsw_core);
	devlink_resources_unregister(devlink, NULL);
	devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
2115
mlxsw_core_skb_transmit_busy(struct mlxsw_core * mlxsw_core,const struct mlxsw_tx_info * tx_info)2116 bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
2117 const struct mlxsw_tx_info *tx_info)
2118 {
2119 return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
2120 tx_info);
2121 }
2122 EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
2123
mlxsw_core_skb_transmit(struct mlxsw_core * mlxsw_core,struct sk_buff * skb,const struct mlxsw_tx_info * tx_info)2124 int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
2125 const struct mlxsw_tx_info *tx_info)
2126 {
2127 return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
2128 tx_info);
2129 }
2130 EXPORT_SYMBOL(mlxsw_core_skb_transmit);
2131
/* Notify the driver that an skb was transmitted, for PTP timestamping
 * purposes. Drivers that do not implement the hook are silently skipped.
 */
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				struct sk_buff *skb, u8 local_port)
{
	const struct mlxsw_driver *driver = mlxsw_core->driver;

	if (driver->ptp_transmitted)
		driver->ptp_transmitted(mlxsw_core, skb, local_port);
}
EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);
2140
__is_rx_listener_equal(const struct mlxsw_rx_listener * rxl_a,const struct mlxsw_rx_listener * rxl_b)2141 static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
2142 const struct mlxsw_rx_listener *rxl_b)
2143 {
2144 return (rxl_a->func == rxl_b->func &&
2145 rxl_a->local_port == rxl_b->local_port &&
2146 rxl_a->trap_id == rxl_b->trap_id &&
2147 rxl_a->mirror_reason == rxl_b->mirror_reason);
2148 }
2149
2150 static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core * mlxsw_core,const struct mlxsw_rx_listener * rxl)2151 __find_rx_listener_item(struct mlxsw_core *mlxsw_core,
2152 const struct mlxsw_rx_listener *rxl)
2153 {
2154 struct mlxsw_rx_listener_item *rxl_item;
2155
2156 list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
2157 if (__is_rx_listener_equal(&rxl_item->rxl, rxl))
2158 return rxl_item;
2159 }
2160 return NULL;
2161 }
2162
/* Register an RX listener. Duplicate registrations (same func, local port,
 * trap ID and mirror reason) are rejected with -EEXIST. The item is added
 * with RCU list primitives so the RX path can walk the list locklessly.
 */
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv, bool enabled)
{
	struct mlxsw_rx_listener_item *item;

	if (__find_rx_listener_item(mlxsw_core, rxl))
		return -EEXIST;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;

	item->rxl = *rxl;
	item->priv = priv;
	item->enabled = enabled;
	list_add_rcu(&item->list, &mlxsw_core->rx_listener_list);

	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
2183
mlxsw_core_rx_listener_unregister(struct mlxsw_core * mlxsw_core,const struct mlxsw_rx_listener * rxl)2184 void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
2185 const struct mlxsw_rx_listener *rxl)
2186 {
2187 struct mlxsw_rx_listener_item *rxl_item;
2188
2189 rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2190 if (!rxl_item)
2191 return;
2192 list_del_rcu(&rxl_item->list);
2193 synchronize_rcu();
2194 kfree(rxl_item);
2195 }
2196 EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
2197
2198 static void
mlxsw_core_rx_listener_state_set(struct mlxsw_core * mlxsw_core,const struct mlxsw_rx_listener * rxl,bool enabled)2199 mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
2200 const struct mlxsw_rx_listener *rxl,
2201 bool enabled)
2202 {
2203 struct mlxsw_rx_listener_item *rxl_item;
2204
2205 rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2206 if (WARN_ON(!rxl_item))
2207 return;
2208 rxl_item->enabled = enabled;
2209 }
2210
/* RX handler backing event listeners: parse the EMAD carried in the skb,
 * extract the register info and payload, and invoke the event callback.
 * Fix: the callback's first argument was a garbled "&reg" (rendered as a
 * registered-trademark glyph), which does not compile.
 */
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_reg_info reg;
	char *payload;
	char *reg_tlv;
	char *op_tlv;

	mlxsw_core = event_listener_item->mlxsw_core;
	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	mlxsw_emad_tlv_parse(skb);
	op_tlv = mlxsw_emad_op_tlv(skb);
	reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	/* TLV length is in 32-bit words and includes the TLV header word. */
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(reg_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}
2235
__is_event_listener_equal(const struct mlxsw_event_listener * el_a,const struct mlxsw_event_listener * el_b)2236 static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
2237 const struct mlxsw_event_listener *el_b)
2238 {
2239 return (el_a->func == el_b->func &&
2240 el_a->trap_id == el_b->trap_id);
2241 }
2242
2243 static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core * mlxsw_core,const struct mlxsw_event_listener * el)2244 __find_event_listener_item(struct mlxsw_core *mlxsw_core,
2245 const struct mlxsw_event_listener *el)
2246 {
2247 struct mlxsw_event_listener_item *el_item;
2248
2249 list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
2250 if (__is_event_listener_equal(&el_item->el, el))
2251 return el_item;
2252 }
2253 return NULL;
2254 }
2255
mlxsw_core_event_listener_register(struct mlxsw_core * mlxsw_core,const struct mlxsw_event_listener * el,void * priv)2256 int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
2257 const struct mlxsw_event_listener *el,
2258 void *priv)
2259 {
2260 int err;
2261 struct mlxsw_event_listener_item *el_item;
2262 const struct mlxsw_rx_listener rxl = {
2263 .func = mlxsw_core_event_listener_func,
2264 .local_port = MLXSW_PORT_DONT_CARE,
2265 .trap_id = el->trap_id,
2266 };
2267
2268 el_item = __find_event_listener_item(mlxsw_core, el);
2269 if (el_item)
2270 return -EEXIST;
2271 el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
2272 if (!el_item)
2273 return -ENOMEM;
2274 el_item->mlxsw_core = mlxsw_core;
2275 el_item->el = *el;
2276 el_item->priv = priv;
2277
2278 err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item, true);
2279 if (err)
2280 goto err_rx_listener_register;
2281
2282 /* No reason to save item if we did not manage to register an RX
2283 * listener for it.
2284 */
2285 list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
2286
2287 return 0;
2288
2289 err_rx_listener_register:
2290 kfree(el_item);
2291 return err;
2292 }
2293 EXPORT_SYMBOL(mlxsw_core_event_listener_register);
2294
mlxsw_core_event_listener_unregister(struct mlxsw_core * mlxsw_core,const struct mlxsw_event_listener * el)2295 void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
2296 const struct mlxsw_event_listener *el)
2297 {
2298 struct mlxsw_event_listener_item *el_item;
2299 const struct mlxsw_rx_listener rxl = {
2300 .func = mlxsw_core_event_listener_func,
2301 .local_port = MLXSW_PORT_DONT_CARE,
2302 .trap_id = el->trap_id,
2303 };
2304
2305 el_item = __find_event_listener_item(mlxsw_core, el);
2306 if (!el_item)
2307 return;
2308 mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl);
2309 list_del(&el_item->list);
2310 kfree(el_item);
2311 }
2312 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
2313
/* Dispatch listener registration to the RX or event variant. Event
 * listeners are always registered enabled.
 */
static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv, bool enabled)
{
	if (!listener->is_event)
		return mlxsw_core_rx_listener_register(mlxsw_core,
						       &listener->rx_listener,
						       priv, enabled);

	WARN_ON(!enabled);
	return mlxsw_core_event_listener_register(mlxsw_core,
						  &listener->event_listener,
						  priv);
}
2329
mlxsw_core_listener_unregister(struct mlxsw_core * mlxsw_core,const struct mlxsw_listener * listener,void * priv)2330 static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
2331 const struct mlxsw_listener *listener,
2332 void *priv)
2333 {
2334 if (listener->is_event)
2335 mlxsw_core_event_listener_unregister(mlxsw_core,
2336 &listener->event_listener);
2337 else
2338 mlxsw_core_rx_listener_unregister(mlxsw_core,
2339 &listener->rx_listener);
2340 }
2341
/* Register a listener and program the matching trap policy (HPKT) into the
 * device. The initial action and trap group follow the listener's
 * enabled_on_register state; mlxsw_core_trap_state_set() can change them
 * later. On HPKT write failure, the listener registration is rolled back.
 */
int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	enum mlxsw_reg_htgt_trap_group trap_group;
	enum mlxsw_reg_hpkt_action action;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
					   listener->enabled_on_register);
	if (err)
		return err;

	action = listener->enabled_on_register ? listener->en_action :
						 listener->dis_action;
	trap_group = listener->enabled_on_register ? listener->en_trap_group :
						     listener->dis_trap_group;
	mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
			    trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);
2372
mlxsw_core_trap_unregister(struct mlxsw_core * mlxsw_core,const struct mlxsw_listener * listener,void * priv)2373 void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
2374 const struct mlxsw_listener *listener,
2375 void *priv)
2376 {
2377 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2378
2379 if (!listener->is_event) {
2380 mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
2381 listener->trap_id, listener->dis_trap_group,
2382 listener->is_ctrl);
2383 mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
2384 }
2385
2386 mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
2387 }
2388 EXPORT_SYMBOL(mlxsw_core_trap_unregister);
2389
/* Enable or disable a registered non-event trap: reprogram HPKT with the
 * matching action/group, then mirror the state onto the RX listener so the
 * RX path honors it.
 */
int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
			      const struct mlxsw_listener *listener,
			      bool enabled)
{
	enum mlxsw_reg_htgt_trap_group trap_group;
	enum mlxsw_reg_hpkt_action action;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	/* Not supported for event listener */
	if (WARN_ON(listener->is_event))
		return -EINVAL;

	if (enabled) {
		action = listener->en_action;
		trap_group = listener->en_trap_group;
	} else {
		action = listener->dis_action;
		trap_group = listener->dis_trap_group;
	}

	mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
			    trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		return err;

	mlxsw_core_rx_listener_state_set(mlxsw_core, &listener->rx_listener,
					 enabled);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_trap_state_set);
2417
/* Allocate the next EMAD transaction ID. Atomic increment so concurrent
 * issuers never share a TID.
 */
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}
2422
/* Start an asynchronous EMAD register transaction. The transaction is
 * linked onto bulk_list; the caller completes it via
 * mlxsw_reg_trans_bulk_wait(). cb/cb_priv are invoked with the response.
 */
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err)
		/* Defer the free past an RCU grace period in case the
		 * transaction is still visible to lockless readers.
		 */
		kfree_rcu(trans, rcu);

	return err;
}
2447
/* Start an asynchronous register query (read) over EMAD. */
int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	enum mlxsw_core_reg_access_type type =
		MLXSW_CORE_REG_ACCESS_TYPE_QUERY;

	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload, type,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);
2458
/* Start an asynchronous register write over EMAD. */
int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	enum mlxsw_core_reg_access_type type =
		MLXSW_CORE_REG_ACCESS_TYPE_WRITE;

	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload, type,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);
2469
/* Size of the buffer used to format a human-readable EMAD failure
 * description for the devlink hwerr trace event.
 */
#define MLXSW_REG_TRANS_ERR_STRING_SIZE 256
2471
/* Wait for a single EMAD transaction to complete, log retries/errors,
 * emit a devlink hwerr trace on failure, then unlink and free the
 * transaction. Returns the transaction's result code.
 */
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE];
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	/* Stop the timeout handler before the transaction is freed. */
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err) {
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

		snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE,
			 "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid,
			 trans->reg->id, mlxsw_reg_id_str(trans->reg->id),
			 mlxsw_emad_op_tlv_status_str(trans->emad_status),
			 trans->emad_err_string ? trans->emad_err_string : "");

		trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
				    trans->emad_status, err_string);

		kfree(trans->emad_err_string);
	}

	/* Unlink from the caller's bulk list and free after a grace period. */
	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}
2509
mlxsw_reg_trans_bulk_wait(struct list_head * bulk_list)2510 int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
2511 {
2512 struct mlxsw_reg_trans *trans;
2513 struct mlxsw_reg_trans *tmp;
2514 int sum_err = 0;
2515 int err;
2516
2517 list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
2518 err = mlxsw_reg_trans_wait(trans);
2519 if (err && sum_err == 0)
2520 sum_err = err; /* first error to be returned */
2521 }
2522 return sum_err;
2523 }
2524 EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
2525
/* Access a register through the command interface (mailboxes), used while
 * EMAD is not yet operational. Builds an EMAD-formatted request in the
 * input mailbox, executes the access-register command (with bounded retry
 * on -EAGAIN status) and copies the response payload back to the caller.
 */
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	bool reset_ok;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	/* The request mailbox carries an op TLV followed by a register TLV,
	 * mirroring the EMAD wire format.
	 */
	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	/* There is a special treatment needed for MRSR (reset) register.
	 * The command interface will return error after the command
	 * is executed, so tell the lower layer to expect it
	 * and cope accordingly.
	 */
	reset_ok = reg->id == MLXSW_REG_MRSR_ID;

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	/* On success, hand the device's response payload back to the caller. */
	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}
2588
/* Transaction completion callback for synchronous register access:
 * cb_priv carries the caller's payload buffer, which receives a copy of
 * the response.
 */
static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *dest = (char *) cb_priv;

	memcpy(dest, payload, payload_len);
}
2597
mlxsw_core_reg_access(struct mlxsw_core * mlxsw_core,const struct mlxsw_reg_info * reg,char * payload,enum mlxsw_core_reg_access_type type)2598 static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
2599 const struct mlxsw_reg_info *reg,
2600 char *payload,
2601 enum mlxsw_core_reg_access_type type)
2602 {
2603 LIST_HEAD(bulk_list);
2604 int err;
2605
2606 /* During initialization EMAD interface is not available to us,
2607 * so we default to command interface. We switch to EMAD interface
2608 * after setting the appropriate traps.
2609 */
2610 if (!mlxsw_core->emad.use_emad)
2611 return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
2612 payload, type);
2613
2614 err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
2615 payload, type, &bulk_list,
2616 mlxsw_core_reg_access_cb,
2617 (unsigned long) payload);
2618 if (err)
2619 return err;
2620 return mlxsw_reg_trans_bulk_wait(&bulk_list);
2621 }
2622
/* Synchronously read a register into payload. */
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);
2630
/* Synchronously write payload to a register. */
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
2638
mlxsw_core_skb_receive(struct mlxsw_core * mlxsw_core,struct sk_buff * skb,struct mlxsw_rx_info * rx_info)2639 void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
2640 struct mlxsw_rx_info *rx_info)
2641 {
2642 struct mlxsw_rx_listener_item *rxl_item;
2643 const struct mlxsw_rx_listener *rxl;
2644 u8 local_port;
2645 bool found = false;
2646
2647 if (rx_info->is_lag) {
2648 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
2649 __func__, rx_info->u.lag_id,
2650 rx_info->trap_id);
2651 /* Upper layer does not care if the skb came from LAG or not,
2652 * so just get the local_port for the lag port and push it up.
2653 */
2654 local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
2655 rx_info->u.lag_id,
2656 rx_info->lag_port_index);
2657 } else {
2658 local_port = rx_info->u.sys_port;
2659 }
2660
2661 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
2662 __func__, local_port, rx_info->trap_id);
2663
2664 if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
2665 (local_port >= mlxsw_core->max_ports))
2666 goto drop;
2667
2668 rcu_read_lock();
2669 list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
2670 rxl = &rxl_item->rxl;
2671 if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
2672 rxl->local_port == local_port) &&
2673 rxl->trap_id == rx_info->trap_id &&
2674 rxl->mirror_reason == rx_info->mirror_reason) {
2675 if (rxl_item->enabled)
2676 found = true;
2677 break;
2678 }
2679 }
2680 if (!found) {
2681 rcu_read_unlock();
2682 goto drop;
2683 }
2684
2685 rxl->func(skb, local_port, rxl_item->priv);
2686 rcu_read_unlock();
2687 return;
2688
2689 drop:
2690 dev_kfree_skb(skb);
2691 }
2692 EXPORT_SYMBOL(mlxsw_core_skb_receive);
2693
mlxsw_core_lag_mapping_index(struct mlxsw_core * mlxsw_core,u16 lag_id,u8 port_index)2694 static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
2695 u16 lag_id, u8 port_index)
2696 {
2697 return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
2698 port_index;
2699 }
2700
mlxsw_core_lag_mapping_set(struct mlxsw_core * mlxsw_core,u16 lag_id,u8 port_index,u8 local_port)2701 void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
2702 u16 lag_id, u8 port_index, u8 local_port)
2703 {
2704 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
2705 lag_id, port_index);
2706
2707 mlxsw_core->lag.mapping[index] = local_port;
2708 }
2709 EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
2710
mlxsw_core_lag_mapping_get(struct mlxsw_core * mlxsw_core,u16 lag_id,u8 port_index)2711 u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
2712 u16 lag_id, u8 port_index)
2713 {
2714 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
2715 lag_id, port_index);
2716
2717 return mlxsw_core->lag.mapping[index];
2718 }
2719 EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
2720
mlxsw_core_lag_mapping_clear(struct mlxsw_core * mlxsw_core,u16 lag_id,u8 local_port)2721 void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
2722 u16 lag_id, u8 local_port)
2723 {
2724 int i;
2725
2726 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
2727 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
2728 lag_id, i);
2729
2730 if (mlxsw_core->lag.mapping[index] == local_port)
2731 mlxsw_core->lag.mapping[index] = 0;
2732 }
2733 }
2734 EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
2735
mlxsw_core_res_valid(struct mlxsw_core * mlxsw_core,enum mlxsw_res_id res_id)2736 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
2737 enum mlxsw_res_id res_id)
2738 {
2739 return mlxsw_res_valid(&mlxsw_core->res, res_id);
2740 }
2741 EXPORT_SYMBOL(mlxsw_core_res_valid);
2742
mlxsw_core_res_get(struct mlxsw_core * mlxsw_core,enum mlxsw_res_id res_id)2743 u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
2744 enum mlxsw_res_id res_id)
2745 {
2746 return mlxsw_res_get(&mlxsw_core->res, res_id);
2747 }
2748 EXPORT_SYMBOL(mlxsw_core_res_get);
2749
__mlxsw_core_port_init(struct mlxsw_core * mlxsw_core,u8 local_port,enum devlink_port_flavour flavour,u32 port_number,bool split,u32 split_port_subnumber,bool splittable,u32 lanes,const unsigned char * switch_id,unsigned char switch_id_len)2750 static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
2751 enum devlink_port_flavour flavour,
2752 u32 port_number, bool split,
2753 u32 split_port_subnumber,
2754 bool splittable, u32 lanes,
2755 const unsigned char *switch_id,
2756 unsigned char switch_id_len)
2757 {
2758 struct devlink *devlink = priv_to_devlink(mlxsw_core);
2759 struct mlxsw_core_port *mlxsw_core_port =
2760 &mlxsw_core->ports[local_port];
2761 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2762 struct devlink_port_attrs attrs = {};
2763 int err;
2764
2765 attrs.split = split;
2766 attrs.lanes = lanes;
2767 attrs.splittable = splittable;
2768 attrs.flavour = flavour;
2769 attrs.phys.port_number = port_number;
2770 attrs.phys.split_subport_number = split_port_subnumber;
2771 memcpy(attrs.switch_id.id, switch_id, switch_id_len);
2772 attrs.switch_id.id_len = switch_id_len;
2773 mlxsw_core_port->local_port = local_port;
2774 devlink_port_attrs_set(devlink_port, &attrs);
2775 err = devlink_port_register(devlink, devlink_port, local_port);
2776 if (err)
2777 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
2778 return err;
2779 }
2780
__mlxsw_core_port_fini(struct mlxsw_core * mlxsw_core,u8 local_port)2781 static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
2782 {
2783 struct mlxsw_core_port *mlxsw_core_port =
2784 &mlxsw_core->ports[local_port];
2785 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2786
2787 devlink_port_unregister(devlink_port);
2788 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
2789 }
2790
mlxsw_core_port_init(struct mlxsw_core * mlxsw_core,u8 local_port,u32 port_number,bool split,u32 split_port_subnumber,bool splittable,u32 lanes,const unsigned char * switch_id,unsigned char switch_id_len)2791 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
2792 u32 port_number, bool split,
2793 u32 split_port_subnumber,
2794 bool splittable, u32 lanes,
2795 const unsigned char *switch_id,
2796 unsigned char switch_id_len)
2797 {
2798 int err;
2799
2800 err = __mlxsw_core_port_init(mlxsw_core, local_port,
2801 DEVLINK_PORT_FLAVOUR_PHYSICAL,
2802 port_number, split, split_port_subnumber,
2803 splittable, lanes,
2804 switch_id, switch_id_len);
2805 if (err)
2806 return err;
2807
2808 atomic_inc(&mlxsw_core->active_ports_count);
2809 return 0;
2810 }
2811 EXPORT_SYMBOL(mlxsw_core_port_init);
2812
mlxsw_core_port_fini(struct mlxsw_core * mlxsw_core,u8 local_port)2813 void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
2814 {
2815 atomic_dec(&mlxsw_core->active_ports_count);
2816
2817 __mlxsw_core_port_fini(mlxsw_core, local_port);
2818 }
2819 EXPORT_SYMBOL(mlxsw_core_port_fini);
2820
mlxsw_core_cpu_port_init(struct mlxsw_core * mlxsw_core,void * port_driver_priv,const unsigned char * switch_id,unsigned char switch_id_len)2821 int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
2822 void *port_driver_priv,
2823 const unsigned char *switch_id,
2824 unsigned char switch_id_len)
2825 {
2826 struct mlxsw_core_port *mlxsw_core_port =
2827 &mlxsw_core->ports[MLXSW_PORT_CPU_PORT];
2828 int err;
2829
2830 err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
2831 DEVLINK_PORT_FLAVOUR_CPU,
2832 0, false, 0, false, 0,
2833 switch_id, switch_id_len);
2834 if (err)
2835 return err;
2836
2837 mlxsw_core_port->port_driver_priv = port_driver_priv;
2838 return 0;
2839 }
2840 EXPORT_SYMBOL(mlxsw_core_cpu_port_init);
2841
mlxsw_core_cpu_port_fini(struct mlxsw_core * mlxsw_core)2842 void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
2843 {
2844 __mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT);
2845 }
2846 EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
2847
mlxsw_core_port_eth_set(struct mlxsw_core * mlxsw_core,u8 local_port,void * port_driver_priv,struct net_device * dev)2848 void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
2849 void *port_driver_priv, struct net_device *dev)
2850 {
2851 struct mlxsw_core_port *mlxsw_core_port =
2852 &mlxsw_core->ports[local_port];
2853 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2854
2855 mlxsw_core_port->port_driver_priv = port_driver_priv;
2856 devlink_port_type_eth_set(devlink_port, dev);
2857 }
2858 EXPORT_SYMBOL(mlxsw_core_port_eth_set);
2859
mlxsw_core_port_ib_set(struct mlxsw_core * mlxsw_core,u8 local_port,void * port_driver_priv)2860 void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
2861 void *port_driver_priv)
2862 {
2863 struct mlxsw_core_port *mlxsw_core_port =
2864 &mlxsw_core->ports[local_port];
2865 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2866
2867 mlxsw_core_port->port_driver_priv = port_driver_priv;
2868 devlink_port_type_ib_set(devlink_port, NULL);
2869 }
2870 EXPORT_SYMBOL(mlxsw_core_port_ib_set);
2871
mlxsw_core_port_clear(struct mlxsw_core * mlxsw_core,u8 local_port,void * port_driver_priv)2872 void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
2873 void *port_driver_priv)
2874 {
2875 struct mlxsw_core_port *mlxsw_core_port =
2876 &mlxsw_core->ports[local_port];
2877 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2878
2879 mlxsw_core_port->port_driver_priv = port_driver_priv;
2880 devlink_port_type_clear(devlink_port);
2881 }
2882 EXPORT_SYMBOL(mlxsw_core_port_clear);
2883
mlxsw_core_port_type_get(struct mlxsw_core * mlxsw_core,u8 local_port)2884 enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
2885 u8 local_port)
2886 {
2887 struct mlxsw_core_port *mlxsw_core_port =
2888 &mlxsw_core->ports[local_port];
2889 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2890
2891 return devlink_port->type;
2892 }
2893 EXPORT_SYMBOL(mlxsw_core_port_type_get);
2894
2895
2896 struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core * mlxsw_core,u8 local_port)2897 mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
2898 u8 local_port)
2899 {
2900 struct mlxsw_core_port *mlxsw_core_port =
2901 &mlxsw_core->ports[local_port];
2902 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2903
2904 return devlink_port;
2905 }
2906 EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
2907
mlxsw_core_port_is_xm(const struct mlxsw_core * mlxsw_core,u8 local_port)2908 bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port)
2909 {
2910 const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
2911 int i;
2912
2913 for (i = 0; i < bus_info->xm_local_ports_count; i++)
2914 if (bus_info->xm_local_ports[i] == local_port)
2915 return true;
2916 return false;
2917 }
2918 EXPORT_SYMBOL(mlxsw_core_port_is_xm);
2919
mlxsw_core_env(const struct mlxsw_core * mlxsw_core)2920 struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
2921 {
2922 return mlxsw_core->env;
2923 }
2924
mlxsw_core_buf_dump_dbg(struct mlxsw_core * mlxsw_core,const char * buf,size_t size)2925 static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
2926 const char *buf, size_t size)
2927 {
2928 __be32 *m = (__be32 *) buf;
2929 int i;
2930 int count = size / sizeof(__be32);
2931
2932 for (i = count - 1; i >= 0; i--)
2933 if (m[i])
2934 break;
2935 i++;
2936 count = i ? i : 1;
2937 for (i = 0; i < count; i += 4)
2938 dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
2939 i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
2940 be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
2941 }
2942
mlxsw_cmd_exec(struct mlxsw_core * mlxsw_core,u16 opcode,u8 opcode_mod,u32 in_mod,bool out_mbox_direct,bool reset_ok,char * in_mbox,size_t in_mbox_size,char * out_mbox,size_t out_mbox_size)2943 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
2944 u32 in_mod, bool out_mbox_direct, bool reset_ok,
2945 char *in_mbox, size_t in_mbox_size,
2946 char *out_mbox, size_t out_mbox_size)
2947 {
2948 u8 status;
2949 int err;
2950
2951 BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
2952 if (!mlxsw_core->bus->cmd_exec)
2953 return -EOPNOTSUPP;
2954
2955 dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
2956 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
2957 if (in_mbox) {
2958 dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
2959 mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
2960 }
2961
2962 err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
2963 opcode_mod, in_mod, out_mbox_direct,
2964 in_mbox, in_mbox_size,
2965 out_mbox, out_mbox_size, &status);
2966
2967 if (!err && out_mbox) {
2968 dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
2969 mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
2970 }
2971
2972 if (reset_ok && err == -EIO &&
2973 status == MLXSW_CMD_STATUS_RUNNING_RESET) {
2974 err = 0;
2975 } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
2976 dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
2977 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
2978 in_mod, status, mlxsw_cmd_status_str(status));
2979 } else if (err == -ETIMEDOUT) {
2980 dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
2981 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
2982 in_mod);
2983 }
2984
2985 return err;
2986 }
2987 EXPORT_SYMBOL(mlxsw_cmd_exec);
2988
mlxsw_core_schedule_dw(struct delayed_work * dwork,unsigned long delay)2989 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
2990 {
2991 return queue_delayed_work(mlxsw_wq, dwork, delay);
2992 }
2993 EXPORT_SYMBOL(mlxsw_core_schedule_dw);
2994
mlxsw_core_schedule_work(struct work_struct * work)2995 bool mlxsw_core_schedule_work(struct work_struct *work)
2996 {
2997 return queue_work(mlxsw_owq, work);
2998 }
2999 EXPORT_SYMBOL(mlxsw_core_schedule_work);
3000
mlxsw_core_flush_owq(void)3001 void mlxsw_core_flush_owq(void)
3002 {
3003 flush_workqueue(mlxsw_owq);
3004 }
3005 EXPORT_SYMBOL(mlxsw_core_flush_owq);
3006
mlxsw_core_kvd_sizes_get(struct mlxsw_core * mlxsw_core,const struct mlxsw_config_profile * profile,u64 * p_single_size,u64 * p_double_size,u64 * p_linear_size)3007 int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
3008 const struct mlxsw_config_profile *profile,
3009 u64 *p_single_size, u64 *p_double_size,
3010 u64 *p_linear_size)
3011 {
3012 struct mlxsw_driver *driver = mlxsw_core->driver;
3013
3014 if (!driver->kvd_sizes_get)
3015 return -EINVAL;
3016
3017 return driver->kvd_sizes_get(mlxsw_core, profile,
3018 p_single_size, p_double_size,
3019 p_linear_size);
3020 }
3021 EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
3022
mlxsw_core_resources_query(struct mlxsw_core * mlxsw_core,char * mbox,struct mlxsw_res * res)3023 int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
3024 struct mlxsw_res *res)
3025 {
3026 int index, i;
3027 u64 data;
3028 u16 id;
3029 int err;
3030
3031 if (!res)
3032 return 0;
3033
3034 mlxsw_cmd_mbox_zero(mbox);
3035
3036 for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
3037 index++) {
3038 err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index);
3039 if (err)
3040 return err;
3041
3042 for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
3043 id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
3044 data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
3045
3046 if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
3047 return 0;
3048
3049 mlxsw_res_parse(res, id, data);
3050 }
3051 }
3052
3053 /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get
3054 * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW.
3055 */
3056 return -EIO;
3057 }
3058 EXPORT_SYMBOL(mlxsw_core_resources_query);
3059
mlxsw_core_read_frc_h(struct mlxsw_core * mlxsw_core)3060 u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core)
3061 {
3062 return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv);
3063 }
3064 EXPORT_SYMBOL(mlxsw_core_read_frc_h);
3065
mlxsw_core_read_frc_l(struct mlxsw_core * mlxsw_core)3066 u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
3067 {
3068 return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv);
3069 }
3070 EXPORT_SYMBOL(mlxsw_core_read_frc_l);
3071
mlxsw_core_emad_string_tlv_enable(struct mlxsw_core * mlxsw_core)3072 void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
3073 {
3074 mlxsw_core->emad.enable_string_tlv = true;
3075 }
3076 EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable);
3077
mlxsw_core_module_init(void)3078 static int __init mlxsw_core_module_init(void)
3079 {
3080 int err;
3081
3082 mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
3083 if (!mlxsw_wq)
3084 return -ENOMEM;
3085 mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
3086 mlxsw_core_driver_name);
3087 if (!mlxsw_owq) {
3088 err = -ENOMEM;
3089 goto err_alloc_ordered_workqueue;
3090 }
3091 return 0;
3092
3093 err_alloc_ordered_workqueue:
3094 destroy_workqueue(mlxsw_wq);
3095 return err;
3096 }
3097
mlxsw_core_module_exit(void)3098 static void __exit mlxsw_core_module_exit(void)
3099 {
3100 destroy_workqueue(mlxsw_owq);
3101 destroy_workqueue(mlxsw_wq);
3102 }
3103
3104 module_init(mlxsw_core_module_init);
3105 module_exit(mlxsw_core_module_exit);
3106
3107 MODULE_LICENSE("Dual BSD/GPL");
3108 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
3109 MODULE_DESCRIPTION("Mellanox switch device core driver");
3110