// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_rdma.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
#define SPQ_BLOCK_SLEEP_MS		(5)
/***************************************************************************
 * Blocking Implementation (BLOCK/EBLOCK mode)
 ***************************************************************************/
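/* Completion callback used by the blocking modes: record the FW return
 * code and signal the waiting thread. The smp_store_release() below pairs
 * with the smp_load_acquire() in __qed_spq_block().
 */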
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

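/* Poll for the blocking completion to arrive. Depending on
 * @sleep_between_iter, either busy-wait in SPQ_BLOCK_DELAY_US steps (up to
 * SPQ_BLOCK_DELAY_MAX_ITER iterations) or sleep in SPQ_BLOCK_SLEEP_MS
 * steps (up to SPQ_BLOCK_SLEEP_MAX_ITER iterations). Returns 0 once the
 * completion has arrived, or -EBUSY on timeout.
 */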
static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we received the completion update; pairs with the
		 * smp_store_release() in qed_spq_blocking_cb().
		 */
		if (smp_load_acquire(&comp_done->done) == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

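/* Wait for a blocking ramrod to complete: an optional quick busy-poll
 * first, then a sleeping poll, and as a last resort an MCP drain request
 * followed by one more sleeping poll. If the ramrod is still stuck, notify
 * the HW-error handler and give up.
 */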
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	struct qed_ptt *p_ptt;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT\n");
		return -EAGAIN;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_ptt);
	qed_ptt_release(p_hwfn, p_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;
	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
			  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
			  le32_to_cpu(p_ent->elem.hdr.cid),
			  p_ent->elem.hdr.cmd_id,
			  p_ent->elem.hdr.protocol_id,
			  le16_to_cpu(p_ent->elem.hdr.echo));
	qed_ptt_release(p_hwfn, p_ptt);

	return -EBUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
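/* Prepare an SPQ entry for posting: clear its flags and, for the blocking
 * modes (BLOCK/EBLOCK), install qed_spq_blocking_cb() as the completion
 * callback so qed_spq_block() has a cookie to poll on.
 */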
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
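/* Program the CORE connection context backing the SPQ: enable the relevant
 * XSTORM aggregation flags, assign the loopback physical queue and point
 * the context at the SPQ chain's DMA base address.
 */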
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	struct core_conn_context *p_cxt;
	struct qed_cxt_info cxt_info;
	u16 physical_q;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

	p_cxt->xstorm_st_context.spq_base_addr.lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_addr.hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);
}

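/* Produce a single element on the SPQ chain and ring the doorbell. The
 * producer index is stored in the entry's 'echo' field, which lets the
 * matching EQ completion be correlated later in qed_spq_completion().
 */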
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset,
		   p_spq->cid,
		   p_db_data->params,
		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/
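/* Dispatch an asynchronous EQ entry to the callback registered for its
 * protocol via qed_spq_register_async_cb(); events for protocols without a
 * registered callback are reported and rejected with -EINVAL.
 */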
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	qed_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return 0;
}

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
				    USTORM_EQE_CONS, p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);
}

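/* Slow-path SB handler for the event queue: consume EQ entries up to a
 * snapshot of the FW consumer, routing async events to their protocol
 * callbacks and everything else to qed_spq_completion(), then acknowledge
 * the processed entries via qed_eq_prod_update() and try to post any
 * pending SPQ requests.
 */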
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	/* Attempt to post pending requests */
	spin_lock_bh(&p_hwfn->p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_hwfn->p_spq->lock);

	return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_chain_init_params params = {
		.mode = QED_CHAIN_MODE_PBL,
		.intended_use = QED_CHAIN_USE_TO_PRODUCE,
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.num_elems = num_elem,
		.elem_size = sizeof(union event_ring_element),
	};
	struct qed_eq *p_eq;
	int ret;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return -ENOMEM;

	ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params);
	if (ret) {
		DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return 0;

eq_allocate_fail:
	kfree(p_eq);

	return ret;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

	kfree(p_hwfn->p_eq);
	p_hwfn->p_eq = NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
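/* (Re)initialize an already-allocated SPQ: rebuild the free pool from the
 * DMA-coherent entry array, reset the statistics and completion bitmap,
 * acquire the CORE CID, program the connection context, prepare the
 * doorbell data and register the doorbell with the recovery mechanism.
 */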
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	struct core_db_data *p_db_data;
	void __iomem *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	int rc;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	memset(p_db_data, 0, sizeof(*p_db_data));
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_chain_init_params params = {
		.mode = QED_CHAIN_MODE_SINGLE,
		.intended_use = QED_CHAIN_USE_TO_PRODUCE,
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.elem_size = sizeof(struct slow_path_element),
	};
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;
	int ret;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	ret = qed_chain_alloc(cdev, &p_spq->chain, &params);
	if (ret) {
		DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n");
		goto spq_chain_alloc_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	ret = -ENOMEM;

	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_alloc_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_alloc_fail:
	qed_chain_free(cdev, &p_spq->chain);
spq_chain_alloc_fail:
	kfree(p_spq);

	return ret;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	void __iomem *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	p_hwfn->p_spq = NULL;
}

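/* Take an entry from the free pool; if the pool is exhausted, allocate a
 * fresh entry destined for the unlimited_pending list instead.
 */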
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * qed_spq_add_entry() - Add a new entry to the pending list.
 * Should be used while lock is being held.
 *
 * @p_hwfn: HW device data.
 * @p_ent: An entry to add.
 * @priority: Desired priority.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * Return: zero on success, -EINVAL on invalid @priority.
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible for freeing the allocated
			 * p_ent.
			 */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);
			else
				p_ent->post_ent = p_en2;

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);

		list_move_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

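/* Refill the regular pending list from unlimited_pending while free-pool
 * entries are available, then post the pending list to HW, keeping
 * SPQ_HIGH_PRI_RESERVE_DEFAULT ring elements in reserve for high-priority
 * ramrods. Must be called with the SPQ lock held.
 */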
int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
				       u8 *fw_return_code)
{
	if (!fw_return_code)
		return;

	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
		*fw_return_code = RDMA_RETURN_OK;
}

/* Avoid overwriting of SPQ entries when getting out-of-order completions,
 * by marking the completions in a bitmap and increasing the chain consumer
 * only for the first successive completed entries.
 */
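/* Worked example (with an illustrative four-entry ring): if echo 2
 * completes while comp_bitmap_idx is still 1, only bit 2 gets marked; once
 * echo 1 completes as well, both bits are cleared in order and the chain
 * element is returned twice, so ring slots are never reused out of order.
 */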
static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
	struct qed_spq *p_spq = p_hwfn->p_spq;

	__set_bit(pos, p_spq->p_comp_bitmap);
	while (test_bit(p_spq->comp_bitmap_idx, p_spq->p_comp_bitmap)) {
		__clear_bit(p_spq->comp_bitmap_idx, p_spq->p_comp_bitmap);
		p_spq->comp_bitmap_idx++;
		qed_chain_return_produced(&p_spq->chain);
	}
}

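/**
 * qed_spq_post() - Post a single SPQ entry and optionally wait for it.
 *
 * @p_hwfn: HW device data.
 * @p_ent: Entry obtained from qed_spq_get_entry() and filled by the caller.
 * @fw_return_code: Optional output for the FW return code.
 *
 * Queues the entry under the SPQ lock and attempts to post all pending
 * requests. For QED_SPQ_MODE_EBLOCK the call then blocks until the EQ
 * completion arrives and performs the cleanup itself, since the completion
 * path deliberately skips it for that mode.
 *
 * Return: zero on success, negative errno otherwise.
 */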
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;
	bool eblock;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	if (p_hwfn->cdev->recov_in_prog) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);

		/* Let the flow complete w/o any error handling */
		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
		return 0;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Check if entry is in block mode before qed_spq_add_entry,
	 * which might kfree p_ent.
	 */
	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (eblock) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			struct qed_spq_entry *p_post_ent = p_ent->post_ent;

			kfree(p_ent);

			/* Return the entry which was actually posted */
			p_ent = p_post_ent;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

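/* Match an EQ completion (by its echo value) against the
 * completion_pending list, update the out-of-order bitmap, and invoke the
 * entry's completion callback outside the SPQ lock, since the callback may
 * post further ramrods.
 */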
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			list_del(&p_ent->list);
			qed_spq_comp_bmap_update(p_hwfn, echo);
			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for returning its own entry into the
		 * free list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	return 0;
}

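/* ConsQ: this file only allocates, resets and frees the ConsQ chain; no
 * entries are produced or consumed here.
 */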
#define QED_SPQ_CONSQ_ELEM_SIZE	0x80

int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_chain_init_params params = {
		.mode = QED_CHAIN_MODE_PBL,
		.intended_use = QED_CHAIN_USE_TO_PRODUCE,
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.num_elems = QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE,
		.elem_size = QED_SPQ_CONSQ_ELEM_SIZE,
	};
	struct qed_consq *p_consq;
	int ret;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return -ENOMEM;

	/* Allocate and initialize ConsQ chain */
	ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params);
	if (ret) {
		DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain\n");
		goto consq_alloc_fail;
	}

	p_hwfn->p_consq = p_consq;

	return 0;

consq_alloc_fail:
	kfree(p_consq);

	return ret;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

	kfree(p_hwfn->p_consq);
	p_hwfn->p_consq = NULL;
}