// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
			       u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}
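
/*
 * Layout sketch (illustrative, derived from the FIELD_PREP calls above):
 * each fragment occupies 16 bytes of the WQE. For an SGE with
 * addr = 0x1000, length = 0x40, lkey = 0xabcd and valid = 1:
 *
 *   qword at offset     = FRAG_TO(0x1000)
 *   qword at offset + 8 = VALID(1) | FRAG_LEN(0x40) | FRAG_STAG(0xabcd)
 *
 * A NULL sge still stamps the valid bit so the padded fragment keeps a
 * consistent polarity for the hardware.
 */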

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
				     struct ib_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8, 0);
	}
}
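
/*
 * Note (illustrative): unlike the gen2 variant above, gen1 fragments
 * carry no per-fragment valid bit, so @valid is accepted only for
 * interface compatibility and a NULL sge is simply zeroed out.
 */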

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)
{
	u64 hdr;
	__le64 *wqe;
	u32 wqe_idx;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return IRDMA_ERR_PARAM;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	/* make sure WQE is written before valid bit is set */
	dma_wmb();

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	__le64 *wqe;
	u32 wqe_idx;

	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		wqe = qp->sq_base[wqe_idx].elem;
		if (wqe_idx)
			memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
		else
			memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
	}
}
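
/*
 * Sizing note (illustrative): 128 SQ entries at IRDMA_QP_WQE_MIN_SIZE
 * (32 bytes) each is exactly 0x1000 bytes, which is why a single 4KB
 * memset covers the chunk ahead. The fill pattern is the complement of
 * the valid-bit polarity those slots will be consumed with, so stale
 * WQEs never appear valid; the wrap case (wqe_idx == 0) inverts the
 * pattern because swqe_polarity toggles each time the ring wraps.
 */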

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	mb();

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, 0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != qp->initial_ring.head) {
		if (qp->push_dropped) {
			writel(qp->qp_id, qp->wqe_alloc_db);
			qp->push_dropped = false;
		} else if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}

/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
	qp->initial_ring.head = qp->sq_ring.head;
	qp->push_mode = true;
	qp->push_dropped = false;
}

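/**
 * irdma_qp_push_wqe - copy a wqe into the push page and ring the push doorbell
 * @qp: hw qp ptr
 * @wqe: wqe to copy
 * @quanta: size of WR in quanta
 * @wqe_idx: wqe index
 * @post_sq: flag to post sq
 */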
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		       u32 wqe_idx, bool post_sq)
{
	__le64 *push;

	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
		    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
	    !qp->push_mode) {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	} else {
		push = (__le64 *)((uintptr_t)qp->push_wqe +
				  (wqe_idx & 0x7) * 0x20);
		memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
		irdma_qp_ring_push_db(qp, wqe_idx);
	}
}
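
/*
 * Push-slot math (illustrative): the push page is treated as eight
 * 32-byte quanta, so a WQE at ring index wqe_idx lands at byte offset
 * (wqe_idx & 0x7) * 0x20 within the page; e.g. wqe_idx = 13 maps to
 * slot 5 (offset 160), and memcpy moves quanta * 32 bytes. The push
 * doorbell then reports the index in 8-quanta units (wqe_idx >> 3).
 */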

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u32 nop_wqe_idx;
	u16 avail_quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
		       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
		       qp->uk_attrs->max_hw_sq_chunk);
	if (quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (quanta + avail_quanta >
			IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
		if (qp->push_db && info->push_wqe)
			irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
					  avail_quanta, nop_wqe_idx, true);
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;

	return wqe;
}
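
/*
 * Padding sketch (illustrative): with max_hw_sq_chunk = 8 and the SQ
 * head at index 14, avail_quanta = 8 - (14 % 8) = 2. A 3-quanta WR may
 * not straddle the chunk boundary, so two one-quantum NOPs are posted
 * first and the WR starts at index 16, the beginning of the next chunk.
 */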

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	enum irdma_status_code ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is no of 32 byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
					   struct irdma_post_sq_info *info,
					   bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	enum irdma_status_code ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
					  struct irdma_post_sq_info *info,
					  bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	enum irdma_status_code ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	addl_frag_cnt = op_info->num_lo_sges > 1 ?
			(op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE,
			 (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
				     struct irdma_post_sq_info *info,
				     bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	enum irdma_status_code ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting fragment
 * @op_info: info for setting bind wqe values
 */
static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
					struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @dest: pointer to wqe
 * @src: pointer to inline data
 * @len: length of inline data to copy
 * @polarity: compatibility parameter
 */
static void irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
					 u8 polarity)
{
	if (len <= 16) {
		memcpy(dest, src, len);
	} else {
		memcpy(dest, src, 16);
		src += 16;
		dest = dest + 32;
		memcpy(dest, src, len - 16);
	}
}
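
/*
 * Gen1 inline layout (illustrative): for len = 40, bytes 0-15 of the
 * data land in the first 32-byte quantum and the remaining 24 bytes
 * start at dest + 32, i.e. in the second quantum, skipping the
 * quadwords of the first quantum that hold the WQE header fields.
 */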

/**
 * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void irdma_set_mw_bind_wqe(__le64 *wqe,
				  struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @dest: pointer to wqe
 * @src: pointer to inline data
 * @len: length of inline data to copy
 * @polarity: polarity of wqe valid bit
 */
static void irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 copy_size;

	dest += 8;
	if (len <= 8) {
		memcpy(dest, src, len);
		return;
	}

	*((u64 *)dest) = *((u64 *)src);
	len -= 8;
	src += 8;
	dest += 24; /* point to additional 32 byte quanta */

	while (len) {
		copy_size = len < 31 ? len : 31;
		memcpy(dest, src, copy_size);
		*(dest + 31) = inline_valid;
		len -= copy_size;
		dest += 32;
		src += copy_size;
	}
}
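
/*
 * Gen2 inline layout (illustrative): the first 8 data bytes sit at
 * dest + 8 within the base quantum. Each additional 32-byte quantum
 * carries up to 31 data bytes, its last byte stamped with the inline
 * valid bit at the current polarity. So len = 39 fills the base quantum
 * plus exactly one extra quantum (8 + 31), matching the cutoffs in
 * irdma_inline_data_size_to_quanta() below.
 */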

/**
 * irdma_inline_data_size_to_quanta - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}
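
/*
 * The thresholds above follow from irdma_copy_inline_data(): 8 bytes
 * fit in the base quantum and every further quantum adds 31 data bytes
 * (its 32nd byte is the inline valid byte), so the cutoff for q quanta
 * is 8 + 31 * (q - 1): 39, 70, 101, 132, 163, 194.
 */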

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code
irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			   bool post_sq)
{
	__le64 *wqe;
	struct irdma_inline_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inline_rdma_write;

	if (op_info->len > qp->max_inline_data)
		return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
					qp->swqe_polarity);
	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
					    struct irdma_post_sq_info *info,
					    bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_inline_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inline_send;

	if (op_info->len > qp->max_inline_data)
		return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
					qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code
irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct ib_sge sge = {};

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	sge.lkey = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);

	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
				  post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
					     struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);

	dma_wmb(); /* make sure shadow area is updated before ringing doorbell */

	writel(cq->cq_id, cq->cqe_alloc_db);
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
enum irdma_status_code
irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx, q_type;
	enum irdma_status_code ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return IRDMA_ERR_Q_EMPTY;

	/* Ensure CQE contents are read after valid bit is checked */
	dma_rmb();

	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *)((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return IRDMA_ERR_Q_EMPTY;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		dma_rmb();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, 0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, 16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			/* Set the min error to standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, 24, qword3);
			}
		} else {
			info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	get_64bit_val(cqe, 8, &comp_ctx);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
	if (!qp || qp->destroy_pending) {
		ret_code = IRDMA_ERR_Q_DESTROYED;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle)(unsigned long)qp;

	if (q_type == IRDMA_CQE_QTYPE_RQ) {
		u32 array_idx;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = IRDMA_ERR_Q_EMPTY;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (info->imm_valid)
			info->op_type = IRDMA_OP_TYPE_REC_IMM;
		else
			info->op_type = IRDMA_OP_TYPE_REC;
		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, 0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0,
				       sizeof(struct irdma_cq_poll_info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		/* cease posting push mode on push drop */
		if (info->push_dropped) {
			qp->push_mode = false;
			qp->push_dropped = true;
		}
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				ret_code = IRDMA_ERR_Q_EMPTY;
				goto exit;
			}

			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u8 op_type;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, 24,
					      &wqe_qword);
				op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
				info->op_type = op_type;
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
		if (pring && IRDMA_RING_MORE_WORK(*pring))
			move_cq_head = false;

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, 0,
			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, 24, qword3);
	}

	return ret_code;
}

/**
 * irdma_qp_round_up - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int irdma_qp_round_up(u32 wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}
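
/*
 * Worked example (illustrative): for wqdepth = 600, the decrement gives
 * 599 and the shift-and-OR loop (shifts of 1, 2, 4, 8, 16) smears the
 * top set bit downward, producing 1023; the final increment returns
 * 1024, the next power of two. Exact powers of two map to themselves
 * because of the initial decrement.
 */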

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
 * For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32 bytes).
 * For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of 64 bytes).
 * For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of 128 bytes).
 * Otherwise, shift = 3 (wqe size of 256 bytes).
 */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}
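
/*
 * Example (gen2 path, illustrative): sge = 3, inline_data = 0 gives
 * shift = 1 (64-byte WQE); sge = 6 gives shift = 2 (128 bytes); and
 * sge = 10 or inline_data = 150 gives shift = 3 (256 bytes). The gen1
 * path never exceeds shift = 2.
 */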

/*
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
					 u32 sq_size, u8 shift, u32 *sqdepth)
{
	*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return IRDMA_ERR_INVALID_SIZE;

	return 0;
}
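
/*
 * Worked example (illustrative): sq_size = 100 and shift = 1 request
 * 200 quanta plus the IRDMA_SQ_RSVD reserve, rounded up to the next
 * power of two by irdma_qp_round_up(), then clamped to at least
 * IRDMA_QP_SW_MIN_WQSIZE << shift and rejected if it exceeds
 * max_hw_wq_quanta.
 */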

/*
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
					 u32 rq_size, u8 shift, u32 *rqdepth)
{
	*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return IRDMA_ERR_INVALID_SIZE;

	return 0;
}

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection.
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
					struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (!info->legacy_mode &&
	    (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
		move_cnt = 3;

	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the maximum number of fragments
 * allowed. The size of wqe * the number of wqes should then equal
 * the amount of memory allocated for sq and rq.
 */
enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
					struct irdma_qp_uk_init_info *info)
{
	enum irdma_status_code ret_code = 0;
	u32 sq_ring_size;
	u8 sqshift, rqshift;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
				    info->max_inline_data, &sqshift);
		if (info->abi_ver > 4)
			rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	} else {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
				    info->max_inline_data, &sqshift);
	}
	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->push_mode = false;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << sqshift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = rqshift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << rqshift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;
	return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
		      struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, 8, &comp_ctx);
		if ((void *)(unsigned long)comp_ctx == q)
			set_64bit_val(cqe, 8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
}

/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
				 bool signaled, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;
	u32 wqe_idx;
	struct irdma_post_sq_info info = {};

	info.push_wqe = false;
	info.wr_id = wr_id;
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, &info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
		break;
	case 2:
	case 3:
		*quanta = 2;
		break;
	case 4:
	case 5:
		*quanta = 3;
		break;
	case 6:
	case 7:
		*quanta = 4;
		break;
	case 8:
	case 9:
		*quanta = 5;
		break;
	case 10:
	case 11:
		*quanta = 6;
		break;
	case 12:
	case 13:
		*quanta = 7;
		break;
	case 14:
	case 15: /* when immediate data is present */
		*quanta = 8;
		break;
	default:
		return IRDMA_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}
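
/*
 * Equivalent closed form (illustrative): for 1 <= frag_cnt <= 15,
 * quanta = 1 + frag_cnt / 2. The first 32-byte quantum holds one
 * fragment beside the header fields and each further quantum holds two
 * 16-byte fragments; frag_cnt == 15 is the case where immediate data
 * displaces the first fragment.
 */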

/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
		*wqe_size = 256;
		break;
	default:
		return IRDMA_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}