1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
12 * irdma_get_qp_from_list - get next qp from a list
13 * @head: list head of QPs
14 * @qp: current qp
16 struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
17 struct irdma_sc_qp *qp)
19 struct list_head *lastentry;
20 struct list_head *entry = NULL;
22 if (list_empty(head))
23 return NULL;
25 if (!qp) {
26 entry = head->next;
27 } else {
28 lastentry = &qp->list;
29 entry = lastentry->next;
30 if (entry == head)
31 return NULL;
32 }
34 return container_of(entry, struct irdma_sc_qp, list);
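/* Editor's example (not part of the driver): a minimal sketch of walking one
 * per-priority qplist with irdma_get_qp_from_list(). It assumes the caller
 * holds the matching qos_mutex, exactly as irdma_sc_suspend_resume_qps()
 * below does; visit_qp() is a hypothetical callback.
 */
static void example_walk_qplist(struct irdma_sc_vsi *vsi, u8 prio,
				void (*visit_qp)(struct irdma_sc_qp *qp))
{
	struct irdma_sc_qp *iter = NULL;

	mutex_lock(&vsi->qos[prio].qos_mutex);
	/* a NULL seed yields the first entry; a NULL return means end of list */
	while ((iter = irdma_get_qp_from_list(&vsi->qos[prio].qplist, iter)))
		visit_qp(iter);
	mutex_unlock(&vsi->qos[prio].qos_mutex);
}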
38 * irdma_sc_suspend_resume_qps - suspend/resume all qp's on VSI
39 * @vsi: the VSI struct pointer
40 * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
42 void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
44 struct irdma_sc_qp *qp = NULL;
47 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
48 mutex_lock(&vsi->qos[i].qos_mutex);
49 qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
50 while (qp) {
51 if (op == IRDMA_OP_RESUME) {
52 if (!qp->dev->ws_add(vsi, i)) {
53 qp->qs_handle =
54 vsi->qos[qp->user_pri].qs_handle;
55 irdma_cqp_qp_suspend_resume(qp, op);
56 } else {
57 irdma_cqp_qp_suspend_resume(qp, op);
58 irdma_modify_qp_to_err(qp);
59 }
60 } else if (op == IRDMA_OP_SUSPEND) {
61 /* issue cqp suspend command */
62 if (!irdma_cqp_qp_suspend_resume(qp, op))
63 atomic_inc(&vsi->qp_suspend_reqs);
64 }
65 qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
66 }
67 mutex_unlock(&vsi->qos[i].qos_mutex);
72 * irdma_change_l2params - given the new l2 parameters, change all qp's QoS settings
73 * @vsi: RDMA VSI pointer
74 * @l2params: New parameters from l2
76 void irdma_change_l2params(struct irdma_sc_vsi *vsi,
77 struct irdma_l2params *l2params)
79 if (l2params->mtu_changed) {
80 vsi->mtu = l2params->mtu;
81 if (vsi->ieq)
82 irdma_reinitialize_ieq(vsi);
83 }
85 if (!l2params->tc_changed)
86 return;
88 vsi->tc_change_pending = false;
89 irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
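/* Editor's example (not part of the driver): a hedged sketch of how a LAN
 * driver notification might feed irdma_change_l2params() for a pure MTU
 * change; only the l2params fields referenced above are set.
 */
static void example_report_mtu_change(struct irdma_sc_vsi *vsi, u16 new_mtu)
{
	struct irdma_l2params l2params = {};

	l2params.mtu = new_mtu;
	l2params.mtu_changed = true;	/* triggers the IEQ reinit above */
	l2params.tc_changed = false;	/* skips the QP resume path above */
	irdma_change_l2params(vsi, &l2params);
}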
93 * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
94 * @qp: qp to be removed from qos
96 void irdma_qp_rem_qos(struct irdma_sc_qp *qp)
98 struct irdma_sc_vsi *vsi = qp->vsi;
100 ibdev_dbg(to_ibdev(qp->dev),
101 "DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
102 qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
103 qp->on_qoslist);
104 mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
105 if (qp->on_qoslist) {
106 qp->on_qoslist = false;
107 list_del(&qp->list);
108 }
109 mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
113 * irdma_qp_add_qos - called during setctx for qp to be added to qos
114 * @qp: qp to be added to qos
116 void irdma_qp_add_qos(struct irdma_sc_qp *qp)
118 struct irdma_sc_vsi *vsi = qp->vsi;
120 ibdev_dbg(to_ibdev(qp->dev),
121 "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
122 qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
123 qp->on_qoslist);
124 mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
125 if (!qp->on_qoslist) {
126 list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
127 qp->on_qoslist = true;
128 qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
129 }
130 mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
134 * irdma_sc_pd_init - initialize sc pd struct
135 * @dev: sc device struct
136 * @pd: sc pd ptr
137 * @pd_id: pd_id for allocated pd
138 * @abi_ver: User/Kernel ABI version
140 void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
141 int abi_ver)
143 pd->pd_id = pd_id;
144 pd->abi_ver = abi_ver;
145 pd->dev = dev;
149 * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
150 * @cqp: struct for cqp hw
151 * @info: arp entry information
152 * @scratch: u64 saved to be used during cqp completion
153 * @post_sq: flag for cqp db to ring
155 static enum irdma_status_code
156 irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
157 struct irdma_add_arp_cache_entry_info *info,
158 u64 scratch, bool post_sq)
163 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
164 if (!wqe)
165 return IRDMA_ERR_RING_FULL;
166 set_64bit_val(wqe, 8, info->reach_max);
167 set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
169 hdr = info->arp_index |
170 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
171 FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, (info->permanent ? 1 : 0)) |
172 FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, 1) |
173 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
174 dma_wmb(); /* make sure WQE is written before valid bit is set */
176 set_64bit_val(wqe, 24, hdr);
178 print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET,
179 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
180 if (post_sq)
181 irdma_sc_cqp_post_sq(cqp);
183 return 0;
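/* Editor's note (not part of the driver): every CQP builder in this file
 * repeats the five-step pattern shown above:
 *   1. reserve a send WQE with irdma_sc_cqp_get_next_send_wqe(), failing
 *      with IRDMA_ERR_RING_FULL when the ring has no room;
 *   2. fill the operand quadwords via set_64bit_val()/FIELD_PREP();
 *   3. compose the header with the opcode and the ring's polarity bit;
 *   4. dma_wmb(), then write the header quadword (offset 24) last, so the
 *      valid bit becomes visible only after the rest of the WQE;
 *   5. optionally ring the doorbell with irdma_sc_cqp_post_sq().
 */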
187 * irdma_sc_del_arp_cache_entry - delete arp cache entry
188 * @cqp: struct for cqp hw
189 * @scratch: u64 saved to be used during cqp completion
190 * @arp_index: arp index to delete arp entry
191 * @post_sq: flag for cqp db to ring
193 static enum irdma_status_code
194 irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
195 u16 arp_index, bool post_sq)
200 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
201 if (!wqe)
202 return IRDMA_ERR_RING_FULL;
204 hdr = arp_index |
205 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
206 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
207 dma_wmb(); /* make sure WQE is written before valid bit is set */
209 set_64bit_val(wqe, 24, hdr);
211 print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE",
212 DUMP_PREFIX_OFFSET, 16, 8, wqe,
213 IRDMA_CQP_WQE_SIZE * 8, false);
214 if (post_sq)
215 irdma_sc_cqp_post_sq(cqp);
217 return 0;
221 * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
222 * @cqp: struct for cqp hw
223 * @info: info for apbvt entry to add or delete
224 * @scratch: u64 saved to be used during cqp completion
225 * @post_sq: flag for cqp db to ring
227 static enum irdma_status_code
228 irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
229 struct irdma_apbvt_info *info, u64 scratch,
230 bool post_sq)
235 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
236 if (!wqe)
237 return IRDMA_ERR_RING_FULL;
239 set_64bit_val(wqe, 16, info->port);
241 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
242 FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
243 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
244 dma_wmb(); /* make sure WQE is written before valid bit is set */
246 set_64bit_val(wqe, 24, hdr);
248 print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16,
249 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
250 if (post_sq)
251 irdma_sc_cqp_post_sq(cqp);
253 return 0;
257 * irdma_sc_manage_qhash_table_entry - manage quad hash entries
258 * @cqp: struct for cqp hw
259 * @info: info for quad hash to manage
260 * @scratch: u64 saved to be used during cqp completion
261 * @post_sq: flag for cqp db to ring
263 * This is called before connection establishment is started.
264 * For passive connections, when listener is created, it will
265 * call with entry type of IRDMA_QHASH_TYPE_TCP_SYN with local
266 * ip address and tcp port. When SYN is received (passive
267 * connections) or sent (active connections), this routine is
268 * called with entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
269 * and quad is passed in info.
271 * When iwarp connection is done and its state moves to RTS, the
272 * quad hash entry in the hardware will point to iwarp's qp
273 * number and requires no calls from the driver.
275 static enum irdma_status_code
276 irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
277 struct irdma_qhash_table_info *info,
278 u64 scratch, bool post_sq)
284 struct irdma_sc_vsi *vsi = info->vsi;
286 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
287 if (!wqe)
288 return IRDMA_ERR_RING_FULL;
290 set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
292 qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
293 FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
294 if (info->ipv4_valid) {
295 set_64bit_val(wqe, 48,
296 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
297 } else {
298 set_64bit_val(wqe, 56,
299 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
300 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
302 set_64bit_val(wqe, 48,
303 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
304 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
305 }
306 qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
307 vsi->qos[info->user_pri].qs_handle);
308 if (info->vlan_valid)
309 qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
310 set_64bit_val(wqe, 16, qw2);
311 if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
312 qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
313 if (!info->ipv4_valid) {
314 set_64bit_val(wqe, 40,
315 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
316 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
317 set_64bit_val(wqe, 32,
318 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
319 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
320 } else {
321 set_64bit_val(wqe, 32,
322 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
323 }
324 }
326 set_64bit_val(wqe, 8, qw1);
327 temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
328 FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
329 IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
330 FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
331 FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
332 FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
333 FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
334 dma_wmb(); /* make sure WQE is written before valid bit is set */
336 set_64bit_val(wqe, 24, temp);
338 print_hex_dump_debug("WQE: MANAGE_QHASH WQE", DUMP_PREFIX_OFFSET, 16,
339 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
340 if (post_sq)
341 irdma_sc_cqp_post_sq(cqp);
343 return 0;
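/* Editor's example (not part of the driver): a hedged sketch of adding the
 * listener-side quad-hash entry described in the comment above, i.e. entry
 * type IRDMA_QHASH_TYPE_TCP_SYN with only the local (dest_*) side filled in.
 * IRDMA_QHASH_MANAGE_TYPE_ADD is an assumed enum value; field names follow
 * the function body above.
 */
static enum irdma_status_code
example_add_listener_qhash(struct irdma_sc_cqp *cqp, struct irdma_sc_vsi *vsi,
			   u32 qp_num, u16 listen_port, u32 local_ipv4)
{
	struct irdma_qhash_table_info info = {};

	info.vsi = vsi;
	info.manage = IRDMA_QHASH_MANAGE_TYPE_ADD;
	info.entry_type = IRDMA_QHASH_TYPE_TCP_SYN;
	info.qp_num = qp_num;
	info.ipv4_valid = true;
	info.dest_port = listen_port;
	info.dest_ip[0] = local_ipv4;	/* ipv4 uses dest_ip[0] only (qw 48) */
	return irdma_sc_manage_qhash_table_entry(cqp, &info, 0, true);
}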
347 * irdma_sc_qp_init - initialize qp
348 * @qp: sc qp
349 * @info: initialization qp info
351 enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
352 struct irdma_qp_init_info *info)
354 enum irdma_status_code ret_code;
358 if (info->qp_uk_init_info.max_sq_frag_cnt >
359 info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
360 info->qp_uk_init_info.max_rq_frag_cnt >
361 info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
362 return IRDMA_ERR_INVALID_FRAG_COUNT;
364 qp->dev = info->pd->dev;
365 qp->vsi = info->vsi;
366 qp->ieq_qp = info->vsi->exception_lan_q;
367 qp->sq_pa = info->sq_pa;
368 qp->rq_pa = info->rq_pa;
369 qp->hw_host_ctx_pa = info->host_ctx_pa;
370 qp->q2_pa = info->q2_pa;
371 qp->shadow_area_pa = info->shadow_area_pa;
372 qp->q2_buf = info->q2;
373 qp->pd = info->pd;
374 qp->hw_host_ctx = info->host_ctx;
375 info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
376 ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
377 if (ret_code)
378 return ret_code;
380 qp->virtual_map = info->virtual_map;
381 pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
383 if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
384 (info->virtual_map && info->rq_pa >= pble_obj_cnt))
385 return IRDMA_ERR_INVALID_PBLE_INDEX;
387 qp->llp_stream_handle = (void *)(-1);
388 qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
389 IRDMA_QUEUE_TYPE_SQ_RQ);
390 ibdev_dbg(to_ibdev(qp->dev),
391 "WQE: hw_sq_size[%04d] sq_ring.size[%04d]\n",
392 qp->hw_sq_size, qp->qp_uk.sq_ring.size);
393 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1 && qp->pd->abi_ver > 4)
394 wqe_size = IRDMA_WQE_SIZE_128;
395 else
396 ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
397 &wqe_size);
398 if (ret_code)
399 return ret_code;
401 qp->hw_rq_size = irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
402 (wqe_size / IRDMA_QP_WQE_MIN_SIZE), IRDMA_QUEUE_TYPE_SQ_RQ);
403 ibdev_dbg(to_ibdev(qp->dev),
404 "WQE: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
405 qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
406 qp->sq_tph_val = info->sq_tph_val;
407 qp->rq_tph_val = info->rq_tph_val;
408 qp->sq_tph_en = info->sq_tph_en;
409 qp->rq_tph_en = info->rq_tph_en;
410 qp->rcv_tph_en = info->rcv_tph_en;
411 qp->xmit_tph_en = info->xmit_tph_en;
412 qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
413 qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
415 return 0;
419 * irdma_sc_qp_create - create qp
420 * @qp: sc qp
421 * @info: qp create info
422 * @scratch: u64 saved to be used during cqp completion
423 * @post_sq: flag for cqp db to ring
425 enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
426 u64 scratch, bool post_sq)
428 struct irdma_sc_cqp *cqp;
429 __le64 *wqe;
430 u64 hdr;
432 cqp = qp->dev->cqp;
433 if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
434 qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1))
435 return IRDMA_ERR_INVALID_QP_ID;
437 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
438 if (!wqe)
439 return IRDMA_ERR_RING_FULL;
441 set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
442 set_64bit_val(wqe, 40, qp->shadow_area_pa);
444 hdr = qp->qp_uk.qp_id |
445 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
446 FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, (info->ord_valid ? 1 : 0)) |
447 FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
448 FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
449 FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
450 FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
451 FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
452 FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
453 FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
454 info->arp_cache_idx_valid) |
455 FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
456 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
457 dma_wmb(); /* make sure WQE is written before valid bit is set */
459 set_64bit_val(wqe, 24, hdr);
461 print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
462 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
463 if (post_sq)
464 irdma_sc_cqp_post_sq(cqp);
466 return 0;
470 * irdma_sc_qp_modify - modify qp cqp wqe
471 * @qp: sc qp
472 * @info: modify qp info
473 * @scratch: u64 saved to be used during cqp completion
474 * @post_sq: flag for cqp db to ring
476 enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
477 struct irdma_modify_qp_info *info,
478 u64 scratch, bool post_sq)
481 struct irdma_sc_cqp *cqp;
486 cqp = qp->dev->cqp;
487 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
488 if (!wqe)
489 return IRDMA_ERR_RING_FULL;
491 if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
492 if (info->dont_send_fin)
493 term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
494 if (info->dont_send_term)
495 term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
496 if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
497 term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
498 term_len = info->termlen;
499 }
501 set_64bit_val(wqe, 8,
502 FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
503 FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
504 set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
505 set_64bit_val(wqe, 40, qp->shadow_area_pa);
507 hdr = qp->qp_uk.qp_id |
508 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
509 FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
510 FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
511 FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
512 info->cached_var_valid) |
513 FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
514 FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
515 FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
516 FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
517 FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
518 FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
519 FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
520 info->remove_hash_idx) |
521 FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
522 FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
523 FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
524 info->arp_cache_idx_valid) |
525 FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
526 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
527 dma_wmb(); /* make sure WQE is written before valid bit is set */
529 set_64bit_val(wqe, 24, hdr);
531 print_hex_dump_debug("WQE: QP_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
532 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
533 if (post_sq)
534 irdma_sc_cqp_post_sq(cqp);
536 return 0;
540 * irdma_sc_qp_destroy - cqp destroy qp
541 * @qp: sc qp
542 * @scratch: u64 saved to be used during cqp completion
543 * @remove_hash_idx: flag if to remove hash idx
544 * @ignore_mw_bnd: memory window bind flag
545 * @post_sq: flag for cqp db to ring
547 enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
548 bool remove_hash_idx, bool ignore_mw_bnd,
549 bool post_sq)
552 struct irdma_sc_cqp *cqp;
555 cqp = qp->dev->cqp;
556 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
557 if (!wqe)
558 return IRDMA_ERR_RING_FULL;
560 set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
561 set_64bit_val(wqe, 40, qp->shadow_area_pa);
563 hdr = qp->qp_uk.qp_id |
564 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
565 FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
566 FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
567 FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
568 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
569 dma_wmb(); /* make sure WQE is written before valid bit is set */
571 set_64bit_val(wqe, 24, hdr);
573 print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
574 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
575 if (post_sq)
576 irdma_sc_cqp_post_sq(cqp);
578 return 0;
582 * irdma_sc_get_encoded_ird_size - get encoded IRD size
583 * @ird_size: IRD size
584 * The ird from the connection is rounded to a supported HW setting and then encoded
585 * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based
586 * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input
588 static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
590 switch (ird_size ?
591 roundup_pow_of_two(2 * ird_size) : 4) {
592 case 256:
593 return IRDMA_IRD_HW_SIZE_256;
594 case 128:
595 return IRDMA_IRD_HW_SIZE_128;
596 case 64:
597 case 32:
598 return IRDMA_IRD_HW_SIZE_64;
599 case 16:
600 case 8:
601 return IRDMA_IRD_HW_SIZE_16;
602 case 4:
603 default:
604 break;
605 }
607 return IRDMA_IRD_HW_SIZE_4;
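/* Editor's note: a worked example of the encoding above. A requested
 * ird_size of 10 rounds as roundup_pow_of_two(2 * 10) = 32, which lands in
 * the 64/32 bucket and encodes as IRDMA_IRD_HW_SIZE_64, while ird_size 0
 * takes the ": 4" arm and falls through to IRDMA_IRD_HW_SIZE_4.
 */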
611 * irdma_sc_qp_setctx_roce - set qp's context
612 * @qp: sc qp
613 * @qp_ctx: context ptr
614 * @info: ctx info
616 void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
617 struct irdma_qp_host_ctx_info *info)
619 struct irdma_roce_offload_info *roce_info;
620 struct irdma_udp_offload_info *udp;
624 roce_info = info->roce_info;
625 udp = info->udp_info;
626 qp->user_pri = info->user_pri;
627 if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
628 push_mode_en = 0;
629 push_idx = 0;
630 } else {
631 push_mode_en = 1;
632 push_idx = qp->push_idx;
633 }
634 set_64bit_val(qp_ctx, 0,
635 FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
636 FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
637 FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
638 FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
639 FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
640 FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
641 FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
642 FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
643 FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
644 FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
645 FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
646 FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
647 FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
648 FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
649 set_64bit_val(qp_ctx, 8, qp->sq_pa);
650 set_64bit_val(qp_ctx, 16, qp->rq_pa);
651 if ((roce_info->dcqcn_en || roce_info->dctcp_en) &&
652 !(udp->tos & 0x03))
653 udp->tos |= ECN_CODE_PT_VAL;
654 set_64bit_val(qp_ctx, 24,
655 FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
656 FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
657 FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
658 FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
659 FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
660 set_64bit_val(qp_ctx, 32,
661 FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
662 FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
663 set_64bit_val(qp_ctx, 40,
664 FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
665 FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
666 set_64bit_val(qp_ctx, 48,
667 FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
668 FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
669 FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
670 set_64bit_val(qp_ctx, 56,
671 FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
672 FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
673 FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
674 FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
675 set_64bit_val(qp_ctx, 64,
676 FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
677 FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
678 set_64bit_val(qp_ctx, 80,
679 FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
680 FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
681 set_64bit_val(qp_ctx, 88,
682 FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
683 set_64bit_val(qp_ctx, 96,
684 FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
685 FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
686 set_64bit_val(qp_ctx, 112,
687 FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
688 set_64bit_val(qp_ctx, 128,
689 FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
690 FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
691 FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
692 FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
693 set_64bit_val(qp_ctx, 136,
694 FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
695 FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
696 set_64bit_val(qp_ctx, 144,
697 FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
698 set_64bit_val(qp_ctx, 152, ether_addr_to_u64(roce_info->mac_addr) << 16);
699 set_64bit_val(qp_ctx, 160,
700 FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
701 FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
702 FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
703 FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
704 FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
705 FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
706 FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
707 FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
708 FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
709 FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
710 FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
711 FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
712 FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
713 set_64bit_val(qp_ctx, 168,
714 FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
715 set_64bit_val(qp_ctx, 176,
716 FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
717 FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
718 FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
719 set_64bit_val(qp_ctx, 184,
720 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
721 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
722 set_64bit_val(qp_ctx, 192,
723 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
724 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
725 set_64bit_val(qp_ctx, 200,
726 FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
727 FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
728 set_64bit_val(qp_ctx, 208,
729 FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
731 print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
732 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
735 * irdma_sc_alloc_local_mac_entry - allocate a mac entry
736 * @cqp: struct for cqp hw
737 * @scratch: u64 saved to be used during cqp completion
738 * @post_sq: flag for cqp db to ring
740 static enum irdma_status_code
741 irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
742 bool post_sq)
747 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
748 if (!wqe)
749 return IRDMA_ERR_RING_FULL;
751 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
752 IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
753 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
755 dma_wmb(); /* make sure WQE is written before valid bit is set */
757 set_64bit_val(wqe, 24, hdr);
759 print_hex_dump_debug("WQE: ALLOCATE_LOCAL_MAC WQE",
760 DUMP_PREFIX_OFFSET, 16, 8, wqe,
761 IRDMA_CQP_WQE_SIZE * 8, false);
763 if (post_sq)
764 irdma_sc_cqp_post_sq(cqp);
766 return 0;
769 * irdma_sc_add_local_mac_entry - add mac entry
770 * @cqp: struct for cqp hw
771 * @info: mac addr info
772 * @scratch: u64 saved to be used during cqp completion
773 * @post_sq: flag for cqp db to ring
775 static enum irdma_status_code
776 irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
777 struct irdma_local_mac_entry_info *info,
778 u64 scratch, bool post_sq)
783 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
784 if (!wqe)
785 return IRDMA_ERR_RING_FULL;
787 set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));
789 header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
790 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
791 IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
792 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
794 dma_wmb(); /* make sure WQE is written before valid bit is set */
796 set_64bit_val(wqe, 24, header);
798 print_hex_dump_debug("WQE: ADD_LOCAL_MAC WQE", DUMP_PREFIX_OFFSET, 16,
799 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
801 if (post_sq)
802 irdma_sc_cqp_post_sq(cqp);
804 return 0;
807 * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac entry
808 * @cqp: struct for cqp hw
809 * @scratch: u64 saved to be used during cqp completion
810 * @entry_idx: index of mac entry
811 * @ignore_ref_count: to force mac address delete
812 * @post_sq: flag for cqp db to ring
814 static enum irdma_status_code
815 irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
816 u16 entry_idx, u8 ignore_ref_count, bool post_sq)
821 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
822 if (!wqe)
823 return IRDMA_ERR_RING_FULL;
824 header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
825 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
826 IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
827 FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
828 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
829 FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);
831 dma_wmb(); /* make sure WQE is written before valid bit is set */
833 set_64bit_val(wqe, 24, header);
835 print_hex_dump_debug("WQE: DEL_LOCAL_MAC_IPADDR WQE",
836 DUMP_PREFIX_OFFSET, 16, 8, wqe,
837 IRDMA_CQP_WQE_SIZE * 8, false);
839 if (post_sq)
840 irdma_sc_cqp_post_sq(cqp);
842 return 0;
845 * irdma_sc_qp_setctx - set qp's context
846 * @qp: sc qp
847 * @qp_ctx: context ptr
848 * @info: ctx info
850 void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
851 struct irdma_qp_host_ctx_info *info)
853 struct irdma_iwarp_offload_info *iw;
854 struct irdma_tcp_offload_info *tcp;
855 struct irdma_sc_dev *dev;
858 u64 qw0, qw3, qw7 = 0, qw16 = 0;
861 iw = info->iwarp_info;
862 tcp = info->tcp_info;
864 if (iw->rcv_mark_en) {
865 qp->pfpdu.marker_len = 4;
866 qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
867 }
868 qp->user_pri = info->user_pri;
869 if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
870 push_mode_en = 0;
871 push_idx = 0;
872 } else {
873 push_mode_en = 1;
874 push_idx = qp->push_idx;
875 }
876 qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
877 FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
878 FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
879 FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
880 FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
881 FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
882 FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);
884 set_64bit_val(qp_ctx, 8, qp->sq_pa);
885 set_64bit_val(qp_ctx, 16, qp->rq_pa);
887 qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
888 FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
889 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
890 qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
891 qp->src_mac_addr_idx);
892 set_64bit_val(qp_ctx, 136,
893 FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
894 FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
895 set_64bit_val(qp_ctx, 168,
896 FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
897 set_64bit_val(qp_ctx, 176,
898 FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
899 FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
900 FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
901 FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
902 if (info->iwarp_info_valid) {
903 qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
904 FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
905 FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
906 FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
907 FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
908 FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
909 FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
910 iw->err_rq_idx_valid);
911 qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
912 qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
913 FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
914 set_64bit_val(qp_ctx, 144,
915 FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
916 FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
918 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
919 mac = ether_addr_to_u64(iw->mac_addr);
921 set_64bit_val(qp_ctx, 152,
922 mac << 16 | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
923 set_64bit_val(qp_ctx, 160,
924 FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
925 FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
926 FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
927 FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
928 FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
929 FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
930 FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
931 FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
932 FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
933 FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
934 FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
935 FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
936 FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
937 FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset || !tcp ? iw->rcv_mark_offset : tcp->rcv_nxt) |
938 FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset || !tcp ? iw->snd_mark_offset : tcp->snd_nxt) |
939 FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
941 if (info->tcp_info_valid) {
942 qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
943 FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
944 FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
945 tcp->insert_vlan_tag) |
946 FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
947 FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
948 FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
949 FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);
951 if ((iw->ecn_en || iw->dctcp_en) && !(tcp->tos & 0x03))
952 tcp->tos |= ECN_CODE_PT_VAL;
954 qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
955 FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
956 FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
957 FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
958 FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
959 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
960 qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);
962 qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
963 }
964 set_64bit_val(qp_ctx, 32,
965 FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
966 FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
967 set_64bit_val(qp_ctx, 40,
968 FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
969 FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
970 set_64bit_val(qp_ctx, 48,
971 FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
972 FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
973 FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
974 FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
975 qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
976 FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
977 FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
978 tcp->ignore_tcp_opt) |
979 FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
980 tcp->ignore_tcp_uns_opt) |
981 FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
982 FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
983 FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
984 set_64bit_val(qp_ctx, 72,
985 FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
986 FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
987 set_64bit_val(qp_ctx, 80,
988 FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
989 FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
990 set_64bit_val(qp_ctx, 88,
991 FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
992 FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
993 set_64bit_val(qp_ctx, 96,
994 FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
995 FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
996 set_64bit_val(qp_ctx, 104,
997 FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
998 FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
999 set_64bit_val(qp_ctx, 112,
1000 FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
1001 FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
1002 set_64bit_val(qp_ctx, 120,
1003 FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
1004 FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
1005 qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
1006 FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
1007 set_64bit_val(qp_ctx, 184,
1008 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
1009 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
1010 set_64bit_val(qp_ctx, 192,
1011 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
1012 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
1013 set_64bit_val(qp_ctx, 200,
1014 FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
1015 FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
1016 set_64bit_val(qp_ctx, 208,
1017 FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
1020 set_64bit_val(qp_ctx, 0, qw0);
1021 set_64bit_val(qp_ctx, 24, qw3);
1022 set_64bit_val(qp_ctx, 56, qw7);
1023 set_64bit_val(qp_ctx, 128, qw16);
1025 print_hex_dump_debug("WQE: QP_HOST CTX", DUMP_PREFIX_OFFSET, 16, 8,
1026 qp_ctx, IRDMA_QP_CTX_SIZE, false);
1030 * irdma_sc_alloc_stag - mr stag alloc
1031 * @dev: sc device struct
1032 * @info: stag info
1033 * @scratch: u64 saved to be used during cqp completion
1034 * @post_sq: flag for cqp db to ring
1036 static enum irdma_status_code
1037 irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
1038 struct irdma_allocate_stag_info *info, u64 scratch,
1039 bool post_sq)
1042 struct irdma_sc_cqp *cqp;
1044 enum irdma_page_size page_size;
1046 if (info->page_size == 0x40000000)
1047 page_size = IRDMA_PAGE_SIZE_1G;
1048 else if (info->page_size == 0x200000)
1049 page_size = IRDMA_PAGE_SIZE_2M;
1050 else
1051 page_size = IRDMA_PAGE_SIZE_4K;
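/* Editor's note: the magic numbers above are plain byte sizes, 0x40000000
 * being 1 GiB and 0x200000 being 2 MiB; any other value falls back to the
 * 4 KiB encoding here.
 */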
1053 cqp = dev->cqp;
1054 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1055 if (!wqe)
1056 return IRDMA_ERR_RING_FULL;
1058 set_64bit_val(wqe, 8,
1059 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
1060 FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
1061 set_64bit_val(wqe, 16,
1062 FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1063 set_64bit_val(wqe, 40,
1064 FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
1066 if (info->chunk_size)
1067 set_64bit_val(wqe, 48,
1068 FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));
1070 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
1071 FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
1072 FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
1073 FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
1074 FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
1075 FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
1076 FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
1077 FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
1078 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1079 dma_wmb(); /* make sure WQE is written before valid bit is set */
1081 set_64bit_val(wqe, 24, hdr);
1083 print_hex_dump_debug("WQE: ALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8,
1084 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1085 if (post_sq)
1086 irdma_sc_cqp_post_sq(cqp);
1088 return 0;
1092 * irdma_sc_mr_reg_non_shared - non-shared mr registration
1093 * @dev: sc device struct
1094 * @info: mr info
1095 * @scratch: u64 saved to be used during cqp completion
1096 * @post_sq: flag for cqp db to ring
1098 static enum irdma_status_code
1099 irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
1100 struct irdma_reg_ns_stag_info *info, u64 scratch,
1101 bool post_sq)
1105 struct irdma_sc_cqp *cqp;
1110 enum irdma_page_size page_size;
1112 if (info->page_size == 0x40000000)
1113 page_size = IRDMA_PAGE_SIZE_1G;
1114 else if (info->page_size == 0x200000)
1115 page_size = IRDMA_PAGE_SIZE_2M;
1116 else if (info->page_size == 0x1000)
1117 page_size = IRDMA_PAGE_SIZE_4K;
1118 else
1119 return IRDMA_ERR_PARAM;
1121 if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
1122 IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
1123 remote_access = true;
1125 remote_access = false;
1127 pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
1128 if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
1129 return IRDMA_ERR_INVALID_PBLE_INDEX;
1131 cqp = dev->cqp;
1132 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1133 if (!wqe)
1134 return IRDMA_ERR_RING_FULL;
1135 fbo = info->va & (info->page_size - 1);
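/* Editor's note: fbo is the first-byte offset of the VA within its page,
 * e.g. va = 0x7f0000212345 with 2 MiB pages gives
 * fbo = va & (0x200000 - 1) = 0x12345.
 */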
1137 set_64bit_val(wqe, 0,
1138 (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
1139 (uintptr_t)info->va : fbo));
1140 set_64bit_val(wqe, 8,
1141 FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
1142 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1143 set_64bit_val(wqe, 16,
1144 FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
1145 FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1146 if (!info->chunk_size) {
1147 set_64bit_val(wqe, 32, info->reg_addr_pa);
1148 set_64bit_val(wqe, 48, 0);
1149 } else {
1150 set_64bit_val(wqe, 32, 0);
1151 set_64bit_val(wqe, 48,
1152 FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
1153 }
1154 set_64bit_val(wqe, 40, info->hmc_fcn_index);
1155 set_64bit_val(wqe, 56, 0);
1157 addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
1158 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
1159 FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
1160 FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
1161 FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
1162 FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
1163 FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
1164 FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
1165 FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
1166 FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
1167 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1168 dma_wmb(); /* make sure WQE is written before valid bit is set */
1170 set_64bit_val(wqe, 24, hdr);
1172 print_hex_dump_debug("WQE: MR_REG_NS WQE", DUMP_PREFIX_OFFSET, 16, 8,
1173 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1174 if (post_sq)
1175 irdma_sc_cqp_post_sq(cqp);
1177 return 0;
1181 * irdma_sc_dealloc_stag - deallocate stag
1182 * @dev: sc device struct
1183 * @info: dealloc stag info
1184 * @scratch: u64 saved to be used during cqp completion
1185 * @post_sq: flag for cqp db to ring
1187 static enum irdma_status_code
1188 irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
1189 struct irdma_dealloc_stag_info *info, u64 scratch,
1194 struct irdma_sc_cqp *cqp;
1196 cqp = dev->cqp;
1197 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1198 if (!wqe)
1199 return IRDMA_ERR_RING_FULL;
1201 set_64bit_val(wqe, 8,
1202 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1203 set_64bit_val(wqe, 16,
1204 FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1206 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
1207 FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
1208 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1209 dma_wmb(); /* make sure WQE is written before valid bit is set */
1211 set_64bit_val(wqe, 24, hdr);
1213 print_hex_dump_debug("WQE: DEALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16,
1214 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1215 if (post_sq)
1216 irdma_sc_cqp_post_sq(cqp);
1218 return 0;
1222 * irdma_sc_mw_alloc - mw allocate
1223 * @dev: sc device struct
1224 * @info: memory window allocation information
1225 * @scratch: u64 saved to be used during cqp completion
1226 * @post_sq: flag for cqp db to ring
1228 static enum irdma_status_code
1229 irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
1230 u64 scratch, bool post_sq)
1233 struct irdma_sc_cqp *cqp;
1236 cqp = dev->cqp;
1237 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1238 if (!wqe)
1239 return IRDMA_ERR_RING_FULL;
1241 set_64bit_val(wqe, 8,
1242 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1243 set_64bit_val(wqe, 16,
1244 FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
1246 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
1247 FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
1248 FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
1249 info->mw1_bind_dont_vldt_key) |
1250 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1251 dma_wmb(); /* make sure WQE is written before valid bit is set */
1253 set_64bit_val(wqe, 24, hdr);
1255 print_hex_dump_debug("WQE: MW_ALLOC WQE", DUMP_PREFIX_OFFSET, 16, 8,
1256 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1257 if (post_sq)
1258 irdma_sc_cqp_post_sq(cqp);
1260 return 0;
1264 * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
1265 * @qp: sc qp
1266 * @info: fast mr info
1267 * @post_sq: flag for cqp db to ring
1269 enum irdma_status_code
1270 irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
1271 struct irdma_fast_reg_stag_info *info, bool post_sq)
1276 enum irdma_page_size page_size;
1277 struct irdma_post_sq_info sq_info = {};
1279 if (info->page_size == 0x40000000)
1280 page_size = IRDMA_PAGE_SIZE_1G;
1281 else if (info->page_size == 0x200000)
1282 page_size = IRDMA_PAGE_SIZE_2M;
1283 else
1284 page_size = IRDMA_PAGE_SIZE_4K;
1286 sq_info.wr_id = info->wr_id;
1287 sq_info.signaled = info->signaled;
1288 sq_info.push_wqe = info->push_wqe;
1290 wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
1291 IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
1292 if (!wqe)
1293 return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
1295 irdma_clr_wqes(&qp->qp_uk, wqe_idx);
1297 ibdev_dbg(to_ibdev(qp->dev),
1298 "MR: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
1299 info->wr_id, wqe_idx,
1300 &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
1302 temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
1303 (uintptr_t)info->va : info->fbo;
1304 set_64bit_val(wqe, 0, temp);
1306 temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
1307 info->first_pm_pbl_index >> 16);
1308 set_64bit_val(wqe, 8,
1309 FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
1310 FIELD_PREP(IRDMAQPSQ_PBLADDR, info->reg_addr_pa >> IRDMA_HW_PAGE_SHIFT));
1311 set_64bit_val(wqe, 16,
1312 info->total_len |
1313 FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));
1315 hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
1316 FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
1317 FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
1318 FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
1319 FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
1320 FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
1321 FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
1322 FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 1 : 0)) |
1323 FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
1324 FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
1325 FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
1326 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1327 dma_wmb(); /* make sure WQE is written before valid bit is set */
1329 set_64bit_val(wqe, 24, hdr);
1331 print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8,
1332 wqe, IRDMA_QP_WQE_MIN_SIZE, false);
1333 if (sq_info.push_wqe) {
1334 irdma_qp_push_wqe(&qp->qp_uk, wqe, IRDMA_QP_WQE_MIN_QUANTA,
1335 0, post_sq);
1336 } else {
1337 if (post_sq)
1338 irdma_uk_qp_post_wr(&qp->qp_uk);
1339 }
1341 return 0;
1345 * irdma_sc_gen_rts_ae - request AE generated after RTS
1346 * @qp: sc qp
1348 static void irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
1352 struct irdma_qp_uk *qp_uk;
1354 qp_uk = &qp->qp_uk;
1356 wqe = qp_uk->sq_base[1].elem;
1358 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
1359 FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
1360 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1361 dma_wmb(); /* make sure WQE is written before valid bit is set */
1363 set_64bit_val(wqe, 24, hdr);
1364 print_hex_dump_debug("QP: NOP W/LOCAL FENCE WQE", DUMP_PREFIX_OFFSET,
1365 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
1367 wqe = qp_uk->sq_base[2].elem;
1368 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
1369 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1370 dma_wmb(); /* make sure WQE is written before valid bit is set */
1372 set_64bit_val(wqe, 24, hdr);
1373 print_hex_dump_debug("QP: CONN EST WQE", DUMP_PREFIX_OFFSET, 16, 8,
1374 wqe, IRDMA_QP_WQE_MIN_SIZE, false);
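/* Editor's note: the two staged WQEs above appear to rely on the local-fence
 * bit, the fenced NOP at sq_base[1] ordering the GEN_RTS_AE at sq_base[2]
 * behind whatever WQE occupies the first SQ slot (e.g. the LSMM posted by
 * the callers below); no doorbell is rung here.
 */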
1378 * irdma_sc_send_lsmm - send last streaming mode message
1379 * @qp: sc qp
1380 * @lsmm_buf: buffer with lsmm message
1381 * @size: size of lsmm buffer
1382 * @stag: stag of lsmm buffer
1384 void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
1385 irdma_stag stag)
1389 struct irdma_qp_uk *qp_uk;
1391 qp_uk = &qp->qp_uk;
1392 wqe = qp_uk->sq_base->elem;
1394 set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
1395 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
1396 set_64bit_val(wqe, 8,
1397 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
1398 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
1399 } else {
1400 set_64bit_val(wqe, 8,
1401 FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
1402 FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
1403 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1404 }
1405 set_64bit_val(wqe, 16, 0);
1407 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
1408 FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
1409 FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
1410 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1411 dma_wmb(); /* make sure WQE is written before valid bit is set */
1413 set_64bit_val(wqe, 24, hdr);
1415 print_hex_dump_debug("WQE: SEND_LSMM WQE", DUMP_PREFIX_OFFSET, 16, 8,
1416 wqe, IRDMA_QP_WQE_MIN_SIZE, false);
1418 if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
1419 irdma_sc_gen_rts_ae(qp);
1423 * irdma_sc_send_lsmm_nostag - for privilege qp
1424 * @qp: sc qp
1425 * @lsmm_buf: buffer with lsmm message
1426 * @size: size of lsmm buffer
1428 void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size)
1432 struct irdma_qp_uk *qp_uk;
1434 qp_uk = &qp->qp_uk;
1435 wqe = qp_uk->sq_base->elem;
1437 set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
1439 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
1440 set_64bit_val(wqe, 8,
1441 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size));
1442 else
1443 set_64bit_val(wqe, 8,
1444 FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
1445 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1446 set_64bit_val(wqe, 16, 0);
1448 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
1449 FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
1450 FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
1451 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1452 dma_wmb(); /* make sure WQE is written before valid bit is set */
1454 set_64bit_val(wqe, 24, hdr);
1456 print_hex_dump_debug("WQE: SEND_LSMM_NOSTAG WQE", DUMP_PREFIX_OFFSET,
1457 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
1461 * irdma_sc_send_rtt - send last read0 or write0
1462 * @qp: sc qp
1463 * @read: Do read0 or write0
1465 void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
1469 struct irdma_qp_uk *qp_uk;
1471 qp_uk = &qp->qp_uk;
1472 wqe = qp_uk->sq_base->elem;
1474 set_64bit_val(wqe, 0, 0);
1475 set_64bit_val(wqe, 16, 0);
1476 if (read) {
1477 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
1478 set_64bit_val(wqe, 8,
1479 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
1480 } else {
1481 set_64bit_val(wqe, 8,
1482 (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1483 }
1484 hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
1485 FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
1486 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1487 } else {
1489 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
1490 set_64bit_val(wqe, 8, 0);
1491 } else {
1492 set_64bit_val(wqe, 8,
1493 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1494 }
1495 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
1496 FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1497 }
1499 dma_wmb(); /* make sure WQE is written before valid bit is set */
1501 set_64bit_val(wqe, 24, hdr);
1503 print_hex_dump_debug("WQE: RTR WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
1504 IRDMA_QP_WQE_MIN_SIZE, false);
1506 if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
1507 irdma_sc_gen_rts_ae(qp);
1511 * irdma_iwarp_opcode - determine if incoming is rdma layer
1512 * @info: aeq info for the packet
1513 * @pkt: packet for error
1515 static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt)
1517 __be16 *mpa;
1518 u32 opcode = 0xffffffff;
1520 if (info->q2_data_written) {
1521 mpa = (__be16 *)pkt;
1522 opcode = ntohs(mpa[1]) & 0xf;
1523 }
1525 return opcode;
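/* Editor's note: mpa[1] is the second big-endian 16-bit word of the located
 * MPA/DDP header; its low nibble carries the RDMAP opcode (e.g. 0 for RDMA
 * Write), and the IRDMA_OP_TYPE_* values compared against it elsewhere are
 * assumed to mirror those wire opcodes.
 */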
1529 * irdma_locate_mpa - return pointer to mpa in the pkt
1530 * @pkt: packet with data
1532 static u8 *irdma_locate_mpa(u8 *pkt)
1534 /* skip over ethernet header */
1535 pkt += IRDMA_MAC_HLEN;
1537 /* Skip over IP and TCP headers */
1538 pkt += 4 * (pkt[0] & 0x0f);
1539 pkt += 4 * ((pkt[12] >> 4) & 0x0f);
1541 return pkt;
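/* Editor's note: a worked example of the header walk above. For a typical
 * frame, pkt[0] = 0x45 gives an IHL of 5, i.e. a 20-byte IP header, and
 * pkt[12] >> 4 = 5 gives a 20-byte TCP header, so the MPA header starts
 * 14 + 20 + 20 = 54 bytes into the packet.
 */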
1545 * irdma_bld_termhdr_ctrl - setup terminate hdr control fields
1546 * @qp: sc qp ptr for pkt
1547 * @hdr: term hdr
1548 * @opcode: flush opcode for termhdr
1549 * @layer_etype: error layer + error type
1550 * @err: error code in the header
1552 static void irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
1553 struct irdma_terminate_hdr *hdr,
1554 enum irdma_flush_opcode opcode,
1555 u8 layer_etype, u8 err)
1557 qp->flush_code = opcode;
1558 hdr->layer_etype = layer_etype;
1559 hdr->error_code = err;
1563 * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
1564 * @pkt: ptr to mpa in offending pkt
1565 * @hdr: term hdr
1566 * @copy_len: offending pkt length to be copied to term hdr
1567 * @is_tagged: DDP tagged or untagged
1569 static void irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
1570 int *copy_len, u8 *is_tagged)
1574 ddp_seg_len = ntohs(*(__be16 *)pkt);
1575 if (ddp_seg_len) {
1576 *copy_len = 2;
1577 hdr->hdrct = DDP_LEN_FLAG;
1578 if (pkt[2] & 0x80) {
1579 *is_tagged = 1;
1580 if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
1581 *copy_len += TERM_DDP_LEN_TAGGED;
1582 hdr->hdrct |= DDP_HDR_FLAG;
1583 }
1584 } else {
1585 if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
1586 *copy_len += TERM_DDP_LEN_UNTAGGED;
1587 hdr->hdrct |= DDP_HDR_FLAG;
1588 }
1589 if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) &&
1590 ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) {
1591 *copy_len += TERM_RDMA_LEN;
1592 hdr->hdrct |= RDMA_HDR_FLAG;
1599 * irdma_bld_terminate_hdr - build terminate message header
1600 * @qp: qp associated with received terminate AE
1601 * @info: the struct containing AE information
1603 static int irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
1604 struct irdma_aeqe_info *info)
1606 u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
1607 int copy_len = 0;
1608 u8 is_tagged = 0;
1609 u32 opcode;
1610 struct irdma_terminate_hdr *termhdr;
1612 termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
1613 memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
1615 if (info->q2_data_written) {
1616 pkt = irdma_locate_mpa(pkt);
1617 irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged);
1618 }
1620 opcode = irdma_iwarp_opcode(info, pkt);
1621 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
1622 qp->sq_flush_code = info->sq;
1623 qp->rq_flush_code = info->rq;
1625 switch (info->ae_id) {
1626 case IRDMA_AE_AMP_UNALLOCATED_STAG:
1627 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1628 if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
1629 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
1630 (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1631 DDP_TAGGED_INV_STAG);
1632 else
1633 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1634 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1635 RDMAP_INV_STAG);
1636 break;
1637 case IRDMA_AE_AMP_BOUNDS_VIOLATION:
1638 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1639 if (info->q2_data_written)
1640 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
1641 (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1642 DDP_TAGGED_BOUNDS);
1643 else
1644 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1645 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1646 RDMAP_INV_BOUNDS);
1647 break;
1648 case IRDMA_AE_AMP_BAD_PD:
1649 switch (opcode) {
1650 case IRDMA_OP_TYPE_RDMA_WRITE:
1651 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
1652 (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1653 DDP_TAGGED_UNASSOC_STAG);
1654 break;
1655 case IRDMA_OP_TYPE_SEND_INV:
1656 case IRDMA_OP_TYPE_SEND_SOL_INV:
1657 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1658 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1659 RDMAP_CANT_INV_STAG);
1660 break;
1661 default:
1662 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1663 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1664 RDMAP_UNASSOC_STAG);
1665 }
1666 break;
1667 case IRDMA_AE_AMP_INVALID_STAG:
1668 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1669 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1670 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1671 RDMAP_INV_STAG);
1672 break;
1673 case IRDMA_AE_AMP_BAD_QP:
1674 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
1675 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1676 DDP_UNTAGGED_INV_QN);
1677 break;
1678 case IRDMA_AE_AMP_BAD_STAG_KEY:
1679 case IRDMA_AE_AMP_BAD_STAG_INDEX:
1680 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1681 switch (opcode) {
1682 case IRDMA_OP_TYPE_SEND_INV:
1683 case IRDMA_OP_TYPE_SEND_SOL_INV:
1684 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
1685 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1686 RDMAP_CANT_INV_STAG);
1687 break;
1688 default:
1689 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1690 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1691 RDMAP_INV_STAG);
1692 }
1693 break;
1694 case IRDMA_AE_AMP_RIGHTS_VIOLATION:
1695 case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
1696 case IRDMA_AE_PRIV_OPERATION_DENIED:
1697 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1698 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1699 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1700 RDMAP_ACCESS);
1701 break;
1702 case IRDMA_AE_AMP_TO_WRAP:
1703 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1704 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1705 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1706 RDMAP_TO_WRAP);
1707 break;
1708 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
1709 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1710 (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
1711 break;
1712 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
1713 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
1714 (LAYER_DDP << 4) | DDP_CATASTROPHIC,
1715 DDP_CATASTROPHIC_LOCAL);
1716 break;
1717 case IRDMA_AE_LCE_QP_CATASTROPHIC:
1718 case IRDMA_AE_DDP_NO_L_BIT:
1719 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
1720 (LAYER_DDP << 4) | DDP_CATASTROPHIC,
1721 DDP_CATASTROPHIC_LOCAL);
1722 break;
1723 case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN:
1724 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1725 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1726 DDP_UNTAGGED_INV_MSN_RANGE);
1727 break;
1728 case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
1729 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1730 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
1731 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1732 DDP_UNTAGGED_INV_TOO_LONG);
1733 break;
1734 case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
1735 if (is_tagged)
1736 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1737 (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1738 DDP_TAGGED_INV_DDP_VER);
1739 else
1740 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1741 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1742 DDP_UNTAGGED_INV_DDP_VER);
1743 break;
1744 case IRDMA_AE_DDP_UBE_INVALID_MO:
1745 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1746 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1747 DDP_UNTAGGED_INV_MO);
1748 break;
1749 case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
1750 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
1751 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1752 DDP_UNTAGGED_INV_MSN_NO_BUF);
1753 break;
1754 case IRDMA_AE_DDP_UBE_INVALID_QN:
1755 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1756 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1757 DDP_UNTAGGED_INV_QN);
1758 break;
1759 case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
1760 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1761 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1762 RDMAP_INV_RDMAP_VER);
1763 break;
1764 default:
1765 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
1766 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1767 RDMAP_UNSPECIFIED);
1768 break;
1769 }
1771 if (copy_len)
1772 memcpy(termhdr + 1, pkt, copy_len);
1774 return sizeof(struct irdma_terminate_hdr) + copy_len;
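/* Editor's note: the terminate message is thus assembled in place in q2_buf,
 * the irdma_terminate_hdr control fields first and then up to copy_len bytes
 * of the offending DDP/RDMA headers right behind it (termhdr + 1); the
 * returned total length is what irdma_terminate_connection() below passes to
 * the MODIFY_QP command as termlen.
 */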
1778 * irdma_terminate_send_fin() - Send fin for terminate message
1779 * @qp: qp associated with received terminate AE
1781 void irdma_terminate_send_fin(struct irdma_sc_qp *qp)
1783 irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
1784 IRDMAQP_TERM_SEND_FIN_ONLY, 0);
1788 * irdma_terminate_connection() - Bad AE and send terminate to remote QP
1789 * @qp: qp associated with received terminate AE
1790 * @info: the struct containing AE information
1792 void irdma_terminate_connection(struct irdma_sc_qp *qp,
1793 struct irdma_aeqe_info *info)
1797 if (qp->term_flags & IRDMA_TERM_SENT)
1798 return;
1800 termlen = irdma_bld_terminate_hdr(qp, info);
1801 irdma_terminate_start_timer(qp);
1802 qp->term_flags |= IRDMA_TERM_SENT;
1803 irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
1804 IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
1808 * irdma_terminate_received - handle terminate received AE
1809 * @qp: qp associated with received terminate AE
1810 * @info: the struct containing AE information
1812 void irdma_terminate_received(struct irdma_sc_qp *qp,
1813 struct irdma_aeqe_info *info)
1815 u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
1816 __be32 *mpa;
1817 u8 ddp_ctl;
1818 u8 rdma_ctl;
1819 u16 aeq_id = 0;
1820 struct irdma_terminate_hdr *termhdr;
1822 mpa = (__be32 *)irdma_locate_mpa(pkt);
1823 if (info->q2_data_written) {
1824 /* did not validate the frame - do it now */
1825 ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
1826 rdma_ctl = ntohl(mpa[0]) & 0xff;
1827 if ((ddp_ctl & 0xc0) != 0x40)
1828 aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC;
1829 else if ((ddp_ctl & 0x03) != 1)
1830 aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION;
1831 else if (ntohl(mpa[2]) != 2)
1832 aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN;
1833 else if (ntohl(mpa[3]) != 1)
1834 aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN;
1835 else if (ntohl(mpa[4]) != 0)
1836 aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO;
1837 else if ((rdma_ctl & 0xc0) != 0x40)
1838 aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
1840 info->ae_id = aeq_id;
1841 if (info->ae_id) {
1842 /* Bad terminate recvd - send back a terminate */
1843 irdma_terminate_connection(qp, info);
1844 return;
1845 }
1846 }
1848 qp->term_flags |= IRDMA_TERM_RCVD;
1849 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
1850 termhdr = (struct irdma_terminate_hdr *)&mpa[5];
1851 if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
1852 termhdr->layer_etype == RDMAP_REMOTE_OP) {
1853 irdma_terminate_done(qp, 0);
1854 } else {
1855 irdma_terminate_start_timer(qp);
1856 irdma_terminate_send_fin(qp);
1857 }
1860 static enum irdma_status_code irdma_null_ws_add(struct irdma_sc_vsi *vsi,
1861 u8 user_pri)
1863 return 0;
1866 static void irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
1871 static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
1877 * irdma_sc_vsi_init - Init the vsi structure
1878 * @vsi: pointer to vsi structure to initialize
1879 * @info: the info used to initialize the vsi struct
1881 void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
1882 struct irdma_vsi_init_info *info)
1884 struct irdma_l2params *l2p;
1885 int i;
1887 vsi->dev = info->dev;
1888 vsi->back_vsi = info->back_vsi;
1889 vsi->register_qset = info->register_qset;
1890 vsi->unregister_qset = info->unregister_qset;
1891 vsi->mtu = info->params->mtu;
1892 vsi->exception_lan_q = info->exception_lan_q;
1893 vsi->vsi_idx = info->pf_data_vsi_num;
1894 if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1895 vsi->fcn_id = info->dev->hmc_fn_id;
1897 l2p = info->params;
1898 vsi->qos_rel_bw = l2p->vsi_rel_bw;
1899 vsi->qos_prio_type = l2p->vsi_prio_type;
1900 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
1901 if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1902 vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
1903 vsi->qos[i].traffic_class = info->params->up2tc[i];
1904 vsi->qos[i].rel_bw =
1905 l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
1906 vsi->qos[i].prio_type =
1907 l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
1908 vsi->qos[i].valid = false;
1909 mutex_init(&vsi->qos[i].qos_mutex);
1910 INIT_LIST_HEAD(&vsi->qos[i].qplist);
1911 }
1912 if (vsi->register_qset) {
1913 vsi->dev->ws_add = irdma_ws_add;
1914 vsi->dev->ws_remove = irdma_ws_remove;
1915 vsi->dev->ws_reset = irdma_ws_reset;
1916 } else {
1917 vsi->dev->ws_add = irdma_null_ws_add;
1918 vsi->dev->ws_remove = irdma_null_ws_remove;
1919 vsi->dev->ws_reset = irdma_null_ws_reset;
1924 * irdma_get_fcn_id - Return the function id
1925 * @vsi: pointer to the vsi
1927 static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi)
1929 struct irdma_stats_inst_info stats_info = {};
1930 struct irdma_sc_dev *dev = vsi->dev;
1931 u8 fcn_id = IRDMA_INVALID_FCN_ID;
1932 u8 start_idx, max_stats, i;
1934 if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
1935 if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
1936 &stats_info))
1937 return stats_info.stats_idx;
1942 for (i = start_idx; i < max_stats; i++)
1943 if (!dev->fcn_id_array[i]) {
1944 fcn_id = i;
1945 dev->fcn_id_array[i] = true;
1946 break;
1947 }
1949 return fcn_id;
1953 * irdma_vsi_stats_init - Initialize the vsi statistics
1954 * @vsi: pointer to the vsi structure
1955 * @info: The info structure used for initialization
1957 enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
1958 struct irdma_vsi_stats_info *info)
1960 u8 fcn_id = info->fcn_id;
1961 struct irdma_dma_mem *stats_buff_mem;
1963 vsi->pestat = info->pestat;
1964 vsi->pestat->hw = vsi->dev->hw;
1965 vsi->pestat->vsi = vsi;
1966 stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
1967 stats_buff_mem->size = ALIGN(IRDMA_GATHER_STATS_BUF_SIZE * 2, 1);
1968 stats_buff_mem->va = dma_alloc_coherent(vsi->pestat->hw->device,
1969 stats_buff_mem->size,
1970 &stats_buff_mem->pa,
1971 GFP_KERNEL);
1972 if (!stats_buff_mem->va)
1973 return IRDMA_ERR_NO_MEMORY;
1975 vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
1976 vsi->pestat->gather_info.last_gather_stats_va =
1977 (void *)((uintptr_t)stats_buff_mem->va +
1978 IRDMA_GATHER_STATS_BUF_SIZE);
1980 irdma_hw_stats_start_timer(vsi);
1981 if (info->alloc_fcn_id)
1982 fcn_id = irdma_get_fcn_id(vsi);
1983 if (fcn_id == IRDMA_INVALID_FCN_ID)
1984 goto stats_error;
1986 vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
1987 vsi->fcn_id = fcn_id;
1988 if (info->alloc_fcn_id) {
1989 vsi->pestat->gather_info.use_stats_inst = true;
1990 vsi->pestat->gather_info.stats_inst_index = fcn_id;
1991 }
1993 return 0;
1995 stats_error:
1996 dma_free_coherent(vsi->pestat->hw->device, stats_buff_mem->size,
1997 stats_buff_mem->va, stats_buff_mem->pa);
1998 stats_buff_mem->va = NULL;
2000 return IRDMA_ERR_CQP_COMPL_ERROR;
2004 * irdma_vsi_stats_free - Free the vsi stats
2005 * @vsi: pointer to the vsi structure
2007 void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
2009 struct irdma_stats_inst_info stats_info = {};
2010 u8 fcn_id = vsi->fcn_id;
2011 struct irdma_sc_dev *dev = vsi->dev;
2013 if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
2014 if (vsi->stats_fcn_id_alloc) {
2015 stats_info.stats_idx = vsi->fcn_id;
2016 irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
2017 &stats_info);
2018 }
2019 } else {
2020 if (vsi->stats_fcn_id_alloc &&
2021 fcn_id < vsi->dev->hw_attrs.max_stat_inst)
2022 vsi->dev->fcn_id_array[fcn_id] = false;
2023 }
2025 if (!vsi->pestat)
2026 return;
2027 irdma_hw_stats_stop_timer(vsi);
2028 dma_free_coherent(vsi->pestat->hw->device,
2029 vsi->pestat->gather_info.stats_buff_mem.size,
2030 vsi->pestat->gather_info.stats_buff_mem.va,
2031 vsi->pestat->gather_info.stats_buff_mem.pa);
2032 vsi->pestat->gather_info.stats_buff_mem.va = NULL;
2036 * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size
2037 * @wqsize: size of the wq (sq, rq) to encoded_size
2038 * @queue_type: queue type selected for the calculation algorithm
2040 u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
2042 u8 encoded_size = 0;
2044 /* cqp sq's hw coded value starts from 1 for size of 4
2045 * while it starts from 0 for qp wq's.
2046 */
2047 if (queue_type == IRDMA_QUEUE_TYPE_CQP)
2048 encoded_size = 1;
2049 wqsize >>= 2;
2050 while (wqsize >>= 1)
2051 encoded_size++;
2053 return encoded_size;
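/*
 * Worked example (using the reconstructed body above): wqsize = 2048 gives
 * 2048 >> 2 = 512, which takes nine further right shifts to reach zero, so
 * a qp wq encodes as 9 and a CQP SQ as 1 + 9 = 10. Likewise wqsize = 4
 * encodes as 0 for a qp wq and 1 for the CQP SQ, matching the comment in
 * the function.
 */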
2057 * irdma_sc_gather_stats - collect the statistics
2058 * @cqp: struct for cqp hw
2059 * @info: gather stats info structure
2060 * @scratch: u64 saved to be used during cqp completion
2062 static enum irdma_status_code
2063 irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
2064 struct irdma_stats_gather_info *info, u64 scratch)
2069 if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
2070 return IRDMA_ERR_BUF_TOO_SHORT;
2072 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2073 if (!wqe)
2074 return IRDMA_ERR_RING_FULL;
2076 set_64bit_val(wqe, 40,
2077 FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
2078 set_64bit_val(wqe, 32, info->stats_buff_mem.pa);
2080 temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
2081 FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) |
2082 FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX,
2083 info->stats_inst_index) |
2084 FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
2085 info->use_hmc_fcn_index) |
2086 FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS);
2087 dma_wmb(); /* make sure WQE is written before valid bit is set */
2089 set_64bit_val(wqe, 24, temp);
2091 print_hex_dump_debug("STATS: GATHER_STATS WQE", DUMP_PREFIX_OFFSET,
2092 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2094 irdma_sc_cqp_post_sq(cqp);
2095 ibdev_dbg(to_ibdev(cqp->dev),
2096 "STATS: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
2097 cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
2099 return 0;
2103 * irdma_sc_manage_stats_inst - allocate or free stats instance
2104 * @cqp: struct for cqp hw
2105 * @info: stats info structure
2106 * @alloc: alloc vs. delete flag
2107 * @scratch: u64 saved to be used during cqp completion
2109 static enum irdma_status_code
2110 irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
2111 struct irdma_stats_inst_info *info, bool alloc,
2112 u64 scratch)
2113 {
2114 __le64 *wqe;
2115 u64 temp;
2117 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2118 if (!wqe)
2119 return IRDMA_ERR_RING_FULL;
2121 set_64bit_val(wqe, 40,
2122 FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
2123 temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
2124 FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) |
2125 FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
2126 info->use_hmc_fcn_index) |
2127 FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
2128 FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS);
2130 dma_wmb(); /* make sure WQE is written before valid bit is set */
2132 set_64bit_val(wqe, 24, temp);
2134 print_hex_dump_debug("WQE: MANAGE_STATS WQE", DUMP_PREFIX_OFFSET, 16,
2135 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2137 irdma_sc_cqp_post_sq(cqp);
2139 return 0;
2142 * irdma_sc_set_up_map - set the up map table
2143 * @cqp: struct for cqp hw
2144 * @info: User priority map info
2145 * @scratch: u64 saved to be used during cqp completion
2147 static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
2148 struct irdma_up_info *info,
2149 u64 scratch)
2150 {
2151 __le64 *wqe;
2152 u64 temp = 0;
2153 int i;
2155 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2156 if (!wqe)
2157 return IRDMA_ERR_RING_FULL;
2159 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
2160 temp |= (u64)info->map[i] << (i * 8);
2162 set_64bit_val(wqe, 0, temp);
2163 set_64bit_val(wqe, 40,
2164 FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) |
2165 FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx));
2167 temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) |
2168 FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) |
2169 FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE,
2170 info->use_cnp_up_override) |
2171 FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP);
2172 dma_wmb(); /* make sure WQE is written before valid bit is set */
2174 set_64bit_val(wqe, 24, temp);
2176 print_hex_dump_debug("WQE: UPMAP WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
2177 IRDMA_CQP_WQE_SIZE * 8, false);
2178 irdma_sc_cqp_post_sq(cqp);
2180 return 0;
2184 * irdma_sc_manage_ws_node - create/modify/destroy WS node
2185 * @cqp: struct for cqp hw
2186 * @info: node info structure
2187 * @node_op: 0 for add, 1 for modify, 2 for delete
2188 * @scratch: u64 saved to be used during cqp completion
2190 static enum irdma_status_code
2191 irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
2192 struct irdma_ws_node_info *info,
2193 enum irdma_ws_node_op node_op, u64 scratch)
2194 {
2195 __le64 *wqe;
2196 u64 temp = 0;
2198 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2199 if (!wqe)
2200 return IRDMA_ERR_RING_FULL;
2202 set_64bit_val(wqe, 32,
2203 FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
2204 FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight));
2206 temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) |
2207 FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) |
2208 FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) |
2209 FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) |
2210 FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) |
2211 FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) |
2212 FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) |
2213 FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) |
2214 FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id);
2215 dma_wmb(); /* make sure WQE is written before valid bit is set */
2217 set_64bit_val(wqe, 24, temp);
2219 print_hex_dump_debug("WQE: MANAGE_WS WQE", DUMP_PREFIX_OFFSET, 16, 8,
2220 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2221 irdma_sc_cqp_post_sq(cqp);
2223 return 0;
2227 * irdma_sc_qp_flush_wqes - flush qp's wqe
2228 * @qp: sc qp
2229 * @info: flush information
2230 * @scratch: u64 saved to be used during cqp completion
2231 * @post_sq: flag for cqp db to ring
2233 enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
2234 struct irdma_qp_flush_info *info,
2235 u64 scratch, bool post_sq)
2236 {
2237 u64 temp = 0;
2238 __le64 *wqe;
2239 struct irdma_sc_cqp *cqp;
2240 u64 hdr;
2241 bool flush_sq = false, flush_rq = false;
2243 if (info->rq && !qp->flush_rq)
2244 flush_rq = true;
2245 if (info->sq && !qp->flush_sq)
2246 flush_sq = true;
2247 qp->flush_sq |= flush_sq;
2248 qp->flush_rq |= flush_rq;
2250 if (!flush_sq && !flush_rq) {
2251 ibdev_dbg(to_ibdev(qp->dev),
2252 "CQP: Additional flush request ignored for qp %x\n",
2254 return IRDMA_ERR_FLUSHED_Q;
2257 cqp = qp->pd->dev->cqp;
2258 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2259 if (!wqe)
2260 return IRDMA_ERR_RING_FULL;
2262 if (info->userflushcode) {
2263 if (flush_rq)
2264 temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR,
2265 info->rq_minor_code) |
2266 FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR,
2267 info->rq_major_code);
2268 if (flush_sq)
2269 temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR,
2270 info->sq_minor_code) |
2271 FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR,
2272 info->sq_major_code);
2273 }
2274 set_64bit_val(wqe, 16, temp);
2276 temp = (info->generate_ae) ?
2277 info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
2278 info->ae_src) : 0;
2279 set_64bit_val(wqe, 8, temp);
2281 hdr = qp->qp_uk.qp_id |
2282 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
2283 FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
2284 FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) |
2285 FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
2286 FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
2287 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2288 dma_wmb(); /* make sure WQE is written before valid bit is set */
2290 set_64bit_val(wqe, 24, hdr);
2292 print_hex_dump_debug("WQE: QP_FLUSH WQE", DUMP_PREFIX_OFFSET, 16, 8,
2293 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2294 if (post_sq)
2295 irdma_sc_cqp_post_sq(cqp);
2297 return 0;
2301 * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP
2303 * @info: gen ae information
2304 * @scratch: u64 saved to be used during cqp completion
2305 * @post_sq: flag for cqp db to ring
2307 static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
2308 struct irdma_gen_ae_info *info,
2309 u64 scratch, bool post_sq)
2310 {
2311 u64 temp;
2312 __le64 *wqe;
2313 struct irdma_sc_cqp *cqp;
2314 u64 hdr;
2316 cqp = qp->pd->dev->cqp;
2317 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2318 if (!wqe)
2319 return IRDMA_ERR_RING_FULL;
2321 temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
2322 info->ae_src);
2323 set_64bit_val(wqe, 8, temp);
2325 hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE,
2326 IRDMA_CQP_OP_GEN_AE) |
2327 FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) |
2328 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2329 dma_wmb(); /* make sure WQE is written before valid bit is set */
2331 set_64bit_val(wqe, 24, hdr);
2333 print_hex_dump_debug("WQE: GEN_AE WQE", DUMP_PREFIX_OFFSET, 16, 8,
2334 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2335 if (post_sq)
2336 irdma_sc_cqp_post_sq(cqp);
2338 return 0;
2340 /**
2341 * irdma_sc_qp_upload_context - upload qp's context
2342 * @dev: sc device struct
2343 * @info: upload context info ptr for return
2344 * @scratch: u64 saved to be used during cqp completion
2345 * @post_sq: flag for cqp db to ring
2347 static enum irdma_status_code
2348 irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
2349 struct irdma_upload_context_info *info, u64 scratch,
2350 bool post_sq)
2351 {
2352 __le64 *wqe;
2353 struct irdma_sc_cqp *cqp;
2354 u64 hdr;
2356 cqp = dev->cqp;
2357 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2358 if (!wqe)
2359 return IRDMA_ERR_RING_FULL;
2361 set_64bit_val(wqe, 16, info->buf_pa);
2363 hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) |
2364 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) |
2365 FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) |
2366 FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) |
2367 FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) |
2368 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2369 dma_wmb(); /* make sure WQE is written before valid bit is set */
2371 set_64bit_val(wqe, 24, hdr);
2373 print_hex_dump_debug("WQE: QP_UPLOAD_CTX WQE", DUMP_PREFIX_OFFSET, 16,
2374 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2375 if (post_sq)
2376 irdma_sc_cqp_post_sq(cqp);
2378 return 0;
2382 * irdma_sc_manage_push_page - Handle push page
2383 * @cqp: struct for cqp hw
2384 * @info: push page info
2385 * @scratch: u64 saved to be used during cqp completion
2386 * @post_sq: flag for cqp db to ring
2388 static enum irdma_status_code
2389 irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
2390 struct irdma_cqp_manage_push_page_info *info,
2391 u64 scratch, bool post_sq)
2392 {
2393 __le64 *wqe;
2394 u64 hdr;
2396 if (info->free_page &&
2397 info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
2398 return IRDMA_ERR_INVALID_PUSH_PAGE_INDEX;
2400 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2401 if (!wqe)
2402 return IRDMA_ERR_RING_FULL;
2404 set_64bit_val(wqe, 16, info->qs_handle);
2405 hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
2406 FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) |
2407 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) |
2408 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
2409 FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page);
2410 dma_wmb(); /* make sure WQE is written before valid bit is set */
2412 set_64bit_val(wqe, 24, hdr);
2414 print_hex_dump_debug("WQE: MANAGE_PUSH_PAGES WQE", DUMP_PREFIX_OFFSET,
2415 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2416 if (post_sq)
2417 irdma_sc_cqp_post_sq(cqp);
2419 return 0;
2423 * irdma_sc_suspend_qp - suspend qp for param change
2424 * @cqp: struct for cqp hw
2426 * @scratch: u64 saved to be used during cqp completion
2428 static enum irdma_status_code irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp,
2429 struct irdma_sc_qp *qp,
2430 u64 scratch)
2431 {
2432 u64 hdr;
2433 __le64 *wqe;
2435 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2436 if (!wqe)
2437 return IRDMA_ERR_RING_FULL;
2439 hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
2440 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
2441 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2442 dma_wmb(); /* make sure WQE is written before valid bit is set */
2444 set_64bit_val(wqe, 24, hdr);
2446 print_hex_dump_debug("WQE: SUSPEND_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
2447 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2448 irdma_sc_cqp_post_sq(cqp);
2450 return 0;
2454 * irdma_sc_resume_qp - resume qp after suspend
2455 * @cqp: struct for cqp hw
2457 * @scratch: u64 saved to be used during cqp completion
2459 static enum irdma_status_code irdma_sc_resume_qp(struct irdma_sc_cqp *cqp,
2460 struct irdma_sc_qp *qp,
2461 u64 scratch)
2462 {
2463 u64 hdr;
2464 __le64 *wqe;
2466 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2467 if (!wqe)
2468 return IRDMA_ERR_RING_FULL;
2470 set_64bit_val(wqe, 16,
2471 FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
2473 hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) |
2474 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) |
2475 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2476 dma_wmb(); /* make sure WQE is written before valid bit is set */
2478 set_64bit_val(wqe, 24, hdr);
2480 print_hex_dump_debug("WQE: RESUME_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
2481 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2482 irdma_sc_cqp_post_sq(cqp);
2484 return 0;
2488 * irdma_sc_cq_ack - acknowledge completion q
2491 static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
2493 writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
2497 * irdma_sc_cq_init - initialize completion q
2499 * @info: cq initialization info
2501 enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
2502 struct irdma_cq_init_info *info)
2503 {
2504 enum irdma_status_code ret_code;
2505 u32 pble_obj_cnt;
2507 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
2508 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
2509 return IRDMA_ERR_INVALID_PBLE_INDEX;
2511 cq->cq_pa = info->cq_base_pa;
2512 cq->dev = info->dev;
2513 cq->ceq_id = info->ceq_id;
2514 info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
2515 info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
2516 ret_code = irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
2517 if (ret_code)
2518 return ret_code;
2520 cq->virtual_map = info->virtual_map;
2521 cq->pbl_chunk_size = info->pbl_chunk_size;
2522 cq->ceqe_mask = info->ceqe_mask;
2523 cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP;
2524 cq->shadow_area_pa = info->shadow_area_pa;
2525 cq->shadow_read_threshold = info->shadow_read_threshold;
2526 cq->ceq_id_valid = info->ceq_id_valid;
2527 cq->tph_en = info->tph_en;
2528 cq->tph_val = info->tph_val;
2529 cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2530 cq->vsi = info->vsi;
2532 return 0;
2536 * irdma_sc_cq_create - create completion q
2538 * @scratch: u64 saved to be used during cqp completion
2539 * @check_overflow: flag for overflow check
2540 * @post_sq: flag for cqp db to ring
2542 static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
2543 u64 scratch,
2544 bool check_overflow,
2545 bool post_sq)
2546 {
2547 __le64 *wqe;
2548 struct irdma_sc_cqp *cqp;
2549 u64 hdr;
2550 struct irdma_sc_ceq *ceq;
2551 enum irdma_status_code ret_code = 0;
2553 cqp = cq->dev->cqp;
2554 if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1))
2555 return IRDMA_ERR_INVALID_CQ_ID;
2557 if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1))
2558 return IRDMA_ERR_INVALID_CEQ_ID;
2560 ceq = cq->dev->ceq[cq->ceq_id];
2561 if (ceq && ceq->reg_cq) {
2562 ret_code = irdma_sc_add_cq_ctx(ceq, cq);
2563 if (ret_code)
2564 return ret_code;
2565 }
2567 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2568 if (!wqe) {
2569 if (ceq && ceq->reg_cq)
2570 irdma_sc_remove_cq_ctx(ceq, cq);
2571 return IRDMA_ERR_RING_FULL;
2572 }
2574 set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
2575 set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
2576 set_64bit_val(wqe, 16,
2577 FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
2578 set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
2579 set_64bit_val(wqe, 40, cq->shadow_area_pa);
2580 set_64bit_val(wqe, 48,
2581 FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
2582 set_64bit_val(wqe, 56,
2583 FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
2584 FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
2586 hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
2587 FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
2588 IRDMA_CQPSQ_CQ_CEQID) |
2589 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
2590 FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
2591 FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
2592 FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
2593 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
2594 FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
2595 FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
2596 FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
2597 cq->cq_uk.avoid_mem_cflct) |
2598 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2599 dma_wmb(); /* make sure WQE is written before valid bit is set */
2601 set_64bit_val(wqe, 24, hdr);
2603 print_hex_dump_debug("WQE: CQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
2604 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2605 if (post_sq)
2606 irdma_sc_cqp_post_sq(cqp);
2608 return 0;
2612 * irdma_sc_cq_destroy - destroy completion q
2614 * @scratch: u64 saved to be used during cqp completion
2615 * @post_sq: flag for cqp db to ring
2617 enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
2618 bool post_sq)
2619 {
2620 struct irdma_sc_cqp *cqp;
2621 __le64 *wqe;
2622 u64 hdr;
2623 struct irdma_sc_ceq *ceq;
2625 cqp = cq->dev->cqp;
2626 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2627 if (!wqe)
2628 return IRDMA_ERR_RING_FULL;
2630 ceq = cq->dev->ceq[cq->ceq_id];
2631 if (ceq && ceq->reg_cq)
2632 irdma_sc_remove_cq_ctx(ceq, cq);
2634 set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
2635 set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
2636 set_64bit_val(wqe, 40, cq->shadow_area_pa);
2637 set_64bit_val(wqe, 48,
2638 (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2640 hdr = cq->cq_uk.cq_id |
2641 FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
2642 IRDMA_CQPSQ_CQ_CEQID) |
2643 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
2644 FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
2645 FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
2646 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
2647 FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
2648 FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
2649 FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) |
2650 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2651 dma_wmb(); /* make sure WQE is written before valid bit is set */
2653 set_64bit_val(wqe, 24, hdr);
2655 print_hex_dump_debug("WQE: CQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
2656 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2657 if (post_sq)
2658 irdma_sc_cqp_post_sq(cqp);
2660 return 0;
2664 * irdma_sc_cq_resize - set resized cq buffer info
2666 * @info: resized cq buffer info
2668 void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info)
2670 cq->virtual_map = info->virtual_map;
2671 cq->cq_pa = info->cq_pa;
2672 cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2673 cq->pbl_chunk_size = info->pbl_chunk_size;
2674 irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);
2678 * irdma_sc_cq_modify - modify a Completion Queue
2680 * @info: modification info struct
2681 * @scratch: u64 saved to be used during cqp completion
2682 * @post_sq: flag to post to sq
2684 static enum irdma_status_code
2685 irdma_sc_cq_modify(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info,
2686 u64 scratch, bool post_sq)
2687 {
2688 struct irdma_sc_cqp *cqp;
2689 __le64 *wqe;
2690 u64 hdr;
2691 u32 pble_obj_cnt;
2693 pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
2694 if (info->cq_resize && info->virtual_map &&
2695 info->first_pm_pbl_idx >= pble_obj_cnt)
2696 return IRDMA_ERR_INVALID_PBLE_INDEX;
2698 cqp = cq->dev->cqp;
2699 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2700 if (!wqe)
2701 return IRDMA_ERR_RING_FULL;
2703 set_64bit_val(wqe, 0, info->cq_size);
2704 set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
2705 set_64bit_val(wqe, 16,
2706 FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
2707 set_64bit_val(wqe, 32, info->cq_pa);
2708 set_64bit_val(wqe, 40, cq->shadow_area_pa);
2709 set_64bit_val(wqe, 48, info->first_pm_pbl_idx);
2710 set_64bit_val(wqe, 56,
2711 FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
2712 FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
2714 hdr = cq->cq_uk.cq_id |
2715 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) |
2716 FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) |
2717 FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) |
2718 FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) |
2719 FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) |
2720 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
2721 FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
2722 FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
2723 cq->cq_uk.avoid_mem_cflct) |
2724 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2725 dma_wmb(); /* make sure WQE is written before valid bit is set */
2727 set_64bit_val(wqe, 24, hdr);
2729 print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
2730 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2731 if (post_sq)
2732 irdma_sc_cqp_post_sq(cqp);
2734 return 0;
2738 * irdma_check_cqp_progress - check cqp processing progress
2739 * @timeout: timeout info struct
2740 * @dev: sc device struct
2742 void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
2744 if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
2745 timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
2746 timeout->count = 0;
2747 } else {
2748 if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] !=
2749 timeout->compl_cqp_cmds)
2750 timeout->count++;
2751 }
2755 * irdma_get_cqp_reg_info - get head and tail for cqp using registers
2756 * @cqp: struct for cqp hw
2757 * @val: cqp tail register value
2758 * @tail: wqtail register value
2759 * @error: cqp processing err
2761 static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
2762 u32 *tail, u32 *error)
2764 *val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
2765 *tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val);
2766 *error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val);
2770 * irdma_cqp_poll_registers - poll cqp registers
2771 * @cqp: struct for cqp hw
2772 * @tail: wqtail register value
2773 * @count: how many times to try for completion
2775 static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
2776 u32 tail, u32 count)
2778 u32 i = 0;
2779 u32 newtail, error, val;
2781 while (i++ < count) {
2782 irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
2783 if (error) {
2784 error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
2785 ibdev_dbg(to_ibdev(cqp->dev),
2786 "CQP: CQPERRCODES error_code[x%08X]\n",
2787 error);
2788 return IRDMA_ERR_CQP_COMPL_ERROR;
2789 }
2790 if (newtail != tail) {
2791 /* SUCCESS */
2792 IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
2793 cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
2794 return 0;
2795 }
2796 udelay(cqp->dev->hw_attrs.max_sleep_count);
2797 }
2799 return IRDMA_ERR_TIMEOUT;
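/*
 * Usage sketch (mirroring irdma_sc_commit_fpm_val() further down): sample
 * the WQTAIL register before ringing the doorbell, then poll until the
 * hardware advances past the sampled tail or reports an error:
 *
 *	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
 *	irdma_sc_cqp_post_sq(cqp);
 *	ret_code = irdma_cqp_poll_registers(cqp, tail,
 *					    cqp->dev->hw_attrs.max_done_count);
 */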
2803 * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base
2804 * @dev: sc device struct
2805 * @buf: pointer to commit buffer
2806 * @buf_idx: buffer index
2807 * @obj_info: object info pointer
2808 * @rsrc_idx: index of the memory resource
2810 static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
2811 u32 buf_idx, struct irdma_hmc_obj_info *obj_info,
2812 u32 rsrc_idx)
2813 {
2814 u64 temp;
2816 get_64bit_val(buf, buf_idx, &temp);
2818 switch (rsrc_idx) {
2819 case IRDMA_HMC_IW_QP:
2820 obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp);
2821 break;
2822 case IRDMA_HMC_IW_CQ:
2823 obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
2824 break;
2825 case IRDMA_HMC_IW_APBVT_ENTRY:
2826 obj_info[rsrc_idx].cnt = 1;
2827 break;
2828 default:
2829 obj_info[rsrc_idx].cnt = (u32)temp;
2830 break;
2831 }
2833 obj_info[rsrc_idx].base = (temp >> IRDMA_COMMIT_FPM_BASE_S) * 512;
2835 return temp;
2839 * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer
2840 * @dev: pointer to dev struct
2841 * @buf: ptr to fpm commit buffer
2842 * @info: ptr to irdma_hmc_obj_info struct
2843 * @sd: number of SDs for HMC objects
2845 * parses fpm commit info and copy base value
2846 * of hmc objects in hmc_info
2848 static enum irdma_status_code
2849 irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
2850 struct irdma_hmc_obj_info *info, u32 *sd)
2852 u64 size;
2853 u32 i;
2854 u64 max_base = 0;
2855 u32 last_hmc_obj = 0;
2857 irdma_sc_decode_fpm_commit(dev, buf, 0, info,
2858 IRDMA_HMC_IW_QP);
2859 irdma_sc_decode_fpm_commit(dev, buf, 8, info,
2860 IRDMA_HMC_IW_CQ);
2862 irdma_sc_decode_fpm_commit(dev, buf, 24, info,
2863 IRDMA_HMC_IW_HTE);
2864 irdma_sc_decode_fpm_commit(dev, buf, 32, info,
2865 IRDMA_HMC_IW_ARP);
2866 irdma_sc_decode_fpm_commit(dev, buf, 40, info,
2867 IRDMA_HMC_IW_APBVT_ENTRY);
2868 irdma_sc_decode_fpm_commit(dev, buf, 48, info,
2869 IRDMA_HMC_IW_MR);
2870 irdma_sc_decode_fpm_commit(dev, buf, 56, info,
2871 IRDMA_HMC_IW_XF);
2872 irdma_sc_decode_fpm_commit(dev, buf, 64, info,
2873 IRDMA_HMC_IW_XFFL);
2874 irdma_sc_decode_fpm_commit(dev, buf, 72, info,
2875 IRDMA_HMC_IW_Q1);
2876 irdma_sc_decode_fpm_commit(dev, buf, 80, info,
2877 IRDMA_HMC_IW_Q1FL);
2878 irdma_sc_decode_fpm_commit(dev, buf, 88, info,
2879 IRDMA_HMC_IW_TIMER);
2880 irdma_sc_decode_fpm_commit(dev, buf, 112, info,
2881 IRDMA_HMC_IW_PBLE);
2882 /* skipping RSVD. */
2883 if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
2884 irdma_sc_decode_fpm_commit(dev, buf, 96, info,
2885 IRDMA_HMC_IW_FSIMC);
2886 irdma_sc_decode_fpm_commit(dev, buf, 104, info,
2887 IRDMA_HMC_IW_FSIAV);
2888 irdma_sc_decode_fpm_commit(dev, buf, 128, info,
2889 IRDMA_HMC_IW_RRF);
2890 irdma_sc_decode_fpm_commit(dev, buf, 136, info,
2891 IRDMA_HMC_IW_RRFFL);
2892 irdma_sc_decode_fpm_commit(dev, buf, 144, info,
2893 IRDMA_HMC_IW_HDR);
2894 irdma_sc_decode_fpm_commit(dev, buf, 152, info,
2895 IRDMA_HMC_IW_MD);
2896 irdma_sc_decode_fpm_commit(dev, buf, 160, info,
2897 IRDMA_HMC_IW_OOISC);
2898 irdma_sc_decode_fpm_commit(dev, buf, 168, info,
2899 IRDMA_HMC_IW_OOISCFFL);
2900 }
2902 /* searching for the last object in HMC to find the size of the HMC area. */
2903 for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
2904 if (info[i].base > max_base) {
2905 max_base = info[i].base;
2906 last_hmc_obj = i;
2907 }
2908 }
2910 size = info[last_hmc_obj].cnt * info[last_hmc_obj].size +
2911 info[last_hmc_obj].base;
2913 if (size & 0x1FFFFF)
2914 *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
2916 *sd = (u32)(size >> 21);
2918 return 0;
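/*
 * Worked example: one SD spans 2MB (1 << 21). If the highest-based object
 * ends at 0x500000 (5MB), then 0x500000 & 0x1FFFFF is non-zero, so
 * *sd = (0x500000 >> 21) + 1 = 3 descriptors; an exact multiple such as
 * 0x400000 needs only 0x400000 >> 21 = 2.
 */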
2922 * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
2923 * @buf: ptr to fpm query buffer
2924 * @buf_idx: index into buf
2925 * @obj_info: ptr to irdma_hmc_obj_info struct
2926 * @rsrc_idx: resource index into info
2928 * Decode a 64 bit value from fpm query buffer into max count and size
2930 static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
2931 struct irdma_hmc_obj_info *obj_info,
2932 u32 rsrc_idx)
2933 {
2934 u64 temp;
2935 u32 size;
2937 get_64bit_val(buf, buf_idx, &temp);
2938 obj_info[rsrc_idx].max_cnt = (u32)temp;
2939 size = (u32)(temp >> 32);
2940 obj_info[rsrc_idx].size = BIT_ULL(size);
2942 return temp;
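/*
 * Worked example: each query word packs max_cnt in the low 32 bits and
 * log2(object size) in the high 32 bits. A value of 0x0000000c00010000
 * decodes as max_cnt = 0x10000 (65536 objects) of size BIT_ULL(12) = 4096
 * bytes each.
 */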
2946 * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer
2947 * @dev: ptr to shared code device
2948 * @buf: ptr to fpm query buffer
2949 * @hmc_info: ptr to irdma_hmc_obj_info struct
2950 * @hmc_fpm_misc: ptr to fpm data
2952 * parses fpm query buffer and copy max_cnt and
2953 * size value of hmc objects in hmc_info
2955 static enum irdma_status_code
2956 irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
2957 struct irdma_hmc_info *hmc_info,
2958 struct irdma_hmc_fpm_misc *hmc_fpm_misc)
2960 struct irdma_hmc_obj_info *obj_info;
2961 u64 temp;
2962 u32 size;
2963 u16 max_pe_sds;
2965 obj_info = hmc_info->hmc_obj;
2967 get_64bit_val(buf, 0, &temp);
2968 hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
2969 max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
2971 hmc_fpm_misc->max_sds = max_pe_sds;
2972 hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
2973 get_64bit_val(buf, 8, &temp);
2974 obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp);
2975 size = (u32)(temp >> 32);
2976 obj_info[IRDMA_HMC_IW_QP].size = BIT_ULL(size);
2978 get_64bit_val(buf, 16, &temp);
2979 obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp);
2980 size = (u32)(temp >> 32);
2981 obj_info[IRDMA_HMC_IW_CQ].size = BIT_ULL(size);
2983 irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
2984 irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
2986 obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
2987 obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
2989 irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
2990 irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
2992 get_64bit_val(buf, 64, &temp);
2993 obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
2994 obj_info[IRDMA_HMC_IW_XFFL].size = 4;
2995 hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
2996 if (!hmc_fpm_misc->xf_block_size)
2997 return IRDMA_ERR_INVALID_SIZE;
2999 irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
3000 get_64bit_val(buf, 80, &temp);
3001 obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp;
3002 obj_info[IRDMA_HMC_IW_Q1FL].size = 4;
3004 hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
3005 if (!hmc_fpm_misc->q1_block_size)
3006 return IRDMA_ERR_INVALID_SIZE;
3008 irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
3010 get_64bit_val(buf, 112, &temp);
3011 obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp;
3012 obj_info[IRDMA_HMC_IW_PBLE].size = 8;
3014 get_64bit_val(buf, 120, &temp);
3015 hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
3016 hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
3017 hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
3018 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
3019 return 0;
3020 irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
3021 irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV);
3022 irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF);
3024 get_64bit_val(buf, 136, &temp);
3025 obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp;
3026 obj_info[IRDMA_HMC_IW_RRFFL].size = 4;
3027 hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
3028 if (!hmc_fpm_misc->rrf_block_size &&
3029 obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
3030 return IRDMA_ERR_INVALID_SIZE;
3032 irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
3033 irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
3034 irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
3036 get_64bit_val(buf, 168, &temp);
3037 obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
3038 obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
3039 hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
3040 if (!hmc_fpm_misc->ooiscf_block_size &&
3041 obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
3042 return IRDMA_ERR_INVALID_SIZE;
3044 return 0;
3048 * irdma_sc_find_reg_cq - find cq ctx index
3049 * @ceq: ceq sc structure
3050 * @cq: cq sc structure
3052 static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
3053 struct irdma_sc_cq *cq)
3057 for (i = 0; i < ceq->reg_cq_size; i++) {
3058 if (cq == ceq->reg_cq[i])
3059 return i;
3060 }
3062 return IRDMA_INVALID_CQ_IDX;
3066 * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
3067 * @ceq: ceq sc structure
3068 * @cq: cq sc structure
3070 enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
3071 struct irdma_sc_cq *cq)
3073 unsigned long flags;
3075 spin_lock_irqsave(&ceq->req_cq_lock, flags);
3077 if (ceq->reg_cq_size == ceq->elem_cnt) {
3078 spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3079 return IRDMA_ERR_REG_CQ_FULL;
3082 ceq->reg_cq[ceq->reg_cq_size++] = cq;
3084 spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3086 return 0;
3090 * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
3091 * @ceq: ceq sc structure
3092 * @cq: cq sc structure
3094 void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
3096 unsigned long flags;
3097 u32 cq_ctx_idx;
3099 spin_lock_irqsave(&ceq->req_cq_lock, flags);
3100 cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
3101 if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
3102 goto exit;
3104 ceq->reg_cq_size--;
3105 if (cq_ctx_idx != ceq->reg_cq_size)
3106 ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
3107 ceq->reg_cq[ceq->reg_cq_size] = NULL;
3109 exit:
3110 spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
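/*
 * Removal above is O(1) and order-agnostic: the last registered cq is
 * copied into the vacated slot (a no-op when the removed entry was already
 * last) and the old tail slot is cleared, keeping ceq->reg_cq[] dense
 * without shifting the array.
 */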
3114 * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
3115 * @cqp: IWARP control queue pair pointer
3116 * @info: IWARP control queue pair init info pointer
3118 * Initializes the object and context buffers for a control Queue Pair.
3120 enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
3121 struct irdma_cqp_init_info *info)
3123 u8 hw_sq_size;
3125 if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
3126 info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
3127 ((info->sq_size & (info->sq_size - 1))))
3128 return IRDMA_ERR_INVALID_SIZE;
3130 hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
3131 IRDMA_QUEUE_TYPE_CQP);
3132 cqp->size = sizeof(*cqp);
3133 cqp->sq_size = info->sq_size;
3134 cqp->hw_sq_size = hw_sq_size;
3135 cqp->sq_base = info->sq;
3136 cqp->host_ctx = info->host_ctx;
3137 cqp->sq_pa = info->sq_pa;
3138 cqp->host_ctx_pa = info->host_ctx_pa;
3139 cqp->dev = info->dev;
3140 cqp->struct_ver = info->struct_ver;
3141 cqp->hw_maj_ver = info->hw_maj_ver;
3142 cqp->hw_min_ver = info->hw_min_ver;
3143 cqp->scratch_array = info->scratch_array;
3145 cqp->en_datacenter_tcp = info->en_datacenter_tcp;
3146 cqp->ena_vf_count = info->ena_vf_count;
3147 cqp->hmc_profile = info->hmc_profile;
3148 cqp->ceqs_per_vf = info->ceqs_per_vf;
3149 cqp->disable_packed = info->disable_packed;
3150 cqp->rocev2_rto_policy = info->rocev2_rto_policy;
3151 cqp->protocol_used = info->protocol_used;
3152 memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
3153 info->dev->cqp = cqp;
3155 IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
3156 cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0;
3157 cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0;
3158 /* for the cqp commands backlog. */
3159 INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
3161 writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
3162 writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
3163 writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3165 ibdev_dbg(to_ibdev(cqp->dev),
3166 "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
3167 cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
3168 (u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
3170 return 0;
3173 * irdma_sc_cqp_create - create cqp during bringup
3174 * @cqp: struct for cqp hw
3175 * @maj_err: If error, major err number
3176 * @min_err: If error, minor err number
3178 enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err,
3183 u32 cnt = 0, p1, p2, val = 0, err_code;
3184 enum irdma_status_code ret_code;
3186 hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
3187 cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
3188 IRDMA_SD_BUF_ALIGNMENT);
3189 cqp->sdbuf.va = dma_alloc_coherent(cqp->dev->hw->device,
3190 cqp->sdbuf.size, &cqp->sdbuf.pa,
3191 GFP_KERNEL);
3192 if (!cqp->sdbuf.va)
3193 return IRDMA_ERR_NO_MEMORY;
3195 spin_lock_init(&cqp->dev->cqp_lock);
3197 temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
3198 FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
3199 FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) |
3200 FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf);
3201 if (hw_rev >= IRDMA_GEN_2) {
3202 temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY,
3203 cqp->rocev2_rto_policy) |
3204 FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
3205 cqp->protocol_used);
3206 }
3208 set_64bit_val(cqp->host_ctx, 0, temp);
3209 set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
3211 temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
3212 FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
3213 set_64bit_val(cqp->host_ctx, 16, temp);
3214 set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
3215 temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
3216 FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver);
3217 if (hw_rev >= IRDMA_GEN_2) {
3218 temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) |
3219 FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor);
3221 set_64bit_val(cqp->host_ctx, 32, temp);
3222 set_64bit_val(cqp->host_ctx, 40, 0);
3223 temp = 0;
3224 if (hw_rev >= IRDMA_GEN_2) {
3225 temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) |
3226 FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) |
3227 FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor);
3228 }
3229 set_64bit_val(cqp->host_ctx, 48, temp);
3230 temp = 0;
3231 if (hw_rev >= IRDMA_GEN_2) {
3232 temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) |
3233 FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) |
3234 FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) |
3235 FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod);
3236 }
3237 set_64bit_val(cqp->host_ctx, 56, temp);
3238 print_hex_dump_debug("WQE: CQP_HOST_CTX WQE", DUMP_PREFIX_OFFSET, 16,
3239 8, cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8, false);
3240 p1 = cqp->host_ctx_pa >> 32;
3241 p2 = (u32)cqp->host_ctx_pa;
3243 writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
3244 writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
3246 do {
3247 if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
3248 ret_code = IRDMA_ERR_TIMEOUT;
3249 goto err;
3250 }
3251 udelay(cqp->dev->hw_attrs.max_sleep_count);
3252 val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3253 } while (!FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
3255 if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
3256 ret_code = IRDMA_ERR_DEVICE_NOT_SUPPORTED;
3257 goto err;
3258 }
3260 cqp->process_cqp_sds = irdma_update_sds_noccq;
3261 return 0;
3263 err:
3264 dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
3265 cqp->sdbuf.va, cqp->sdbuf.pa);
3266 cqp->sdbuf.va = NULL;
3267 err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
3268 *min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code);
3269 *maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code);
3271 return ret_code;
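/*
 * Bring-up sketch (illustrative only; struct setup and error handling
 * elided). The CQP must be initialized and created before any other CQP
 * command in this file can be issued; on failure maj_err/min_err carry the
 * CQPERRCODES major/minor codes:
 *
 *	struct irdma_cqp_init_info init_info = {};
 *	u16 maj_err, min_err;
 *
 *	irdma_sc_cqp_init(cqp, &init_info);
 *	irdma_sc_cqp_create(cqp, &maj_err, &min_err);
 */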
3274 * irdma_sc_cqp_post_sq - post of cqp's sq
3275 * @cqp: struct for cqp hw
3277 void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp)
3279 writel(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);
3281 ibdev_dbg(to_ibdev(cqp->dev),
3282 "WQE: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
3283 cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
3287 * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq
3288 * and pass back index
3289 * @cqp: CQP HW structure
3290 * @scratch: private data for CQP WQE
3291 * @wqe_idx: WQE index of CQP SQ
3293 __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
3294 u32 *wqe_idx)
3295 {
3296 __le64 *wqe;
3297 enum irdma_status_code ret_code;
3299 if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
3300 ibdev_dbg(to_ibdev(cqp->dev),
3301 "WQE: CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n",
3302 cqp->sq_ring.head, cqp->sq_ring.tail,
3303 cqp->sq_ring.size);
3304 return NULL;
3305 }
3306 IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
3307 if (ret_code)
3308 return NULL;
3310 cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++;
3311 if (!*wqe_idx)
3312 cqp->polarity = !cqp->polarity;
3313 wqe = cqp->sq_base[*wqe_idx].elem;
3314 cqp->scratch_array[*wqe_idx] = scratch;
3315 IRDMA_CQP_INIT_WQE(wqe);
3317 return wqe;
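/*
 * Every CQP command in this file follows the same submission pattern
 * around this helper (a sketch; payload offsets vary per command). The
 * dma_wmb() orders the payload writes ahead of the header word carrying
 * the WQEVALID bit at the current polarity, and the scratch cookie stored
 * here comes back through the CCQ completion so the submitter can match
 * completions to requests:
 *
 *	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 *	if (!wqe)
 *		return IRDMA_ERR_RING_FULL;
 *	set_64bit_val(wqe, offset, payload);
 *	dma_wmb();
 *	set_64bit_val(wqe, 24, hdr);
 *	irdma_sc_cqp_post_sq(cqp);
 */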
3321 * irdma_sc_cqp_destroy - destroy cqp during close
3322 * @cqp: struct for cqp hw
3324 enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
3326 u32 cnt = 0, val;
3327 enum irdma_status_code ret_code = 0;
3329 writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
3330 writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
3331 do {
3332 if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
3333 ret_code = IRDMA_ERR_TIMEOUT;
3334 break;
3335 }
3336 udelay(cqp->dev->hw_attrs.max_sleep_count);
3337 val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3338 } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
3340 dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
3341 cqp->sdbuf.va, cqp->sdbuf.pa);
3342 cqp->sdbuf.va = NULL;
3344 return ret_code;
3347 * irdma_sc_ccq_arm - enable intr for control cq
3348 * @ccq: ccq sc struct
3350 void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
3351 {
3352 u64 temp_val;
3353 u16 sw_cq_sel;
3354 u8 arm_next_se;
3355 u8 arm_seq_num;
3357 get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
3358 sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
3359 arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
3360 arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
3361 arm_seq_num++;
3362 temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
3363 FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
3364 FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
3365 FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
3366 set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
3368 dma_wmb(); /* make sure shadow area is updated before arming */
3370 writel(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db);
3374 * irdma_sc_ccq_get_cqe_info - get ccq's cq entry
3375 * @ccq: ccq sc struct
3376 * @info: completion q entry to return
3378 enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
3379 struct irdma_ccq_cqe_info *info)
3381 u64 qp_ctx, temp, temp1;
3382 __le64 *cqe;
3383 struct irdma_sc_cqp *cqp;
3384 u32 wqe_idx;
3385 u32 error;
3386 u8 polarity;
3387 enum irdma_status_code ret_code = 0;
3389 if (ccq->cq_uk.avoid_mem_cflct)
3390 cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
3392 cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk);
3394 get_64bit_val(cqe, 24, &temp);
3395 polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
3396 if (polarity != ccq->cq_uk.polarity)
3397 return IRDMA_ERR_Q_EMPTY;
3399 get_64bit_val(cqe, 8, &qp_ctx);
3400 cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
3401 info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
3402 info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
3403 info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp);
3404 if (info->error) {
3405 info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp);
3406 error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
3407 ibdev_dbg(to_ibdev(cqp->dev),
3408 "CQP: CQPERRCODES error_code[x%08X]\n", error);
3411 wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp);
3412 info->scratch = cqp->scratch_array[wqe_idx];
3414 get_64bit_val(cqe, 16, &temp1);
3415 info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
3416 get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
3417 info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
3420 /* move the head for cq */
3421 IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
3422 if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring))
3423 ccq->cq_uk.polarity ^= 1;
3425 /* update cq tail in cq shadow memory also */
3426 IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
3427 set_64bit_val(ccq->cq_uk.shadow_area, 0,
3428 IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring));
3430 dma_wmb(); /* make sure shadow area is updated before moving tail */
3432 IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
3433 ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
3435 return ret_code;
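/*
 * Consumers typically drain the CCQ in a loop, using info->scratch to map
 * each completion back to the request that posted it, and then re-arm via
 * irdma_sc_ccq_arm(); irdma_sc_poll_for_cqp_op_done() below is the busy-
 * poll variant of that flow used on the bring-up path.
 */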
3439 * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
3440 * @cqp: struct for cqp hw
3441 * @op_code: cqp opcode for completion
3442 * @compl_info: completion q entry to return
3444 enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
3445 struct irdma_ccq_cqe_info *compl_info)
3447 struct irdma_ccq_cqe_info info = {};
3448 struct irdma_sc_cq *ccq;
3449 enum irdma_status_code ret_code = 0;
3450 u32 cnt = 0;
3452 ccq = cqp->dev->ccq;
3453 while (1) {
3454 if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
3455 return IRDMA_ERR_TIMEOUT;
3457 if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
3458 udelay(cqp->dev->hw_attrs.max_sleep_count);
3459 continue;
3460 }
3461 if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
3462 ret_code = IRDMA_ERR_CQP_COMPL_ERROR;
3463 break;
3464 }
3465 /* make sure op code matches */
3466 if (op_code == info.op_code)
3467 break;
3468 ibdev_dbg(to_ibdev(cqp->dev),
3469 "WQE: opcode mismatch for my op code 0x%x, returned opcode %x\n",
3470 op_code, info.op_code);
3471 }
3473 if (compl_info)
3474 memcpy(compl_info, &info, sizeof(*compl_info));
3476 return ret_code;
3480 * irdma_sc_manage_hmc_pm_func_table - manage of function table
3481 * @cqp: struct for cqp hw
3482 * @scratch: u64 saved to be used during cqp completion
3483 * @info: info for the manage function table operation
3484 * @post_sq: flag for cqp db to ring
3486 static enum irdma_status_code
3487 irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
3488 struct irdma_hmc_fcn_info *info,
3489 u64 scratch, bool post_sq)
3490 {
3491 __le64 *wqe;
3492 u64 hdr;
3494 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3495 if (!wqe)
3496 return IRDMA_ERR_RING_FULL;
3498 set_64bit_val(wqe, 0, 0);
3499 set_64bit_val(wqe, 8, 0);
3500 set_64bit_val(wqe, 16, 0);
3501 set_64bit_val(wqe, 32, 0);
3502 set_64bit_val(wqe, 40, 0);
3503 set_64bit_val(wqe, 48, 0);
3504 set_64bit_val(wqe, 56, 0);
3506 hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
3507 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
3508 IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) |
3509 FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
3510 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3511 dma_wmb(); /* make sure WQE is written before valid bit is set */
3513 set_64bit_val(wqe, 24, hdr);
3515 print_hex_dump_debug("WQE: MANAGE_HMC_PM_FUNC_TABLE WQE",
3516 DUMP_PREFIX_OFFSET, 16, 8, wqe,
3517 IRDMA_CQP_WQE_SIZE * 8, false);
3518 if (post_sq)
3519 irdma_sc_cqp_post_sq(cqp);
3521 return 0;
3525 * irdma_sc_commit_fpm_val_done - wait for cqp cqe completion
3526 * for fpm commit
3527 * @cqp: struct for cqp hw
3529 static enum irdma_status_code
3530 irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
3532 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
3533 NULL);
3537 * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
3538 * @cqp: struct for cqp hw
3539 * @scratch: u64 saved to be used during cqp completion
3540 * @hmc_fn_id: hmc function id
3541 * @commit_fpm_mem: Memory for fpm values
3542 * @post_sq: flag for cqp db to ring
3543 * @wait_type: poll ccq or cqp registers for cqp completion
3545 static enum irdma_status_code
3546 irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
3547 struct irdma_dma_mem *commit_fpm_mem, bool post_sq,
3548 enum irdma_cqp_wait_type wait_type)
3549 {
3550 __le64 *wqe;
3551 u64 hdr;
3552 u32 tail, val, error;
3553 enum irdma_status_code ret_code = 0;
3555 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3557 return IRDMA_ERR_RING_FULL;
3559 set_64bit_val(wqe, 16, hmc_fn_id);
3560 set_64bit_val(wqe, 32, commit_fpm_mem->pa);
3562 hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
3563 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
3564 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3566 dma_wmb(); /* make sure WQE is written before valid bit is set */
3568 set_64bit_val(wqe, 24, hdr);
3570 print_hex_dump_debug("WQE: COMMIT_FPM_VAL WQE", DUMP_PREFIX_OFFSET,
3571 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3572 irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
3574 if (post_sq) {
3575 irdma_sc_cqp_post_sq(cqp);
3576 if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
3577 ret_code = irdma_cqp_poll_registers(cqp, tail,
3578 cqp->dev->hw_attrs.max_done_count);
3579 else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
3580 ret_code = irdma_sc_commit_fpm_val_done(cqp);
3581 }
3583 return ret_code;
3587 * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
3588 * query fpm
3589 * @cqp: struct for cqp hw
3591 static enum irdma_status_code
3592 irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
3594 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
3595 NULL);
3599 * irdma_sc_query_fpm_val - cqp wqe query fpm values
3600 * @cqp: struct for cqp hw
3601 * @scratch: u64 saved to be used during cqp completion
3602 * @hmc_fn_id: hmc function id
3603 * @query_fpm_mem: memory for return fpm values
3604 * @post_sq: flag for cqp db to ring
3605 * @wait_type: poll ccq or cqp registers for cqp completion
3607 static enum irdma_status_code
3608 irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
3609 struct irdma_dma_mem *query_fpm_mem, bool post_sq,
3610 enum irdma_cqp_wait_type wait_type)
3611 {
3612 __le64 *wqe;
3613 u64 hdr;
3614 u32 tail, val, error;
3615 enum irdma_status_code ret_code = 0;
3617 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3619 return IRDMA_ERR_RING_FULL;
3621 set_64bit_val(wqe, 16, hmc_fn_id);
3622 set_64bit_val(wqe, 32, query_fpm_mem->pa);
3624 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) |
3625 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3626 dma_wmb(); /* make sure WQE is written before valid bit is set */
3628 set_64bit_val(wqe, 24, hdr);
3630 print_hex_dump_debug("WQE: QUERY_FPM WQE", DUMP_PREFIX_OFFSET, 16, 8,
3631 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3632 irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
3634 if (post_sq) {
3635 irdma_sc_cqp_post_sq(cqp);
3636 if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
3637 ret_code = irdma_cqp_poll_registers(cqp, tail,
3638 cqp->dev->hw_attrs.max_done_count);
3639 else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
3640 ret_code = irdma_sc_query_fpm_val_done(cqp);
3641 }
3643 return ret_code;
3647 * irdma_sc_ceq_init - initialize ceq
3648 * @ceq: ceq sc structure
3649 * @info: ceq initialization info
3651 enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
3652 struct irdma_ceq_init_info *info)
3654 u32 pble_obj_cnt;
3656 if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
3657 info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
3658 return IRDMA_ERR_INVALID_SIZE;
3660 if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
3661 return IRDMA_ERR_INVALID_CEQ_ID;
3662 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
3664 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
3665 return IRDMA_ERR_INVALID_PBLE_INDEX;
3667 ceq->size = sizeof(*ceq);
3668 ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
3669 ceq->ceq_id = info->ceq_id;
3670 ceq->dev = info->dev;
3671 ceq->elem_cnt = info->elem_cnt;
3672 ceq->ceq_elem_pa = info->ceqe_pa;
3673 ceq->virtual_map = info->virtual_map;
3674 ceq->itr_no_expire = info->itr_no_expire;
3675 ceq->reg_cq = info->reg_cq;
3676 ceq->reg_cq_size = 0;
3677 spin_lock_init(&ceq->req_cq_lock);
3678 ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
3679 ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
3680 ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
3681 ceq->tph_en = info->tph_en;
3682 ceq->tph_val = info->tph_val;
3683 ceq->vsi = info->vsi;
3685 IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
3686 ceq->dev->ceq[info->ceq_id] = ceq;
3688 return 0;
3692 * irdma_sc_ceq_create - create ceq wqe
3693 * @ceq: ceq sc structure
3694 * @scratch: u64 saved to be used during cqp completion
3695 * @post_sq: flag for cqp db to ring
3698 static enum irdma_status_code irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
3699 bool post_sq)
3700 {
3701 struct irdma_sc_cqp *cqp;
3702 __le64 *wqe;
3703 u64 hdr;
3705 cqp = ceq->dev->cqp;
3706 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3707 if (!wqe)
3708 return IRDMA_ERR_RING_FULL;
3709 set_64bit_val(wqe, 16, ceq->elem_cnt);
3710 set_64bit_val(wqe, 32,
3711 (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
3712 set_64bit_val(wqe, 48,
3713 (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
3714 set_64bit_val(wqe, 56,
3715 FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
3716 FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
3717 hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
3718 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
3719 FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
3720 FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
3721 FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
3722 FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
3723 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3724 dma_wmb(); /* make sure WQE is written before valid bit is set */
3726 set_64bit_val(wqe, 24, hdr);
3728 print_hex_dump_debug("WQE: CEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
3729 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3730 if (post_sq)
3731 irdma_sc_cqp_post_sq(cqp);
3733 return 0;
3737 * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
3738 * @ceq: ceq sc structure
3740 static enum irdma_status_code
3741 irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
3743 struct irdma_sc_cqp *cqp;
3745 cqp = ceq->dev->cqp;
3746 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
3747 NULL);
3751 * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
3752 * @ceq: ceq sc structure
3754 enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
3756 struct irdma_sc_cqp *cqp;
3758 if (ceq->reg_cq)
3759 irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
3761 cqp = ceq->dev->cqp;
3762 cqp->process_cqp_sds = irdma_update_sds_noccq;
3764 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
3765 NULL);
3769 * irdma_sc_cceq_create - create cceq
3770 * @ceq: ceq sc structure
3771 * @scratch: u64 saved to be used during cqp completion
3773 enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
3775 enum irdma_status_code ret_code;
3776 struct irdma_sc_dev *dev = ceq->dev;
3778 dev->ccq->vsi = ceq->vsi;
3779 if (ceq->reg_cq) {
3780 ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
3781 if (ret_code)
3782 return ret_code;
3783 }
3785 ret_code = irdma_sc_ceq_create(ceq, scratch, true);
3786 if (!ret_code)
3787 return irdma_sc_cceq_create_done(ceq);
3789 return ret_code;
3793 * irdma_sc_ceq_destroy - destroy ceq
3794 * @ceq: ceq sc structure
3795 * @scratch: u64 saved to be used during cqp completion
3796 * @post_sq: flag for cqp db to ring
3798 enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch,
3799 bool post_sq)
3800 {
3801 struct irdma_sc_cqp *cqp;
3802 __le64 *wqe;
3803 u64 hdr;
3805 cqp = ceq->dev->cqp;
3806 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3807 if (!wqe)
3808 return IRDMA_ERR_RING_FULL;
3810 set_64bit_val(wqe, 16, ceq->elem_cnt);
3811 set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
3812 hdr = ceq->ceq_id |
3813 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
3814 FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
3815 FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
3816 FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
3817 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3818 dma_wmb(); /* make sure WQE is written before valid bit is set */
3820 set_64bit_val(wqe, 24, hdr);
3822 print_hex_dump_debug("WQE: CEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
3823 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3824 if (post_sq)
3825 irdma_sc_cqp_post_sq(cqp);
3831 * irdma_sc_process_ceq - process ceq
3832 * @dev: sc device struct
3833 * @ceq: ceq sc structure
3835 * It is expected that the caller serializes this function with
3836 * cleanup_ceqes() because both functions manipulate the same ceq.
3838 void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
3842 struct irdma_sc_cq *cq = NULL;
3843 struct irdma_sc_cq *temp_cq;
3846 unsigned long flags;
3850 ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
3851 get_64bit_val(ceqe, 0, &temp);
3852 polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
3853 if (polarity != ceq->polarity)
3854 return NULL;
3856 temp_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
3857 if (!temp_cq) {
3858 cq_idx = IRDMA_INVALID_CQ_IDX;
3859 IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
3861 if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
3862 ceq->polarity ^= 1;
3863 continue;
3864 }
3866 cq = temp_cq;
3867 if (ceq->reg_cq) {
3868 spin_lock_irqsave(&ceq->req_cq_lock, flags);
3869 cq_idx = irdma_sc_find_reg_cq(ceq, cq);
3870 spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3871 }
3873 IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
3874 if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
3875 ceq->polarity ^= 1;
3876 } while (cq_idx == IRDMA_INVALID_CQ_IDX);
3879 irdma_sc_cq_ack(cq);
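/*
 * Illustrative sketch (not driver code): a CEQ interrupt handler drains
 * the ring by calling irdma_sc_process_ceq() until it returns NULL,
 * completing each returned CQ. example_comp_handler() is hypothetical.
 *
 *	struct irdma_sc_cq *cq;
 *
 *	do {
 *		cq = irdma_sc_process_ceq(dev, ceq);
 *		if (cq)
 *			example_comp_handler(cq);
 *	} while (cq);
 */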
3884 * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq
3885 * @cq: cq for which the ceqes need to be cleaned up
3886 * @ceq: ceq sc structure
3887 *
3888 * The function is called after the cq is destroyed to clean up
3889 * its pending ceqe entries. It is expected that the caller serializes
3890 * this function with process_ceq(), which runs in interrupt context.
3892 void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
3894 struct irdma_sc_cq *next_cq;
3895 u8 ceq_polarity = ceq->polarity;
3902 next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0);
3904 for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) {
3905 ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);
3907 get_64bit_val(ceqe, 0, &temp);
3908 polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
3909 if (polarity != ceq_polarity)
3910 return;
3912 next_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
3913 if (cq == next_cq)
3914 set_64bit_val(ceqe, 0, temp & IRDMA_CEQE_VALID);
3916 next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
3917 if (!next)
3918 ceq_polarity ^= 1;
3923 * irdma_sc_aeq_init - initialize aeq
3924 * @aeq: aeq structure ptr
3925 * @info: aeq initialization info
3927 enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
3928 struct irdma_aeq_init_info *info)
3932 if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
3933 info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
3934 return IRDMA_ERR_INVALID_SIZE;
3936 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
3938 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
3939 return IRDMA_ERR_INVALID_PBLE_INDEX;
3941 aeq->size = sizeof(*aeq);
3943 aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
3944 aeq->dev = info->dev;
3945 aeq->elem_cnt = info->elem_cnt;
3946 aeq->aeq_elem_pa = info->aeq_elem_pa;
3947 IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
3948 aeq->virtual_map = info->virtual_map;
3949 aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
3950 aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
3951 aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
3952 aeq->msix_idx = info->msix_idx;
3953 info->dev->aeq = aeq;
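/*
 * Illustrative sketch (not driver code): the caller fills struct
 * irdma_aeq_init_info before irdma_sc_aeq_init(). The buffer names below
 * are hypothetical; elem_cnt must fall inside the [min, max] window
 * checked above.
 *
 *	struct irdma_aeq_init_info info = {};
 *
 *	info.dev = dev;
 *	info.elem_cnt = dev->hw_attrs.min_hw_aeq_size;
 *	info.aeqe_base = aeq_mem.va;		// DMA-coherent ring memory
 *	info.aeq_elem_pa = aeq_mem.pa;
 *	info.virtual_map = false;		// physically contiguous ring
 *	info.msix_idx = aeq_msix_idx;
 *	status = irdma_sc_aeq_init(aeq, &info);
 */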
3959 * irdma_sc_aeq_create - create aeq
3960 * @aeq: aeq structure ptr
3961 * @scratch: u64 saved to be used during cqp completion
3962 * @post_sq: flag for cqp db to ring
3964 static enum irdma_status_code irdma_sc_aeq_create(struct irdma_sc_aeq *aeq,
3965 u64 scratch, bool post_sq)
3968 struct irdma_sc_cqp *cqp;
3971 cqp = aeq->dev->cqp;
3972 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3973 if (!wqe)
3974 return IRDMA_ERR_RING_FULL;
3975 set_64bit_val(wqe, 16, aeq->elem_cnt);
3976 set_64bit_val(wqe, 32,
3977 (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
3978 set_64bit_val(wqe, 48,
3979 (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
3981 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
3982 FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
3983 FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
3984 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3985 dma_wmb(); /* make sure WQE is written before valid bit is set */
3987 set_64bit_val(wqe, 24, hdr);
3989 print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
3990 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3991 if (post_sq)
3992 irdma_sc_cqp_post_sq(cqp);
3998 * irdma_sc_aeq_destroy - destroy aeq during close
3999 * @aeq: aeq structure ptr
4000 * @scratch: u64 saved to be used during cqp completion
4001 * @post_sq: flag for cqp db to ring
4003 static enum irdma_status_code irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq,
4004 u64 scratch, bool post_sq)
4007 struct irdma_sc_cqp *cqp;
4008 struct irdma_sc_dev *dev;
4011 dev = aeq->dev;
4012 writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
4014 cqp = dev->cqp;
4015 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4016 if (!wqe)
4017 return IRDMA_ERR_RING_FULL;
4018 set_64bit_val(wqe, 16, aeq->elem_cnt);
4019 set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
4020 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
4021 FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
4022 FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
4023 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4024 dma_wmb(); /* make sure WQE is written before valid bit is set */
4026 set_64bit_val(wqe, 24, hdr);
4028 print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
4029 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
4030 if (post_sq)
4031 irdma_sc_cqp_post_sq(cqp);
4036 * irdma_sc_get_next_aeqe - get next aeq entry
4037 * @aeq: aeq structure ptr
4038 * @info: aeqe info to be returned
4040 enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
4041 struct irdma_aeqe_info *info)
4043 u64 temp, compl_ctx;
4049 aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
4050 get_64bit_val(aeqe, 0, &compl_ctx);
4051 get_64bit_val(aeqe, 8, &temp);
4052 polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
4054 if (aeq->polarity != polarity)
4055 return IRDMA_ERR_Q_EMPTY;
4057 print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
4058 aeqe, 16, false);
4060 ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
4061 wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
4062 info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
4063 ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
4064 info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
4065 info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
4066 info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
4067 info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
4068 info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
4070 info->ae_src = ae_src;
4071 switch (info->ae_id) {
4072 case IRDMA_AE_PRIV_OPERATION_DENIED:
4073 case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
4074 case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
4075 case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG:
4076 case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH:
4077 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
4078 case IRDMA_AE_UDA_XMIT_BAD_PD:
4079 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
4080 case IRDMA_AE_BAD_CLOSE:
4081 case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO:
4082 case IRDMA_AE_STAG_ZERO_INVALID:
4083 case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
4084 case IRDMA_AE_IB_INVALID_REQUEST:
4085 case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
4086 case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
4087 case IRDMA_AE_IB_REMOTE_OP_ERROR:
4088 case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
4089 case IRDMA_AE_DDP_UBE_INVALID_MO:
4090 case IRDMA_AE_DDP_UBE_INVALID_QN:
4091 case IRDMA_AE_DDP_NO_L_BIT:
4092 case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
4093 case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
4094 case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
4095 case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
4096 case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
4097 case IRDMA_AE_INVALID_ARP_ENTRY:
4098 case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
4099 case IRDMA_AE_STALE_ARP_ENTRY:
4100 case IRDMA_AE_INVALID_AH_ENTRY:
4101 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
4102 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
4103 case IRDMA_AE_LLP_TOO_MANY_RETRIES:
4104 case IRDMA_AE_LLP_DOUBT_REACHABILITY:
4105 case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
4106 case IRDMA_AE_RESET_SENT:
4107 case IRDMA_AE_TERMINATE_SENT:
4108 case IRDMA_AE_RESET_NOT_SENT:
4109 case IRDMA_AE_LCE_QP_CATASTROPHIC:
4110 case IRDMA_AE_QP_SUSPEND_COMPLETE:
4111 case IRDMA_AE_UDA_L4LEN_INVALID:
4112 info->qp = true;
4113 info->compl_ctx = compl_ctx;
4114 break;
4115 case IRDMA_AE_LCE_CQ_CATASTROPHIC:
4116 info->cq = true;
4117 info->compl_ctx = compl_ctx << 1;
4118 ae_src = IRDMA_AE_SOURCE_RSVD;
4119 break;
4120 case IRDMA_AE_ROCE_EMPTY_MCG:
4121 case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
4122 case IRDMA_AE_ROCE_BAD_MC_QPID:
4123 case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH:
4124 fallthrough;
4125 case IRDMA_AE_LLP_CONNECTION_RESET:
4126 case IRDMA_AE_LLP_SYN_RECEIVED:
4127 case IRDMA_AE_LLP_FIN_RECEIVED:
4128 case IRDMA_AE_LLP_CLOSE_COMPLETE:
4129 case IRDMA_AE_LLP_TERMINATE_RECEIVED:
4130 case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
4131 ae_src = IRDMA_AE_SOURCE_RSVD;
4132 info->qp = true;
4133 info->compl_ctx = compl_ctx;
4134 break;
4135 default:
4136 break;
4137 }
4139 switch (ae_src) {
4140 case IRDMA_AE_SOURCE_RQ:
4141 case IRDMA_AE_SOURCE_RQ_0011:
4142 info->qp = true;
4143 info->rq = true;
4144 info->wqe_idx = wqe_idx;
4145 info->compl_ctx = compl_ctx;
4146 break;
4147 case IRDMA_AE_SOURCE_CQ:
4148 case IRDMA_AE_SOURCE_CQ_0110:
4149 case IRDMA_AE_SOURCE_CQ_1010:
4150 case IRDMA_AE_SOURCE_CQ_1110:
4151 info->cq = true;
4152 info->compl_ctx = compl_ctx << 1;
4153 break;
4154 case IRDMA_AE_SOURCE_SQ:
4155 case IRDMA_AE_SOURCE_SQ_0111:
4156 info->qp = true;
4157 info->sq = true;
4158 info->wqe_idx = wqe_idx;
4159 info->compl_ctx = compl_ctx;
4160 break;
4161 case IRDMA_AE_SOURCE_IN_RR_WR:
4162 case IRDMA_AE_SOURCE_IN_RR_WR_1011:
4163 info->qp = true;
4164 info->compl_ctx = compl_ctx;
4165 info->in_rdrsp_wr = true;
4166 break;
4167 case IRDMA_AE_SOURCE_OUT_RR:
4168 case IRDMA_AE_SOURCE_OUT_RR_1111:
4169 info->qp = true;
4170 info->compl_ctx = compl_ctx;
4171 info->out_rdrsp = true;
4172 break;
4173 case IRDMA_AE_SOURCE_RSVD:
4174 default:
4175 break;
4176 }
4178 IRDMA_RING_MOVE_TAIL(aeq->aeq_ring);
4179 if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring))
4180 aeq->polarity ^= 1;
4182 return 0;
4186 * irdma_sc_repost_aeq_entries - repost completed aeq entries
4187 * @dev: sc device struct
4188 * @count: allocate count
4190 enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
4192 writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
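/*
 * Illustrative sketch (not driver code): AEQ consumption pairs
 * irdma_sc_get_next_aeqe() with a repost of the handled entries, which
 * returns ring credits to hardware through the IRDMA_AEQALLOC register.
 * example_handle_ae() is hypothetical.
 *
 *	struct irdma_aeqe_info info = {};
 *	u32 handled = 0;
 *
 *	while (!irdma_sc_get_next_aeqe(aeq, &info)) {
 *		example_handle_ae(&info);
 *		handled++;
 *	}
 *	if (handled)
 *		irdma_sc_repost_aeq_entries(dev, handled);
 */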
4198 * irdma_sc_ccq_init - initialize control cq
4199 * @cq: sc's cq struct
4200 * @info: info for control cq initialization
4202 enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *cq,
4203 struct irdma_ccq_init_info *info)
4207 if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
4208 info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
4209 return IRDMA_ERR_INVALID_SIZE;
4211 if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
4212 return IRDMA_ERR_INVALID_CEQ_ID;
4214 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
4216 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
4217 return IRDMA_ERR_INVALID_PBLE_INDEX;
4219 cq->cq_pa = info->cq_pa;
4220 cq->cq_uk.cq_base = info->cq_base;
4221 cq->shadow_area_pa = info->shadow_area_pa;
4222 cq->cq_uk.shadow_area = info->shadow_area;
4223 cq->shadow_read_threshold = info->shadow_read_threshold;
4224 cq->dev = info->dev;
4225 cq->ceq_id = info->ceq_id;
4226 cq->cq_uk.cq_size = info->num_elem;
4227 cq->cq_type = IRDMA_CQ_TYPE_CQP;
4228 cq->ceqe_mask = info->ceqe_mask;
4229 IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
4230 cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
4231 cq->ceq_id_valid = info->ceq_id_valid;
4232 cq->tph_en = info->tph_en;
4233 cq->tph_val = info->tph_val;
4234 cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
4235 cq->pbl_list = info->pbl_list;
4236 cq->virtual_map = info->virtual_map;
4237 cq->pbl_chunk_size = info->pbl_chunk_size;
4238 cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
4239 cq->cq_uk.polarity = true;
4240 cq->vsi = info->vsi;
4241 cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db;
4243 /* Only applicable to CQs other than CCQ so initialize to zero */
4244 cq->cq_uk.cqe_alloc_db = NULL;
4246 info->dev->ccq = cq;
4251 * irdma_sc_ccq_create_done - poll cqp for ccq create
4252 * @ccq: ccq sc struct
4254 static inline enum irdma_status_code irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
4256 struct irdma_sc_cqp *cqp;
4258 cqp = ccq->dev->cqp;
4260 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
4264 * irdma_sc_ccq_create - create control cq
4265 * @ccq: ccq sc struct
4266 * @scratch: u64 saved to be used during cqp completion
4267 * @check_overflow: overflow flag for ccq
4268 * @post_sq: flag for cqp db to ring
4270 enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
4271 bool check_overflow, bool post_sq)
4273 enum irdma_status_code ret_code;
4275 ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
4276 if (ret_code)
4277 return ret_code;
4279 if (post_sq) {
4280 ret_code = irdma_sc_ccq_create_done(ccq);
4281 if (ret_code)
4282 return ret_code;
4283 }
4284 ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;
4290 * irdma_sc_ccq_destroy - destroy ccq during close
4291 * @ccq: ccq sc struct
4292 * @scratch: u64 saved to be used during cqp completion
4293 * @post_sq: flag for cqp db to ring
4295 enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch,
4296 bool post_sq)
4298 struct irdma_sc_cqp *cqp;
4301 enum irdma_status_code ret_code = 0;
4302 u32 tail, val, error;
4304 cqp = ccq->dev->cqp;
4305 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4306 if (!wqe)
4307 return IRDMA_ERR_RING_FULL;
4309 set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
4310 set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1);
4311 set_64bit_val(wqe, 40, ccq->shadow_area_pa);
4313 hdr = ccq->cq_uk.cq_id |
4314 FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
4315 IRDMA_CQPSQ_CQ_CEQID) |
4316 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
4317 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) |
4318 FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) |
4319 FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) |
4320 FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) |
4321 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4322 dma_wmb(); /* make sure WQE is written before valid bit is set */
4324 set_64bit_val(wqe, 24, hdr);
4326 print_hex_dump_debug("WQE: CCQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
4327 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
4328 irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4330 if (post_sq) {
4331 irdma_sc_cqp_post_sq(cqp);
4332 ret_code = irdma_cqp_poll_registers(cqp, tail,
4333 cqp->dev->hw_attrs.max_done_count);
4334 }
4336 cqp->process_cqp_sds = irdma_update_sds_noccq;
4342 * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
4343 * @dev: ptr to irdma_dev struct
4344 * @hmc_fn_id: hmc function id
4346 enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev,
4347 u8 hmc_fn_id)
4349 struct irdma_hmc_info *hmc_info;
4350 struct irdma_hmc_fpm_misc *hmc_fpm_misc;
4351 struct irdma_dma_mem query_fpm_mem;
4352 enum irdma_status_code ret_code = 0;
4355 hmc_info = dev->hmc_info;
4356 hmc_fpm_misc = &dev->hmc_fpm_misc;
4357 query_fpm_mem.pa = dev->fpm_query_buf_pa;
4358 query_fpm_mem.va = dev->fpm_query_buf;
4359 hmc_info->hmc_fn_id = hmc_fn_id;
4360 wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
4362 ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
4363 &query_fpm_mem, true, wait_type);
4364 if (ret_code)
4365 return ret_code;
4367 /* parse the fpm_query_buf and fill hmc obj info */
4368 ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info,
4369 hmc_fpm_misc);
4371 print_hex_dump_debug("HMC: QUERY FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
4372 8, query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE,
4373 false);
4375 return ret_code;
4378 * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
4379 * command and populates fpm base address in hmc_info
4380 * @dev: ptr to irdma_dev struct
4381 * @hmc_fn_id: hmc function id
4383 static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
4384 u8 hmc_fn_id)
4386 struct irdma_hmc_info *hmc_info;
4387 struct irdma_hmc_obj_info *obj_info;
4389 struct irdma_dma_mem commit_fpm_mem;
4390 enum irdma_status_code ret_code = 0;
4393 hmc_info = dev->hmc_info;
4394 obj_info = hmc_info->hmc_obj;
4395 buf = dev->fpm_commit_buf;
4397 set_64bit_val(buf, 0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
4398 set_64bit_val(buf, 8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
4399 set_64bit_val(buf, 16, (u64)0); /* RSRVD */
4400 set_64bit_val(buf, 24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
4401 set_64bit_val(buf, 32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
4402 set_64bit_val(buf, 40, (u64)0); /* RSVD */
4403 set_64bit_val(buf, 48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt);
4404 set_64bit_val(buf, 56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt);
4405 set_64bit_val(buf, 64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt);
4406 set_64bit_val(buf, 72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt);
4407 set_64bit_val(buf, 80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt);
4408 set_64bit_val(buf, 88,
4409 (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt);
4410 set_64bit_val(buf, 96,
4411 (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt);
4412 set_64bit_val(buf, 104,
4413 (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt);
4414 set_64bit_val(buf, 112,
4415 (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt);
4416 set_64bit_val(buf, 120, (u64)0); /* RSVD */
4417 set_64bit_val(buf, 128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt);
4418 set_64bit_val(buf, 136,
4419 (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt);
4420 set_64bit_val(buf, 144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt);
4421 set_64bit_val(buf, 152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt);
4422 set_64bit_val(buf, 160,
4423 (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
4424 set_64bit_val(buf, 168,
4425 (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
4427 commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
4428 commit_fpm_mem.va = dev->fpm_commit_buf;
4430 wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
4431 print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
4432 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
4433 false);
4434 ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
4435 &commit_fpm_mem, true, wait_type);
4436 if (!ret_code)
4437 ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
4438 hmc_info->hmc_obj,
4439 &hmc_info->sd_table.sd_cnt);
4440 print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
4441 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
4442 false);
4444 return ret_code;
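/*
 * Illustrative sketch (not driver code): HMC provisioning is a
 * query/trim/commit cycle. irdma_sc_init_iw_hmc() above fills
 * hmc_obj[].max_cnt from the QUERY_FPM response, the caller lowers the
 * per-object cnt values until the SD estimate fits, and this function
 * commits them, after which firmware reports the final SD count:
 *
 *	irdma_sc_init_iw_hmc(dev, hmc_fn_id);	// query -> max_cnt limits
 *	// ...trim hmc_info->hmc_obj[i].cnt as irdma_cfg_fpm_val() does...
 *	irdma_sc_cfg_iw_fpm(dev, hmc_fn_id);	// commit -> sd_table.sd_cnt
 */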
4448 * cqp_sds_wqe_fill - fill cqp wqe for sd
4449 * @cqp: struct for cqp hw
4450 * @info: sd info for wqe
4451 * @scratch: u64 saved to be used during cqp completion
4453 static enum irdma_status_code
4454 cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info,
4455 u64 scratch)
4460 int mem_entries, wqe_entries;
4461 struct irdma_dma_mem *sdbuf = &cqp->sdbuf;
4465 wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
4466 if (!wqe)
4467 return IRDMA_ERR_RING_FULL;
4469 wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
4470 mem_entries = info->cnt - wqe_entries;
4472 if (mem_entries) {
4473 offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE;
4474 memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4);
4476 data = (u64)sdbuf->pa + offset;
4477 } else {
4478 data = 0;
4479 }
4480 data |= FIELD_PREP(IRDMA_CQPSQ_UPESD_HMCFNID, info->hmc_fn_id);
4481 set_64bit_val(wqe, 16, data);
4483 switch (wqe_entries) {
4484 case 3:
4485 set_64bit_val(wqe, 48,
4486 (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
4487 FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
4489 set_64bit_val(wqe, 56, info->entry[2].data);
4490 fallthrough;
4491 case 2:
4492 set_64bit_val(wqe, 32,
4493 (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
4494 FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
4496 set_64bit_val(wqe, 40, info->entry[1].data);
4497 fallthrough;
4498 case 1:
4499 set_64bit_val(wqe, 0,
4500 FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));
4502 set_64bit_val(wqe, 8, info->entry[0].data);
4503 break;
4504 default:
4505 break;
4506 }
4508 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) |
4509 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
4510 FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries);
4511 dma_wmb(); /* make sure WQE is written before valid bit is set */
4513 set_64bit_val(wqe, 24, hdr);
4516 print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE Buffer",
4517 DUMP_PREFIX_OFFSET, 16, 8,
4518 (char *)sdbuf->va + offset,
4519 mem_entries << 4, false);
4521 print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE", DUMP_PREFIX_OFFSET, 16,
4522 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
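/*
 * Worked example (not driver code): an UPDATE_PE_SDS WQE carries at most
 * three SD entries inline; the remainder is staged in the per-WQE slice
 * of the sdbuf DMA area whose address is written at offset 16. For
 * info->cnt == 5:
 *
 *	wqe_entries == 3;	// entries [0..2] go inline
 *	mem_entries == 2;	// entries [3..4] copied to sdbuf
 *	// mem_entries << 4 == 32 bytes, 16 bytes (cmd + data) per entry
 */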
4528 * irdma_update_pe_sds - cqp wqe for sd
4529 * @dev: ptr to irdma_dev struct
4530 * @info: sd info for SDs
4531 * @scratch: u64 saved to be used during cqp completion
4533 static enum irdma_status_code
4534 irdma_update_pe_sds(struct irdma_sc_dev *dev,
4535 struct irdma_update_sds_info *info, u64 scratch)
4537 struct irdma_sc_cqp *cqp = dev->cqp;
4538 enum irdma_status_code ret_code;
4540 ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
4541 if (!ret_code)
4542 irdma_sc_cqp_post_sq(cqp);
4548 * irdma_update_sds_noccq - update sd before ccq created
4549 * @dev: sc device struct
4550 * @info: sd info for SDs
4552 enum irdma_status_code
4553 irdma_update_sds_noccq(struct irdma_sc_dev *dev,
4554 struct irdma_update_sds_info *info)
4556 u32 error, val, tail;
4557 struct irdma_sc_cqp *cqp = dev->cqp;
4558 enum irdma_status_code ret_code;
4560 ret_code = cqp_sds_wqe_fill(cqp, info, 0);
4561 if (ret_code)
4562 return ret_code;
4564 irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4566 irdma_sc_cqp_post_sq(cqp);
4567 return irdma_cqp_poll_registers(cqp, tail,
4568 cqp->dev->hw_attrs.max_done_count);
4572 * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
4573 * @cqp: struct for cqp hw
4574 * @scratch: u64 saved to be used during cqp completion
4575 * @hmc_fn_id: hmc function id
4576 * @post_sq: flag for cqp db to ring
4577 * @poll_registers: flag to poll register for cqp completion
4579 enum irdma_status_code
4580 irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
4581 u8 hmc_fn_id, bool post_sq,
4582 bool poll_registers)
4586 u32 tail, val, error;
4588 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4589 if (!wqe)
4590 return IRDMA_ERR_RING_FULL;
4592 set_64bit_val(wqe, 16,
4593 FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
4595 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
4596 IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) |
4597 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4598 dma_wmb(); /* make sure WQE is written before valid bit is set */
4600 set_64bit_val(wqe, 24, hdr);
4602 print_hex_dump_debug("WQE: SHMC_PAGES_ALLOCATED WQE",
4603 DUMP_PREFIX_OFFSET, 16, 8, wqe,
4604 IRDMA_CQP_WQE_SIZE * 8, false);
4605 irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4607 if (post_sq) {
4608 irdma_sc_cqp_post_sq(cqp);
4609 if (poll_registers)
4610 /* check for cqp sq tail update */
4611 return irdma_cqp_poll_registers(cqp, tail,
4612 cqp->dev->hw_attrs.max_done_count);
4613 else
4614 return irdma_sc_poll_for_cqp_op_done(cqp,
4615 IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED,
4616 NULL);
4617 }
4619 return 0;
4623 * irdma_cqp_ring_full - check if cqp ring is full
4624 * @cqp: struct for cqp hw
4626 static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
4628 return IRDMA_RING_FULL_ERR(cqp->sq_ring);
4632 * irdma_est_sd - returns approximate number of SDs for HMC
4633 * @dev: sc device struct
4634 * @hmc_info: hmc structure, size and count for HMC objects
4636 static u32 irdma_est_sd(struct irdma_sc_dev *dev,
4637 struct irdma_hmc_info *hmc_info)
4643 for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
4644 if (i != IRDMA_HMC_IW_PBLE)
4645 size += round_up(hmc_info->hmc_obj[i].cnt *
4646 hmc_info->hmc_obj[i].size, 512);
4647 size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
4648 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
4649 if (size & 0x1FFFFF)
4650 sd = (size >> 21) + 1; /* add 1 for remainder */
4651 else
4652 sd = size >> 21;
4653 if (sd > 0xFFFFFFFF) {
4654 ibdev_dbg(to_ibdev(dev), "HMC: sd overflow[%lld]\n", sd);
4655 sd = 0xFFFFFFFF - 1;
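/*
 * Worked example (not driver code): an SD spans 2MB, so the byte total is
 * converted with a shift by 21, plus one SD for any remainder. For
 * size == 0x500000 (5MB):
 *
 *	size & 0x1FFFFF == 0x100000	// 1MB remainder
 *	sd == (0x500000 >> 21) + 1 == 3	// two full SDs plus one partial
 */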
4662 * irdma_sc_query_rdma_features_done - poll cqp for query features done
4663 * @cqp: struct for cqp hw
4665 static enum irdma_status_code
4666 irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
4668 return irdma_sc_poll_for_cqp_op_done(cqp,
4669 IRDMA_CQP_OP_QUERY_RDMA_FEATURES,
4670 NULL);
4674 * irdma_sc_query_rdma_features - query RDMA features and FW ver
4675 * @cqp: struct for cqp hw
4676 * @buf: buffer to hold query info
4677 * @scratch: u64 saved to be used during cqp completion
4679 static enum irdma_status_code
4680 irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
4681 struct irdma_dma_mem *buf, u64 scratch)
4686 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4687 if (!wqe)
4688 return IRDMA_ERR_RING_FULL;
4690 temp = buf->pa;
4691 set_64bit_val(wqe, 32, temp);
4693 temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID,
4694 cqp->polarity) |
4695 FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) |
4696 FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES);
4697 dma_wmb(); /* make sure WQE is written before valid bit is set */
4699 set_64bit_val(wqe, 24, temp);
4701 print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
4702 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
4703 irdma_sc_cqp_post_sq(cqp);
4709 * irdma_get_rdma_features - get RDMA features
4710 * @dev: sc device struct
4712 enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev)
4714 enum irdma_status_code ret_code;
4715 struct irdma_dma_mem feat_buf;
4717 u16 byte_idx, feat_type, feat_cnt, feat_idx;
4719 feat_buf.size = ALIGN(IRDMA_FEATURE_BUF_SIZE,
4720 IRDMA_FEATURE_BUF_ALIGNMENT);
4721 feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size,
4722 &feat_buf.pa, GFP_KERNEL);
4723 if (!feat_buf.va)
4724 return IRDMA_ERR_NO_MEMORY;
4726 ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
4727 if (!ret_code)
4728 ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
4729 if (ret_code)
4730 goto exit;
4732 get_64bit_val(feat_buf.va, 0, &temp);
4733 feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
4734 if (feat_cnt < 2) {
4735 ret_code = IRDMA_ERR_INVALID_FEAT_CNT;
4736 goto exit;
4737 } else if (feat_cnt > IRDMA_MAX_FEATURES) {
4738 ibdev_dbg(to_ibdev(dev),
4739 "DEV: feature buf size insufficient, retrying with larger buffer\n");
4740 dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
4741 feat_buf.pa);
4743 feat_buf.size = ALIGN(8 * feat_cnt,
4744 IRDMA_FEATURE_BUF_ALIGNMENT);
4745 feat_buf.va = dma_alloc_coherent(dev->hw->device,
4746 feat_buf.size, &feat_buf.pa,
4747 GFP_KERNEL);
4748 if (!feat_buf.va)
4749 return IRDMA_ERR_NO_MEMORY;
4751 ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
4752 if (!ret_code)
4753 ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
4754 if (ret_code)
4755 goto exit;
4757 get_64bit_val(feat_buf.va, 0, &temp);
4758 feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
4759 if (feat_cnt < 2) {
4760 ret_code = IRDMA_ERR_INVALID_FEAT_CNT;
4761 goto exit;
4762 }
4765 print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
4766 16, 8, feat_buf.va, feat_cnt * 8, false);
4768 for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
4769 feat_idx++, byte_idx += 8) {
4770 get_64bit_val(feat_buf.va, byte_idx, &temp);
4771 feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
4772 if (feat_type >= IRDMA_MAX_FEATURES) {
4773 ibdev_dbg(to_ibdev(dev),
4774 "DEV: found unrecognized feature type %d\n",
4778 dev->feature_info[feat_type] = temp;
4780 exit:
4781 dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
4782 feat_buf.pa);
4784 return ret_code;
4787 static u32 irdma_q1_cnt(struct irdma_sc_dev *dev,
4788 struct irdma_hmc_info *hmc_info, u32 qpwanted)
4792 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
4793 q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted);
4794 } else {
4795 if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
4796 q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512);
4797 else
4798 q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted;
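/*
 * Worked example (not driver code): on GEN_2 with any protocol mix other
 * than iWARP-only, the Q1 (inbound RDMA read) count is padded by 512
 * before rounding. Assuming max_hw_ird == 64 and qpwanted == 4:
 *
 *	64 * 2 * 4 + 512 == 1024
 *	q1_cnt == roundup_pow_of_two(1024) == 1024	// already a power of two
 */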
4804 static void cfg_fpm_value_gen_1(struct irdma_sc_dev *dev,
4805 struct irdma_hmc_info *hmc_info, u32 qpwanted)
4807 hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes);
4810 static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
4811 struct irdma_hmc_info *hmc_info, u32 qpwanted)
4813 struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc;
4815 hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
4816 4 * hmc_fpm_misc->xf_block_size * qpwanted;
4818 hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
4820 if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt)
4821 hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted;
4822 if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
4823 hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
4824 hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
4825 hmc_fpm_misc->rrf_block_size;
4826 if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
4827 hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
4828 if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
4829 hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
4830 hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
4831 hmc_fpm_misc->ooiscf_block_size;
4835 * irdma_cfg_fpm_val - configure HMC objects
4836 * @dev: sc device struct
4837 * @qp_count: desired qp count
4839 enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
4841 struct irdma_virt_mem virt_mem;
4843 u32 qpwanted, mrwanted, pblewanted;
4848 struct irdma_hmc_info *hmc_info;
4849 struct irdma_hmc_fpm_misc *hmc_fpm_misc;
4850 enum irdma_status_code ret_code = 0;
4852 hmc_info = dev->hmc_info;
4853 hmc_fpm_misc = &dev->hmc_fpm_misc;
4855 ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id);
4856 if (ret_code) {
4857 ibdev_dbg(to_ibdev(dev),
4858 "HMC: irdma_sc_init_iw_hmc returned error_code = %d\n",
4859 ret_code);
4860 return ret_code;
4861 }
4863 for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
4864 hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
4865 sd_needed = irdma_est_sd(dev, hmc_info);
4866 ibdev_dbg(to_ibdev(dev),
4867 "HMC: FW max resources sd_needed[%08d] first_sd_index[%04d]\n",
4868 sd_needed, hmc_info->first_sd_index);
4869 ibdev_dbg(to_ibdev(dev), "HMC: sd count %d where max sd is %d\n",
4870 hmc_info->sd_table.sd_cnt, hmc_fpm_misc->max_sds);
4872 qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
4874 powerof2 = 1;
4875 while (powerof2 <= qpwanted)
4876 powerof2 *= 2;
4877 powerof2 /= 2;
4878 qpwanted = powerof2;
4880 mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
4881 pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
4883 ibdev_dbg(to_ibdev(dev),
4884 "HMC: req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
4885 qp_count, hmc_fpm_misc->max_sds,
4886 hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
4887 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
4888 hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
4889 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
4890 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
4891 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
4892 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
4893 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
4894 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
4895 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
4896 hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
4897 hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
4899 hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
4901 while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
4902 qpwanted /= 2;
4904 do {
4905 ++loop_count;
4906 hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
4907 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
4908 min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
4909 hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */
4910 hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
4912 hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
4913 powerof2 = 1;
4914 while (powerof2 < hte)
4915 powerof2 *= 2;
4916 hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt =
4917 powerof2 * hmc_fpm_misc->ht_multiplier;
4918 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
4919 cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
4921 cfg_fpm_value_gen_2(dev, hmc_info, qpwanted);
4923 hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted);
4924 hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
4925 hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
4926 hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
4927 hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
4928 hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
4929 (round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket;
4931 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
4932 sd_needed = irdma_est_sd(dev, hmc_info);
4933 ibdev_dbg(to_ibdev(dev),
4934 "HMC: sd_needed = %d, hmc_fpm_misc->max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n",
4935 sd_needed, hmc_fpm_misc->max_sds, mrwanted,
4936 pblewanted, qpwanted);
4938 /* Do not reduce resources further. All objects fit with max SDs */
4939 if (sd_needed <= hmc_fpm_misc->max_sds)
4942 sd_diff = sd_needed - hmc_fpm_misc->max_sds;
4943 if (sd_diff > 128) {
4944 if (qpwanted > 128 && sd_diff > 144)
4945 qpwanted /= 2;
4946 mrwanted /= 2;
4947 pblewanted /= 2;
4948 continue;
4949 }
4950 if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
4951 pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
4952 pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
4953 continue;
4954 } else if (pblewanted > (100 * FPM_MULTIPLIER)) {
4955 pblewanted -= 10 * FPM_MULTIPLIER;
4956 } else if (pblewanted > FPM_MULTIPLIER) {
4957 pblewanted -= FPM_MULTIPLIER;
4958 } else if (qpwanted <= 128) {
4959 if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256)
4960 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2;
4961 if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
4962 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
4964 if (mrwanted > FPM_MULTIPLIER)
4965 mrwanted -= FPM_MULTIPLIER;
4966 if (!(loop_count % 10) && qpwanted > 128) {
4967 qpwanted /= 2;
4968 if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
4969 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
4970 }
4971 } while (loop_count < 2000);
4973 if (sd_needed > hmc_fpm_misc->max_sds) {
4974 ibdev_dbg(to_ibdev(dev),
4975 "HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
4976 loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
4977 return IRDMA_ERR_CFG;
4980 if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) {
4981 pblewanted += (hmc_fpm_misc->max_sds - sd_needed) * 256 *
4982 FPM_MULTIPLIER;
4983 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
4984 sd_needed = irdma_est_sd(dev, hmc_info);
4987 ibdev_dbg(to_ibdev(dev),
4988 "HMC: loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n",
4989 loop_count, sd_needed,
4990 hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt,
4991 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
4992 hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt,
4993 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt,
4994 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt,
4995 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt,
4996 hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index);
4998 ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
4999 if (ret_code) {
5000 ibdev_dbg(to_ibdev(dev),
5001 "HMC: cfg_iw_fpm returned error_code[x%08X]\n",
5002 readl(dev->hw_regs[IRDMA_CQPERRCODES]));
5003 return ret_code;
5004 }
5006 mem_size = sizeof(struct irdma_hmc_sd_entry) *
5007 (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
5008 virt_mem.size = mem_size;
5009 virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
5010 if (!virt_mem.va) {
5011 ibdev_dbg(to_ibdev(dev),
5012 "HMC: failed to allocate memory for sd_entry buffer\n");
5013 return IRDMA_ERR_NO_MEMORY;
5014 }
5015 hmc_info->sd_table.sd_entry = virt_mem.va;
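/*
 * Illustrative restatement (not driver code) of the sizing policy above:
 * the loop sheds the cheapest resources first, re-estimating after each
 * cut and giving up after 2000 passes. Simplified to its core shape:
 *
 *	while (irdma_est_sd(dev, hmc_info) > hmc_fpm_misc->max_sds &&
 *	       ++loop_count < 2000) {
 *		if (pblewanted > 100 * FPM_MULTIPLIER)
 *			pblewanted -= 10 * FPM_MULTIPLIER;	// PBLEs first
 *		else if (mrwanted > FPM_MULTIPLIER)
 *			mrwanted -= FPM_MULTIPLIER;		// then MRs
 *		else
 *			qpwanted /= 2;				// QPs last
 *	}
 */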
5021 * irdma_exec_cqp_cmd - execute cqp cmd when wqes are available
5022 * @dev: sc device struct
5023 * @pcmdinfo: cqp command info
5025 static enum irdma_status_code irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
5026 struct cqp_cmds_info *pcmdinfo)
5028 enum irdma_status_code status;
5029 struct irdma_dma_mem val_mem;
5032 dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
5033 switch (pcmdinfo->cqp_cmd) {
5034 case IRDMA_OP_CEQ_DESTROY:
5035 status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
5036 pcmdinfo->in.u.ceq_destroy.scratch,
5039 case IRDMA_OP_AEQ_DESTROY:
5040 status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
5041 pcmdinfo->in.u.aeq_destroy.scratch,
5045 case IRDMA_OP_CEQ_CREATE:
5046 status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
5047 pcmdinfo->in.u.ceq_create.scratch,
5050 case IRDMA_OP_AEQ_CREATE:
5051 status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
5052 pcmdinfo->in.u.aeq_create.scratch,
5055 case IRDMA_OP_QP_UPLOAD_CONTEXT:
5056 status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev,
5057 &pcmdinfo->in.u.qp_upload_context.info,
5058 pcmdinfo->in.u.qp_upload_context.scratch,
5061 case IRDMA_OP_CQ_CREATE:
5062 status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq,
5063 pcmdinfo->in.u.cq_create.scratch,
5064 pcmdinfo->in.u.cq_create.check_overflow,
5067 case IRDMA_OP_CQ_MODIFY:
5068 status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq,
5069 &pcmdinfo->in.u.cq_modify.info,
5070 pcmdinfo->in.u.cq_modify.scratch,
5073 case IRDMA_OP_CQ_DESTROY:
5074 status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq,
5075 pcmdinfo->in.u.cq_destroy.scratch,
5078 case IRDMA_OP_QP_FLUSH_WQES:
5079 status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp,
5080 &pcmdinfo->in.u.qp_flush_wqes.info,
5081 pcmdinfo->in.u.qp_flush_wqes.scratch,
5084 case IRDMA_OP_GEN_AE:
5085 status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp,
5086 &pcmdinfo->in.u.gen_ae.info,
5087 pcmdinfo->in.u.gen_ae.scratch,
5090 case IRDMA_OP_MANAGE_PUSH_PAGE:
5091 status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp,
5092 &pcmdinfo->in.u.manage_push_page.info,
5093 pcmdinfo->in.u.manage_push_page.scratch,
5096 case IRDMA_OP_UPDATE_PE_SDS:
5097 status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev,
5098 &pcmdinfo->in.u.update_pe_sds.info,
5099 pcmdinfo->in.u.update_pe_sds.scratch);
5101 case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE:
5102 /* switch to calling through the call table */
5104 irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
5105 &pcmdinfo->in.u.manage_hmc_pm.info,
5106 pcmdinfo->in.u.manage_hmc_pm.scratch,
5109 case IRDMA_OP_SUSPEND:
5110 status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp,
5111 pcmdinfo->in.u.suspend_resume.qp,
5112 pcmdinfo->in.u.suspend_resume.scratch);
5114 case IRDMA_OP_RESUME:
5115 status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp,
5116 pcmdinfo->in.u.suspend_resume.qp,
5117 pcmdinfo->in.u.suspend_resume.scratch);
5119 case IRDMA_OP_QUERY_FPM_VAL:
5120 val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa;
5121 val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va;
5122 status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp,
5123 pcmdinfo->in.u.query_fpm_val.scratch,
5124 pcmdinfo->in.u.query_fpm_val.hmc_fn_id,
5125 &val_mem, true, IRDMA_CQP_WAIT_EVENT);
5127 case IRDMA_OP_COMMIT_FPM_VAL:
5128 val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa;
5129 val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va;
5130 status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp,
5131 pcmdinfo->in.u.commit_fpm_val.scratch,
5132 pcmdinfo->in.u.commit_fpm_val.hmc_fn_id,
5135 IRDMA_CQP_WAIT_EVENT);
5137 case IRDMA_OP_STATS_ALLOCATE:
5140 case IRDMA_OP_STATS_FREE:
5141 status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
5142 &pcmdinfo->in.u.stats_manage.info,
5144 pcmdinfo->in.u.stats_manage.scratch);
5146 case IRDMA_OP_STATS_GATHER:
5147 status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
5148 &pcmdinfo->in.u.stats_gather.info,
5149 pcmdinfo->in.u.stats_gather.scratch);
5151 case IRDMA_OP_WS_MODIFY_NODE:
5152 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5153 &pcmdinfo->in.u.ws_node.info,
5155 pcmdinfo->in.u.ws_node.scratch);
5157 case IRDMA_OP_WS_DELETE_NODE:
5158 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5159 &pcmdinfo->in.u.ws_node.info,
5161 pcmdinfo->in.u.ws_node.scratch);
5163 case IRDMA_OP_WS_ADD_NODE:
5164 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5165 &pcmdinfo->in.u.ws_node.info,
5167 pcmdinfo->in.u.ws_node.scratch);
5169 case IRDMA_OP_SET_UP_MAP:
5170 status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp,
5171 &pcmdinfo->in.u.up_map.info,
5172 pcmdinfo->in.u.up_map.scratch);
5174 case IRDMA_OP_QUERY_RDMA_FEATURES:
5175 status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp,
5176 &pcmdinfo->in.u.query_rdma.query_buff_mem,
5177 pcmdinfo->in.u.query_rdma.scratch);
5179 case IRDMA_OP_DELETE_ARP_CACHE_ENTRY:
5180 status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp,
5181 pcmdinfo->in.u.del_arp_cache_entry.scratch,
5182 pcmdinfo->in.u.del_arp_cache_entry.arp_index,
5185 case IRDMA_OP_MANAGE_APBVT_ENTRY:
5186 status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp,
5187 &pcmdinfo->in.u.manage_apbvt_entry.info,
5188 pcmdinfo->in.u.manage_apbvt_entry.scratch,
5191 case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY:
5192 status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp,
5193 &pcmdinfo->in.u.manage_qhash_table_entry.info,
5194 pcmdinfo->in.u.manage_qhash_table_entry.scratch,
5197 case IRDMA_OP_QP_MODIFY:
5198 status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp,
5199 &pcmdinfo->in.u.qp_modify.info,
5200 pcmdinfo->in.u.qp_modify.scratch,
5203 case IRDMA_OP_QP_CREATE:
5204 status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp,
5205 &pcmdinfo->in.u.qp_create.info,
5206 pcmdinfo->in.u.qp_create.scratch,
5209 case IRDMA_OP_QP_DESTROY:
5210 status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
5211 pcmdinfo->in.u.qp_destroy.scratch,
5212 pcmdinfo->in.u.qp_destroy.remove_hash_idx,
5213 pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
5216 case IRDMA_OP_ALLOC_STAG:
5217 status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
5218 &pcmdinfo->in.u.alloc_stag.info,
5219 pcmdinfo->in.u.alloc_stag.scratch,
5222 case IRDMA_OP_MR_REG_NON_SHARED:
5223 status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
5224 &pcmdinfo->in.u.mr_reg_non_shared.info,
5225 pcmdinfo->in.u.mr_reg_non_shared.scratch,
5228 case IRDMA_OP_DEALLOC_STAG:
5229 status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
5230 &pcmdinfo->in.u.dealloc_stag.info,
5231 pcmdinfo->in.u.dealloc_stag.scratch,
5234 case IRDMA_OP_MW_ALLOC:
5235 status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
5236 &pcmdinfo->in.u.mw_alloc.info,
5237 pcmdinfo->in.u.mw_alloc.scratch,
5240 case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
5241 status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
5242 &pcmdinfo->in.u.add_arp_cache_entry.info,
5243 pcmdinfo->in.u.add_arp_cache_entry.scratch,
5246 case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
5247 status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
5248 pcmdinfo->in.u.alloc_local_mac_entry.scratch,
5251 case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
5252 status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
5253 &pcmdinfo->in.u.add_local_mac_entry.info,
5254 pcmdinfo->in.u.add_local_mac_entry.scratch,
5257 case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
5258 status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
5259 pcmdinfo->in.u.del_local_mac_entry.scratch,
5260 pcmdinfo->in.u.del_local_mac_entry.entry_idx,
5261 pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
5264 case IRDMA_OP_AH_CREATE:
5265 status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
5266 &pcmdinfo->in.u.ah_create.info,
5267 pcmdinfo->in.u.ah_create.scratch);
5269 case IRDMA_OP_AH_DESTROY:
5270 status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
5271 &pcmdinfo->in.u.ah_destroy.info,
5272 pcmdinfo->in.u.ah_destroy.scratch);
5274 case IRDMA_OP_MC_CREATE:
5275 status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
5276 &pcmdinfo->in.u.mc_create.info,
5277 pcmdinfo->in.u.mc_create.scratch);
5279 case IRDMA_OP_MC_DESTROY:
5280 status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
5281 &pcmdinfo->in.u.mc_destroy.info,
5282 pcmdinfo->in.u.mc_destroy.scratch);
5284 case IRDMA_OP_MC_MODIFY:
5285 status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
5286 &pcmdinfo->in.u.mc_modify.info,
5287 pcmdinfo->in.u.mc_modify.scratch);
5289 default:
5290 status = IRDMA_NOT_SUPPORTED;
5291 break;
5292 }
5294 return status;
5298 * irdma_process_cqp_cmd - process all cqp commands
5299 * @dev: sc device struct
5300 * @pcmdinfo: cqp command info
5302 enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
5303 struct cqp_cmds_info *pcmdinfo)
5305 enum irdma_status_code status = 0;
5306 unsigned long flags;
5308 spin_lock_irqsave(&dev->cqp_lock, flags);
5309 if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
5310 status = irdma_exec_cqp_cmd(dev, pcmdinfo);
5311 else
5312 list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
5313 spin_unlock_irqrestore(&dev->cqp_lock, flags);
5318 * irdma_process_bh - called from tasklet for cqp list
5319 * @dev: sc device struct
5321 enum irdma_status_code irdma_process_bh(struct irdma_sc_dev *dev)
5323 enum irdma_status_code status = 0;
5324 struct cqp_cmds_info *pcmdinfo;
5325 unsigned long flags;
5327 spin_lock_irqsave(&dev->cqp_lock, flags);
5328 while (!list_empty(&dev->cqp_cmd_head) &&
5329 !irdma_cqp_ring_full(dev->cqp)) {
5330 pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
5331 status = irdma_exec_cqp_cmd(dev, pcmdinfo);
5332 if (status)
5333 break;
5334 }
5335 spin_unlock_irqrestore(&dev->cqp_lock, flags);
5337 return status;
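/*
 * Illustrative sketch (not driver code): commands that arrive while the
 * CQP SQ is full are parked on cqp_cmd_head by irdma_process_cqp_cmd();
 * a completion-side tasklet then re-issues them. A hypothetical tasklet
 * body reduces to:
 *
 *	static void example_cqp_bh(struct irdma_sc_dev *dev)
 *	{
 *		(void)irdma_process_bh(dev);	// drain backlog while ring has room
 *	}
 */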
5340 * irdma_cfg_aeq - Configure AEQ interrupt
5341 * @dev: pointer to the device structure
5342 * @idx: vector index
5343 * @enable: True to enable, False to disable
5345 void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
5349 reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
5350 FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
5351 FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, 3);
5352 writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
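/*
 * Illustrative usage (not driver code): the AEQ is routed to its MSI-X
 * vector at setup and quiesced before destroy, using the msix_idx saved
 * by irdma_sc_aeq_init():
 *
 *	irdma_cfg_aeq(dev, aeq->msix_idx, true);	// enable cause, route vector
 *	...
 *	irdma_cfg_aeq(dev, aeq->msix_idx, false);	// disable before teardown
 */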
5356 * sc_vsi_update_stats - Update statistics
5357 * @vsi: sc_vsi instance to update
5359 void sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
5361 struct irdma_gather_stats *gather_stats;
5362 struct irdma_gather_stats *last_gather_stats;
5364 gather_stats = vsi->pestat->gather_info.gather_stats_va;
5365 last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
5366 irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
5367 last_gather_stats);
5371 * irdma_wait_pe_ready - Check if firmware is ready
5372 * @dev: provides access to registers
5374 static int irdma_wait_pe_ready(struct irdma_sc_dev *dev)
5382 statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
5383 statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
5384 statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
5385 if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
5386 statuscpu2 == 0x80)
5387 return 0;
5388 mdelay(1000);
5389 } while (retrycount++ < dev->hw_attrs.max_pe_ready_count);
5390 return -1;
5393 static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
5395 switch (dev->hw_attrs.uk_attrs.hw_rev) {
5396 case IRDMA_GEN_1:
5397 i40iw_init_hw(dev);
5398 break;
5399 case IRDMA_GEN_2:
5400 icrdma_init_hw(dev);
5401 break;
5402 }
5406 * irdma_sc_dev_init - Initialize control part of device
5407 * @ver: version
5408 * @dev: Device pointer
5409 * @info: Device init info
5411 enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver,
5412 struct irdma_sc_dev *dev,
5413 struct irdma_device_init_info *info)
5416 enum irdma_status_code ret_code = 0;
5419 INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */
5420 mutex_init(&dev->ws_mutex);
5421 dev->hmc_fn_id = info->hmc_fn_id;
5422 dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
5423 dev->fpm_query_buf = info->fpm_query_buf;
5424 dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
5425 dev->fpm_commit_buf = info->fpm_commit_buf;
5427 dev->hw->hw_addr = info->bar0;
5428 /* Setup the hardware limits, hmc may limit further */
5429 dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
5430 dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
5431 dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
5432 dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
5433 dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
5434 dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
5435 dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
5436 dev->hw_attrs.uk_attrs.max_hw_wq_frags = IRDMA_MAX_WQ_FRAGMENT_COUNT;
5437 dev->hw_attrs.uk_attrs.max_hw_read_sges = IRDMA_MAX_SGE_RD;
5438 dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
5439 dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
5440 dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
5441 dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
5442 dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
5443 dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
5444 dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);
5446 dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
5447 dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
5448 dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
5449 dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;
5451 dev->hw_attrs.max_pe_ready_count = 14;
5452 dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
5453 dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
5454 dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
5456 dev->hw_attrs.uk_attrs.hw_rev = ver;
5457 irdma_sc_init_hw(dev);
5459 if (irdma_wait_pe_ready(dev))
5460 return IRDMA_ERR_TIMEOUT;
5462 val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
5463 db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
5464 if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
5465 ibdev_dbg(to_ibdev(dev),
5466 "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
5468 return IRDMA_ERR_PE_DOORBELL_NOT_ENA;
5470 dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
5476 * irdma_update_stats - Update statistics
5477 * @hw_stats: hw_stats instance to update
5478 * @gather_stats: updated stat counters
5479 * @last_gather_stats: last stat counters
5481 void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
5482 struct irdma_gather_stats *gather_stats,
5483 struct irdma_gather_stats *last_gather_stats)
5485 u64 *stats_val = hw_stats->stats_val_32;
5487 stats_val[IRDMA_HW_STAT_INDEX_RXVLANERR] +=
5488 IRDMA_STATS_DELTA(gather_stats->rxvlanerr,
5489 last_gather_stats->rxvlanerr,
5490 IRDMA_MAX_STATS_32);
5491 stats_val[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] +=
5492 IRDMA_STATS_DELTA(gather_stats->ip4rxdiscard,
5493 last_gather_stats->ip4rxdiscard,
5494 IRDMA_MAX_STATS_32);
5495 stats_val[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] +=
5496 IRDMA_STATS_DELTA(gather_stats->ip4rxtrunc,
5497 last_gather_stats->ip4rxtrunc,
5498 IRDMA_MAX_STATS_32);
5499 stats_val[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] +=
5500 IRDMA_STATS_DELTA(gather_stats->ip4txnoroute,
5501 last_gather_stats->ip4txnoroute,
5502 IRDMA_MAX_STATS_32);
5503 stats_val[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] +=
5504 IRDMA_STATS_DELTA(gather_stats->ip6rxdiscard,
5505 last_gather_stats->ip6rxdiscard,
5506 IRDMA_MAX_STATS_32);
5507 stats_val[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] +=
5508 IRDMA_STATS_DELTA(gather_stats->ip6rxtrunc,
5509 last_gather_stats->ip6rxtrunc,
5510 IRDMA_MAX_STATS_32);
5511 stats_val[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] +=
5512 IRDMA_STATS_DELTA(gather_stats->ip6txnoroute,
5513 last_gather_stats->ip6txnoroute,
5514 IRDMA_MAX_STATS_32);
5515 stats_val[IRDMA_HW_STAT_INDEX_TCPRTXSEG] +=
5516 IRDMA_STATS_DELTA(gather_stats->tcprtxseg,
5517 last_gather_stats->tcprtxseg,
5518 IRDMA_MAX_STATS_32);
5519 stats_val[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] +=
5520 IRDMA_STATS_DELTA(gather_stats->tcprxopterr,
5521 last_gather_stats->tcprxopterr,
5522 IRDMA_MAX_STATS_32);
5523 stats_val[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] +=
5524 IRDMA_STATS_DELTA(gather_stats->tcprxprotoerr,
5525 last_gather_stats->tcprxprotoerr,
5526 IRDMA_MAX_STATS_32);
5527 stats_val[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] +=
5528 IRDMA_STATS_DELTA(gather_stats->rxrpcnphandled,
5529 last_gather_stats->rxrpcnphandled,
5530 IRDMA_MAX_STATS_32);
5531 stats_val[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] +=
5532 IRDMA_STATS_DELTA(gather_stats->rxrpcnpignored,
5533 last_gather_stats->rxrpcnpignored,
5534 IRDMA_MAX_STATS_32);
5535 stats_val[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] +=
5536 IRDMA_STATS_DELTA(gather_stats->txnpcnpsent,
5537 last_gather_stats->txnpcnpsent,
5538 IRDMA_MAX_STATS_32);
5539 stats_val = hw_stats->stats_val_64;
5540 stats_val[IRDMA_HW_STAT_INDEX_IP4RXOCTS] +=
5541 IRDMA_STATS_DELTA(gather_stats->ip4rxocts,
5542 last_gather_stats->ip4rxocts,
5543 IRDMA_MAX_STATS_48);
5544 stats_val[IRDMA_HW_STAT_INDEX_IP4RXPKTS] +=
5545 IRDMA_STATS_DELTA(gather_stats->ip4rxpkts,
5546 last_gather_stats->ip4rxpkts,
5547 IRDMA_MAX_STATS_48);
5548 stats_val[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] +=
5549 IRDMA_STATS_DELTA(gather_stats->ip4rxfrags,
5550 last_gather_stats->ip4rxfrags,
5551 IRDMA_MAX_STATS_48);
5552 stats_val[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] +=
5553 IRDMA_STATS_DELTA(gather_stats->ip4rxmcpkts,
5554 last_gather_stats->ip4rxmcpkts,
5555 IRDMA_MAX_STATS_48);
5556 stats_val[IRDMA_HW_STAT_INDEX_IP4TXOCTS] +=
5557 IRDMA_STATS_DELTA(gather_stats->ip4txocts,
5558 last_gather_stats->ip4txocts,
5559 IRDMA_MAX_STATS_48);
5560 stats_val[IRDMA_HW_STAT_INDEX_IP4TXPKTS] +=
5561 IRDMA_STATS_DELTA(gather_stats->ip4txpkts,
5562 last_gather_stats->ip4txpkts,
5563 IRDMA_MAX_STATS_48);
5564 stats_val[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] +=
5565 IRDMA_STATS_DELTA(gather_stats->ip4txfrag,
5566 last_gather_stats->ip4txfrag,
5567 IRDMA_MAX_STATS_48);
5568 stats_val[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] +=
5569 IRDMA_STATS_DELTA(gather_stats->ip4txmcpkts,
5570 last_gather_stats->ip4txmcpkts,
5571 IRDMA_MAX_STATS_48);
5572 stats_val[IRDMA_HW_STAT_INDEX_IP6RXOCTS] +=
5573 IRDMA_STATS_DELTA(gather_stats->ip6rxocts,
5574 last_gather_stats->ip6rxocts,
5575 IRDMA_MAX_STATS_48);
5576 stats_val[IRDMA_HW_STAT_INDEX_IP6RXPKTS] +=
5577 IRDMA_STATS_DELTA(gather_stats->ip6rxpkts,
5578 last_gather_stats->ip6rxpkts,
5579 IRDMA_MAX_STATS_48);
5580 stats_val[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] +=
5581 IRDMA_STATS_DELTA(gather_stats->ip6rxfrags,
5582 last_gather_stats->ip6rxfrags,
5583 IRDMA_MAX_STATS_48);
5584 stats_val[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] +=
5585 IRDMA_STATS_DELTA(gather_stats->ip6rxmcpkts,
5586 last_gather_stats->ip6rxmcpkts,
5587 IRDMA_MAX_STATS_48);
5588 stats_val[IRDMA_HW_STAT_INDEX_IP6TXOCTS] +=
5589 IRDMA_STATS_DELTA(gather_stats->ip6txocts,
5590 last_gather_stats->ip6txocts,
5591 IRDMA_MAX_STATS_48);
5592 stats_val[IRDMA_HW_STAT_INDEX_IP6TXPKTS] +=
5593 IRDMA_STATS_DELTA(gather_stats->ip6txpkts,
5594 last_gather_stats->ip6txpkts,
5595 IRDMA_MAX_STATS_48);
5596 stats_val[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] +=
5597 IRDMA_STATS_DELTA(gather_stats->ip6txfrags,
5598 last_gather_stats->ip6txfrags,
5599 IRDMA_MAX_STATS_48);
5600 stats_val[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] +=
5601 IRDMA_STATS_DELTA(gather_stats->ip6txmcpkts,
5602 last_gather_stats->ip6txmcpkts,
5603 IRDMA_MAX_STATS_48);
5604 stats_val[IRDMA_HW_STAT_INDEX_TCPRXSEGS] +=
5605 IRDMA_STATS_DELTA(gather_stats->tcprxsegs,
5606 last_gather_stats->tcprxsegs,
5607 IRDMA_MAX_STATS_48);
5608 stats_val[IRDMA_HW_STAT_INDEX_TCPTXSEG] +=
5609 IRDMA_STATS_DELTA(gather_stats->tcptxsegs,
5610 last_gather_stats->tcptxsegs,
5611 IRDMA_MAX_STATS_48);
5612 stats_val[IRDMA_HW_STAT_INDEX_RDMARXRDS] +=
5613 IRDMA_STATS_DELTA(gather_stats->rdmarxrds,
5614 last_gather_stats->rdmarxrds,
5615 IRDMA_MAX_STATS_48);
5616 stats_val[IRDMA_HW_STAT_INDEX_RDMARXSNDS] +=
5617 IRDMA_STATS_DELTA(gather_stats->rdmarxsnds,
5618 last_gather_stats->rdmarxsnds,
5619 IRDMA_MAX_STATS_48);
5620 stats_val[IRDMA_HW_STAT_INDEX_RDMARXWRS] +=
5621 IRDMA_STATS_DELTA(gather_stats->rdmarxwrs,
5622 last_gather_stats->rdmarxwrs,
5623 IRDMA_MAX_STATS_48);
5624 stats_val[IRDMA_HW_STAT_INDEX_RDMATXRDS] +=
5625 IRDMA_STATS_DELTA(gather_stats->rdmatxrds,
5626 last_gather_stats->rdmatxrds,
5627 IRDMA_MAX_STATS_48);
5628 stats_val[IRDMA_HW_STAT_INDEX_RDMATXSNDS] +=
5629 IRDMA_STATS_DELTA(gather_stats->rdmatxsnds,
5630 last_gather_stats->rdmatxsnds,
5631 IRDMA_MAX_STATS_48);
5632 stats_val[IRDMA_HW_STAT_INDEX_RDMATXWRS] +=
5633 IRDMA_STATS_DELTA(gather_stats->rdmatxwrs,
5634 last_gather_stats->rdmatxwrs,
5635 IRDMA_MAX_STATS_48);
5636 stats_val[IRDMA_HW_STAT_INDEX_RDMAVBND] +=
5637 IRDMA_STATS_DELTA(gather_stats->rdmavbn,
5638 last_gather_stats->rdmavbn,
5639 IRDMA_MAX_STATS_48);
5640 stats_val[IRDMA_HW_STAT_INDEX_RDMAVINV] +=
5641 IRDMA_STATS_DELTA(gather_stats->rdmavinv,
5642 last_gather_stats->rdmavinv,
5643 IRDMA_MAX_STATS_48);
5644 stats_val[IRDMA_HW_STAT_INDEX_UDPRXPKTS] +=
5645 IRDMA_STATS_DELTA(gather_stats->udprxpkts,
5646 last_gather_stats->udprxpkts,
5647 IRDMA_MAX_STATS_48);
5648 stats_val[IRDMA_HW_STAT_INDEX_UDPTXPKTS] +=
5649 IRDMA_STATS_DELTA(gather_stats->udptxpkts,
5650 last_gather_stats->udptxpkts,
5651 IRDMA_MAX_STATS_48);
5652 stats_val[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] +=
5653 IRDMA_STATS_DELTA(gather_stats->rxnpecnmrkpkts,
5654 last_gather_stats->rxnpecnmrkpkts,
5655 IRDMA_MAX_STATS_48);
5656 memcpy(last_gather_stats, gather_stats, sizeof(*last_gather_stats));
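/*
 * Worked example (not driver code): IRDMA_STATS_DELTA is assumed to
 * compute the wrap-safe difference of a fixed-width hardware counter. For
 * a 32-bit counter that wrapped from 0xFFFFFFF0 to 0x10:
 *
 *	delta == (0x10 - 0xFFFFFFF0) & IRDMA_MAX_STATS_32 == 0x20
 *	// 32 events counted across the wrap, added to the 64-bit total
 */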