/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_ooo.h"
#define QED_IWARP_ORD_DEFAULT		32
#define QED_IWARP_IRD_DEFAULT		32
#define QED_IWARP_MAX_FW_MSS		4120

#define QED_EP_SIG 0xecabcdef

/* MPA v2 enhanced-negotiation header carried at the start of the private
 * data; both fields are big endian on the wire.
 */
struct mpa_v2_hdr {
	__be16 ird;
	__be16 ord;
};

#define MPA_V2_PEER2PEER_MODEL	0x8000
#define MPA_V2_SEND_RTR		0x4000	/* on ird */
#define MPA_V2_READ_RTR		0x4000	/* on ord */
#define MPA_V2_WRITE_RTR	0x8000
#define MPA_V2_IRD_ORD_MASK	0x3FFF

#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)

#define QED_IWARP_INVALID_TCP_CID	0xffffffff
#define QED_IWARP_RCV_WND_SIZE_DEF	(256 * 1024)
#define QED_IWARP_RCV_WND_SIZE_MIN	(0xffff)
#define TIMESTAMP_HEADER_SIZE		(12)
#define QED_IWARP_MAX_FIN_RT_DEFAULT	(2)

#define QED_IWARP_TS_EN			BIT(0)
#define QED_IWARP_DA_EN			BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED	(1)
#define QED_IWARP_PARAM_P2P		(1)

#define QED_IWARP_DEF_MAX_RT_TIME	(0)
#define QED_IWARP_DEF_CWND_FACTOR	(4)
#define QED_IWARP_DEF_KA_MAX_PROBE_CNT	(5)
#define QED_IWARP_DEF_KA_TIMEOUT	(1200000)	/* 20 min */
#define QED_IWARP_DEF_KA_INTERVAL	(1000)		/* 1 sec */
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
				 u8 fw_event_code, u16 echo,
				 union event_ring_data *data,
				 u8 fw_return_code);
/* Override devinfo with iWARP specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
	dev->max_qp = min_t(u32,
			    IWARP_MAX_QPS,
			    p_hwfn->p_rdma_info->num_qps) -
		      QED_IWARP_PREALLOC_CNT;

	dev->max_cq = dev->max_qp;

	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
}
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
	p_hwfn->b_rdma_enabled_in_prs = true;
}
/* We have two cid maps, one for tcp which should be used only from passive
 * syn processing and replacing a pre-allocated ep in the list. The second
 * for active tcp and for QPs.
 */
static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	if (cid < QED_IWARP_PREALLOC_CNT)
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
				    cid);
	else
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
void
qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
			 struct iwarp_init_func_ramrod_data *p_ramrod)
{
	p_ramrod->iwarp.ll2_ooo_q_index =
	    RESC_START(p_hwfn, QED_LL2_QUEUE) +
	    p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;

	p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
}
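
/* Allocate a cid from the main cid map and dynamically allocate the ILT
 * line backing it. The cid is translated to the absolute protocol cid
 * range before being returned to the caller; on ILT failure the cid is
 * released again.
 */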
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
		return rc;
	}

	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
	if (rc)
		qed_iwarp_cid_cleaned(p_hwfn, *cid);

	return rc;
}
static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
/* This function allocates a cid for passive tcp (called from syn receive).
 * The reason it's separate from the regular cid allocation is that it is
 * assured that these cids already have ilt allocated. They are preallocated
 * to ensure that we won't need to allocate memory during syn processing.
 */
static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "can't allocate iwarp tcp cid max-count=%d\n",
			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);

		*cid = QED_IWARP_INVALID_TCP_CID;
		return rc;
	}

	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
					    p_hwfn->p_rdma_info->proto);
	return 0;
}
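
/* Create a QP in firmware: allocate the shared queue page holding the
 * SQ/RQ PBLs, reserve a cid for the connection and post the CREATE_QP
 * ramrod. On any failure the cid and the shared queue are released again.
 */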
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
			struct qed_rdma_qp *qp,
			struct qed_rdma_create_qp_out_params *out_params)
{
	struct iwarp_create_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 physical_queue;
	u32 cid;
	int rc;

	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      IWARP_SHARED_QUEUE_PAGE_SIZE,
					      &qp->shared_queue_phys_addr,
					      GFP_KERNEL);
	if (!qp->shared_queue)
		return -ENOMEM;

	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		goto err1;

	qp->icid = (u16)cid;

	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.cid = qp->icid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_CREATE_QP,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		goto err2;

	p_ramrod = &p_ent->ramrod.iwarp_create_qp;

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	p_ramrod->pd = qp->pd;
	p_ramrod->sq_num_pages = qp->sq_num_pages;
	p_ramrod->rq_num_pages = qp->rq_num_pages;

	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);

	p_ramrod->cq_cid_for_sq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
	p_ramrod->cq_cid_for_rq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err2;

	return rc;

err2:
	qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  IWARP_SHARED_QUEUE_PAGE_SIZE,
			  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}
static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct iwarp_modify_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
	SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
		  0x1);
	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
	else
		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{
	switch (state) {
	case QED_ROCE_QP_STATE_RESET:
	case QED_ROCE_QP_STATE_INIT:
	case QED_ROCE_QP_STATE_RTR:
		return QED_IWARP_QP_STATE_IDLE;
	case QED_ROCE_QP_STATE_RTS:
		return QED_IWARP_QP_STATE_RTS;
	case QED_ROCE_QP_STATE_SQD:
		return QED_IWARP_QP_STATE_CLOSING;
	case QED_ROCE_QP_STATE_ERR:
		return QED_IWARP_QP_STATE_ERROR;
	case QED_ROCE_QP_STATE_SQE:
		return QED_IWARP_QP_STATE_TERMINATE;
	default:
		return QED_IWARP_QP_STATE_ERROR;
	}
}
static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{
	switch (state) {
	case QED_IWARP_QP_STATE_IDLE:
		return QED_ROCE_QP_STATE_INIT;
	case QED_IWARP_QP_STATE_RTS:
		return QED_ROCE_QP_STATE_RTS;
	case QED_IWARP_QP_STATE_TERMINATE:
		return QED_ROCE_QP_STATE_SQE;
	case QED_IWARP_QP_STATE_CLOSING:
		return QED_ROCE_QP_STATE_SQD;
	case QED_IWARP_QP_STATE_ERROR:
		return QED_ROCE_QP_STATE_ERR;
	default:
		return QED_ROCE_QP_STATE_ERR;
	}
}
static const char * const iwarp_state_names[] = {
	"IDLE",
	"RTS",
	"TERMINATE",
	"CLOSING",
	"ERROR",
};
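
/* Transition a QP between iWARP states. Protected by qp_lock since this can
 * be called both from the upper layer and from async RST/FIN handling; only
 * transitions requested by the upper layer (!internal) that firmware must be
 * told about trigger a MODIFY_QP ramrod via qed_iwarp_modify_fw().
 */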
int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_qp *qp,
		    enum qed_iwarp_qp_state new_state, bool internal)
{
	enum qed_iwarp_qp_state prev_iw_state;
	bool modify_fw = false;
	int rc = 0;

	/* modify QP can be called from upper-layer or as a result of async
	 * RST/FIN... therefore need to protect
	 */
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	prev_iw_state = qp->iwarp_state;

	if (prev_iw_state == new_state) {
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
		return 0;
	}

	switch (prev_iw_state) {
	case QED_IWARP_QP_STATE_IDLE:
		switch (new_state) {
		case QED_IWARP_QP_STATE_RTS:
			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			if (!internal)
				modify_fw = true;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_RTS:
		switch (new_state) {
		case QED_IWARP_QP_STATE_CLOSING:
			if (!internal)
				modify_fw = true;

			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			if (!internal)
				modify_fw = true;
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_ERROR:
		switch (new_state) {
		case QED_IWARP_QP_STATE_IDLE:
			qp->iwarp_state = new_state;
			break;
		case QED_IWARP_QP_STATE_CLOSING:
			/* could happen due to race... do nothing.... */
			break;
		default:
			rc = -EINVAL;
		}
		break;
	case QED_IWARP_QP_STATE_TERMINATE:
	case QED_IWARP_QP_STATE_CLOSING:
		qp->iwarp_state = new_state;
		break;
	default:
		break;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
		   qp->icid,
		   iwarp_state_names[prev_iw_state],
		   iwarp_state_names[qp->iwarp_state],
		   internal ? " internal" : "");

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

	if (modify_fw)
		rc = qed_iwarp_modify_fw(p_hwfn, qp);

	return rc;
}
int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}
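
/* Free the ep's DMA buffer and the ep itself, optionally unlinking it from
 * the active ep list first, and detach the ep from its QP if attached.
 */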
static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
				 struct qed_iwarp_ep *ep,
				 bool remove_from_active_list)
{
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(*ep->ep_buffer_virt),
			  ep->ep_buffer_virt, ep->ep_buffer_phys);

	if (remove_from_active_list) {
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	if (ep->qp)
		ep->qp->ep = NULL;

	kfree(ep);
}
int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_iwarp_ep *ep = qp->ep;
	int wait_count = 0;
	int rc = 0;

	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
		rc = qed_iwarp_modify_qp(p_hwfn, qp,
					 QED_IWARP_QP_STATE_ERROR, false);
		if (rc)
			return rc;
	}

	/* Make sure ep is closed before returning and freeing memory. */
	if (ep) {
		while (ep->state != QED_IWARP_EP_CLOSED && wait_count++ < 200)
			msleep(100);

		if (ep->state != QED_IWARP_EP_CLOSED)
			DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
				  ep->state);

		qed_iwarp_destroy_ep(p_hwfn, ep, false);
	}

	rc = qed_iwarp_fw_destroy(p_hwfn, qp);

	if (qp->shared_queue)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  IWARP_SHARED_QUEUE_PAGE_SIZE,
				  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}
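
/* Allocate an endpoint object plus the coherent buffer used to exchange
 * private data and async output with firmware. The ep is returned through
 * ep_out and is not linked to any list here.
 */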
static int
qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
{
	struct qed_iwarp_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->state = QED_IWARP_EP_INIT;

	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(*ep->ep_buffer_virt),
						&ep->ep_buffer_phys,
						GFP_KERNEL);
	if (!ep->ep_buffer_virt) {
		rc = -ENOMEM;
		goto err;
	}

	ep->sig = QED_EP_SIG;

	*ep_out = ep;

	return 0;

err:
	kfree(ep);
	return rc;
}
static void
qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
		   p_tcp_ramrod->tcp.local_mac_addr_lo,
		   p_tcp_ramrod->tcp.local_mac_addr_mid,
		   p_tcp_ramrod->tcp.local_mac_addr_hi,
		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
		   p_tcp_ramrod->tcp.remote_mac_addr_hi);

	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
		   p_tcp_ramrod->tcp.flow_label,
		   p_tcp_ramrod->tcp.ttl,
		   p_tcp_ramrod->tcp.tos_or_tc,
		   p_tcp_ramrod->tcp.mss,
		   p_tcp_ramrod->tcp.rcv_wnd_scale,
		   p_tcp_ramrod->tcp.connect_mode,
		   p_tcp_ramrod->tcp.flags);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
		   p_tcp_ramrod->tcp.syn_ip_payload_length,
		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
}
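
/* Post the TCP_OFFLOAD ramrod that creates the firmware TCP connection for
 * an ep: it points firmware at the ep's ULP/async buffers, fills in the
 * MAC/IP/port tuple and TCP parameters (mss, keepalive, cwnd), and for
 * passive connections hands over the received SYN payload.
 */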
static int
qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
	struct tcp_offload_params_opt2 *tcp;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t async_output_phys;
	dma_addr_t in_pdata_phys;
	u16 physical_q;
	u8 tcp_flags;
	int rc;
	int i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = ep->tcp_cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
		init_data.comp_mode = QED_SPQ_MODE_CB;
	else
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;

	in_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, in_pdata);
	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
		       in_pdata_phys);

	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));

	async_output_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, async_output);
	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
		       async_output_phys);

	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;

	tcp = &p_tcp_ramrod->tcp;
	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
			    &tcp->remote_mac_addr_mid,
			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
			    &tcp->local_mac_addr_lo, ep->local_mac_addr);

	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);

	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;

	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
		  !!(tcp_flags & QED_IWARP_TS_EN));

	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
		  !!(tcp_flags & QED_IWARP_DA_EN));

	tcp->ip_version = ep->cm_info.ip_version;

	for (i = 0; i < 4; i++) {
		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
	}

	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
	tcp->mss = cpu_to_le16(ep->mss);
	tcp->flow_label = 0;
	tcp->ttl = 0x40;
	tcp->tos_or_tc = 0;

	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
	tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
	tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
	tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;

	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
	tcp->connect_mode = ep->connect_mode;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		tcp->syn_ip_payload_length =
		    cpu_to_le16(ep->syn_ip_payload_length);
		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
	}

	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);

	return rc;
}
static void
qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct qed_iwarp_cm_event_params params;
	struct mpa_v2_hdr *mpa_v2;
	union async_output *async_data;
	u16 mpa_ord, mpa_ird;
	u8 mpa_hdr_size = 0;
	u8 mpa_rev;

	async_data = &ep->ep_buffer_virt->async_output;

	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
		   async_data->mpa_request.ulp_data_len,
		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));

	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Read ord/ird values from private data buffer */
		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
		mpa_hdr_size = sizeof(*mpa_v2);

		mpa_ord = ntohs(mpa_v2->ord);
		mpa_ird = ntohs(mpa_v2->ird);

		/* Temporarily store the requested ord/ird in cm_info; they
		 * are replaced with the negotiated values during accept
		 */
		ep->cm_info.ord = (u8)min_t(u16,
					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_ORD_DEFAULT);

		ep->cm_info.ird = (u8)min_t(u16,
					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_IRD_DEFAULT);

		/* Peer2Peer negotiation */
		ep->rtr_type = MPA_RTR_TYPE_NONE;
		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
			if (mpa_ord & MPA_V2_WRITE_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;

			if (mpa_ord & MPA_V2_READ_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;

			if (mpa_ird & MPA_V2_SEND_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;

			ep->rtr_type &= iwarp_info->rtr_type;

			/* if we're left with no match send our capabilities */
			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
				ep->rtr_type = iwarp_info->rtr_type;
		}

		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
	} else {
		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);

	/* Strip mpa v2 hdr from private data before sending to upper layer */
	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;

	ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
				       mpa_hdr_size;

	params.event = QED_IWARP_EVENT_MPA_REQUEST;
	params.cm_info = &ep->cm_info;
	params.ep_context = ep;
	params.status = 0;

	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
	ep->event_cb(ep->cb_context, &params);
}
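
/* Post the MPA_OFFLOAD ramrod that starts (or rejects) the MPA handshake on
 * an offloaded TCP connection. For an accept the QP's shared queue and stats
 * queue are handed to firmware; for a reject only the reject flag is set and
 * the tcp_cid is used in place of the QP icid.
 */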
static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
	struct qed_iwarp_info *iwarp_info;
	struct qed_sp_init_data init_data;
	dma_addr_t async_output_phys;
	struct qed_spq_entry *p_ent;
	dma_addr_t out_pdata_phys;
	dma_addr_t in_pdata_phys;
	struct qed_rdma_qp *qp;
	bool reject;
	int rc;

	if (!ep)
		return -EINVAL;

	qp = ep->qp;
	reject = !qp;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = reject ? ep->tcp_cid : qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
		init_data.comp_mode = QED_SPQ_MODE_CB;
	else
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
	out_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, out_pdata);
	DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
		       out_pdata_phys);
	p_mpa_ramrod->common.outgoing_ulp_buffer.len =
	    ep->cm_info.private_data_len;
	p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;

	p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
	p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;

	p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;

	in_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, in_pdata);
	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
		       in_pdata_phys);
	p_mpa_ramrod->incoming_ulp_buffer.len =
	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
	async_output_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, async_output);
	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
		       async_output_phys);
	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	if (!reject) {
		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
			       qp->shared_queue_phys_addr);
		p_mpa_ramrod->stats_counter_id =
		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
	} else {
		p_mpa_ramrod->common.reject = 1;
	}

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
	p_mpa_ramrod->mode = ep->mpa_rev;
	SET_FIELD(p_mpa_ramrod->rtr_pref,
		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);

	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (!reject)
		ep->cid = qp->icid;	/* Now they're migrated. */

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
		   reject ? 0xffff : qp->icid,
		   ep->tcp_cid,
		   rc,
		   ep->cm_info.ird,
		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);

	return rc;
}
static void
qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	ep->state = QED_IWARP_EP_INIT;
	if (ep->qp)
		ep->qp->ep = NULL;
	ep->qp = NULL;
	memset(&ep->cm_info, 0, sizeof(ep->cm_info));

	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
		/* We don't care about the return code, it's ok if tcp_cid
		 * remains invalid...in this case we'll defer allocation
		 */
		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
	}
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	list_move_tail(&ep->list_entry,
		       &p_hwfn->p_rdma_info->iwarp.ep_free_list);

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}
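
/* Extract the negotiated ord/ird from the MPA v2 header (when enhanced
 * negotiation is used) and strip the header so the upper layer only sees
 * the peer's private data.
 */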
static void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct mpa_v2_hdr *mpa_v2_params;
	union async_output *async_data;
	u16 mpa_ird, mpa_ord;
	u8 mpa_data_size = 0;

	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
		mpa_v2_params =
		    (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
		mpa_data_size = sizeof(*mpa_v2_params);
		mpa_ird = ntohs(mpa_v2_params->ird);
		mpa_ord = ntohs(mpa_v2_params->ord);

		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
	}
	async_data = &ep->ep_buffer_virt->async_output;

	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
	ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
				       mpa_data_size;
}
static void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_cm_event_params params;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		DP_NOTICE(p_hwfn,
			  "MPA reply event not expected on passive side!\n");
		return;
	}

	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;

	qed_iwarp_parse_private_data(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

	params.cm_info = &ep->cm_info;
	params.ep_context = ep;
	params.status = 0;

	ep->mpa_reply_processed = true;

	ep->event_cb(ep->cb_context, &params);
}
#define QED_IWARP_CONNECT_MODE_STRING(ep) \
	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
/* Called as a result of the event:
 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
 */
static void
qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
	else
		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
		qed_iwarp_parse_private_data(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

	params.cm_info = &ep->cm_info;

	params.ep_context = ep;

	ep->state = QED_IWARP_EP_CLOSED;

	switch (fw_return_code) {
	case RDMA_RETURN_OK:
		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS,
				    true);
		ep->state = QED_IWARP_EP_ESTABLISHED;
		params.status = 0;
		break;
	case IWARP_CONN_ERROR_MPA_TIMEOUT:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -EBUSY;
		break;
	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_RST:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
			  ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_MPA_FIN:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_TERMINATE:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	default:
		params.status = -ECONNRESET;
		break;
	}

	ep->event_cb(ep->cb_context, &params);

	/* on passive side, if there is no associated QP (REJECT) we need to
	 * return the ep to the pool (in the regular case we add an element
	 * in accept instead of this one).
	 * In both cases we need to remove it from the ep_list.
	 */
	if (fw_return_code != RDMA_RETURN_OK) {
		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
		    (!ep->qp)) {	/* Rejected */
			qed_iwarp_return_ep(p_hwfn, ep);
		} else {
			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
			list_del(&ep->list_entry);
			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		}
	}
}
static void
qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
{
	struct mpa_v2_hdr *mpa_v2_params;
	u16 mpa_ird, mpa_ord;

	*mpa_data_size = 0;
	if (MPA_REV2(ep->mpa_rev)) {
		mpa_v2_params =
		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
		*mpa_data_size = sizeof(*mpa_v2_params);

		mpa_ird = (u16)ep->cm_info.ird;
		mpa_ord = (u16)ep->cm_info.ord;

		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
			mpa_ird |= MPA_V2_PEER2PEER_MODEL;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
				mpa_ird |= MPA_V2_SEND_RTR;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
				mpa_ord |= MPA_V2_WRITE_RTR;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
				mpa_ord |= MPA_V2_READ_RTR;
		}

		mpa_v2_params->ird = htons(mpa_ird);
		mpa_v2_params->ord = htons(mpa_ord);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
			   mpa_v2_params->ird,
			   mpa_v2_params->ord,
			   *((u32 *)mpa_v2_params),
			   mpa_ord & MPA_V2_IRD_ORD_MASK,
			   mpa_ird & MPA_V2_IRD_ORD_MASK,
			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
			   !!(mpa_ird & MPA_V2_SEND_RTR),
			   !!(mpa_ord & MPA_V2_WRITE_RTR),
			   !!(mpa_ord & MPA_V2_READ_RTR));
	}
}
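
/* Active-side connect: validate the requested ord/ird, allocate a cid and
 * an ep, build the MPA v2 private data and kick off the TCP offload. The
 * MPA offload itself is posted later, once the TCP connection is
 * established.
 */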
int qed_iwarp_connect(void *rdma_cxt,
		      struct qed_iwarp_connect_in *iparams,
		      struct qed_iwarp_connect_out *oparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_info *iwarp_info;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;
	u32 cid;
	int rc;

	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
		DP_NOTICE(p_hwfn,
			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
			  iparams->qp->icid, iparams->cm_info.ord,
			  iparams->cm_info.ird);

		return -EINVAL;
	}

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	/* Allocate ep object */
	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		return rc;

	rc = qed_iwarp_create_ep(p_hwfn, &ep);
	if (rc)
		goto err;

	ep->tcp_cid = cid;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ep->qp = iparams->qp;
	ep->qp->ep = ep;
	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));

	ep->cm_info.ord = iparams->cm_info.ord;
	ep->cm_info.ird = iparams->cm_info.ird;

	ep->rtr_type = iwarp_info->rtr_type;
	if (!iwarp_info->peer2peer)
		ep->rtr_type = MPA_RTR_TYPE_NONE;

	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
		ep->cm_info.ord = 1;

	ep->mpa_rev = iwarp_info->mpa_rev;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->cm_info.private_data,
	       iparams->cm_info.private_data_len);

	ep->mss = iparams->mss;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

	ep->event_cb = iparams->event_cb;
	ep->cb_context = iparams->cb_context;
	ep->connect_mode = TCP_CONNECT_ACTIVE;

	oparams->ep_context = ep;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
		   iparams->qp->icid, ep->tcp_cid, rc);

	if (rc) {
		qed_iwarp_destroy_ep(p_hwfn, ep, true);
		goto err;
	}

	return rc;
err:
	qed_iwarp_cid_cleaned(p_hwfn, cid);

	return rc;
}
static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_ep *ep = NULL;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
		DP_ERR(p_hwfn, "Ep list is empty\n");
		goto out;
	}

	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
			      struct qed_iwarp_ep, list_entry);

	/* in some cases we could have failed allocating a tcp cid when added
	 * from accept / failure... retry now..this is not the common case.
	 */
	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);

		/* if we fail we could look for another entry with a valid
		 * tcp_cid, but since we don't expect to reach this anyway
		 * it's not worth the handling
		 */
		if (rc) {
			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
			ep = NULL;
			goto out;
		}
	}

	list_del(&ep->list_entry);

out:
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	return ep;
}
#define QED_IWARP_MAX_CID_CLEAN_TIME  100
#define QED_IWARP_MAX_NO_PROGRESS_CNT 5

/* This function waits for all the bits of a bmap to be cleared, as long as
 * there is progress (i.e. the number of bits left to be cleared decreases)
 * the function continues.
 */
static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
{
	int prev_weight = 0;
	int wait_count = 0;
	int weight = 0;

	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	prev_weight = weight;

	while (weight) {
		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);

		weight = bitmap_weight(bmap->bitmap, bmap->max_count);

		if (prev_weight == weight) {
			wait_count++;
		} else {
			prev_weight = weight;
			wait_count = 0;
		}

		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
			DP_NOTICE(p_hwfn,
				  "%s bitmap wait timed out (%d cids pending)\n",
				  bmap->name, weight);
			return -EBUSY;
		}
	}
	return 0;
}
static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
{
	int rc;
	int i;

	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
					    &p_hwfn->p_rdma_info->tcp_cid_map);
	if (rc)
		return rc;

	/* Now free the tcp cids from the main cid map */
	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);

	/* Now wait for all cids to be completed */
	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
					      &p_hwfn->p_rdma_info->cid_map);
}
static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_ep *ep;

	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
				      struct qed_iwarp_ep, list_entry);

		if (!ep) {
			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
			break;
		}
		list_del(&ep->list_entry);

		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);

		qed_iwarp_destroy_ep(p_hwfn, ep, false);
	}
}
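
/* Pre-allocate endpoints for passive connections. At init time the whole
 * pool (QED_IWARP_PREALLOC_CNT) is created and cids are moved from the main
 * cid map into the tcp_cid map; later calls replenish a single ep at a time.
 */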
static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
{
	struct qed_iwarp_ep *ep;
	int rc = 0;
	int count;
	u32 cid;
	int i;

	count = init ? QED_IWARP_PREALLOC_CNT : 1;
	for (i = 0; i < count; i++) {
		rc = qed_iwarp_create_ep(p_hwfn, &ep);
		if (rc)
			return rc;

		/* During initialization we allocate from the main pool,
		 * afterwards we allocate only from the tcp_cid.
		 */
		if (init) {
			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
			if (rc)
				goto err;
			qed_iwarp_set_tcp_cid(p_hwfn, cid);
		} else {
			/* We don't care about the return code, it's ok if
			 * tcp_cid remains invalid...in this case we'll
			 * defer allocation
			 */
			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
		}

		ep->tcp_cid = cid;

		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_add_tail(&ep->list_entry,
			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	return rc;

err:
	qed_iwarp_destroy_ep(p_hwfn, ep, false);

	return rc;
}
int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate bitmap for tcp cid. These are used by passive side
	 * to ensure it can allocate a tcp cid during dpc that was
	 * pre-acquired and doesn't require dynamic allocation of ilt
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate tcp cid, rc = %d\n", rc);
		return rc;
	}

	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	rc = qed_iwarp_prealloc_ep(p_hwfn, true);
	if (rc)
		return rc;

	return qed_ooo_alloc(p_hwfn);
}
void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	qed_ooo_free(p_hwfn);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
	kfree(iwarp_info->mpa_bufs);
	kfree(iwarp_info->partial_fpdus);
	kfree(iwarp_info->mpa_intermediate_buf);
}
int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;
	int rc;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context received in accept is NULL\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
		   iparams->qp->icid, ep->tcp_cid);

	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
			   iparams->qp->icid,
			   ep->tcp_cid, iparams->ord, iparams->ird);
		return -EINVAL;
	}

	qed_iwarp_prealloc_ep(p_hwfn, false);

	ep->cb_context = iparams->cb_context;
	ep->qp = iparams->qp;
	ep->qp->ep = ep;

	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Negotiate ord/ird: if upperlayer requested ord larger than
		 * ird advertised by remote, we need to decrease our ord
		 */
		if (iparams->ord > ep->cm_info.ird)
			iparams->ord = ep->cm_info.ird;

		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
		    (iparams->ird == 0))
			iparams->ird = 1;
	}

	/* Update cm_info ord/ird to be negotiated values */
	ep->cm_info.ord = iparams->ord;
	ep->cm_info.ird = iparams->ird;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->private_data, iparams->private_data_len);

	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
	if (rc)
		qed_iwarp_modify_qp(p_hwfn,
				    iparams->qp, QED_IWARP_QP_STATE_ERROR,
				    true);

	return rc;
}
int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context received in reject is NULL\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);

	ep->cb_context = iparams->cb_context;
	ep->qp = NULL;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->private_data, iparams->private_data_len);

	return qed_iwarp_mpa_offload(p_hwfn, ep);
}
static void
qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
			struct qed_iwarp_cm_info *cm_info)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
		   cm_info->ip_version);

	if (cm_info->ip_version == QED_TCP_IPV4)
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
			   cm_info->remote_ip, cm_info->remote_port,
			   cm_info->local_ip, cm_info->local_port,
			   cm_info->vlan);
	else
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
			   cm_info->remote_ip, cm_info->remote_port,
			   cm_info->local_ip, cm_info->local_port,
			   cm_info->vlan);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len = %x ord = %d, ird = %d\n",
		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
}
static int
qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_ll2_buff *buf, u8 handle)
{
	int rc;

	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
				    (u16)buf->buff_size, buf, 1);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
			  rc, handle);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
				  buf->data, buf->data_phys_addr);
		kfree(buf);
	}

	return rc;
}
static bool
qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
{
	struct qed_iwarp_ep *ep = NULL;
	bool ep_found = false;

	list_for_each_entry(ep,
			    &p_hwfn->p_rdma_info->iwarp.ep_list,
			    list_entry) {
		if ((ep->cm_info.local_port == cm_info->local_port) &&
		    (ep->cm_info.remote_port == cm_info->remote_port) &&
		    (ep->cm_info.vlan == cm_info->vlan) &&
		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
			    sizeof(cm_info->local_ip)) &&
		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
			    sizeof(cm_info->remote_ip))) {
			ep_found = true;
			break;
		}
	}

	if (ep_found) {
		DP_NOTICE(p_hwfn,
			  "SYN received on active connection - dropping\n");
		qed_iwarp_print_cm_info(p_hwfn, cm_info);

		return true;
	}

	return false;
}
static struct qed_iwarp_listener *
qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info)
{
	struct qed_iwarp_listener *listener = NULL;
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	bool found = false;

	qed_iwarp_print_cm_info(p_hwfn, cm_info);

	list_for_each_entry(listener,
			    &p_hwfn->p_rdma_info->iwarp.listen_list,
			    list_entry) {
		if (listener->port == cm_info->local_port) {
			if (!memcmp(listener->ip_addr,
				    ip_zero, sizeof(ip_zero))) {
				found = true;
				break;
			}

			if (!memcmp(listener->ip_addr,
				    cm_info->local_ip,
				    sizeof(cm_info->local_ip)) &&
			    (listener->vlan == cm_info->vlan)) {
				found = true;
				break;
			}
		}
	}

	if (found) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
			   listener);
		return listener;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
	return NULL;
}
static int
qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info,
		       void *buf,
		       u8 *remote_mac_addr,
		       u8 *local_mac_addr,
		       int *payload_len, int *tcp_start_offset)
{
	struct vlan_ethhdr *vethh;
	bool vlan_valid = false;
	struct ipv6hdr *ip6h;
	struct ethhdr *ethh;
	struct tcphdr *tcph;
	struct iphdr *iph;
	int eth_hlen, ip_hlen;
	int eth_type;
	int i;

	ethh = buf;
	eth_type = ntohs(ethh->h_proto);
	if (eth_type == ETH_P_8021Q) {
		vlan_valid = true;
		vethh = (struct vlan_ethhdr *)ethh;
		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
	}

	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);

	ether_addr_copy(remote_mac_addr, ethh->h_source);
	ether_addr_copy(local_mac_addr, ethh->h_dest);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
		   eth_type, ethh->h_source);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
		   eth_hlen, ethh->h_dest);

	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);

	if (eth_type == ETH_P_IP) {
		if (iph->protocol != IPPROTO_TCP) {
			DP_NOTICE(p_hwfn,
				  "Unexpected ip protocol on ll2 %x\n",
				  iph->protocol);
			return -EINVAL;
		}

		cm_info->local_ip[0] = ntohl(iph->daddr);
		cm_info->remote_ip[0] = ntohl(iph->saddr);
		cm_info->ip_version = QED_TCP_IPV4;

		ip_hlen = (iph->ihl) * sizeof(u32);
		*payload_len = ntohs(iph->tot_len) - ip_hlen;
	} else if (eth_type == ETH_P_IPV6) {
		ip6h = (struct ipv6hdr *)iph;

		if (ip6h->nexthdr != IPPROTO_TCP) {
			DP_NOTICE(p_hwfn,
				  "Unexpected ip protocol on ll2 %x\n",
				  ip6h->nexthdr);
			return -EINVAL;
		}

		for (i = 0; i < 4; i++) {
			cm_info->local_ip[i] =
			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
			cm_info->remote_ip[i] =
			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
		}
		cm_info->ip_version = QED_TCP_IPV6;

		ip_hlen = sizeof(*ip6h);
		*payload_len = ntohs(ip6h->payload_len);
	} else {
		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
		return -EINVAL;
	}

	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);

	if (!tcph->syn) {
		DP_NOTICE(p_hwfn,
			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
			  iph->ihl, tcph->source, tcph->dest);
		return -EINVAL;
	}

	cm_info->local_port = ntohs(tcph->dest);
	cm_info->remote_port = ntohs(tcph->source);

	qed_iwarp_print_cm_info(p_hwfn, cm_info);

	*tcp_start_offset = eth_hlen + ip_hlen;

	return 0;
}
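
/* Look up the partial-FPDU tracking structure for a connection, indexed by
 * the cid relative to the start of the iWARP protocol cid range.
 */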
static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
						      u16 cid)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct qed_iwarp_fpdu *partial_fpdu;
	u32 idx;

	idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
	if (idx >= iwarp_info->max_num_partial_fpdus) {
		DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
		       iwarp_info->max_num_partial_fpdus);
		return NULL;
	}

	partial_fpdu = &iwarp_info->partial_fpdus[idx];

	return partial_fpdu;
}
enum qed_iwarp_mpa_pkt_type {
	QED_IWARP_MPA_PKT_PACKED,
	QED_IWARP_MPA_PKT_PARTIAL,
	QED_IWARP_MPA_PKT_UNALIGNED
};

#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)

/* Pad to multiple of 4 */
#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)				   \
	(QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) +			   \
					 QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
	 QED_IWARP_MPA_CRC32_DIGEST_SIZE)

/* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */
#define QED_IWARP_MAX_BDS_PER_FPDU 3

static const char * const pkt_type_str[] = {
	"QED_IWARP_MPA_PKT_PACKED",
	"QED_IWARP_MPA_PKT_PARTIAL",
	"QED_IWARP_MPA_PKT_UNALIGNED"
};
static int
qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_fpdu *fpdu,
		      struct qed_iwarp_ll2_buff *buf);
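
/* Classify an incoming TCP payload relative to MPA framing: a packed FPDU
 * fits entirely in this segment, a partial FPDU extends beyond it (including
 * the special case where only one length byte arrived), and unaligned means
 * we are still completing a previously started FPDU.
 */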
static enum qed_iwarp_mpa_pkt_type
qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_fpdu *fpdu,
		       u16 tcp_payload_len, u8 *mpa_data)
{
	enum qed_iwarp_mpa_pkt_type pkt_type;
	u16 mpa_len;

	if (fpdu->incomplete_bytes) {
		pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
		goto out;
	}

	/* special case of one byte remaining...
	 * lower byte will be read next packet
	 */
	if (tcp_payload_len == 1) {
		fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
		goto out;
	}

	mpa_len = ntohs(*((u16 *)(mpa_data)));
	fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);

	if (fpdu->fpdu_length <= tcp_payload_len)
		pkt_type = QED_IWARP_MPA_PKT_PACKED;
	else
		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;

out:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
		   pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);

	return pkt_type;
}
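
/* Seed the fpdu tracking state from the first buffer of an FPDU: record the
 * packet header fragment used to transmit it and how many bytes are still
 * missing (QED_IWARP_INVALID_FPDU_LENGTH if even the length field itself is
 * incomplete).
 */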
static void
qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
		    struct qed_iwarp_fpdu *fpdu,
		    struct unaligned_opaque_data *pkt_data,
		    u16 tcp_payload_size, u8 placement_offset)
{
	fpdu->mpa_buf = buf;
	fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
	fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
	fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset;
	fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset;

	if (tcp_payload_size == 1)
		fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
	else if (tcp_payload_size < fpdu->fpdu_length)
		fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
	else
		fpdu->incomplete_bytes = 0;	/* complete fpdu */

	fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
}
static int
qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
		 struct qed_iwarp_fpdu *fpdu,
		 struct unaligned_opaque_data *pkt_data,
		 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
{
	u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
	int rc;

	/* need to copy the data from the partial packet stored in fpdu
	 * to the new buf, for this we also need to move the data currently
	 * placed on the buf. The assumption is that the buffer is big enough
	 * since fpdu_length <= mss, we use an intermediate buffer since
	 * we may need to copy the new data to an overlapping location
	 */
	if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
		DP_ERR(p_hwfn,
		       "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
		       buf->buff_size, fpdu->mpa_frag_len,
		       tcp_payload_size, fpdu->incomplete_bytes);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
		   fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
		   (u8 *)(buf->data) + pkt_data->first_mpa_offset,
		   tcp_payload_size);

	memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
	memcpy(tmp_buf + fpdu->mpa_frag_len,
	       (u8 *)(buf->data) + pkt_data->first_mpa_offset,
	       tcp_payload_size);

	rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
	if (rc)
		return rc;

	/* If we managed to post the buffer copy the data to the new buffer
	 * o/w this will occur in the next round...
	 */
	memcpy((u8 *)(buf->data), tmp_buf,
	       fpdu->mpa_frag_len + tcp_payload_size);

	fpdu->mpa_buf = buf;
	/* fpdu->pkt_hdr remains as is */
	/* fpdu->mpa_frag is overridden with new buf */
	fpdu->mpa_frag = buf->data_phys_addr;
	fpdu->mpa_frag_virt = buf->data;
	fpdu->mpa_frag_len += tcp_payload_size;

	fpdu->incomplete_bytes -= tcp_payload_size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
		   buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
		   fpdu->incomplete_bytes);

	return 0;
}
static void
qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
			     struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
{
	u16 mpa_len;

	/* Update incomplete packets if needed */
	if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
		/* Missing lower byte is now available */
		mpa_len = fpdu->fpdu_length | *mpa_data;
		fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
		/* one byte of hdr */
		fpdu->mpa_frag_len = 1;
		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
			   mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
	}
}
#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
	(GET_FIELD((_curr_pkt)->flags,	   \
		   UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
/* This function is used to recycle a buffer using the ll2 drop option. It
 * uses the mechanism to ensure that all buffers posted to tx before this one
 * were completed. The buffer sent here will be sent as a cookie in the tx
 * completion function and can then be reposted to rx chain when done. The flow
 * that requires this is the flow where a FPDU splits over more than 3 tcp
 * segments. In this case the driver needs to re-post a rx buffer instead of
 * the one received, but driver can't simply repost a buffer it copied from
 * as there is a case where the buffer was originally a packed FPDU, and is
 * partially posted to FW. Driver needs to ensure FW is done with it.
 */
static int
qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_fpdu *fpdu,
		      struct qed_iwarp_ll2_buff *buf)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	u8 ll2_handle;
	int rc;

	memset(&tx_pkt, 0, sizeof(tx_pkt));
	tx_pkt.num_of_bds = 1;
	tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
	tx_pkt.first_frag = fpdu->pkt_hdr;
	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
	buf->piggy_buf = NULL;
	tx_pkt.cookie = buf;

	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;

	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
	if (rc)
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Can't drop packet rc=%d\n", rc);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
		   (unsigned long int)tx_pkt.first_frag,
		   tx_pkt.first_frag_len, buf, rc);

	return rc;
}
static int
qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	u8 ll2_handle;
	int rc;

	memset(&tx_pkt, 0, sizeof(tx_pkt));
	tx_pkt.num_of_bds = 1;
	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;

	tx_pkt.first_frag = fpdu->pkt_hdr;
	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
	tx_pkt.enable_ip_cksum = true;
	tx_pkt.enable_l4_cksum = true;
	tx_pkt.calc_ip_len = true;
	/* vlan overload with enum iwarp_ll2_tx_queues */
	tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;

	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;

	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
	if (rc)
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Can't send right edge rc=%d\n", rc);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
		   tx_pkt.num_of_bds,
		   (unsigned long int)tx_pkt.first_frag,
		   tx_pkt.first_frag_len, rc);

	return rc;
}
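
/* Transmit an aligned FPDU to the loopback queue: BD0 carries the packet
 * header, BD1 the part of the FPDU already held in mpa_frag, and for an
 * unaligned FPDU BD2 points at the remainder inside the new buffer.
 */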
static int
qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
		    struct qed_iwarp_fpdu *fpdu,
		    struct unaligned_opaque_data *curr_pkt,
		    struct qed_iwarp_ll2_buff *buf,
		    u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	u8 ll2_handle;
	int rc;

	memset(&tx_pkt, 0, sizeof(tx_pkt));

	/* An unaligned packet means it's split over two tcp segments. So the
	 * complete packet requires 3 bds, one for the header, one for the
	 * part of the fpdu of the first tcp segment, and the last fragment
	 * will point to the remainder of the fpdu. A packed pdu, requires only
	 * two bds, one for the header and one for the data.
	 */
	tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */

	/* Send the mpa_buf only with the last fpdu (in case of packed) */
	if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
	    tcp_payload_size <= fpdu->fpdu_length)
		tx_pkt.cookie = fpdu->mpa_buf;

	tx_pkt.first_frag = fpdu->pkt_hdr;
	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
	tx_pkt.enable_ip_cksum = true;
	tx_pkt.enable_l4_cksum = true;
	tx_pkt.calc_ip_len = true;
	/* vlan overload with enum iwarp_ll2_tx_queues */
	tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;

	/* special case of unaligned packet and not packed, need to send
	 * both buffers as cookie to release.
	 */
	if (tcp_payload_size == fpdu->incomplete_bytes)
		fpdu->mpa_buf->piggy_buf = buf;

	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;

	/* Set first fragment to header */
	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
	if (rc)
		goto out;

	/* Set second fragment to first part of packet */
	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
					       fpdu->mpa_frag,
					       fpdu->mpa_frag_len);
	if (rc)
		goto out;

	if (!fpdu->incomplete_bytes)
		goto out;

	/* Set third fragment to second part of the packet */
	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
					       ll2_handle,
					       buf->data_phys_addr +
					       curr_pkt->first_mpa_offset,
					       fpdu->incomplete_bytes);
out:
	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
		   tx_pkt.num_of_bds,
		   tx_pkt.first_frag_len,
		   fpdu->mpa_frag_len,
		   fpdu->incomplete_bytes, rc);

	return rc;
}
static void
qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
		       struct unaligned_opaque_data *curr_pkt,
		       u32 opaque_data0, u32 opaque_data1)
{
	u64 opaque_data;

	opaque_data = HILO_64(opaque_data1, opaque_data0);
	*curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);

	curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset +
				     le16_to_cpu(curr_pkt->first_mpa_offset);
	curr_pkt->cid = le32_to_cpu(curr_pkt->cid);
}

/* This function is called when an unaligned or incomplete MPA packet arrives.
 * The driver needs to align the packet, perhaps using previous data, and
 * send it down to FW once it is aligned.
 */
static int
qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
			  struct qed_iwarp_ll2_mpa_buf *mpa_buf)
{
	struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
	struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
	enum qed_iwarp_mpa_pkt_type pkt_type;
	struct qed_iwarp_fpdu *fpdu;
	int rc = -EINVAL;
	u8 *mpa_data;

	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff);
	if (!fpdu) { /* something corrupt with cid, post rx back */
		DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
		       curr_pkt->cid);
		goto err;
	}

	do {
		mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset);

		pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
						  mpa_buf->tcp_payload_len,
						  mpa_data);

		switch (pkt_type) {
		case QED_IWARP_MPA_PKT_PARTIAL:
			qed_iwarp_init_fpdu(buf, fpdu,
					    curr_pkt,
					    mpa_buf->tcp_payload_len,
					    mpa_buf->placement_offset);

			if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
				mpa_buf->tcp_payload_len = 0;
				break;
			}

			rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
			if (rc) {
				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
					   "Can't send FPDU:reset rc=%d\n", rc);
				memset(fpdu, 0, sizeof(*fpdu));
				break;
			}

			mpa_buf->tcp_payload_len = 0;
			break;
		case QED_IWARP_MPA_PKT_PACKED:
			qed_iwarp_init_fpdu(buf, fpdu,
					    curr_pkt,
					    mpa_buf->tcp_payload_len,
					    mpa_buf->placement_offset);

			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
						 mpa_buf->tcp_payload_len,
						 pkt_type);
			if (rc) {
				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
					   "Can't send FPDU:reset rc=%d\n", rc);
				memset(fpdu, 0, sizeof(*fpdu));
				break;
			}

			mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
			curr_pkt->first_mpa_offset += fpdu->fpdu_length;
			break;
		case QED_IWARP_MPA_PKT_UNALIGNED:
			qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
			if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
				/* special handling of fpdu split over more
				 * than 2 segments
				 */
				if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
					rc = qed_iwarp_win_right_edge(p_hwfn,
								      fpdu);
					/* packet will be re-processed later */
					if (rc)
						return rc;
				}

				rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
						      buf,
						      mpa_buf->tcp_payload_len);
				if (rc) /* packet will be re-processed later */
					return rc;

				mpa_buf->tcp_payload_len = 0;
				break;
			}

			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
						 mpa_buf->tcp_payload_len,
						 pkt_type);
			if (rc) {
				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
					   "Can't send FPDU:delay rc=%d\n", rc);
				/* don't reset fpdu -> we need it for next
				 * classify
				 */
				break;
			}

			mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
			curr_pkt->first_mpa_offset += fpdu->incomplete_bytes;
			/* The framed PDU was sent - no more incomplete bytes */
			fpdu->incomplete_bytes = 0;
			break;
		}
	} while (mpa_buf->tcp_payload_len && !rc);

	return rc;

err:
	qed_iwarp_ll2_post_rx(p_hwfn, buf,
			      p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
	return rc;
}
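
/* Drain the list of MPA buffers waiting for alignment processing. -EBUSY
 * from qed_iwarp_process_mpa_pkt() means the tx ring is full; the buffer
 * stays on the pending list and is retried from the next tx completion on
 * the MPA connection.
 */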
static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
	int rc;

	while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
		mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
					   struct qed_iwarp_ll2_mpa_buf,
					   list_entry);

		rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);

		/* busy means break and continue processing later, don't
		 * remove the buf from the pending list.
		 */
		if (rc == -EBUSY)
			break;

		list_move_tail(&mpa_buf->list_entry,
			       &iwarp_info->mpa_buf_list);

		if (rc) { /* different error, don't continue */
			DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
			break;
		}
	}
}
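
/* RX completion handler for the unaligned MPA LL2 connection. The completed
 * buffer is wrapped in an mpa_buf descriptor, queued on the pending list and
 * processed (aligned and re-sent to the FW) from there.
 */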
static void
qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_iwarp_ll2_mpa_buf *mpa_buf;
	struct qed_iwarp_info *iwarp_info;
	struct qed_hwfn *p_hwfn = cxt;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	mpa_buf = list_first_entry_or_null(&iwarp_info->mpa_buf_list,
					   struct qed_iwarp_ll2_mpa_buf,
					   list_entry);
	if (!mpa_buf) {
		DP_ERR(p_hwfn, "No free mpa buf\n");
		goto err;
	}

	list_del(&mpa_buf->list_entry);
	qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
			       data->opaque_data_0, data->opaque_data_1);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
		   data->length.packet_length, mpa_buf->data.first_mpa_offset,
		   mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
		   mpa_buf->data.cid);

	mpa_buf->ll2_buf = data->cookie;
	mpa_buf->tcp_payload_len = data->length.packet_length -
				   mpa_buf->data.first_mpa_offset;
	mpa_buf->data.first_mpa_offset += data->u.placement_offset;
	mpa_buf->placement_offset = data->u.placement_offset;

	list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);

	qed_iwarp_process_pending_pkts(p_hwfn);
	return;
err:
	qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
			      iwarp_info->ll2_mpa_handle);
}
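
/* RX completion handler for the SYN LL2 connection. Validate the SYN, match
 * it against a registered listener and, on a match, allocate an ep and
 * offload the nascent TCP connection to the FW. Unmatched SYNs are posted
 * back to the chip over the loopback tx queue.
 */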
static void
qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_iwarp_ll2_buff *buf = data->cookie;
	struct qed_iwarp_listener *listener;
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_iwarp_cm_info cm_info;
	struct qed_hwfn *p_hwfn = cxt;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
	struct qed_iwarp_ep *ep;
	int tcp_start_offset;
	u8 ll2_syn_handle;
	int payload_len;
	u32 hdr_size;
	int rc;

	memset(&cm_info, 0, sizeof(cm_info));
	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;

	/* Check if packet was received with errors... */
	if (data->err_flags) {
		DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
			  data->err_flags);
		goto err;
	}

	if (GET_FIELD(data->parse_flags,
		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
		goto err;
	}

	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
				    data->u.placement_offset, remote_mac_addr,
				    local_mac_addr, &payload_len,
				    &tcp_start_offset);
	if (rc)
		goto err;

	/* Check if there is a listener for this 4-tuple+vlan */
	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
	if (!listener) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
			   data->parse_flags, data->length.packet_length);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
		tx_pkt.first_frag = buf->data_phys_addr +
				    data->u.placement_offset;
		tx_pkt.first_frag_len = data->length.packet_length;
		tx_pkt.cookie = buf;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
					       &tx_pkt, true);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Can't post SYN back to chip rc=%d\n", rc);
			goto err;
		}
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
	/* There may be an open ep on this connection if this is a syn
	 * retransmit... need to make sure there isn't...
	 */
	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
		goto err;

	ep = qed_iwarp_get_free_ep(p_hwfn);
	if (!ep)
		goto err;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, local_mac_addr);

	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));

	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

	ep->event_cb = listener->event_cb;
	ep->cb_context = listener->cb_context;
	ep->connect_mode = TCP_CONNECT_PASSIVE;

	ep->syn = buf;
	ep->syn_ip_payload_length = (u16)payload_len;
	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
			   tcp_start_offset;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
	if (rc) {
		qed_iwarp_return_ep(p_hwfn, ep);
		goto err;
	}

	return;
err:
	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
}
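
/* LL2 rx-release callback: free the DMA buffer and its descriptor */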
static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t rx_buf_addr,
				     bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);
	kfree(buffer);
}
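
/* LL2 tx completion callback. Buffers sent on the iwarp LL2 connections
 * started life as rx buffers, so the buffer (and any piggybacked buffer from
 * the unaligned flow) is posted back to the rx chain, and pending MPA work
 * is kicked in case it was waiting for free tx descriptors.
 */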
static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
				      void *cookie, dma_addr_t first_frag_addr,
				      bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_iwarp_ll2_buff *piggy;
	struct qed_hwfn *p_hwfn = cxt;

	if (!buffer) /* can happen in packed mpa unaligned... */
		return;

	/* this was originally an rx packet, post it back */
	piggy = buffer->piggy_buf;
	if (piggy) {
		buffer->piggy_buf = NULL;
		qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
	}

	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);

	if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
		qed_iwarp_process_pending_pkts(p_hwfn);
}

static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	if (!buffer)
		return;

	if (buffer->piggy_buf) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  buffer->piggy_buf->buff_size,
				  buffer->piggy_buf->data,
				  buffer->piggy_buf->data_phys_addr);
		kfree(buffer->piggy_buf);
	}

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);
	kfree(buffer);
}

/* The only slowpath for iwarp ll2 is unalign flush. When this completion
 * is received, need to reset the FPDU.
 */
static void
qed_iwarp_ll2_slowpath(void *cxt,
		       u8 connection_handle,
		       u32 opaque_data_0, u32 opaque_data_1)
{
	struct unaligned_opaque_data unalign_data;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_iwarp_fpdu *fpdu;

	qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
			       opaque_data_0, opaque_data_1);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n",
		   unalign_data.cid);

	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid);
	if (fpdu)
		memset(fpdu, 0, sizeof(*fpdu));
}
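
/* Tear down the three iwarp LL2 connections (SYN, OOO and unaligned MPA)
 * and remove the MAC filter that steered iwarp traffic to them.
 */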
static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	int rc = 0;

	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_syn_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
	}

	if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_ooo_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
		iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
	}

	if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_mpa_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
		iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
	}

	qed_llh_remove_mac_filter(p_hwfn,
				  p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);

	return rc;
}
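
/* Allocate num_rx_bufs DMA-coherent buffers of buff_size bytes each and
 * post them to the rx chain of the given LL2 connection.
 */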
static int
qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
			    int num_rx_bufs, int buff_size, u8 ll2_handle)
{
	struct qed_iwarp_ll2_buff *buffer;
	int rc = 0;
	int i;

	for (i = 0; i < num_rx_bufs; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			rc = -ENOMEM;
			break;
		}

		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  buff_size,
						  &buffer->data_phys_addr,
						  GFP_KERNEL);
		if (!buffer->data) {
			kfree(buffer);
			rc = -ENOMEM;
			break;
		}

		buffer->buff_size = buff_size;
		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
		if (rc)
			/* buffers will be deallocated by qed_ll2 */
			break;
	}

	return rc;
}

#define QED_IWARP_MAX_BUF_SIZE(mtu)					  \
	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
	      ETH_CACHE_LINE_SIZE)
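
/* As a worked example (assuming the usual 64-byte ETH_CACHE_LINE_SIZE), a
 * 1500-byte MTU yields ALIGN(1500 + 14 + 8 + 2 + 64, 64) = ALIGN(1588, 64)
 * = 1600 bytes per rx buffer.
 */

/* Bring up the three LL2 connections used by iwarp: a SYN connection for
 * passive connection establishment, an OOO connection for TCP out-of-order
 * isles, and a secondary "unaligned MPA" connection used to re-align FPDUs
 * before they are handed back to the FW.
 */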
static int
qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_start_in_params *params,
		    struct qed_ptt *p_ptt)
{
	struct qed_iwarp_info *iwarp_info;
	struct qed_ll2_acquire_data data;
	struct qed_ll2_cbs cbs;
	u32 mpa_buff_size;
	u16 n_ooo_bufs;
	int rc = 0;
	int i;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
	iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
	iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;

	iwarp_info->max_mtu = params->max_mtu;

	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);

	rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
	if (rc)
		return rc;

	/* Start SYN connection */
	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
	cbs.cookie = p_hwfn;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_IWARP;
	data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
	data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
	data.input.tx_tc = PKT_LB_TC;
	data.input.tx_dest = QED_LL2_TX_DEST_LB;
	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
	data.cbs = &cbs;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
		qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
		goto err;
	}

	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
					 QED_IWARP_LL2_SYN_RX_SIZE,
					 QED_IWARP_MAX_SYN_PKT_SIZE,
					 iwarp_info->ll2_syn_handle);
	if (rc)
		goto err;

	/* Start OOO connection */
	data.input.conn_type = QED_LL2_TYPE_OOO;
	data.input.mtu = params->max_mtu;

	n_ooo_bufs = (QED_IWARP_MAX_OOO * QED_IWARP_RCV_WND_SIZE_DEF) /
		     iwarp_info->max_mtu;
	n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);

	data.input.rx_num_desc = n_ooo_bufs;
	data.input.rx_num_ooo_buffers = n_ooo_bufs;

	data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
	data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
	data.p_connection_handle = &iwarp_info->ll2_ooo_handle;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc)
		goto err;

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
	if (rc)
		goto err;

	/* Start Unaligned MPA connection */
	cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
	cbs.slowpath_cb = qed_iwarp_ll2_slowpath;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_IWARP;
	data.input.mtu = params->max_mtu;
	/* FW requires that once a packet arrives OOO, it must have at
	 * least 2 rx buffers available on the unaligned connection
	 * for handling the case that it is a partial fpdu.
	 */
	data.input.rx_num_desc = n_ooo_bufs * 2;
	data.input.tx_num_desc = data.input.rx_num_desc;
	data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
	data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
	data.input.secondary_queue = true;
	data.cbs = &cbs;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc)
		goto err;

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
	if (rc)
		goto err;

	mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
					 data.input.rx_num_desc,
					 mpa_buff_size,
					 iwarp_info->ll2_mpa_handle);
	if (rc)
		goto err;

	iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
					    sizeof(*iwarp_info->partial_fpdus),
					    GFP_KERNEL);
	if (!iwarp_info->partial_fpdus) {
		rc = -ENOMEM;
		goto err;
	}

	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;

	iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
	if (!iwarp_info->mpa_intermediate_buf) {
		rc = -ENOMEM;
		goto err;
	}

	/* The mpa_bufs array serves for pending RX packets received on the
	 * mpa ll2 that don't have place on the tx ring and require later
	 * processing. We can't fail on allocation of such a struct therefore
	 * we allocate enough to take care of all rx packets
	 */
	iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
				       sizeof(*iwarp_info->mpa_bufs),
				       GFP_KERNEL);
	if (!iwarp_info->mpa_bufs) {
		rc = -ENOMEM;
		goto err;
	}

	INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
	INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
	for (i = 0; i < data.input.rx_num_desc; i++)
		list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
			      &iwarp_info->mpa_buf_list);

	return rc;
err:
	qed_iwarp_ll2_stop(p_hwfn, p_ptt);

	return rc;
}
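
/* One-time iwarp init on rdma start: TCP/MPA defaults, receive window
 * scaling, ep/listener lists, async event registration and LL2 bring-up.
 *
 * As a worked example of the window scaling below: with the default 256KB
 * receive window, rcv_wnd_scale = ilog2(256K) - ilog2(0xffff) = 18 - 15 = 3,
 * so the unscaled window advertised in the TCP header is 256K >> 3 = 32KB.
 */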
int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		    struct qed_rdma_start_in_params *params)
{
	struct qed_iwarp_info *iwarp_info;
	u32 rcv_wnd_size;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	iwarp_info->tcp_flags = QED_IWARP_TS_EN;
	rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;

	/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
				    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
	iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;

	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;

	iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND |
			       MPA_RTR_TYPE_ZERO_WRITE |
			       MPA_RTR_TYPE_ZERO_READ;

	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
				  qed_iwarp_async_event);
	qed_ooo_setup(p_hwfn);

	return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
}

int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc;

	qed_iwarp_free_prealloc_ep(p_hwfn);
	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
	if (rc)
		return rc;

	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);

	return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
}
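
/* Async completion of the Close Connection ramrod: move the QP to ERROR,
 * unlink the ep and report a CLOSE event upward; status is 0 only for a
 * graceful LLP close.
 */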
static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
				  struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);

	params.event = QED_IWARP_EVENT_CLOSE;
	params.ep_context = ep;
	params.cm_info = &ep->cm_info;
	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
			0 : -ECONNRESET;

	ep->state = QED_IWARP_EP_CLOSED;
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&ep->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ep->event_cb(ep->cb_context, &params);
}
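
/* Map an IWARP_EXCEPTION_DETECTED_* FW code to the corresponding CM event
 * and, where a mapping exists, deliver it through the ep callback.
 */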
static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
					 struct qed_iwarp_ep *ep,
					 u8 fw_ret_code)
{
	struct qed_iwarp_cm_event_params params;
	bool event_cb = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
		   ep->cid, fw_ret_code);

	switch (fw_ret_code) {
	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
		params.status = 0;
		params.event = QED_IWARP_EVENT_DISCONNECT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
		params.status = -ECONNRESET;
		params.event = QED_IWARP_EVENT_DISCONNECT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
		params.event = QED_IWARP_EVENT_RQ_EMPTY;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
		params.event = QED_IWARP_EVENT_IRQ_FULL;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
		params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
		params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
		params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
		params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
		params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
		params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
		params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
		event_cb = true;
		break;
	default:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Unhandled exception received...fw_ret_code=%d\n",
			   fw_ret_code);
		break;
	}

	if (event_cb) {
		params.ep_context = ep;
		params.cm_info = &ep->cm_info;
		ep->event_cb(ep->cb_context, &params);
	}
}
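
/* Report a failed TCP connect as ACTIVE_COMPLETE with a status derived from
 * the FW return code. A passive ep is recycled to the free pool instead of
 * being reported, since the upper layer never heard about it.
 */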
static void
qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
				   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	memset(&params, 0, sizeof(params));
	params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
	params.ep_context = ep;
	params.cm_info = &ep->cm_info;
	ep->state = QED_IWARP_EP_CLOSED;

	switch (fw_return_code) {
	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "%s(0x%x) TCP connect got invalid packet\n",
			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "%s(0x%x) TCP Connection Reset\n",
			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
		DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -EBUSY;
		break;
	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	default:
		DP_ERR(p_hwfn,
		       "%s(0x%x) Unexpected return code tcp connect: %d\n",
		       QED_IWARP_CONNECT_MODE_STRING(ep),
		       ep->tcp_cid, fw_return_code);
		params.status = -ECONNRESET;
		break;
	}

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
		qed_iwarp_return_ep(p_hwfn, ep);
	} else {
		ep->event_cb(ep->cb_context, &params);
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}
}
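
/* Async completion of CONNECT_COMPLETE after the TCP 3-way handshake:
 * continue with the MPA handshake on success, otherwise report the failure.
 * On the passive side the SYN buffer can now be returned to the rx chain.
 */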
static void
qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
			   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		/* Done with the SYN packet, post back to ll2 rx */
		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);

		ep->syn = NULL;

		/* If connect failed - upper layer doesn't know about it */
		if (fw_return_code == RDMA_RETURN_OK)
			qed_iwarp_mpa_received(p_hwfn, ep);
		else
			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
							   fw_return_code);
	} else {
		if (fw_return_code == RDMA_RETURN_OK)
			qed_iwarp_mpa_offload(p_hwfn, ep);
		else
			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
							   fw_return_code);
	}
}

static bool
qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	if (!ep || (ep->sig != QED_EP_SIG)) {
		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
		return false;
	}
	return true;
}
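
/* Demultiplex iwarp async events arriving over the event ring. The 64-bit
 * async_handle carries an ep pointer for connection-level events, or a
 * 16-bit id in its low dword for CID/SRQ events.
 */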
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
				 u8 fw_event_code, u16 echo,
				 union event_ring_data *data,
				 u8 fw_return_code)
{
	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
	struct regpair *fw_handle = &data->rdma_data.async_handle;
	struct qed_iwarp_ep *ep = NULL;
	u16 srq_offset;
	u16 srq_id;
	u16 cid;

	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
						       fw_handle->lo);

	switch (fw_event_code) {
	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
		/* Async completion after TCP 3-way handshake */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
			   ep->tcp_cid, fw_return_code);
		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
		/* Async completion for Close Connection ramrod */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
		/* Async event for active side only */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
		break;
	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
		cid = (u16)le32_to_cpu(fw_handle->lo);
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
		qed_iwarp_cid_cleaned(p_hwfn, cid);
		break;
	case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
		/* FW assigns value that is no greater than u16 */
		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
		events.affiliated_event(events.context,
					QED_IWARP_EVENT_SRQ_EMPTY,
					&srq_id);
		break;
	case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
		/* FW assigns value that is no greater than u16 */
		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
		events.affiliated_event(events.context,
					QED_IWARP_EVENT_SRQ_LIMIT,
					&srq_id);
		break;
	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");

		p_hwfn->p_rdma_info->events.affiliated_event(
			p_hwfn->p_rdma_info->events.context,
			QED_IWARP_EVENT_CQ_OVERFLOW,
			(void *)fw_handle);
		break;
	default:
		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
		       fw_event_code);
		return -EINVAL;
	}
	return 0;
}
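
/* Register a listener for a local 4-tuple+vlan. Purely a driver-side list
 * insertion; incoming SYNs on the ll2 SYN connection are matched against
 * this list in qed_iwarp_ll2_comp_syn_pkt().
 */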
int
qed_iwarp_create_listen(void *rdma_cxt,
			struct qed_iwarp_listen_in *iparams,
			struct qed_iwarp_listen_out *oparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_listener *listener;

	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener)
		return -ENOMEM;

	listener->ip_version = iparams->ip_version;
	memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
	listener->port = iparams->port;
	listener->vlan = iparams->vlan;

	listener->event_cb = iparams->event_cb;
	listener->cb_context = iparams->cb_context;
	listener->max_backlog = iparams->max_backlog;
	oparams->handle = listener;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&listener->list_entry,
		      &p_hwfn->p_rdma_info->iwarp.listen_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
		   listener->event_cb,
		   listener,
		   listener->ip_addr[0],
		   listener->ip_addr[1],
		   listener->ip_addr[2],
		   listener->ip_addr[3], listener->port, listener->vlan);

	return 0;
}

int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
{
	struct qed_iwarp_listener *listener = handle;
	struct qed_hwfn *p_hwfn = rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&listener->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	kfree(listener);

	return 0;
}
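
/* Post the MPA_OFFLOAD_SEND_RTR ramrod on the given ep's QP, instructing
 * the FW to send the RTR message (a zero-length SEND/WRITE/READ, per the
 * rtr_type negotiated during the enhanced MPA exchange).
 */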
int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_iwarp_ep *ep;
	struct qed_rdma_qp *qp;
	int rc;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context received in send_rtr is NULL\n");
		return -EINVAL;
	}

	qp = ep->qp;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
		   qp->icid, ep->tcp_cid);

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);

	return rc;
}

void
qed_iwarp_query_qp(struct qed_rdma_qp *qp,
		   struct qed_rdma_query_qp_out_params *out_params)
{
	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
}