// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed_dev_api.h"
#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"
#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registered)
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registered)
#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
#define QED_LL2_INVALID_STATS_ID 0xff
struct qed_cb_ll2_info {
/* Lock protecting LL2 buffer lists in sleepless context */
struct list_head list;
const struct qed_ll2_cb_ops *cbs;
struct qed_ll2_buffer {
struct list_head list;
static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn,
u8 ll2_queue_type, u8 qid)
/* For legacy (RAM based) queues, the stats_id will be set as the
 * queue_id. Otherwise (context based queue), it will be set to
 * the "abs_pf_id" offset from the end of the RAM based queue IDs.
 * If the final value exceeds the total counters amount, return
 * INVALID value to indicate that the stats for this connection should
 * be disabled.
 */
if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id;
if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS)
return QED_LL2_INVALID_STATS_ID;
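/* Tx-done callback for the OS-level LL2 client: unmap the first fragment
 * and hand the skb to the registered tx_cb, or free it when no callback
 * is registered.
 */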
static void qed_ll2b_complete_tx_packet(void *cxt,
dma_addr_t first_frag_addr,
struct qed_hwfn *p_hwfn = cxt;
struct qed_dev *cdev = p_hwfn->cdev;
struct sk_buff *skb = cookie;
/* All we need to do is release the mapping */
dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
skb_headlen(skb), DMA_TO_DEVICE);
if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
dev_kfree_skb_any(skb);
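/* Allocate and DMA-map a single Rx buffer for the OS LL2 instance */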
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
u8 **data, dma_addr_t *phys_addr)
*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
*phys_addr = dma_map_single(&cdev->pdev->dev,
((*data) + NET_SKB_PAD),
cdev->ll2->rx_size, DMA_FROM_DEVICE);
if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
DP_INFO(cdev, "Failed to map LL2 buffer data\n");
static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
struct qed_ll2_buffer *buffer)
spin_lock_bh(&cdev->ll2->lock);
dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
cdev->ll2->rx_size, DMA_FROM_DEVICE);
list_del(&buffer->list);
if (!cdev->ll2->rx_cnt)
DP_INFO(cdev, "All LL2 entries were removed\n");
spin_unlock_bh(&cdev->ll2->lock);
static void qed_ll2_kill_buffers(struct qed_dev *cdev)
struct qed_ll2_buffer *buffer, *tmp_buffer;
list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
qed_ll2_dealloc_buffer(cdev, buffer);
static void qed_ll2b_complete_rx_packet(void *cxt,
struct qed_ll2_comp_rx_data *data)
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_buffer *buffer = data->cookie;
struct qed_dev *cdev = p_hwfn->cdev;
dma_addr_t new_phys_addr;
(NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
"Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
(u64)data->rx_buf_addr,
data->u.placement_offset,
data->length.packet_length,
data->vlan, data->opaque_data_0, data->opaque_data_1);
if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_OFFSET, 16, 1,
buffer->data, data->length.packet_length, false);
/* Determine if data is valid */
if (data->length.packet_length < ETH_HLEN)
/* Allocate a replacement for buffer; Reuse upon failure */
rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
/* If need to reuse or there's no replacement buffer, repost this */
dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
cdev->ll2->rx_size, DMA_FROM_DEVICE);
skb = slab_build_skb(buffer->data);
DP_INFO(cdev, "Failed to build SKB\n");
data->u.placement_offset += NET_SKB_PAD;
skb_reserve(skb, data->u.placement_offset);
skb_put(skb, data->length.packet_length);
skb_checksum_none_assert(skb);
/* Get partial Ethernet information instead of eth_type_trans(),
 * since we don't have an associated net_device.
 */
skb_reset_mac_header(skb);
skb->protocol = eth_hdr(skb)->h_proto;
/* Pass SKB onward */
if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
data->opaque_data_1);
DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
QED_MSG_LL2 | QED_MSG_STORAGE),
"Dropping the packet\n");
/* Update Buffer information and update FW producer */
buffer->data = new_data;
buffer->phys_addr = new_phys_addr;
rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle,
buffer->phys_addr, 0, buffer, 1);
qed_ll2_dealloc_buffer(cdev, buffer);
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
u8 connection_handle,
struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
if (!p_hwfn->p_ll2_info)
p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
mutex_lock(&p_ll2_conn->mutex);
if (p_ll2_conn->b_active)
mutex_unlock(&p_ll2_conn->mutex);
static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
u8 connection_handle)
return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
u8 connection_handle)
return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
u8 connection_handle)
return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
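/* Drain any Tx descriptors still on the active list, returning OOO
 * buffers to the free pool and invoking tx_release_cb for regular
 * connections. Used while tearing down a connection.
 */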
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
bool b_last_packet = false, b_last_frag = false;
struct qed_ll2_tx_packet *p_pkt = NULL;
struct qed_ll2_info *p_ll2_conn;
struct qed_ll2_tx_queue *p_tx;
unsigned long flags = 0;
p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
p_tx = &p_ll2_conn->tx_queue;
spin_lock_irqsave(&p_tx->lock, flags);
while (!list_empty(&p_tx->active_descq)) {
p_pkt = list_first_entry(&p_tx->active_descq,
struct qed_ll2_tx_packet, list_entry);
list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
spin_unlock_irqrestore(&p_tx->lock, flags);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_tx->cur_completing_packet = *p_pkt;
p_tx->cur_completing_bd_idx = 1;
p_tx->cur_completing_bd_idx == p_pkt->bd_used;
tx_frag = p_pkt->bds_set[0].tx_frag;
p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
spin_lock_irqsave(&p_tx->lock, flags);
spin_unlock_irqrestore(&p_tx->lock, flags);
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_info *p_ll2_conn = p_cookie;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
struct qed_ll2_tx_packet *p_pkt;
bool b_last_frag = false;
spin_lock_irqsave(&p_tx->lock, flags);
if (p_tx->b_completing_packet) {
new_idx = le16_to_cpu(*p_tx->p_fw_cons);
num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
if (list_empty(&p_tx->active_descq))
p_pkt = list_first_entry(&p_tx->active_descq,
struct qed_ll2_tx_packet, list_entry);
p_tx->b_completing_packet = true;
p_tx->cur_completing_packet = *p_pkt;
num_bds_in_packet = p_pkt->bd_used;
list_del(&p_pkt->list_entry);
if (unlikely(num_bds < num_bds_in_packet)) {
"Rest of BDs does not cover whole packet\n");
num_bds -= num_bds_in_packet;
p_tx->bds_idx += num_bds_in_packet;
while (num_bds_in_packet--)
qed_chain_consume(&p_tx->txq_chain);
p_tx->cur_completing_bd_idx = 1;
b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
spin_unlock_irqrestore(&p_tx->lock, flags);
p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
p_pkt->bds_set[0].tx_frag,
b_last_frag, !num_bds);
spin_lock_irqsave(&p_tx->lock, flags);
p_tx->b_completing_packet = false;
spin_unlock_irqrestore(&p_tx->lock, flags);
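/* Extract completion data from a GSI Rx CQE; the source MAC address is
 * returned through the opaque data fields.
 */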
static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
union core_rx_cqe_union *p_cqe,
struct qed_ll2_comp_rx_data *data)
data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
union core_rx_cqe_union *p_cqe,
struct qed_ll2_comp_rx_data *data)
data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
data->length.packet_length =
le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
union core_rx_cqe_union *p_cqe,
unsigned long *p_lock_flags)
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
struct core_rx_slow_path_cqe *sp_cqe;
sp_cqe = &p_cqe->rx_cqe_sp;
if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
"LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
sp_cqe->ramrod_cmd_id);
if (!p_ll2_conn->cbs.slowpath_cb) {
"LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
le32_to_cpu(sp_cqe->opaque_data.data[0]),
le32_to_cpu(sp_cqe->opaque_data.data[1]));
spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
union core_rx_cqe_union *p_cqe,
unsigned long *p_lock_flags, bool b_last_cqe)
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
struct qed_ll2_rx_packet *p_pkt = NULL;
struct qed_ll2_comp_rx_data data;
if (!list_empty(&p_rx->active_descq))
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
if (unlikely(!p_pkt)) {
"[%d] LL2 Rx completion but active_descq is empty\n",
p_ll2_conn->input.conn_type);
list_del(&p_pkt->list_entry);
if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd))
"Mismatch between active_descq and the LL2 Rx chain\n");
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
data.connection_handle = p_ll2_conn->my_id;
data.cookie = p_pkt->cookie;
data.rx_buf_addr = p_pkt->rx_buf_addr;
data.b_last_packet = b_last_cqe;
spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);
spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
union core_rx_cqe_union *cqe = NULL;
u16 cq_new_idx = 0, cq_old_idx = 0;
unsigned long flags = 0;
spin_lock_irqsave(&p_rx->lock, flags);
if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
spin_unlock_irqrestore(&p_rx->lock, flags);
cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
while (cq_new_idx != cq_old_idx) {
bool b_last_cqe = (cq_new_idx == cq_old_idx);
(union core_rx_cqe_union *)
qed_chain_consume(&p_rx->rcq_chain);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
"LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
switch (cqe->rx_cqe_sp.type) {
case CORE_RX_CQE_TYPE_SLOW_PATH:
rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
case CORE_RX_CQE_TYPE_REGULAR:
rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
spin_unlock_irqrestore(&p_rx->lock, flags);
static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ll2_rx_packet *p_pkt = NULL;
struct qed_ll2_rx_queue *p_rx;
unsigned long flags = 0;
p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
p_rx = &p_ll2_conn->rx_queue;
spin_lock_irqsave(&p_rx->lock, flags);
while (!list_empty(&p_rx->active_descq)) {
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
spin_unlock_irqrestore(&p_rx->lock, flags);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
void *cookie = p_pkt->cookie;
b_last = list_empty(&p_rx->active_descq);
p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
rx_buf_addr, b_last);
spin_lock_irqsave(&p_rx->lock, flags);
spin_unlock_irqrestore(&p_rx->lock, flags);
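/* Handle a slow-path CQE on the loopback (OOO) Rx queue: a queue-flush
 * ramrod carrying TCP_EVENT_DELETE_ISLES releases all isles of the
 * affected connection.
 */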
qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
struct core_rx_slow_path_cqe *p_cqe)
struct ooo_opaque *ooo_opq;
if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data;
if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES)
/* Need to make a flush */
cid = le32_to_cpu(ooo_opq->cid);
qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
u16 packet_length = 0, parse_flags = 0, vlan = 0;
struct qed_ll2_rx_packet *p_pkt = NULL;
u32 num_ooo_add_to_peninsula = 0, cid;
union core_rx_cqe_union *cqe = NULL;
u16 cq_new_idx = 0, cq_old_idx = 0;
struct qed_ooo_buffer *p_buffer;
struct ooo_opaque *ooo_opq;
u8 placement_offset = 0;
cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
if (cq_new_idx == cq_old_idx)
while (cq_new_idx != cq_old_idx) {
struct core_rx_fast_path_cqe *p_cqe_fp;
cqe = qed_chain_consume(&p_rx->rcq_chain);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
cqe_type = cqe->rx_cqe_sp.type;
if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) {
"Got a non-regular LB LL2 completion [type 0x%02x]\n",
p_cqe_fp = &cqe->rx_cqe_fp;
placement_offset = p_cqe_fp->placement_offset;
parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
packet_length = le16_to_cpu(p_cqe_fp->packet_length);
vlan = le16_to_cpu(p_cqe_fp->vlan);
ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq);
cid = le32_to_cpu(ooo_opq->cid);
/* Process delete isle first */
if (ooo_opq->drop_size)
qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
if (ooo_opq->ooo_opcode == TCP_EVENT_NOP)
/* Now process create/add/join isles */
if (unlikely(list_empty(&p_rx->active_descq))) {
"LL2 OOO RX chain has no submitted buffers\n"
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE ||
ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT ||
ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT ||
ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN ||
ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) {
if (unlikely(!p_pkt)) {
"LL2 OOO RX packet is not valid\n");
list_del(&p_pkt->list_entry);
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
p_buffer->packet_length = packet_length;
p_buffer->parse_flags = parse_flags;
p_buffer->vlan = vlan;
p_buffer->placement_offset = placement_offset;
qed_chain_consume(&p_rx->rxq_chain);
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
switch (ooo_opq->ooo_opcode) {
case TCP_EVENT_ADD_NEW_ISLE:
qed_ooo_add_new_isle(p_hwfn,
case TCP_EVENT_ADD_ISLE_RIGHT:
qed_ooo_add_new_buffer(p_hwfn,
case TCP_EVENT_ADD_ISLE_LEFT:
qed_ooo_add_new_buffer(p_hwfn,
qed_ooo_add_new_buffer(p_hwfn,
ooo_opq->ooo_isle + 1,
qed_ooo_join_isles(p_hwfn,
cid, ooo_opq->ooo_isle);
case TCP_EVENT_ADD_PEN:
num_ooo_add_to_peninsula++;
qed_ooo_put_ready_buffer(p_hwfn,
"Unexpected event (%d) TX OOO completion\n",
ooo_opq->ooo_opcode);
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
struct qed_ll2_tx_pkt_info tx_pkt;
struct qed_ooo_buffer *p_buffer;
dma_addr_t first_frag;
/* Submit Tx buffers here */
while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
p_hwfn->p_ooo_info))) {
first_frag = p_buffer->rx_buffer_phys_addr +
p_buffer->placement_offset;
SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
memset(&tx_pkt, 0, sizeof(tx_pkt));
tx_pkt.num_of_bds = 1;
tx_pkt.vlan = p_buffer->vlan;
tx_pkt.bd_flags = bd_flags;
tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
switch (p_ll2_conn->tx_dest) {
case CORE_TX_DEST_NW:
tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
case CORE_TX_DEST_LB:
tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
case CORE_TX_DEST_DROP:
tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
tx_pkt.first_frag = first_frag;
tx_pkt.first_frag_len = p_buffer->packet_length;
tx_pkt.cookie = p_buffer;
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
struct qed_ooo_buffer *p_buffer;
while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
p_hwfn->p_ooo_info))) {
rc = qed_ll2_post_rx_buffer(p_hwfn,
p_buffer->rx_buffer_phys_addr,
qed_ooo_put_free_buffer(p_hwfn,
p_hwfn->p_ooo_info, p_buffer);
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct qed_ll2_tx_packet *p_pkt = NULL;
struct qed_ooo_buffer *p_buffer;
bool b_dont_submit_rx = false;
u16 new_idx = 0, num_bds = 0;
if (unlikely(!p_ll2_conn))
if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn)))
new_idx = le16_to_cpu(*p_tx->p_fw_cons);
num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
if (unlikely(!num_bds))
if (list_empty(&p_tx->active_descq))
p_pkt = list_first_entry(&p_tx->active_descq,
struct qed_ll2_tx_packet, list_entry);
if (unlikely(!p_pkt))
if (unlikely(p_pkt->bd_used != 1)) {
"Unexpectedly many BDs(%d) in TX OOO completion\n",
list_del(&p_pkt->list_entry);
qed_chain_consume(&p_tx->txq_chain);
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
if (b_dont_submit_rx) {
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
p_buffer->rx_buffer_phys_addr, 0,
qed_ooo_put_free_buffer(p_hwfn,
p_hwfn->p_ooo_info, p_buffer);
b_dont_submit_rx = true;
qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
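/* Terminate and release the dedicated OOO LL2 queue */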
static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn)
u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2),
"Stopping LL2 OOO queue [%02x]\n", *handle);
qed_ll2_terminate_connection(p_hwfn, *handle);
qed_ll2_release_connection(p_hwfn, *handle);
*handle = QED_LL2_UNUSED_HANDLE;
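/* Send the CORE_RAMROD_RX_QUEUE_START ramrod that starts the Rx queue
 * in firmware.
 */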
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
struct core_rx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_ll2_conn->cid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
CORE_RAMROD_RX_QUEUE_START,
PROTOCOLID_CORE, &init_data);
p_ramrod = &p_ent->ramrod.core_rx_queue_start;
memset(p_ramrod, 0, sizeof(*p_ramrod));
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_rx->rx_sb_index;
p_ramrod->complete_event_flg = 1;
p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
qed_chain_get_pbl_phys(&p_rx->rcq_chain));
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
p_ramrod->inner_vlan_stripping_en =
p_ll2_conn->input.rx_vlan_removal_en;
if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
p_ramrod->report_outer_vlan = 1;
p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
conn_type != QED_LL2_TYPE_IWARP &&
(!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) {
p_ramrod->mf_si_bcast_accept_all = 1;
p_ramrod->mf_si_mcast_accept_all = 1;
p_ramrod->mf_si_bcast_accept_all = 0;
p_ramrod->mf_si_mcast_accept_all = 0;
p_ramrod->action_on_error.error_type = action_on_error;
p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
p_ramrod->zero_prod_flg = 1;
return qed_spq_post(p_hwfn, p_ent, NULL);
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct core_tx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u16 pq_id = 0, pbl_size;
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO))
p_ll2_conn->tx_stats_en = 0;
p_ll2_conn->tx_stats_en = 1;
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_ll2_conn->cid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
CORE_RAMROD_TX_QUEUE_START,
PROTOCOLID_CORE, &init_data);
p_ramrod = &p_ent->ramrod.core_tx_queue_start;
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_tx->tx_sb_index;
p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
qed_chain_get_pbl_phys(&p_tx->txq_chain));
pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
switch (p_ll2_conn->input.tx_tc) {
pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
switch (conn_type) {
case QED_LL2_TYPE_FCOE:
p_ramrod->conn_type = PROTOCOLID_FCOE;
case QED_LL2_TYPE_TCP_ULP:
p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
case QED_LL2_TYPE_ROCE:
p_ramrod->conn_type = PROTOCOLID_ROCE;
case QED_LL2_TYPE_IWARP:
p_ramrod->conn_type = PROTOCOLID_IWARP;
case QED_LL2_TYPE_OOO:
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
p_ramrod->conn_type = PROTOCOLID_IWARP;
p_ramrod->conn_type = PROTOCOLID_ETH;
DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr,
&p_tx->db_msg, DB_REC_WIDTH_32B,
static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
struct core_rx_stop_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_ll2_conn->cid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
CORE_RAMROD_RX_QUEUE_STOP,
PROTOCOLID_CORE, &init_data);
p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
p_ramrod->complete_event_flg = 1;
p_ramrod->queue_id = p_ll2_conn->queue_id;
return qed_spq_post(p_hwfn, p_ent, NULL);
static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_ll2_conn->cid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
CORE_RAMROD_TX_QUEUE_STOP,
PROTOCOLID_CORE, &init_data);
return qed_spq_post(p_hwfn, p_ent, NULL);
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info)
struct qed_chain_init_params params = {
.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
.cnt_type = QED_CHAIN_CNT_TYPE_U16,
.num_elems = p_ll2_info->input.rx_num_desc,
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_ll2_rx_packet *p_descq;
if (!p_ll2_info->input.rx_num_desc)
params.mode = QED_CHAIN_MODE_NEXT_PTR;
params.elem_size = sizeof(struct core_rx_bd);
rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, &params);
DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
p_ll2_info->rx_queue.descq_array = p_descq;
params.mode = QED_CHAIN_MODE_PBL;
params.elem_size = sizeof(struct core_rx_fast_path_cqe);
rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, &params);
DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);
static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info)
struct qed_chain_init_params params = {
.mode = QED_CHAIN_MODE_PBL,
.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
.cnt_type = QED_CHAIN_CNT_TYPE_U16,
.num_elems = p_ll2_info->input.tx_num_desc,
.elem_size = sizeof(struct core_tx_bd),
struct qed_ll2_tx_packet *p_descq;
if (!p_ll2_info->input.tx_num_desc)
rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain,
capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
/* All bds_set elements are flexibly added. */
desc_size = struct_size(p_descq, bds_set,
p_ll2_info->input.tx_max_bds_per_packet);
p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
p_ll2_info->tx_queue.descq_mem = p_descq;
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);
"Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
p_ll2_info->input.tx_num_desc);
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info, u16 mtu)
struct qed_ooo_buffer *p_buf = NULL;
if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
/* Correct number of requested OOO buffers if needed */
if (!p_ll2_info->input.rx_num_ooo_buffers) {
u16 num_desc = p_ll2_info->input.rx_num_desc;
p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
ETH_CACHE_LINE_SIZE - 1) &
~(ETH_CACHE_LINE_SIZE - 1);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
p_buf->rx_buffer_size,
&p_buf->rx_buffer_phys_addr,
p_buf->rx_buffer_virt_addr = p_virt;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
if (!cbs || (!cbs->rx_comp_cb ||
!cbs->rx_release_cb ||
!cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
p_ll2_info->cbs.cookie = cbs->cookie;
static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
struct qed_ll2_acquire_data *data,
u8 *start_idx, u8 *last_idx)
/* LL2 queues handles will be split as follows:
* First will be the legacy queues, and then the ctx based.
if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
*start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
*last_idx = *start_idx +
QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
/* QED_LL2_RX_TYPE_CTX */
*start_idx = QED_LL2_CTX_CONN_BASE_PF;
*last_idx = *start_idx +
QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;
static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
case QED_LL2_DROP_PACKET:
return LL2_DROP_PACKET;
case QED_LL2_DO_NOTHING:
return LL2_DO_NOTHING;
case QED_LL2_ASSERT:
return LL2_DO_NOTHING;
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
struct qed_hwfn *p_hwfn = cxt;
qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
struct qed_ll2_info *p_ll2_info = NULL;
u8 i, first_idx, last_idx, *p_tx_max;
if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
_qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx);
/* Find a free connection to be used */
for (i = first_idx; i < last_idx; i++) {
mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
if (p_hwfn->p_ll2_info[i].b_active) {
mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
p_hwfn->p_ll2_info[i].b_active = true;
p_ll2_info = &p_hwfn->p_ll2_info[i];
mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
switch (data->input.tx_dest) {
case QED_LL2_TX_DEST_NW:
p_ll2_info->tx_dest = CORE_TX_DEST_NW;
case QED_LL2_TX_DEST_LB:
p_ll2_info->tx_dest = CORE_TX_DEST_LB;
case QED_LL2_TX_DEST_DROP:
p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
if (data->input.conn_type == QED_LL2_TYPE_OOO ||
data->input.secondary_queue)
p_ll2_info->main_func_queue = false;
p_ll2_info->main_func_queue = true;
/* Correct maximum number of Tx BDs */
p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
*p_tx_max = min_t(u8, *p_tx_max,
CORE_LL2_TX_MAX_BDS_PER_PACKET);
rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
DP_NOTICE(p_hwfn, "Invalid callback functions\n");
goto q_allocate_fail;
rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
goto q_allocate_fail;
rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
goto q_allocate_fail;
rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
goto q_allocate_fail;
/* Register callbacks for the Rx/Tx queues */
if (data->input.conn_type == QED_LL2_TYPE_OOO) {
comp_rx_cb = qed_ll2_lb_rxq_completion;
comp_tx_cb = qed_ll2_lb_txq_completion;
comp_rx_cb = qed_ll2_rxq_completion;
comp_tx_cb = qed_ll2_txq_completion;
if (data->input.rx_num_desc) {
qed_int_register_cb(p_hwfn, comp_rx_cb,
&p_hwfn->p_ll2_info[i],
&p_ll2_info->rx_queue.rx_sb_index,
&p_ll2_info->rx_queue.p_fw_cons);
p_ll2_info->rx_queue.b_cb_registered = true;
if (data->input.tx_num_desc) {
qed_int_register_cb(p_hwfn,
&p_hwfn->p_ll2_info[i],
&p_ll2_info->tx_queue.tx_sb_index,
&p_ll2_info->tx_queue.p_fw_cons);
p_ll2_info->tx_queue.b_cb_registered = true;
*data->p_connection_handle = i;
qed_ll2_release_connection(p_hwfn, i);
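/* Minimal usage sketch of the connection life cycle exported by this
 * file (error handling elided; data.input and data.cbs are filled in
 * by the client beforehand):
 *
 *	struct qed_ll2_acquire_data data = { };
 *	u8 handle;
 *
 *	data.p_connection_handle = &handle;
 *	qed_ll2_acquire_connection(p_hwfn, &data);
 *	qed_ll2_establish_connection(p_hwfn, handle);
 *	qed_ll2_post_rx_buffer(p_hwfn, handle, phys_addr, buf_len, cookie, 1);
 *	...
 *	qed_ll2_terminate_connection(p_hwfn, handle);
 *	qed_ll2_release_connection(p_hwfn, handle);
 */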
static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
enum qed_ll2_error_handle error_input;
enum core_error_handle error_mode;
u8 action_on_error = 0;
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
error_input = p_ll2_conn->input.ai_err_packet_too_big;
error_mode = qed_ll2_get_error_choice(error_input);
SET_FIELD(action_on_error,
CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
error_input = p_ll2_conn->input.ai_err_no_buf;
error_mode = qed_ll2_get_error_choice(error_input);
SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
if (p_ll2_conn->rx_queue.ctx_based) {
rc = qed_db_recovery_add(p_hwfn->cdev,
p_ll2_conn->rx_queue.set_prod_addr,
&p_ll2_conn->rx_queue.db_data,
DB_REC_WIDTH_64B, DB_REC_KERNEL);
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle;
/* QED_LL2_RX_TYPE_CTX
* FW distinguishes between the legacy queues (ram based) and the
* ctx based queues by the queue_id.
* The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy
* and the queue ids above that are ctx base.
qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
MAX_NUM_LL2_RX_RAM_QUEUES;
/* See comment on the acquire connection for how the ll2
* queues handles are divided.
qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
struct core_conn_context *p_cxt;
struct qed_ll2_tx_packet *p_pkt;
struct qed_ll2_info *p_ll2_conn;
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
struct qed_cxt_info cxt_info;
struct qed_ptt *p_ptt;
p_ptt = qed_ptt_acquire(p_hwfn);
p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
p_rx = &p_ll2_conn->rx_queue;
p_tx = &p_ll2_conn->tx_queue;
qed_chain_reset(&p_rx->rxq_chain);
qed_chain_reset(&p_rx->rcq_chain);
INIT_LIST_HEAD(&p_rx->active_descq);
INIT_LIST_HEAD(&p_rx->free_descq);
INIT_LIST_HEAD(&p_rx->posting_descq);
spin_lock_init(&p_rx->lock);
capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
for (i = 0; i < capacity; i++)
list_add_tail(&p_rx->descq_array[i].list_entry,
*p_rx->p_fw_cons = 0;
qed_chain_reset(&p_tx->txq_chain);
INIT_LIST_HEAD(&p_tx->active_descq);
INIT_LIST_HEAD(&p_tx->free_descq);
INIT_LIST_HEAD(&p_tx->sending_descq);
spin_lock_init(&p_tx->lock);
capacity = qed_chain_get_capacity(&p_tx->txq_chain);
/* All bds_set elements are flexibly added. */
desc_size = struct_size(p_pkt, bds_set,
p_ll2_conn->input.tx_max_bds_per_packet);
for (i = 0; i < capacity; i++) {
p_pkt = p_tx->descq_mem + desc_size * i;
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
p_tx->cur_completing_bd_idx = 0;
p_tx->b_completing_packet = false;
p_tx->cur_send_packet = NULL;
p_tx->cur_send_frag_num = 0;
p_tx->cur_completing_frag_num = 0;
*p_tx->p_fw_cons = 0;
rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
cxt_info.iid = p_ll2_conn->cid;
rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
p_cxt = cxt_info.p_cxt;
memset(p_cxt, 0, sizeof(*p_cxt));
qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
p_ll2_conn->input.rx_conn_type);
stats_id = qed_ll2_handle_to_stats_id(p_hwfn,
p_ll2_conn->input.rx_conn_type,
p_ll2_conn->queue_id = qid;
p_ll2_conn->tx_stats_id = stats_id;
/* If there is no valid stats id for this connection, disable stats */
if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) {
p_ll2_conn->tx_stats_en = 0;
"Disabling stats for queue %d - not enough counters\n",
"Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d stats_id=%d\n",
p_ll2_conn->input.rx_conn_type, qid, stats_id);
if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
p_rx->set_prod_addr =
(u8 __iomem *)p_hwfn->regview +
GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
TSTORM_LL2_RX_PRODS, qid);
/* QED_LL2_RX_TYPE_CTX - using doorbell */
p_rx->ctx_based = 1;
p_rx->set_prod_addr = p_hwfn->doorbells +
p_hwfn->dpi_start_offset +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE);
/* prepare db data */
p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid);
SET_FIELD(p_rx->db_data.params,
CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET);
SET_FIELD(p_rx->db_data.params,
CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0);
p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
qed_db_addr(p_ll2_conn->cid,
/* prepare db data */
SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
DQ_XCM_CORE_TX_BD_PROD_CMD);
p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
if (!QED_IS_RDMA_PERSONALITY(p_hwfn) &&
!QED_IS_NVMETCP_PERSONALITY(p_hwfn))
qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
QED_LLH_FILTER_ETHERTYPE,
qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
QED_LLH_FILTER_ETHERTYPE,
qed_ptt_release(p_hwfn, p_ptt);
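/* Move posted buffers to the active list and update the firmware Rx
 * producer, either via doorbell (ctx based queues) or by writing the
 * TSTORM RAM producer directly (legacy queues).
 */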
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
struct qed_ll2_rx_queue *p_rx,
struct qed_ll2_rx_packet *p_curp)
struct qed_ll2_rx_packet *p_posting_packet = NULL;
struct core_ll2_rx_prod rx_prod = { 0, 0 };
bool b_notify_fw = false;
u16 bd_prod, cq_prod;
/* This handles the flushing of already posted buffers */
while (!list_empty(&p_rx->posting_descq)) {
p_posting_packet = list_first_entry(&p_rx->posting_descq,
struct qed_ll2_rx_packet,
list_move_tail(&p_posting_packet->list_entry,
&p_rx->active_descq);
/* This handles the supplied packet [if there is one] */
list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
if (p_rx->ctx_based) {
/* update producer by giving a doorbell */
p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod);
p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod);
/* Make sure chain element is updated before ringing the
DIRECT_REG_WR64(p_rx->set_prod_addr,
*((u64 *)&p_rx->db_data));
rx_prod.bd_prod = cpu_to_le16(bd_prod);
rx_prod.cqe_prod = cpu_to_le16(cq_prod);
/* Make sure chain element is updated before ringing the
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
int qed_ll2_post_rx_buffer(void *cxt,
u8 connection_handle,
u16 buf_len, void *cookie, u8 notify_fw)
struct qed_hwfn *p_hwfn = cxt;
struct core_rx_bd_with_buff_len *p_curb = NULL;
struct qed_ll2_rx_packet *p_curp = NULL;
struct qed_ll2_info *p_ll2_conn;
struct qed_ll2_rx_queue *p_rx;
unsigned long flags;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
p_rx = &p_ll2_conn->rx_queue;
if (!p_rx->set_prod_addr)
spin_lock_irqsave(&p_rx->lock, flags);
if (!list_empty(&p_rx->free_descq))
p_curp = list_first_entry(&p_rx->free_descq,
struct qed_ll2_rx_packet, list_entry);
if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
qed_chain_get_elem_left(&p_rx->rcq_chain)) {
p_data = qed_chain_produce(&p_rx->rxq_chain);
p_curb = (struct core_rx_bd_with_buff_len *)p_data;
qed_chain_produce(&p_rx->rcq_chain);
/* If we're lacking entries, let's try to flush buffers to FW */
if (!p_curp || !p_curb) {
/* We have an Rx packet we can fill */
DMA_REGPAIR_LE(p_curb->addr, addr);
p_curb->buff_length = cpu_to_le16(buf_len);
p_curp->rx_buf_addr = addr;
p_curp->cookie = cookie;
p_curp->rxq_bd = p_curb;
p_curp->buf_length = buf_len;
list_del(&p_curp->list_entry);
/* Check if we only want to enqueue this packet without informing FW */
list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
spin_unlock_irqrestore(&p_rx->lock, flags);
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
struct qed_ll2_tx_queue *p_tx,
struct qed_ll2_tx_packet *p_curp,
struct qed_ll2_tx_pkt_info *pkt,
list_del(&p_curp->list_entry);
p_curp->cookie = pkt->cookie;
p_curp->bd_used = pkt->num_of_bds;
p_curp->notify_fw = notify_fw;
p_tx->cur_send_packet = p_curp;
p_tx->cur_send_frag_num = 0;
p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
p_tx->cur_send_frag_num++;
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2,
struct qed_ll2_tx_packet *p_curp,
struct qed_ll2_tx_pkt_info *pkt)
struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
struct core_tx_bd *start_bd = NULL;
enum core_roce_flavor_type roce_flavor;
enum core_tx_dest tx_dest;
u16 bd_data = 0, frag_idx;
roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
switch (pkt->tx_dest) {
case QED_LL2_TX_DEST_NW:
tx_dest = CORE_TX_DEST_NW;
case QED_LL2_TX_DEST_LB:
tx_dest = CORE_TX_DEST_LB;
case QED_LL2_TX_DEST_DROP:
tx_dest = CORE_TX_DEST_DROP;
tx_dest = CORE_TX_DEST_LB;
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) &&
p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) {
start_bd->nw_vlan_or_lb_echo =
cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
pkt->remove_stag = true;
bitfield1 = le16_to_cpu(start_bd->bitfield1);
SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w);
SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest);
start_bd->bitfield1 = cpu_to_le16(bitfield1);
bd_data |= pkt->bd_flags;
SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
!!(pkt->remove_stag));
start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
p_ll2->input.conn_type,
pkt->first_frag_len,
le32_to_cpu(start_bd->addr.hi),
le32_to_cpu(start_bd->addr.lo));
if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
/* Need to provide the packet with additional BDs for frags */
for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
frag_idx < pkt->num_of_bds; frag_idx++) {
struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
(*p_bd)->bd_data.as_bitfield = 0;
(*p_bd)->bitfield1 = 0;
p_curp->bds_set[frag_idx].tx_frag = 0;
p_curp->bds_set[frag_idx].frag_len = 0;
/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct qed_ll2_tx_packet *p_pkt = NULL;
/* If there are missing BDs, don't do anything now */
if (p_ll2_conn->tx_queue.cur_send_frag_num !=
p_ll2_conn->tx_queue.cur_send_packet->bd_used)
/* Push the current packet to the list and clean after it */
list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
&p_ll2_conn->tx_queue.sending_descq);
p_ll2_conn->tx_queue.cur_send_packet = NULL;
p_ll2_conn->tx_queue.cur_send_frag_num = 0;
/* Notify FW of packet only if requested to */
bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
while (!list_empty(&p_tx->sending_descq)) {
p_pkt = list_first_entry(&p_tx->sending_descq,
struct qed_ll2_tx_packet, list_entry);
list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod);
/* Make sure the BDs data is updated before ringing the doorbell */
DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg));
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
p_ll2_conn->queue_id,
p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod);
int qed_ll2_prepare_tx_packet(void *cxt,
u8 connection_handle,
struct qed_ll2_tx_pkt_info *pkt,
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_tx_packet *p_curp = NULL;
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ll2_tx_queue *p_tx;
struct qed_chain *p_tx_chain;
unsigned long flags;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
if (unlikely(!p_ll2_conn))
p_tx = &p_ll2_conn->tx_queue;
p_tx_chain = &p_tx->txq_chain;
if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet))
spin_lock_irqsave(&p_tx->lock, flags);
if (unlikely(p_tx->cur_send_packet)) {
/* Get entry, but only if we have tx elements for it */
if (unlikely(!list_empty(&p_tx->free_descq)))
p_curp = list_first_entry(&p_tx->free_descq,
struct qed_ll2_tx_packet, list_entry);
if (unlikely(p_curp &&
qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds))
if (unlikely(!p_curp)) {
/* Prepare packet and BD, and perhaps send a doorbell to FW */
qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
spin_unlock_irqrestore(&p_tx->lock, flags);
int qed_ll2_set_fragment_of_tx_packet(void *cxt,
u8 connection_handle,
dma_addr_t addr, u16 nbytes)
struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
u16 cur_send_frag_num = 0;
struct core_tx_bd *p_bd;
unsigned long flags;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
if (unlikely(!p_ll2_conn))
if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet))
p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used))
/* Fill the BD information, and possibly notify FW */
p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
DMA_REGPAIR_LE(p_bd->addr, addr);
p_bd->nbytes = cpu_to_le16(nbytes);
p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
p_ll2_conn->tx_queue.cur_send_frag_num++;
spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
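/* Stop the Tx/Rx queues in firmware, flush any outstanding descriptors
 * and unregister the status-block callbacks for this connection.
 */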
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ptt *p_ptt;
p_ptt = qed_ptt_acquire(p_hwfn);
p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
/* Stop Tx & Rx of connection, if needed */
if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
p_ll2_conn->tx_queue.b_cb_registered = false;
smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
qed_ll2_txq_flush(p_hwfn, connection_handle);
qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
p_ll2_conn->rx_queue.b_cb_registered = false;
smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
if (p_ll2_conn->rx_queue.ctx_based)
qed_db_recovery_del(p_hwfn->cdev,
p_ll2_conn->rx_queue.set_prod_addr,
&p_ll2_conn->rx_queue.db_data);
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
qed_ll2_rxq_flush(p_hwfn, connection_handle);
qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
QED_LLH_FILTER_ETHERTYPE,
qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
QED_LLH_FILTER_ETHERTYPE,
qed_ptt_release(p_hwfn, p_ptt);
static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
struct qed_ooo_buffer *p_buffer;
if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
p_hwfn->p_ooo_info))) {
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_buffer->rx_buffer_size,
p_buffer->rx_buffer_virt_addr,
p_buffer->rx_buffer_phys_addr);
void qed_ll2_release_connection(void *cxt, u8 connection_handle)
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
kfree(p_ll2_conn->tx_queue.descq_mem);
qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
kfree(p_ll2_conn->rx_queue.descq_array);
qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
mutex_lock(&p_ll2_conn->mutex);
p_ll2_conn->b_active = false;
mutex_unlock(&p_ll2_conn->mutex);
int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
struct qed_ll2_info *p_ll2_connections;
/* Allocate LL2's set struct */
p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
sizeof(struct qed_ll2_info), GFP_KERNEL);
if (!p_ll2_connections) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
p_ll2_connections[i].my_id = i;
p_hwfn->p_ll2_info = p_ll2_connections;
void qed_ll2_setup(struct qed_hwfn *p_hwfn)
for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
mutex_init(&p_hwfn->p_ll2_info[i].mutex);
void qed_ll2_free(struct qed_hwfn *p_hwfn)
if (!p_hwfn->p_ll2_info)
kfree(p_hwfn->p_ll2_info);
p_hwfn->p_ll2_info = NULL;
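/* Statistics readers: each helper below copies one storm's per-queue
 * (or per-port) counters out of SDM RAM and accumulates them into
 * p_stats.
 */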
static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_ll2_stats *p_stats)
{
	struct core_ll2_port_stats port_stats;

	memset(&port_stats, 0, sizeof(port_stats));
	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			BAR0_MAP_REG_TSDM_RAM +
			TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
			sizeof(port_stats));

	p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
	p_stats->gsi_invalid_pkt_length +=
	    HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
	p_stats->gsi_unsupported_pkt_typ +=
	    HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
	p_stats->gsi_crcchksm_error +=
	    HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard +=
	    HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard);
}
static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}
static int __qed_ll2_get_stats(void *cxt, u8 connection_handle,
			       struct qed_ll2_stats *p_stats)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	if (p_ll2_conn->input.gsi_enable)
		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}
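
/* Exported wrapper: zeroes the output struct first, since the internal
 * helpers only accumulate (+=) into it.
 */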
int qed_ll2_get_stats(void *cxt,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	memset(p_stats, 0, sizeof(*p_stats));
	return __qed_ll2_get_stats(cxt, connection_handle, p_stats);
}
static void qed_ll2b_release_rx_packet(void *cxt,
				       u8 connection_handle,
				       void *cookie,
				       dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;

	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}
static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}
static struct qed_ll2_cbs ll2_cbs = {
	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
	.rx_release_cb = &qed_ll2b_release_rx_packet,
	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
	.tx_release_cb = &qed_ll2b_complete_tx_packet,
};
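
/* ll2_cbs is a single shared instance; qed_ll2_set_conn_data() repoints
 * its cookie at the hwfn that is acquiring a connection.
 */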
static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_acquire_data *data,
				  struct qed_ll2_params *params,
				  enum qed_ll2_conn_type conn_type,
				  u8 *handle, bool lb)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = conn_type;
	data->input.mtu = params->mtu;
	data->input.rx_num_desc = QED_LL2_RX_SIZE;
	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
	data->input.tx_num_desc = QED_LL2_TX_SIZE;
	data->p_connection_handle = handle;
	data->cbs = &ll2_cbs;
	ll2_cbs.cookie = p_hwfn;

	if (lb) {
		data->input.tx_tc = PKT_LB_TC;
		data->input.tx_dest = QED_LL2_TX_DEST_LB;
	} else {
		data->input.tx_tc = 0;
		data->input.tx_dest = QED_LL2_TX_DEST_NW;
	}
}
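
/* Start the out-of-order (OOO) LL2 connection - a loopback queue the
 * iSCSI/NVMeTCP offload path uses to buffer and reorder TCP segments
 * that arrive out of order.
 */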
static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn,
			     struct qed_ll2_params *params)
{
	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_acquire_data data;
	int rc;

	qed_ll2_set_conn_data(p_hwfn, &data, params,
			      QED_LL2_TYPE_OOO, handle, true);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(p_hwfn, *handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(p_hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
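
/* In CMT mode a storage PF may be affinitized to engine 1, while LL2
 * broadcast/multicast traffic is still delivered via engine 0; callers use
 * this predicate to decide whether LL2 must be driven on both engines.
 */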
static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
{
	return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
		QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
		QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
		(QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
}
static int __qed_ll2_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc;

	rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);

	return rc;
}
static int qed_ll2_stop(struct qed_dev *cdev)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	int rc = 0, rc2 = 0;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);

	qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
	eth_zero_addr(cdev->ll2_mac_address);

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		qed_ll2_stop_ooo(p_hwfn);

	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
	if (b_is_storage_eng1) {
		rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev));
		if (rc2)
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to stop LL2 on engine 0\n");
	}

	rc = __qed_ll2_stop(p_hwfn);
	if (rc)
		DP_NOTICE(p_hwfn, "Failed to stop LL2\n");

	qed_ll2_kill_buffers(cdev);

	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc | rc2;
}
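
/* Derive the LL2 connection type from the PF personality, acquire and
 * establish the connection, then hand every preallocated Rx buffer to the
 * firmware; failing to post even one buffer is treated as fatal.
 */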
static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_params *params)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	struct qed_dev *cdev = p_hwfn->cdev;
	enum qed_ll2_conn_type conn_type;
	struct qed_ll2_acquire_data data;
	int rc, rx_cnt;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		break;
	case QED_PCI_ISCSI:
	case QED_PCI_NVMETCP:
		conn_type = QED_LL2_TYPE_TCP_ULP;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n");
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 connection\n");
		goto release_conn;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	rx_cnt = cdev->ll2->rx_cnt;
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(p_hwfn,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (rx_cnt == cdev->ll2->rx_cnt) {
		DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n");
		rc = -EINVAL;
		goto terminate_conn;
	}
	cdev->ll2->rx_cnt = rx_cnt;

	return 0;

terminate_conn:
	qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
release_conn:
	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
	return rc;
}
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_buffer *buffer;
	int rx_num_desc, i, rc;

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_NOTICE(cdev, "Invalid Ethernet address\n");
		return -EINVAL;
	}

	WARN_ON(!cdev->ll2->cbs);

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);

	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2.
	 * In CMT mode, a storage PF which is affinitized to engine 1 starts
	 * LL2 on engine 0 as well, and thus twice the buffers are needed.
	 */
	rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1);
	DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n",
		rx_num_desc, cdev->ll2->rx_size);
	for (i = 0; i < rx_num_desc; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			rc = -ENOMEM;
			goto err0;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto err0;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	rc = __qed_ll2_start(p_hwfn, params);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start LL2\n");
		goto err0;
	}

	/* In CMT mode, always need to start LL2 on engine 0 for a storage PF,
	 * since broadcast/multicast packets are routed to engine 0.
	 */
	if (b_is_storage_eng1) {
		rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params);
		if (rc) {
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to start LL2 on engine 0\n");
			goto err1;
		}
	}

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(p_hwfn, params);
		if (rc) {
			DP_NOTICE(cdev, "Failed to start OOO LL2\n");
			goto err2;
		}
	}

	if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
		rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
		if (rc) {
			DP_NOTICE(cdev, "Failed to add an LLH filter\n");
			goto err3;
		}
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);

	return 0;

err3:
	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		qed_ll2_stop_ooo(p_hwfn);
err2:
	if (b_is_storage_eng1)
		__qed_ll2_stop(QED_LEADING_HWFN(cdev));
err1:
	__qed_ll2_stop(p_hwfn);
err0:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
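
/* Tx fast path: map the linear part of the skb, build the first BD with the
 * requested offload flags (IP csum, VLAN insertion), then map and post one
 * BD per page fragment. Note the skb may already be freed by the completion
 * routine once the first BD is posted for an unfragmented skb.
 */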
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
			      unsigned long xmit_flags)
{
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	u8 flags = 0, nr_frags;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	/* Cache number of fragments from SKB since SKB may be freed by
	 * the completion routine after calling qed_ll2_prepare_tx_packet()
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;

	if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;
	if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
		pkt.remove_stag = true;

	/* qed_ll2_prepare_tx_packet() may actually send the packet if
	 * there are no fragments in the skb and subsequently the completion
	 * routine may run and free the SKB, so no dereferencing the SKB
	 * beyond this point unless skb has any fragments.
	 */
	rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
				       &pkt, 1);
	if (unlikely(rc))
		goto err;

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
			DP_NOTICE(cdev,
				  "Unable to map frag - dropping packet\n");
			rc = -ENOMEM;
			goto err;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* if failed not much to do here, partial packet has been
		 * posted; we can't free memory, will need to wait for
		 * completion
		 */
		if (unlikely(rc))
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
err2:
	return rc;
}
static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	if (!cdev->ll2)
		return -EINVAL;

	rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n");
		return rc;
	}

	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
	if (b_is_storage_eng1) {
		rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
					 cdev->ll2->handle, stats);
		if (rc) {
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to get LL2 stats on engine 0\n");
			return rc;
		}
	}

	return 0;
}
const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};
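
/* Illustrative call sequence for a protocol driver (a sketch only: error
 * handling is omitted, and my_cb_ops/my_cookie/dev_mac are the caller's
 * own; the params fields are those consumed by qed_ll2_start() above):
 *
 *	struct qed_ll2_params params = { .mtu = 1500,
 *					 .drop_ttl0_packets = 0,
 *					 .rx_vlan_stripping = 1 };
 *
 *	ether_addr_copy(params.ll2_mac_address, dev_mac);
 *	qed_ll2_ops_pass.register_cb_ops(cdev, &my_cb_ops, my_cookie);
 *	if (!qed_ll2_ops_pass.start(cdev, &params))
 *		qed_ll2_ops_pass.start_xmit(cdev, skb, 0);
 */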
int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}