/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_chain.h>
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
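	/* Editorial sketch (not part of the original source): pp_qid_usage
	 * holds one bitmap per queue-zone, MAX_QUEUES_PER_QZONE bits each;
	 * a queue-cid for queue-zone 'qz' that was granted usage index 'idx'
	 * owns bit 'idx' of pp_qid_usage[qz]. Under the lock above, a lookup
	 * would amount to:
	 *
	 *	bool in_use = test_bit(idx, p_l2_info->pp_qid_usage[qz]);
	 */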
int qed_l2_alloc(struct qed_hwfn *p_hwfn)
	struct qed_l2_info *p_l2_info;
	unsigned long **pp_qids;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)

	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->cdev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
		qed_vf_get_num_rxqs(p_hwfn, &rx);
		qed_vf_get_num_txqs(p_hwfn, &tx);
		p_l2_info->queues = max_t(u8, rx, tx);

	pp_qids = kzalloc(sizeof(unsigned long *) * p_l2_info->queues,
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);

void qed_l2_setup(struct qed_hwfn *p_hwfn)
	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)

	mutex_init(&p_hwfn->p_l2_info->lock);

void qed_l2_free(struct qed_hwfn *p_hwfn)
	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)

	if (!p_hwfn->p_l2_info)

	if (!p_hwfn->p_l2_info->pp_qid_usage)

	/* Free until hit first uninitialized entry */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (!p_hwfn->p_l2_info->pp_qid_usage[i])
		kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);

	kfree(p_hwfn->p_l2_info->pp_qid_usage);
	kfree(p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = NULL;

static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
	struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
	u16 queue_id = p_cid->rel.queue_id;

	mutex_lock(&p_l2_info->lock);

	if (queue_id >= p_l2_info->queues) {
			  "Requested to increase usage for qzone %04x out of %08x\n",
			  queue_id, p_l2_info->queues);

	first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
					MAX_QUEUES_PER_QZONE);
	if (first >= MAX_QUEUES_PER_QZONE) {

	__set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
	p_cid->qid_usage_idx = first;

	mutex_unlock(&p_l2_info->lock);

static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
	mutex_lock(&p_hwfn->p_l2_info->lock);

	clear_bit(p_cid->qid_usage_idx,
		  p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

	mutex_unlock(&p_hwfn->p_l2_info->lock);

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
			       struct qed_queue_cid *p_cid)
	bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);

	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

	/* For PF's VFs we maintain the index inside queue-zone in IOV */
	if (p_cid->vfid == QED_QUEUE_CID_SELF)
		qed_eth_queue_qid_usage_del(p_hwfn, p_cid);
/* The internal variant is only meant to be called directly by PFs initializing CIDs
static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		      struct qed_queue_start_common_params *p_params,
		      struct qed_queue_cid_vf_params *p_vf_params)
	struct qed_queue_cid *p_cid;

	p_cid = vmalloc(sizeof(*p_cid));
	memset(p_cid, 0, sizeof(*p_cid));

	p_cid->opaque_fid = opaque_fid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->b_is_rx = b_is_rx;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill-in bits related to VFs' queues if information was provided */
	p_cid->vfid = p_vf_params->vfid;
	p_cid->vf_qid = p_vf_params->vf_qid;
	p_cid->vf_legacy = p_vf_params->vf_legacy;
	p_cid->vfid = QED_QUEUE_CID_SELF;

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->cdev)) {
		p_cid->abs = p_cid->rel;

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	if (p_cid->vfid == QED_QUEUE_CID_SELF) {
		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
				  &p_cid->abs.stats_id);
		p_cid->abs.stats_id = p_cid->rel.stats_id;

	/* VF-images have provided the qid_usage_idx on their own.
	 * Otherwise, we need to allocate a unique one.
	if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
	p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;

		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->qid_usage_idx,
		   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);
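/* Editorial note: 'rel' indices are PF-relative while 'abs' indices are
 * engine-wide. For example (values assumed for illustration), a PF whose
 * first L2-queue resource is engine queue 64 would have qed_fw_l2_queue()
 * translate rel.queue_id 0 into abs.queue_id 64. VFs skip the translation
 * above and copy rel into abs verbatim, since the PF performs the real
 * translation on their behalf.
 */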
struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		     struct qed_queue_start_common_params *p_params,
		     struct qed_queue_cid_vf_params *p_vf_params)
	struct qed_queue_cid *p_cid;
	u8 vfid = QED_CXT_PF_CID;
	bool b_legacy_vf = false;
	/* In case of legacy VFs, the CID can be derived from the additional
	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
	 * use the vf_qid for this purpose as well.
	vfid = p_vf_params->vfid;
	if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
		cid = p_vf_params->vf_qid;
	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VFs don't need a CID as the queue configuration will be done
	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
		if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");

	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
				      p_params, b_is_rx, p_vf_params);
	if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, cid, vfid);

static struct qed_queue_cid *
qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
			struct qed_queue_start_common_params *p_params)
	return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));
	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;

	p_ramrod->tx_switching_en = p_params->tx_switching;
	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
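/* Usage sketch (editorial illustration; values are made up): a caller fills
 * only the qed_sp_vport_start_params fields consumed above, e.g.:
 *
 *	struct qed_sp_vport_start_params start = { 0 };
 *
 *	start.vport_id = 0;
 *	start.mtu = 1500;
 *	start.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	start.concrete_fid = p_hwfn->hw_info.concrete_fid;
 *	rc = qed_sp_eth_vport_start(p_hwfn, &start);
 */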
static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->remove_inner_vlan,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);

	return qed_sp_eth_vport_start(p_hwfn, p_params);

qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_rss)
	struct eth_vport_rss_config *p_config;
	u16 capabilities = 0;

	p_ramrod->common.update_rss_flg = 0;
	p_config = &p_ramrod->rss_config;

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			     ETH_VPORT_RSS_MODE_REGULAR :
			     ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;

	p_config->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
			   1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];

		p_config->indirection_table[i] =
		    cpu_to_le16(p_queue->abs.queue_id);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   le16_to_cpu(p_config->indirection_table[i]),
			   le16_to_cpu(p_config->indirection_table[i + 1]),
			   le16_to_cpu(p_config->indirection_table[i + 2]),
			   le16_to_cpu(p_config->indirection_table[i + 3]),
			   le16_to_cpu(p_config->indirection_table[i + 4]),
			   le16_to_cpu(p_config->indirection_table[i + 5]),
			   le16_to_cpu(p_config->indirection_table[i + 6]),
			   le16_to_cpu(p_config->indirection_table[i + 7]),
			   le16_to_cpu(p_config->indirection_table[i + 8]),
			   le16_to_cpu(p_config->indirection_table[i + 9]),
			   le16_to_cpu(p_config->indirection_table[i + 10]),
			   le16_to_cpu(p_config->indirection_table[i + 11]),
			   le16_to_cpu(p_config->indirection_table[i + 12]),
			   le16_to_cpu(p_config->indirection_table[i + 13]),
			   le16_to_cpu(p_config->indirection_table[i + 14]),
			   le16_to_cpu(p_config->indirection_table[i + 15]));

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);
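/* Worked example (editorial): tbl_size carries log2 of the indirection
 * table, so rss_table_size_log == 7 yields min(QED_RSS_IND_TABLE_SIZE,
 * 1 << 7) == 128 entries, each holding the absolute queue-id that receives
 * flows hashing to that bucket. The key is 10 32-bit words, i.e. the usual
 * 40-byte RSS key length.
 */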
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;
	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
	struct eth_vport_tpa_param *p_tpa;
	p_ramrod->common.update_tpa_param_flg = 0;
	p_ramrod->common.update_tpa_en_flg = 0;
	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;

qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
		/* Return the SPQ entry which was taken in qed_sp_init_request() */
		qed_spq_return_entry(p_hwfn, p_ent);
	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
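/* Usage sketch (editorial; field values assumed): activating a vport's
 * datapath goes through this same ramrod, with the update_* flags marking
 * which fields the firmware should look at:
 *
 *	struct qed_sp_vport_update_params params = { 0 };
 *
 *	params.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	params.vport_id = 0;
 *	params.update_vport_active_rx_flg = 1;
 *	params.vport_active_rx_flg = 1;
 *	params.update_vport_active_tx_flg = 1;
 *	params.vport_active_tx_flg = 1;
 *	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
 */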
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);

qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);

static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
	struct qed_sp_vport_update_params vport_update_params;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",

int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			     struct qed_queue_cid *p_cid,
			     dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->vfid != QED_QUEUE_CID_SELF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      QED_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;

	return qed_spq_post(p_hwfn, p_ent, NULL);

qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
	u32 init_prod_val = 0;

	*pp_prod = p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
					cqe_pbl_addr, cqe_pbl_size);

qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
		       struct qed_queue_start_common_params *p_params,
		       dma_addr_t bd_chain_phys_addr,
		       dma_addr_t cqe_pbl_addr,
		       struct qed_rxq_start_ret_params *p_ret_params)
	struct qed_queue_cid *p_cid;

	/* Allocate a CID for the queue */
	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);

	if (IS_PF(p_hwfn->cdev)) {
		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
					       cqe_pbl_addr, cqe_pbl_size,
					       &p_ret_params->p_prod);
		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
					 cqe_pbl_size, &p_ret_params->p_prod);
	/* Provide the caller with a reference to use as a handle */
		qed_eth_queue_cid_release(p_hwfn, p_cid);

	p_ret_params->p_handle = (void *)p_cid;
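/* Caller-side sketch (editorial; argument names assumed): p_handle is the
 * queue-cid and is what the stop path expects back, while p_prod is the
 * producer address used by the fastpath:
 *
 *	struct qed_rxq_start_ret_params ret;
 *
 *	rc = qed_eth_rx_queue_start(p_hwfn, opaque_fid, &qparams,
 *				    bd_max_bytes, bd_chain_phys, cqe_pbl_phys,
 *				    cqe_pbl_size, &ret);
 *	...
 *	rc = qed_eth_rx_queue_stop(p_hwfn, ret.p_handle, false, false);
 */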
int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				void **pp_rxq_handles,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_queue_cid *p_cid;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);

qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 bool b_eq_completion_only, bool b_cqe_completion)
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
				      !b_eq_completion_only) ||
	p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
				       b_eq_completion_only;

	return qed_spq_post(p_hwfn, p_ent, NULL);

int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			  bool eq_completion_only, bool cqe_completion)
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	qed_eth_queue_cid_release(p_hwfn, p_cid);

qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);

qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));

	/* Provide the caller with the necessary return values */
	*pp_doorbell = p_hwfn->doorbells +
		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
		       struct qed_queue_start_common_params *p_params,
		       dma_addr_t pbl_addr,
		       struct qed_txq_start_ret_params *p_ret_params)
	struct qed_queue_cid *p_cid;

	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
					       &p_ret_params->p_doorbell);
		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
					 &p_ret_params->p_doorbell);

		qed_eth_queue_cid_release(p_hwfn, p_cid);

	p_ret_params->p_handle = (void *)p_cid;

qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);

	return qed_spq_post(p_hwfn, p_ent, NULL);

int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

	qed_eth_queue_cid_release(p_hwfn, p_cid);

static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		action = MAX_ETH_FILTER_ACTION;

static void qed_set_fw_mac_addr(__le16 *fw_msb,
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
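/* Worked example (editorial): for mac = 00:11:22:33:44:55 the assignments
 * above produce
 *
 *	fw_msb bytes = { 0x11, 0x00 }
 *	fw_mid bytes = { 0x33, 0x22 }
 *	fw_lsb bytes = { 0x55, 0x44 }
 *
 * i.e. each 16-bit firmware word carries two MAC bytes swapped into the
 * little-endian storage the firmware expects.
 */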
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data)
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     comp_mode, p_comp_data);
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);

	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		   ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		    "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
1426 "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);
/*******************************************************************************
 * Calculates CRC-32C on a buffer
 * Note: crc32_length MUST be aligned to 8
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length, u32 crc32_seed, u8 complement)
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;

	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /* crc32_result[0] = 1; */

	return crc32_result;

static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
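/* Editorial note: the multicast bin is derived from the CRC-32C of the
 * 6-byte MAC, seeded with ETH_MULTICAST_BIN_FROM_MAC_SEED; low-order bits
 * of the CRC select one of the approximate-multicast bins. The add path
 * further below consumes it as:
 *
 *	bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
 *	__set_bit(bit, bins);
 */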
u8 qed_mcast_bin_from_mac(u8 *mac)
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;

	if (p_filter_cmd->opcode == QED_FILTER_ADD)
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* filter ADD op is explicit set op and it removes
	 * any existing filters for the vport
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
	/* Convert to the correct endianness */
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		struct vport_update_ramrod_mcast *p_ramrod_bins;
		u32 *p_bins = (u32 *)bins;

		p_ramrod_bins = &p_ramrod->approx_mcast;
		p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);

static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);

		opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     comp_mode, p_comp_data);

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);

		opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     comp_mode, p_comp_data);

/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_len, u16 statistics_bin)
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
			  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;

static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->common.tx_ucast_bytes +=
		HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->common.tx_mcast_bytes +=
		HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->common.tx_bcast_bytes +=
		HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->common.tx_ucast_pkts +=
		HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->common.tx_mcast_pkts +=
		HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->common.tx_bcast_pkts +=
		HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->common.tx_err_drop_pkts +=
		HILO_64_REGPAIR(pstats.error_drop_pkts);

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
			      TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->common.mftag_filter_discards +=
		HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->common.mac_filter_discards +=
		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_len, u16 statistics_bin)
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
			  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->common.rx_ucast_bytes +=
		HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->common.rx_mcast_bytes +=
		HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->common.rx_bcast_bytes +=
		HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);

static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_len, u16 statistics_bin)
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
			  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;

static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->common.no_buff_discards +=
		HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->common.packet_too_big_discard +=
		HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->common.tpa_coalesced_pkts +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->common.tpa_coalesced_events +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->common.tpa_aborts_num +=
		HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->common.tpa_coalesced_bytes +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);

static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
	struct qed_eth_stats_common *p_common = &p_stats->common;
	struct port_stats port_stats;

	memset(&port_stats, 0, sizeof(port_stats));
	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_common->rx_64_byte_packets += port_stats.eth.r64;
	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_common->rx_crc_errors += port_stats.eth.rfcs;
	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_common->rx_pause_frames += port_stats.eth.rxpf;
	p_common->rx_pfc_frames += port_stats.eth.rxpp;
	p_common->rx_align_errors += port_stats.eth.raln;
	p_common->rx_carrier_errors += port_stats.eth.rfcr;
	p_common->rx_oversize_packets += port_stats.eth.rovr;
	p_common->rx_jabbers += port_stats.eth.rjbr;
	p_common->rx_undersize_packets += port_stats.eth.rund;
	p_common->rx_fragments += port_stats.eth.rfrg;
	p_common->tx_64_byte_packets += port_stats.eth.t64;
	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_common->tx_pause_frames += port_stats.eth.txpf;
	p_common->tx_pfc_frames += port_stats.eth.txpp;
	p_common->rx_mac_bytes += port_stats.eth.rbyte;
	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_common->tx_mac_bytes += port_stats.eth.tbyte;
	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
		p_common->brb_discards += port_stats.brb.brb_discard[j];

	if (QED_IS_BB(p_hwfn->cdev)) {
		struct qed_eth_stats_bb *p_bb = &p_stats->bb;

		p_bb->rx_1519_to_1522_byte_packets +=
			port_stats.eth.u0.bb0.r1522;
		p_bb->rx_1519_to_2047_byte_packets +=
			port_stats.eth.u0.bb0.r2047;
		p_bb->rx_2048_to_4095_byte_packets +=
			port_stats.eth.u0.bb0.r4095;
		p_bb->rx_4096_to_9216_byte_packets +=
			port_stats.eth.u0.bb0.r9216;
		p_bb->rx_9217_to_16383_byte_packets +=
			port_stats.eth.u0.bb0.r16383;
		p_bb->tx_1519_to_2047_byte_packets +=
			port_stats.eth.u1.bb1.t2047;
		p_bb->tx_2048_to_4095_byte_packets +=
			port_stats.eth.u1.bb1.t4095;
		p_bb->tx_4096_to_9216_byte_packets +=
			port_stats.eth.u1.bb1.t9216;
		p_bb->tx_9217_to_16383_byte_packets +=
			port_stats.eth.u1.bb1.t16383;
		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
		struct qed_eth_stats_ah *p_ah = &p_stats->ah;

		p_ah->rx_1519_to_max_byte_packets +=
			port_stats.eth.u0.ah0.r1519_to_max;
		p_ah->tx_1519_to_max_byte_packets +=
			port_stats.eth.u1.ah1.t1519_to_max;
static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin, bool b_get_port_stats)
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (b_get_port_stats && p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);

static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
		/* The main vport is at relative index 0 */
		if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
			DP_ERR(p_hwfn, "No vport available!\n");

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
				      IS_PF(cdev));

		if (IS_PF(cdev) && p_ptt)
			qed_ptt_release(p_hwfn, p_ptt);

void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
	memset(stats, 0, sizeof(*stats));

	_qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
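/* Editorial sketch: both structures are treated as flat arrays of u64
 * counters, so the loop above turns absolute device counters into deltas
 * relative to the last qed_reset_vport_stats() baseline, e.g. (numbers
 * assumed): a reading of 1000 with a baseline of 900 is reported as 100.
 */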
/* Zeroes the vport-specific portion of stats; port stats remain untouched */
1943 void qed_reset_vport_stats(struct qed_dev *cdev)
1947 for_each_hwfn(cdev, i) {
1948 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1949 struct eth_mstorm_per_queue_stat mstats;
1950 struct eth_ustorm_per_queue_stat ustats;
1951 struct eth_pstorm_per_queue_stat pstats;
1952 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1954 u32 addr = 0, len = 0;
1956 if (IS_PF(cdev) && !p_ptt) {
1957 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1961 memset(&mstats, 0, sizeof(mstats));
1962 __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1963 qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1965 memset(&ustats, 0, sizeof(ustats));
1966 __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1967 qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1969 memset(&pstats, 0, sizeof(pstats));
1970 __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1971 qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1974 qed_ptt_release(p_hwfn, p_ptt);
1977 /* PORT statistics are not necessarily reset, so we need to
1978 * read and create a baseline for future statistics.
1980 if (!cdev->reset_stats)
1981 DP_INFO(cdev, "Reset stats not allocated\n");
1983 _qed_get_vport_stats(cdev, cdev->reset_stats);
1987 qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1988 struct qed_arfs_config_params *p_cfg_params)
1990 if (p_cfg_params->arfs_enable) {
1991 qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
1992 p_cfg_params->tcp, p_cfg_params->udp,
1993 p_cfg_params->ipv4, p_cfg_params->ipv6);
1994 DP_VERBOSE(p_hwfn, QED_MSG_SP,
1995 "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
1996 p_cfg_params->tcp ? "Enable" : "Disable",
1997 p_cfg_params->udp ? "Enable" : "Disable",
1998 p_cfg_params->ipv4 ? "Enable" : "Disable",
1999 p_cfg_params->ipv6 ? "Enable" : "Disable");
2001 qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2004 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
2005 p_cfg_params->arfs_enable ? "Enable" : "Disable");
int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
				struct qed_spq_comp_cb *p_cb,
				dma_addr_t p_addr, u16 length, u16 qid,
				u8 vport_id, bool b_is_add)
{
	struct rx_update_gft_filter_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);

	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (p_cb) {
		init_data.comp_mode = QED_SPQ_MODE_CB;
		init_data.p_comp_data = p_cb;
	} else {
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	}

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_GFT_UPDATE_FILTER,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_update_gft;
	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
	p_ramrod->pkt_hdr_length = cpu_to_le16(length);
	p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
	p_ramrod->vport_id = abs_vport_id;
	p_ramrod->filter_type = RFS_FILTER_TYPE;
	p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
		   abs_vport_id, abs_rx_q_id,
		   b_is_add ? "Adding" : "Removing", (u64)p_addr, length);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

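/* Completion-mode note: with a non-NULL p_cb the ramrod completes
 * asynchronously (QED_SPQ_MODE_CB), which is how the a-RFS path below uses
 * it; without one it blocks on the event ring (QED_SPQ_MODE_EBLOCK). An
 * asynchronous invocation, as issued by qed_ntuple_arfs_filter_config(),
 * looks roughly like:
 *
 *	struct qed_spq_comp_cb cb;
 *
 *	cb.function = qed_arfs_sp_response_handler;
 *	cb.cookie = cookie;
 *	qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, &cb,
 *					mapping, length, rx_queue_id,
 *					vport_id, true);
 */
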
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (IS_PF(cdev)) {
		int max_vf_vlan_filters = 0;
		int max_vf_mac_filters = 0;

		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			u16 num_queues = 0;

			/* Since the feature controls only queue-zones,
			 * make sure we have the contexts [rx, tx, xdp] to
			 * match.
			 */
			for_each_hwfn(cdev, i) {
				struct qed_hwfn *hwfn = &cdev->hwfns[i];
				u16 l2_queues = (u16)FEAT_NUM(hwfn,
							      QED_PF_L2_QUE);
				u16 cids;

				cids = hwfn->pf_params.eth_pf_params.num_cons;
				num_queues += min_t(u16, l2_queues, cids / 3);
			}

			/* queues might theoretically be >256, but interrupts'
			 * upper-limit guarantees that it would fit in a u8.
			 */
			if (cdev->int_params.fp_msix_cnt) {
				u8 irqs = cdev->int_params.fp_msix_cnt;

				info->num_queues = (u8)min_t(u16,
							     num_queues, irqs);
			}
		} else {
			info->num_queues = cdev->num_hwfns;
		}

		if (IS_QED_SRIOV(cdev)) {
			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
					      QED_ETH_VF_NUM_VLAN_FILTERS;
			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
					     QED_ETH_VF_NUM_MAC_FILTERS;
		}
		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						  QED_VLAN) -
					 max_vf_vlan_filters;
		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						 QED_MAC) -
					max_vf_mac_filters;

		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);

		info->xdp_supported = true;
	} else {
		u16 total_cids = 0;

		/* Determine queues & XDP support */
		for_each_hwfn(cdev, i) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
			u8 queues, cids;

			qed_vf_get_num_cids(p_hwfn, &cids);
			qed_vf_get_num_rxqs(p_hwfn, &queues);
			info->num_queues += queues;
			total_cids += cids;
		}

		/* Enable VF XDP in case PF guarantees sufficient connections */
		if (total_cids >= info->num_queues * 3)
			info->xdp_supported = true;

		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
					    (u8 *)&info->num_vlan_filters);
		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
					   (u8 *)&info->num_mac_filters);
		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);

		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
	}

	qed_fill_dev_info(cdev, &info->common);

	if (IS_VF(cdev))
		eth_zero_addr(info->common.hw_mac);

	return 0;
}

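/* Worked example for the PF queue math above: every L2 queue needs three
 * connections [rx, tx, xdp], so a hwfn configured with, say, 192 eth
 * connections can back at most 192 / 3 = 64 queue-zones; the per-hwfn
 * contribution is the smaller of that and FEAT_NUM(), and the sum is then
 * clamped to the fastpath MSI-X count. The VF branch applies the same 3x
 * rule in reverse when deciding whether XDP can be supported.
 */
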
static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.eth = ops;
	cdev->ops_cookie = cookie;

	/* For VF, we start bulletin reading */
	if (IS_VF(cdev))
		qed_vf_start_iov_wq(cdev);
}

static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
	if (IS_PF(cdev))
		return true;

	return qed_vf_check_mac(&cdev->hwfns[0], mac);
}

static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_sp_vport_start_params start = { 0 };
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
						      QED_TPA_MODE_NONE;
		start.remove_inner_vlan = params->remove_inner_vlan;
		start.only_untagged = true;	/* untagged only */
		start.drop_ttl0 = params->drop_ttl0;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.handle_ptp_pkts = params->handle_ptp_pkts;
		start.vport_id = params->vport_id;
		start.max_buffers_per_cqe = 16;
		start.mtu = params->mtu;

		rc = qed_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		rc = qed_hw_start_fastpath(p_hwfn);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT fastpath\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   start.vport_id, start.mtu);
	}

	if (params->clear_stats)
		qed_reset_vport_stats(cdev);

	return 0;
}

static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid, vport_id);

		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}
	return 0;
}

static int qed_update_vport_rss(struct qed_dev *cdev,
				struct qed_update_vport_rss_params *input,
				struct qed_rss_params *rss)
{
	int i, fn;

	/* Update configuration with what's correct regardless of CMT */
	rss->update_rss_config = 1;
	rss->rss_enable = 1;
	rss->update_rss_capabilities = 1;
	rss->update_rss_ind_table = 1;
	rss->update_rss_key = 1;
	rss->rss_caps = input->rss_caps;
	memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));

	/* In regular scenario, we'd simply need to take input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2-queues on CMT doesn't
	 * require RSS.
	 */
	if (cdev->num_hwfns == 1) {
		memcpy(rss->rss_ind_table,
		       input->rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(void *));
		rss->rss_table_size_log = 7;
		return 0;
	}

	/* Start by copying the non-specific information to the 2nd copy */
	memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));

	/* CMT should be round-robin */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		struct qed_queue_cid *cid = input->rss_ind_table[i];
		struct qed_rss_params *t_rss;

		if (cid->p_owner == QED_LEADING_HWFN(cdev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
	}

	/* Make sure RSS is actually required */
	for_each_hwfn(cdev, fn) {
		for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}
		if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
			DP_VERBOSE(cdev, NETIF_MSG_IFUP,
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			return -EINVAL;
		}
		rss[fn].rss_table_size_log = 6;
	}

	return 0;
}

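/* Worked example of the CMT split above: with two hwfns, even entries of
 * the input indirection table belong to engine 0 and odd entries to engine 1
 * (queues are spread round-robin across engines at start time). Entry i thus
 * lands at index i / num_hwfns of its engine's copy, so each engine ends up
 * with a dense, half-sized table; hence rss_table_size_log drops from 7
 * (128 entries) to 6 (64 entries).
 */
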
static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params *rss;
	int rc = 0, i;

	if (!cdev)
		return -ENODEV;

	rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
	if (!rss)
		return -ENOMEM;

	memset(&sp_params, 0, sizeof(sp_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
		params->update_accept_any_vlan_flg;

	/* Prepare the RSS configuration */
	if (params->update_rss_flg)
		if (qed_update_vport_rss(cdev, &params->rss_params, rss))
			params->update_rss_flg = 0;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (params->update_rss_flg)
			sp_params.rss_params = &rss[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK,
					 NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			goto out;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

out:
	vfree(rss);
	return rc;
}

static int qed_start_rxq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct qed_rxq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_rx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params,
				    bd_max_bytes,
				    bd_chain_phys_addr,
				    cqe_pbl_addr, cqe_pbl_size, ret_params);
	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

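/* Example of the CMT queue mapping above: on a two-hwfn device, global
 * queue 5 is served by hwfn_index = 5 % 2 = 1 with a per-engine queue_id of
 * 5 / 2 = 2; on a single-hwfn device both expressions reduce to the
 * identity mapping. qed_start_txq() below applies the same scheme.
 */
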
static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct qed_txq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_tx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params, 0,
				    pbl_addr, pbl_size, ret_params);
	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_stop_fastpath(cdev);
	if (rc) {
		DP_ERR(cdev, "Failed to stop Fastpath\n");
		return rc;
	}

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_tunn_configure(struct qed_dev *cdev,
			      struct qed_tunn_params *tunn_params)
{
	struct qed_tunnel_info tunn_info;
	int i, rc;

	memset(&tunn_info, 0, sizeof(tunn_info));
	if (tunn_params->update_vxlan_port) {
		tunn_info.vxlan_port.b_update_port = true;
		tunn_info.vxlan_port.port = tunn_params->vxlan_port;
	}

	if (tunn_params->update_geneve_port) {
		tunn_info.geneve_port.b_update_port = true;
		tunn_info.geneve_port.port = tunn_params->geneve_port;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;
		struct qed_tunnel_info *tun;

		tun = &hwfn->cdev->tunnel;
		if (IS_PF(cdev)) {
			p_ptt = qed_ptt_acquire(hwfn);
			if (!p_ptt)
				return -EAGAIN;
		} else {
			p_ptt = NULL;
		}

		rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			if (IS_PF(cdev))
				qed_ptt_release(hwfn, p_ptt);
			return rc;
		}

		if (IS_PF_SRIOV(hwfn)) {
			u16 vxlan_port, geneve_port;
			int j;

			vxlan_port = tun->vxlan_port.port;
			geneve_port = tun->geneve_port.port;

			qed_for_each_vf(hwfn, j) {
				qed_iov_bulletin_set_udp_ports(hwfn, j,
							       vxlan_port,
							       geneve_port);
			}

			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		}
		if (IS_PF(cdev))
			qed_ptt_release(hwfn, p_ptt);
	}

	return 0;
}

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	}

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}

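/* Summary of the rx-mode matrix built above:
 *	regular:	matched unicast + matched multicast + broadcast
 *	multi-promisc:	additionally accepts unmatched multicast (rx and tx)
 *	promisc:	additionally accepts unmatched unicast on rx
 */
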
static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
		return -EINVAL;
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
		return -EINVAL;
	}
}

static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_arfs_config_params arfs_config_params;

	memset(&arfs_config_params, 0, sizeof(arfs_config_params));
	arfs_config_params.tcp = true;
	arfs_config_params.udp = true;
	arfs_config_params.ipv4 = true;
	arfs_config_params.ipv6 = true;
	arfs_config_params.arfs_enable = en_searcher;

	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
				&arfs_config_params);
	return 0;
}

static void
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
			     void *cookie, union event_ring_data *data,
			     u8 fw_return_code)
{
	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
	void *dev = p_hwfn->cdev->ops_cookie;

	op->arfs_filter_op(dev, cookie, fw_return_code);
}

static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
					 dma_addr_t mapping, u16 length,
					 u16 vport_id, u16 rx_queue_id,
					 bool add_filter)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_spq_comp_cb cb;
	int rc = -EINVAL;

	cb.function = qed_arfs_sp_response_handler;
	cb.cookie = cookie;

	rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
					     &cb, mapping, length, rx_queue_id,
					     vport_id, add_filter);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to issue a-RFS filter configuration\n");
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
			   "Successfully issued a-RFS filter configuration\n");

	return rc;
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

#ifdef CONFIG_DCB
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#endif

extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
	.iov = &qed_iov_ops_pass,
#endif
#ifdef CONFIG_DCB
	.dcb = &qed_dcbnl_ops_pass,
#endif
	.ptp = &qed_ptp_ops_pass,
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.check_mac = &qed_check_mac,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
	.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
	.configure_arfs_searcher = &qed_configure_arfs_searcher,
};

const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

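/* Usage sketch (illustrative; names other than the ops fields are assumed):
 * a protocol driver such as qede binds to this interface roughly as follows,
 * with error handling elided:
 *
 *	const struct qed_eth_ops *ops = qed_get_eth_ops();
 *	struct qed_dev_eth_info info;
 *
 *	ops->fill_dev_info(cdev, &info);
 *	ops->register_ops(cdev, &my_cb_ops, my_cookie);
 *	ops->vport_start(cdev, &vport_params);
 */
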
void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);