1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
4 * Copyright (C) 2018 Marvell International Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #include <linux/module.h>
12 #include <linux/pci.h>
14 #include "rvu_struct.h"
20 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
/* Reserved indexes for packet mark-format (RED/YELLOW coloring) configs,
 * covering IP DSCP, IP ECN and VLAN DEI based marking.
 * NOTE(review): enum tag spells "makr" instead of "mark"; left as-is since
 * it may be referenced elsewhere in the driver.
 */
46 enum nix_makr_fmt_indexes {
47 NIX_MARK_CFG_IP_DSCP_RED,
48 NIX_MARK_CFG_IP_DSCP_YELLOW,
49 NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
50 NIX_MARK_CFG_IP_ECN_RED,
51 NIX_MARK_CFG_IP_ECN_YELLOW,
52 NIX_MARK_CFG_IP_ECN_YELLOW_RED,
53 NIX_MARK_CFG_VLAN_DEI_RED,
54 NIX_MARK_CFG_VLAN_DEI_YELLOW,
55 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
59 /* For now considering MC resources needed for broadcast
60 * pkt replication only. i.e 256 HWVFs + 12 PFs.
62 #define MC_TBL_SIZE MC_TBL_SZ_512
63 #define MC_BUF_CNT MC_BUF_CNT_128
/* List linkage for a multicast entry; presumably a member of the MCE
 * struct whose definition is not visible in this excerpt - TODO confirm.
 */
66 struct hlist_node node;
/* Return true if @pcifunc (PF/VF) has a NIX LF attached: requires both
 * the pfvf->nixlf flag to be set and a valid NIX block address.
 */
71 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
73 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
76 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
77 if (!pfvf->nixlf || blkaddr < 0)
/* Return the number of NIX LFs provided by the hardware block
 * (looked up via the AF's view of block 0's NIX).
 */
82 int rvu_get_nixlf_count(struct rvu *rvu)
84 struct rvu_block *block;
87 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
90 block = &rvu->hw->block[blkaddr];
/* Initialize a multicast entry (MCE) list: empty hlist head plus the
 * list's maximum entry count.
 */
94 static void nix_mce_list_init(struct nix_mce_list *list, int max)
96 INIT_HLIST_HEAD(&list->head);
/* Carve @count contiguous entries from the MCE table free pool and return
 * the starting index (bump allocator on mcast->next_free_mce).
 * NOTE(review): no locking visible here; caller is presumably serialized -
 * confirm against the callers.
 */
101 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
108 idx = mcast->next_free_mce;
109 mcast->next_free_mce += count;
/* Map a NIX block address to its per-block state; only NIX0 is handled
 * in the visible code.
 */
113 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
115 if (blkaddr == BLKADDR_NIX0 && hw->nix0)
/* Force all in-flight RX packets to be written back to LLC/DRAM by
 * triggering NIX_AF_RX_SW_SYNC and polling for its ENA bit to clear.
 */
121 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
125 /*Sync all in flight RX packets to LLC/DRAM */
126 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
127 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
129 dev_err(rvu->dev, "NIX RX software sync failed\n");
131 /* As per a HW errata in 9xxx A0 silicon, HW may clear SW_SYNC[ENA]
132 * bit too early. Hence wait for 50us more.
134 if (is_rvu_9xxx_A0(rvu))
135 usleep_range(50, 60);
/* Validate that transmit scheduler queue @schq at level @lvl is within
 * bounds and owned by (or shareable with) @pcifunc. TL1 queues may be
 * shared by all VFs of the same PF; every other level requires exact
 * PF_FUNC ownership.
 */
138 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
139 int lvl, u16 pcifunc, u16 schq)
141 struct nix_txsch *txsch;
142 struct nix_hw *nix_hw;
145 nix_hw = get_nix_hw(rvu->hw, blkaddr);
149 txsch = &nix_hw->txsch[lvl];
150 /* Check out of bounds */
151 if (schq >= txsch->schq.max)
154 mutex_lock(&rvu->rsrc_lock);
/* pfvf_map encodes the owning PF_FUNC; read under rsrc_lock */
155 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
156 mutex_unlock(&rvu->rsrc_lock);
158 /* For TL1 schq, sharing across VF's of same PF is ok */
159 if (lvl == NIX_TXSCH_LVL_TL1 &&
160 rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
163 if (lvl != NIX_TXSCH_LVL_TL1 &&
/* Bind a newly allocated NIX LF to its interface (CGX/LMAC or loopback):
 * assigns RX/TX channels, programs the parser pkind for CGX, installs the
 * unicast/broadcast (and, for LBK, promisc) MCAM entries, and adds the
 * PF_FUNC to the broadcast replication list.
 */
170 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
172 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
177 pf = rvu_get_pf(pcifunc);
178 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
182 case NIX_INTF_TYPE_CGX:
183 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
184 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
186 pkind = rvu_npc_get_pkind(rvu, pf);
189 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
/* CGX uses the same channel number for RX and TX */
192 pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
193 pfvf->tx_chan_base = pfvf->rx_chan_base;
194 pfvf->rx_chan_cnt = 1;
195 pfvf->tx_chan_cnt = 1;
196 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
197 rvu_npc_set_pkind(rvu, pkind, pfvf);
199 case NIX_INTF_TYPE_LBK:
200 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
/* Loopback VFs are paired: each VF transmits on its partner VF's
 * channel (odd VF -> previous, even VF -> next), so traffic loops
 * between the pair.
 */
201 pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
202 pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
203 NIX_CHAN_LBK_CHX(0, vf + 1);
204 pfvf->rx_chan_cnt = 1;
205 pfvf->tx_chan_cnt = 1;
206 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
207 pfvf->rx_chan_base, false);
211 /* Add a UCAST forwarding rule in MCAM with this NIXLF attached
212 * RVU PF/VF's MAC address.
214 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
215 pfvf->rx_chan_base, pfvf->mac_addr);
217 /* Add this PF_FUNC to bcast pkt replication list */
218 err = nix_update_bcast_mce_list(rvu, pcifunc, true);
221 "Bcast list, failed to enable PF_FUNC 0x%x\n",
226 rvu_npc_install_bcast_match_entry(rvu, pcifunc,
227 nixlf, pfvf->rx_chan_base);
/* Start with the HW minimum frame size for both bounds */
228 pfvf->maxlen = NIC_HW_MIN_FRS;
229 pfvf->minlen = NIC_HW_MIN_FRS;
/* Undo nix_interface_init() for @pcifunc: clear rxvlan state, drop the
 * PF_FUNC from the broadcast replication list and free/disable all MCAM
 * entries owned by this NIX LF.
 */
234 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
236 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
241 pfvf->rxvlan = false;
243 /* Remove this PF_FUNC from bcast pkt replication list */
244 err = nix_update_bcast_mce_list(rvu, pcifunc, false);
247 "Bcast list, failed to disable PF_FUNC 0x%x\n",
251 /* Free and disable any MCAM entries used by this NIX LF */
252 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
/* Program the outer-L3 field fixups for a TSO LSO format @format:
 * the IP total/payload length (adjusted by per-segment payload length)
 * and, for IPv4 only, the identification field (incremented per segment).
 * @fidx is the running field index within the format, advanced per write.
 */
255 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
256 u64 format, bool v4, u64 *fidx)
258 struct nix_lso_format field = {0};
260 /* IP's Length field */
261 field.layer = NIX_TXLAYER_OL3;
262 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
263 field.offset = v4 ? 2 : 4;
264 field.sizem1 = 1; /* i.e 2 bytes */
265 field.alg = NIX_LSOALG_ADD_PAYLEN;
266 rvu_write64(rvu, blkaddr,
267 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
270 /* No ID field in IPv6 header */
275 field.layer = NIX_TXLAYER_OL3;
277 field.sizem1 = 1; /* i.e 2 bytes */
278 field.alg = NIX_LSOALG_ADD_SEGNUM;
279 rvu_write64(rvu, blkaddr,
280 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
/* Program the outer-L4 (TCP) field fixups for a TSO LSO format @format:
 * the sequence number (offset-adjusted per segment) and the flags word
 * (masked per segment position). @fidx advances per written field.
 */
284 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
285 u64 format, u64 *fidx)
287 struct nix_lso_format field = {0};
289 /* TCP's sequence number field */
290 field.layer = NIX_TXLAYER_OL4;
292 field.sizem1 = 3; /* i.e 4 bytes */
293 field.alg = NIX_LSOALG_ADD_OFFSET;
294 rvu_write64(rvu, blkaddr,
295 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
298 /* TCP's flags field */
299 field.layer = NIX_TXLAYER_OL4;
301 field.sizem1 = 0; /* not needed */
302 field.alg = NIX_LSOALG_TCP_FLAGS;
303 rvu_write64(rvu, blkaddr,
304 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
/* One-time LSO setup for the NIX block: enable LSO with TCP flag masks
 * for first/middle segments, then program the TSOv4 and TSOv6 format
 * tables, padding unused field slots with NOPs.
 */
308 static void nix_setup_lso(struct rvu *rvu, int blkaddr)
310 u64 cfg, idx, fidx = 0;
313 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
314 /* For TSO, set first and middle segment flags to
315 * mask out PSH, RST & FIN flags in TCP packet
317 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
318 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
/* BIT 63 enables LSO */
319 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
321 /* Configure format fields for TCPv4 segmentation offload */
322 idx = NIX_LSO_FORMAT_IDX_TSOV4;
323 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
324 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
326 /* Set rest of the fields to NOP */
327 for (; fidx < 8; fidx++) {
328 rvu_write64(rvu, blkaddr,
329 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
332 /* Configure format fields for TCPv6 segmentation offload */
333 idx = NIX_LSO_FORMAT_IDX_TSOV6;
335 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
336 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
338 /* Set rest of the fields to NOP */
339 for (; fidx < 8; fidx++) {
340 rvu_write64(rvu, blkaddr,
341 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
/* Release all queue-context memory owned by a PF/VF's NIX LF: the RQ/SQ/CQ
 * enable bitmaps, the HW context qmem regions (RQ/SQ/CQ/RSS, QINT/CQINT),
 * and NULL the pointers so a subsequent free or re-alloc is safe.
 */
345 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
347 kfree(pfvf->rq_bmap);
348 kfree(pfvf->sq_bmap);
349 kfree(pfvf->cq_bmap);
351 qmem_free(rvu->dev, pfvf->rq_ctx);
353 qmem_free(rvu->dev, pfvf->sq_ctx);
355 qmem_free(rvu->dev, pfvf->cq_ctx);
357 qmem_free(rvu->dev, pfvf->rss_ctx);
358 if (pfvf->nix_qints_ctx)
359 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
360 if (pfvf->cq_ints_ctx)
361 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
363 pfvf->rq_bmap = NULL;
364 pfvf->cq_bmap = NULL;
365 pfvf->sq_bmap = NULL;
369 pfvf->rss_ctx = NULL;
370 pfvf->nix_qints_ctx = NULL;
371 pfvf->cq_ints_ctx = NULL;
/* Allocate and program RSS state for a NIX LF: allocate the indirection
 * table context memory (rss_sz entries per group, rss_grps groups),
 * point the HW at it, enable RSS + caching, and set each group's
 * offset/size. Returns 0 on success or a negative qmem_alloc error.
 */
374 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
375 struct rvu_pfvf *pfvf, int nixlf,
376 int rss_sz, int rss_grps, int hwctx_size)
378 int err, grp, num_indices;
380 /* RSS is not requested for this NIXLF */
383 num_indices = rss_sz * rss_grps;
385 /* Alloc NIX RSS HW context memory and config the base */
386 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
390 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
391 (u64)pfvf->rss_ctx->iova);
393 /* Config full RSS table size, enable RSS and caching */
394 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
395 BIT_ULL(36) | BIT_ULL(4) |
396 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
397 /* Config RSS group offset and sizes */
398 for (grp = 0; grp < rss_grps; grp++)
399 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
400 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
/* Copy one admin-queue instruction to the AQ head, ring the doorbell and
 * busy-wait (poll the result completion code) until HW finishes. Returns
 * 0 on NIX_AQ_COMP_GOOD, an error otherwise. Caller holds aq->lock -
 * TODO confirm against callers (rvu_nix_aq_enq_inst takes it).
 */
404 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
405 struct nix_aq_inst_s *inst)
407 struct admin_queue *aq = block->aq;
408 struct nix_aq_res_s *result;
412 result = (struct nix_aq_res_s *)aq->res->base;
414 /* Get current head pointer where to append this instruction */
415 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
416 head = (reg >> 4) & AQ_PTR_MASK;
418 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
419 (void *)inst, aq->inst->entry_sz);
420 memset(result, 0, sizeof(*result));
421 /* sync into memory */
424 /* Ring the doorbell and wait for result */
425 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
426 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
434 if (result->compcode != NIX_AQ_COMP_GOOD)
435 /* TODO: Replace this with some error code */
/* Validate and submit one NIX admin-queue instruction on behalf of a
 * PF/VF mailbox request: checks LF attachment and context-type specific
 * bounds, optionally verifies SQ->SMQ ownership, builds the instruction,
 * submits it under aq->lock, tracks RQ/SQ/CQ enable state in the per-pfvf
 * bitmaps, and copies READ results back into @rsp.
 *
 * FIX(review): in the NIX_AQ_INSTOP_WRITE bookkeeping below, the SQ and
 * CQ branches previously read "req->rq.ena" (copy-paste from the RQ
 * branch). The rq/sq/cq contexts share a union in the request and the CQ
 * context's ena bit does not sit at the RQ ena bit position, so the CQ
 * bitmap could be updated from an unrelated bit. They now read the ena
 * bit of the context type actually being written.
 */
441 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
442 struct nix_aq_enq_rsp *rsp)
444 struct rvu_hwinfo *hw = rvu->hw;
445 u16 pcifunc = req->hdr.pcifunc;
446 int nixlf, blkaddr, rc = 0;
447 struct nix_aq_inst_s inst;
448 struct rvu_block *block;
449 struct admin_queue *aq;
450 struct rvu_pfvf *pfvf;
455 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
457 return NIX_AF_ERR_AF_LF_INVALID;
459 block = &hw->block[blkaddr];
462 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
463 return NIX_AF_ERR_AQ_ENQUEUE;
466 pfvf = rvu_get_pfvf(rvu, pcifunc);
467 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
469 /* Skip NIXLF check for broadcast MCE entry init */
470 if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
471 if (!pfvf->nixlf || nixlf < 0)
472 return NIX_AF_ERR_AF_LF_INVALID;
475 switch (req->ctype) {
476 case NIX_AQ_CTYPE_RQ:
477 /* Check if index exceeds max no of queues */
478 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
479 rc = NIX_AF_ERR_AQ_ENQUEUE;
481 case NIX_AQ_CTYPE_SQ:
482 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
483 rc = NIX_AF_ERR_AQ_ENQUEUE;
485 case NIX_AQ_CTYPE_CQ:
486 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
487 rc = NIX_AF_ERR_AQ_ENQUEUE;
489 case NIX_AQ_CTYPE_RSS:
490 /* Check if RSS is enabled and qidx is within range */
491 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
492 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
493 (req->qidx >= (256UL << (cfg & 0xF))))
494 rc = NIX_AF_ERR_AQ_ENQUEUE;
496 case NIX_AQ_CTYPE_MCE:
497 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
498 /* Check if index exceeds MCE list length */
499 if (!hw->nix0->mcast.mce_ctx ||
500 (req->qidx >= (256UL << (cfg & 0xF))))
501 rc = NIX_AF_ERR_AQ_ENQUEUE;
503 /* Adding multicast lists for requests from PF/VFs is not
504 * yet supported, so ignore this.
507 rc = NIX_AF_ERR_AQ_ENQUEUE;
510 rc = NIX_AF_ERR_AQ_ENQUEUE;
516 /* Check if SQ pointed SMQ belongs to this PF/VF or not */
517 if (req->ctype == NIX_AQ_CTYPE_SQ &&
518 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
519 (req->op == NIX_AQ_INSTOP_WRITE &&
520 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
521 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
522 pcifunc, req->sq.smq))
523 return NIX_AF_ERR_AQ_ENQUEUE;
526 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
528 inst.cindex = req->qidx;
529 inst.ctype = req->ctype;
531 /* Currently we are not supporting enqueuing multiple instructions,
532 * so always choose first entry in result memory.
534 inst.res_addr = (u64)aq->res->iova;
536 /* Clean result + context memory */
537 memset(aq->res->base, 0, aq->res->entry_sz);
538 /* Context needs to be written at RES_ADDR + 128 */
539 ctx = aq->res->base + 128;
540 /* Mask needs to be written at RES_ADDR + 256 */
541 mask = aq->res->base + 256;
544 case NIX_AQ_INSTOP_WRITE:
545 if (req->ctype == NIX_AQ_CTYPE_RQ)
546 memcpy(mask, &req->rq_mask,
547 sizeof(struct nix_rq_ctx_s));
548 else if (req->ctype == NIX_AQ_CTYPE_SQ)
549 memcpy(mask, &req->sq_mask,
550 sizeof(struct nix_sq_ctx_s));
551 else if (req->ctype == NIX_AQ_CTYPE_CQ)
552 memcpy(mask, &req->cq_mask,
553 sizeof(struct nix_cq_ctx_s));
554 else if (req->ctype == NIX_AQ_CTYPE_RSS)
555 memcpy(mask, &req->rss_mask,
556 sizeof(struct nix_rsse_s));
557 else if (req->ctype == NIX_AQ_CTYPE_MCE)
558 memcpy(mask, &req->mce_mask,
559 sizeof(struct nix_rx_mce_s));
561 case NIX_AQ_INSTOP_INIT:
562 if (req->ctype == NIX_AQ_CTYPE_RQ)
563 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
564 else if (req->ctype == NIX_AQ_CTYPE_SQ)
565 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
566 else if (req->ctype == NIX_AQ_CTYPE_CQ)
567 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
568 else if (req->ctype == NIX_AQ_CTYPE_RSS)
569 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
570 else if (req->ctype == NIX_AQ_CTYPE_MCE)
571 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
573 case NIX_AQ_INSTOP_NOP:
574 case NIX_AQ_INSTOP_READ:
575 case NIX_AQ_INSTOP_LOCK:
576 case NIX_AQ_INSTOP_UNLOCK:
579 rc = NIX_AF_ERR_AQ_ENQUEUE;
583 spin_lock(&aq->lock);
585 /* Submit the instruction to AQ */
586 rc = nix_aq_enqueue_wait(rvu, block, &inst);
588 spin_unlock(&aq->lock);
592 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
593 if (req->op == NIX_AQ_INSTOP_INIT) {
594 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
595 __set_bit(req->qidx, pfvf->rq_bmap);
596 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
597 __set_bit(req->qidx, pfvf->sq_bmap);
598 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
599 __set_bit(req->qidx, pfvf->cq_bmap);
602 if (req->op == NIX_AQ_INSTOP_WRITE) {
603 if (req->ctype == NIX_AQ_CTYPE_RQ) {
604 ena = (req->rq.ena & req->rq_mask.ena) |
605 (test_bit(req->qidx, pfvf->rq_bmap) &
608 __set_bit(req->qidx, pfvf->rq_bmap);
610 __clear_bit(req->qidx, pfvf->rq_bmap);
612 if (req->ctype == NIX_AQ_CTYPE_SQ) {
/* was req->rq.ena - use the SQ context's own ena bit */
613 ena = (req->sq.ena & req->sq_mask.ena) |
614 (test_bit(req->qidx, pfvf->sq_bmap) &
617 __set_bit(req->qidx, pfvf->sq_bmap);
619 __clear_bit(req->qidx, pfvf->sq_bmap);
621 if (req->ctype == NIX_AQ_CTYPE_CQ) {
/* was req->rq.ena - use the CQ context's own ena bit */
622 ena = (req->cq.ena & req->cq_mask.ena) |
623 (test_bit(req->qidx, pfvf->cq_bmap) &
626 __set_bit(req->qidx, pfvf->cq_bmap);
628 __clear_bit(req->qidx, pfvf->cq_bmap);
633 /* Copy read context into mailbox */
634 if (req->op == NIX_AQ_INSTOP_READ) {
635 if (req->ctype == NIX_AQ_CTYPE_RQ)
636 memcpy(&rsp->rq, ctx,
637 sizeof(struct nix_rq_ctx_s));
638 else if (req->ctype == NIX_AQ_CTYPE_SQ)
639 memcpy(&rsp->sq, ctx,
640 sizeof(struct nix_sq_ctx_s));
641 else if (req->ctype == NIX_AQ_CTYPE_CQ)
642 memcpy(&rsp->cq, ctx,
643 sizeof(struct nix_cq_ctx_s));
644 else if (req->ctype == NIX_AQ_CTYPE_RSS)
645 memcpy(&rsp->rss, ctx,
646 sizeof(struct nix_rsse_s));
647 else if (req->ctype == NIX_AQ_CTYPE_MCE)
648 memcpy(&rsp->mce, ctx,
649 sizeof(struct nix_rx_mce_s));
653 spin_unlock(&aq->lock);
/* Disable every enabled HW queue context of the requested type (CQ/SQ/RQ)
 * for a PF/VF: walks the corresponding enable bitmap and issues a masked
 * WRITE (ena=0) AQ instruction for each set bit.
 */
657 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
659 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
660 struct nix_aq_enq_req aq_req;
665 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
666 return NIX_AF_ERR_AQ_ENQUEUE;
668 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
669 aq_req.hdr.pcifunc = req->hdr.pcifunc;
671 if (req->ctype == NIX_AQ_CTYPE_CQ) {
673 aq_req.cq_mask.ena = 1;
674 q_cnt = pfvf->cq_ctx->qsize;
675 bmap = pfvf->cq_bmap;
677 if (req->ctype == NIX_AQ_CTYPE_SQ) {
679 aq_req.sq_mask.ena = 1;
680 q_cnt = pfvf->sq_ctx->qsize;
681 bmap = pfvf->sq_bmap;
683 if (req->ctype == NIX_AQ_CTYPE_RQ) {
685 aq_req.rq_mask.ena = 1;
686 q_cnt = pfvf->rq_ctx->qsize;
687 bmap = pfvf->rq_bmap;
690 aq_req.ctype = req->ctype;
691 aq_req.op = NIX_AQ_INSTOP_WRITE;
693 for (qidx = 0; qidx < q_cnt; qidx++) {
/* Only queues currently marked enabled need a disable write */
694 if (!test_bit(qidx, bmap))
697 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
700 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
701 (req->ctype == NIX_AQ_CTYPE_CQ) ?
702 "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
/* Mailbox handler: forward an AQ enqueue request from a PF/VF straight
 * to rvu_nix_aq_enq_inst().
 */
710 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
711 struct nix_aq_enq_req *req,
712 struct nix_aq_enq_rsp *rsp)
714 return rvu_nix_aq_enq_inst(rvu, req, rsp);
/* Mailbox handler: disable all HW contexts of the requested type for the
 * sending PF/VF (thin wrapper over nix_lf_hwctx_disable()).
 */
717 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
718 struct hwctx_disable_req *req,
721 return nix_lf_hwctx_disable(rvu, req);
/* Mailbox handler: allocate and fully provision a NIX LF for a PF/VF.
 * Validates the request (queue counts, NPA/SSO mappings, RSS geometry),
 * resets the LF, allocates and programs RQ/SQ/CQ/RSS and QINT/CQINT HW
 * context memory, configures TPIDs/LMTST/RX config, binds the interface
 * (CGX or LBK) and fills the response with channel, LSO, stat and IRQ
 * capabilities. On context allocation failure everything is torn down
 * via nix_ctx_free().
 */
724 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
725 struct nix_lf_alloc_req *req,
726 struct nix_lf_alloc_rsp *rsp)
728 int nixlf, qints, hwctx_size, intf, err, rc = 0;
729 struct rvu_hwinfo *hw = rvu->hw;
730 u16 pcifunc = req->hdr.pcifunc;
731 struct rvu_block *block;
732 struct rvu_pfvf *pfvf;
736 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
737 return NIX_AF_ERR_PARAM;
739 pfvf = rvu_get_pfvf(rvu, pcifunc);
740 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
741 if (!pfvf->nixlf || blkaddr < 0)
742 return NIX_AF_ERR_AF_LF_INVALID;
744 block = &hw->block[blkaddr];
745 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
747 return NIX_AF_ERR_AF_LF_INVALID;
749 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
751 /* If default, use 'this' NIXLF's PFFUNC */
752 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
753 req->npa_func = pcifunc;
754 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
755 return NIX_AF_INVAL_NPA_PF_FUNC;
758 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
760 /* If default, use 'this' NIXLF's PFFUNC */
761 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
762 req->sso_func = pcifunc;
763 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
764 return NIX_AF_INVAL_SSO_PF_FUNC;
767 /* If RSS is being enabled, check if requested config is valid.
768 * RSS table size should be power of two, otherwise
769 * RSS_GRP::OFFSET + adder might go beyond that group or
770 * won't be able to use entire table.
772 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
773 !is_power_of_2(req->rss_sz)))
774 return NIX_AF_ERR_RSS_SIZE_INVALID;
777 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
778 return NIX_AF_ERR_RSS_GRPS_INVALID;
780 /* Reset this NIX LF */
781 err = rvu_lf_reset(rvu, block, nixlf);
783 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
784 block->addr - BLKADDR_NIX0, nixlf);
785 return NIX_AF_ERR_LF_RESET;
/* NIX_AF_CONST3 packs the per-type HW context sizes as log2 nibbles */
788 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
790 /* Alloc NIX RQ HW context memory and config the base */
791 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
792 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
796 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
800 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
801 (u64)pfvf->rq_ctx->iova);
803 /* Set caching and queue count in HW */
804 cfg = BIT_ULL(36) | (req->rq_cnt - 1);
805 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
807 /* Alloc NIX SQ HW context memory and config the base */
808 hwctx_size = 1UL << (ctx_cfg & 0xF);
809 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
813 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
817 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
818 (u64)pfvf->sq_ctx->iova);
819 cfg = BIT_ULL(36) | (req->sq_cnt - 1);
820 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
822 /* Alloc NIX CQ HW context memory and config the base */
823 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
824 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
828 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
832 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
833 (u64)pfvf->cq_ctx->iova);
834 cfg = BIT_ULL(36) | (req->cq_cnt - 1);
835 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
837 /* Initialize receive side scaling (RSS) */
838 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
839 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
840 req->rss_sz, req->rss_grps, hwctx_size);
844 /* Alloc memory for CQINT's HW contexts */
845 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
846 qints = (cfg >> 24) & 0xFFF;
847 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
848 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
852 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
853 (u64)pfvf->cq_ints_ctx->iova);
854 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
856 /* Alloc memory for QINT's HW contexts */
857 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
858 qints = (cfg >> 12) & 0xFFF;
859 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
860 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
864 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
865 (u64)pfvf->nix_qints_ctx->iova);
866 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
868 /* Setup VLANX TPID's.
869 * Use VLAN1 for 802.1Q
870 * and VLAN0 for 802.1AD.
872 cfg = (0x8100ULL << 16) | 0x88A8ULL;
873 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
875 /* Enable LMTST for this NIX LF */
876 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
878 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
882 cfg |= (u64)req->sso_func << 16;
884 cfg |= (u64)req->xqe_sz << 33;
885 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
887 /* Config Rx pkt length, csum checks and apad enable / disable */
888 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
890 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
891 err = nix_interface_init(rvu, pcifunc, intf, nixlf);
895 /* Disable NPC entries as NIXLF's contexts are not initialized yet */
896 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
/* Error path: release everything allocated above */
901 nix_ctx_free(rvu, pfvf);
905 /* Set macaddr of this PF/VF */
906 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
908 /* set SQB size info */
909 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
910 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
911 rsp->rx_chan_base = pfvf->rx_chan_base;
912 rsp->tx_chan_base = pfvf->tx_chan_base;
913 rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
914 rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
915 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
916 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
917 /* Get HW supported stat count */
918 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
919 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
920 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
921 /* Get count of CQ IRQs and error IRQs supported per LF */
922 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
923 rsp->qints = ((cfg >> 12) & 0xFFF);
924 rsp->cints = ((cfg >> 24) & 0xFFF);
/* Mailbox handler: tear down a PF/VF's NIX LF - deinit the interface
 * (MCAM entries, bcast list), reset the LF in HW and free all queue
 * context memory.
 */
928 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
931 struct rvu_hwinfo *hw = rvu->hw;
932 u16 pcifunc = req->hdr.pcifunc;
933 struct rvu_block *block;
934 int blkaddr, nixlf, err;
935 struct rvu_pfvf *pfvf;
937 pfvf = rvu_get_pfvf(rvu, pcifunc);
938 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
939 if (!pfvf->nixlf || blkaddr < 0)
940 return NIX_AF_ERR_AF_LF_INVALID;
942 block = &hw->block[blkaddr];
943 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
945 return NIX_AF_ERR_AF_LF_INVALID;
947 nix_interface_deinit(rvu, pcifunc, nixlf);
949 /* Reset this NIX LF */
950 err = rvu_lf_reset(rvu, block, nixlf);
952 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
953 block->addr - BLKADDR_NIX0, nixlf);
954 return NIX_AF_ERR_LF_RESET;
957 nix_ctx_free(rvu, pfvf);
/* Mailbox handler: reserve (or reuse) a packet mark-format entry built
 * from the requested offset and red/yellow value-mask nibbles, returning
 * its index to the PF/VF in @rsp.
 */
962 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
963 struct nix_mark_format_cfg *req,
964 struct nix_mark_format_cfg_rsp *rsp)
966 u16 pcifunc = req->hdr.pcifunc;
967 struct nix_hw *nix_hw;
968 struct rvu_pfvf *pfvf;
972 pfvf = rvu_get_pfvf(rvu, pcifunc);
973 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
974 if (!pfvf->nixlf || blkaddr < 0)
975 return NIX_AF_ERR_AF_LF_INVALID;
977 nix_hw = get_nix_hw(rvu->hw, blkaddr);
/* Pack offset[18:16], y_mask[15:12], y_val[11:8], r_mask[7:4], r_val[3:0] */
981 cfg = (((u32)req->offset & 0x7) << 16) |
982 (((u32)req->y_mask & 0xF) << 12) |
983 (((u32)req->y_val & 0xF) << 8) |
984 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
986 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
988 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
989 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
990 return NIX_AF_ERR_MARK_CFG_FAIL;
993 rsp->mark_format_idx = rc;
997 /* Disable shaping of pkts by a scheduler queue
998 * at a given scheduler level.
/* Clears the enable bit (bit 0) of the level's CIR register and, where
 * the level has one, its PIR register. SMQ/MDQ levels are not handled
 * in the visible switch.
 */
1000 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1003 u64 cir_reg = 0, pir_reg = 0;
1007 case NIX_TXSCH_LVL_TL1:
1008 cir_reg = NIX_AF_TL1X_CIR(schq);
1009 pir_reg = 0; /* PIR not available at TL1 */
1011 case NIX_TXSCH_LVL_TL2:
1012 cir_reg = NIX_AF_TL2X_CIR(schq);
1013 pir_reg = NIX_AF_TL2X_PIR(schq);
1015 case NIX_TXSCH_LVL_TL3:
1016 cir_reg = NIX_AF_TL3X_CIR(schq);
1017 pir_reg = NIX_AF_TL3X_PIR(schq);
1019 case NIX_TXSCH_LVL_TL4:
1020 cir_reg = NIX_AF_TL4X_CIR(schq);
1021 pir_reg = NIX_AF_TL4X_PIR(schq);
/* Clear CIR/PIR enable bits without disturbing the rate fields */
1027 cfg = rvu_read64(rvu, blkaddr, cir_reg);
1028 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1032 cfg = rvu_read64(rvu, blkaddr, pir_reg);
1033 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
/* Clear a scheduler queue's link configuration: TL4's SDP link config,
 * or, for TL2, its per-link (CGX + LBK) config registers. Other levels
 * are left untouched.
 */
1036 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1039 struct rvu_hwinfo *hw = rvu->hw;
1042 /* Reset TL4's SDP link config */
1043 if (lvl == NIX_TXSCH_LVL_TL4)
1044 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1046 if (lvl != NIX_TXSCH_LVL_TL2)
1049 /* Reset TL2's CGX or LBK link config */
1050 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1051 rvu_write64(rvu, blkaddr,
1052 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
/* Return the pair of statically assigned TL1 scheduler queues for
 * @pcifunc. TL1s are carved two-per-link: CGX interfaces get
 * (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2, loopback gets the slots
 * after all CGX links. First use initializes the pfvf_map ownership and
 * does a one-time linkcfg/shaping reset of both queues. The pair is
 * reported via @schq_list/@schq_cnt when provided.
 */
1056 rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
1057 u16 *schq_list, u16 *schq_cnt)
1059 struct nix_txsch *txsch;
1060 struct nix_hw *nix_hw;
1061 struct rvu_pfvf *pfvf;
1067 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1071 pfvf = rvu_get_pfvf(rvu, pcifunc);
1072 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
1073 pfvf_map = txsch->pfvf_map;
1074 pf = rvu_get_pf(pcifunc);
1076 /* static allocation as two TL1's per link */
1077 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1080 case NIX_INTF_TYPE_CGX:
1081 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
1082 schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
1084 case NIX_INTF_TYPE_LBK:
1085 schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
1091 if (schq_base + 1 > txsch->schq.max)
1094 /* init pfvf_map as we store flags */
1095 if (pfvf_map[schq_base] == U32_MAX) {
1096 pfvf_map[schq_base] =
1097 TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
1098 pfvf_map[schq_base + 1] =
1099 TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
1101 /* Onetime reset for TL1 */
1102 nix_reset_tx_linkcfg(rvu, blkaddr,
1103 NIX_TXSCH_LVL_TL1, schq_base);
1104 nix_reset_tx_shaping(rvu, blkaddr,
1105 NIX_TXSCH_LVL_TL1, schq_base);
1107 nix_reset_tx_linkcfg(rvu, blkaddr,
1108 NIX_TXSCH_LVL_TL1, schq_base + 1);
1109 nix_reset_tx_shaping(rvu, blkaddr,
1110 NIX_TXSCH_LVL_TL1, schq_base + 1);
1113 if (schq_list && schq_cnt) {
1114 schq_list[0] = schq_base;
1115 schq_list[1] = schq_base + 1;
/* Mailbox handler: allocate transmit scheduler queues at every requested
 * level for a PF/VF. First pass validates the whole request under
 * rsrc_lock (TL1 is special: no contiguous allocation, at most 2, and
 * served from the static per-link pair); second pass allocates, records
 * ownership in pfvf_map and resets linkcfg/shaping on each new queue.
 */
1122 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1123 struct nix_txsch_alloc_req *req,
1124 struct nix_txsch_alloc_rsp *rsp)
1126 u16 pcifunc = req->hdr.pcifunc;
1127 struct nix_txsch *txsch;
1128 int lvl, idx, req_schq;
1129 struct rvu_pfvf *pfvf;
1130 struct nix_hw *nix_hw;
1131 int blkaddr, rc = 0;
1135 pfvf = rvu_get_pfvf(rvu, pcifunc);
1136 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1137 if (!pfvf->nixlf || blkaddr < 0)
1138 return NIX_AF_ERR_AF_LF_INVALID;
1140 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1144 mutex_lock(&rvu->rsrc_lock);
/* Pass 1: validate every level before allocating anything */
1145 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1146 txsch = &nix_hw->txsch[lvl];
1147 req_schq = req->schq_contig[lvl] + req->schq[lvl];
1148 pfvf_map = txsch->pfvf_map;
1153 /* There are only 28 TL1s */
1154 if (lvl == NIX_TXSCH_LVL_TL1) {
1155 if (req->schq_contig[lvl] ||
1156 req->schq[lvl] > 2 ||
1157 rvu_get_tl1_schqs(rvu, blkaddr,
1158 pcifunc, NULL, NULL))
1163 /* Check if request is valid */
1164 if (req_schq > MAX_TXSCHQ_PER_FUNC)
1167 /* If contiguous queues are needed, check for availability */
1168 if (req->schq_contig[lvl] &&
1169 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1172 /* Check if full request can be accommodated */
1173 if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
/* Pass 2: perform the allocations and fill the response */
1177 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1178 txsch = &nix_hw->txsch[lvl];
1179 rsp->schq_contig[lvl] = req->schq_contig[lvl];
1180 pfvf_map = txsch->pfvf_map;
1181 rsp->schq[lvl] = req->schq[lvl];
1183 if (!req->schq[lvl] && !req->schq_contig[lvl])
1186 /* Handle TL1 specially as it is
1187 * allocation is restricted to 2 TL1's
1191 if (lvl == NIX_TXSCH_LVL_TL1) {
1192 rsp->schq_contig[lvl] = 0;
1193 rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
1194 &rsp->schq_list[lvl][0],
1199 /* Alloc contiguous queues first */
1200 if (req->schq_contig[lvl]) {
1201 schq = rvu_alloc_rsrc_contig(&txsch->schq,
1202 req->schq_contig[lvl]);
1204 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1205 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1206 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1207 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1208 rsp->schq_contig_list[lvl][idx] = schq;
1213 /* Alloc non-contiguous queues */
1214 for (idx = 0; idx < req->schq[lvl]; idx++) {
1215 schq = rvu_alloc_rsrc(&txsch->schq);
1216 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1217 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1218 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1219 rsp->schq_list[lvl][idx] = schq;
1224 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1226 mutex_unlock(&rvu->rsrc_lock);
/* Free every transmit scheduler queue owned by @pcifunc (except the
 * shared TL1 pair): detach TL2/TL4 link configs, flush each owned SMQ
 * with enqueue xoff, return the queues to the free pool, then sync the
 * NDC-TX cache for this LF.
 */
1230 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1232 int blkaddr, nixlf, lvl, schq, err;
1233 struct rvu_hwinfo *hw = rvu->hw;
1234 struct nix_txsch *txsch;
1235 struct nix_hw *nix_hw;
1238 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1240 return NIX_AF_ERR_AF_LF_INVALID;
1242 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1246 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1248 return NIX_AF_ERR_AF_LF_INVALID;
1250 /* Disable TL2/3 queue links before SMQ flush*/
1251 mutex_lock(&rvu->rsrc_lock);
1252 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1253 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1256 txsch = &nix_hw->txsch[lvl];
1257 for (schq = 0; schq < txsch->schq.max; schq++) {
1258 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1260 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
/* Flush every SMQ owned by this PF_FUNC */
1265 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1266 for (schq = 0; schq < txsch->schq.max; schq++) {
1267 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1269 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
1270 /* Do SMQ flush and set enqueue xoff */
1271 cfg |= BIT_ULL(50) | BIT_ULL(49);
1272 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
1274 /* Wait for flush to complete */
1275 err = rvu_poll_reg(rvu, blkaddr,
1276 NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
1279 "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
1283 /* Now free scheduler queues to free pool */
1284 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1285 /* Free all SCHQ's except TL1 as
1286 * TL1 is shared across all VF's for a RVU PF
1288 if (lvl == NIX_TXSCH_LVL_TL1)
1291 txsch = &nix_hw->txsch[lvl];
1292 for (schq = 0; schq < txsch->schq.max; schq++) {
1293 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1295 rvu_free_rsrc(&txsch->schq, schq);
1296 txsch->pfvf_map[schq] = 0;
1299 mutex_unlock(&rvu->rsrc_lock);
1301 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1302 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1303 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1305 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
/* Free a single TX scheduler queue described by @req.
 *
 * Validates ownership under rsrc_lock, flushes the queue first when it
 * is an SMQ (disabling TL2/3 links beforehand is the caller's job),
 * then returns the queue to the free pool and clears its PF/VF mapping.
 * Freeing TL1 queues is rejected since TL1 is shared.
 */
static int nix_txschq_free_one(struct rvu *rvu,
			       struct nix_txsch_free_req *req)
	int lvl, schq, nixlf, blkaddr, rc;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		return NIX_AF_ERR_AF_LF_INVALID;

	lvl = req->schq_lvl;

	txsch = &nix_hw->txsch[lvl];

	/* Don't allow freeing TL1 */
	if (lvl > NIX_TXSCH_LVL_TL2 ||
	    schq >= txsch->schq.max)

	pfvf_map = txsch->pfvf_map;
	mutex_lock(&rvu->rsrc_lock);

	/* Reject if the queue is not owned by the requester */
	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
		mutex_unlock(&rvu->rsrc_lock);

	/* Flush if it is a SMQ. Onus of disabling
	 * TL2/3 queue links before SMQ flush is on user
	 */
	if (lvl == NIX_TXSCH_LVL_SMQ) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
		/* Do SMQ flush and set enqueue xoff */
		cfg |= BIT_ULL(50) | BIT_ULL(49);
		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);

		/* Wait for flush to complete (HW clears bit 49) */
		rc = rvu_poll_reg(rvu, blkaddr,
				  NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
			"NIXLF%d: SMQ%d flush failed\n", nixlf, schq);

	/* Free the resource */
	rvu_free_rsrc(&txsch->schq, schq);
	txsch->pfvf_map[schq] = 0;
	mutex_unlock(&rvu->rsrc_lock);

	return NIX_AF_ERR_TLX_INVALID;
1377 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1378 struct nix_txsch_free_req *req,
1379 struct msg_rsp *rsp)
1381 if (req->flags & TXSCHQ_FREE_ALL)
1382 return nix_txschq_free(rvu, req->hdr.pcifunc);
1384 return nix_txschq_free_one(rvu, req);
/* Validate a single TX scheduler register write requested via mbox.
 *
 * Checks that @reg is a legal register for level @lvl, that the target
 * schq belongs to @pcifunc, and — for *_PARENT registers — that the
 * parent queue encoded in @regval also belongs to @pcifunc at the
 * next-higher level.
 */
static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
				   int lvl, u64 reg, u64 regval)
	u64 regbase = reg & 0xFFFF;	/* register offset without schq index */

	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))

	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
	/* Check if this schq belongs to this PF/VF or not */
	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))

	parent = (regval >> 16) & 0x1FF;	/* parent index field of *_PARENT regs */
	/* Validate MDQ's TL4 parent */
	if (regbase == NIX_AF_MDQX_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))

	/* Validate TL4's TL3 parent */
	if (regbase == NIX_AF_TL4X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))

	/* Validate TL3's TL2 parent */
	if (regbase == NIX_AF_TL3X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))

	/* Validate TL2's TL1 parent */
	if (regbase == NIX_AF_TL2X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
/* Apply default scheduling config to the TL1 queues mapped to @pcifunc.
 *
 * For each TL1 schq returned by rvu_get_tl1_schqs() that has not yet
 * been configured (NIX_TXSCHQ_TL1_CFG_DONE not set in its pfvf_map
 * flags), program default topology/RR-quantum/CIR values and mark the
 * queue as configured. Runs under rsrc_lock.
 */
nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
	u16 schq_list[2], schq_cnt, schq;
	int blkaddr, idx, err = 0;
	u16 map_func, map_flags;
	struct nix_hw *nix_hw;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);

	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;

	mutex_lock(&rvu->rsrc_lock);

	err = rvu_get_tl1_schqs(rvu, blkaddr,
				pcifunc, schq_list, &schq_cnt);

	for (idx = 0; idx < schq_cnt; idx++) {
		schq = schq_list[idx];
		map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
		map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);

		/* check if config is already done or this is pf */
		if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)

		/* default configuration */
		reg = NIX_AF_TL1X_TOPOLOGY(schq);
		regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
		rvu_write64(rvu, blkaddr, reg, regval);
		reg = NIX_AF_TL1X_SCHEDULE(schq);
		regval = TXSCH_TL1_DFLT_RR_QTM;
		rvu_write64(rvu, blkaddr, reg, regval);
		reg = NIX_AF_TL1X_CIR(schq);
		rvu_write64(rvu, blkaddr, reg, regval);

		/* Remember that defaults were applied to this schq */
		map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
		pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);

	mutex_unlock(&rvu->rsrc_lock);
/* Mbox handler: apply a batch of TX scheduler register writes.
 *
 * Each (reg, regval) pair in @req is validated via
 * is_txschq_config_valid() before being written. Special cases:
 * - A VF asking for TL1 config only triggers nix_tl1_default_cfg().
 * - For SMQ config, the PF/VF-visible NIXLF slot is replaced with the
 *   HW NIXLF id (bits 24+).
 * - TL1 SCHEDULE..GREEN_BYTES writes mark the queue's TL1_CFG_DONE flag.
 * - SMQ flush requests are polled to completion.
 */
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct msg_rsp *rsp)
	u16 schq, pcifunc = req->hdr.pcifunc;
	struct rvu_hwinfo *hw = rvu->hw;
	u64 reg, regval, schq_regbase;
	struct nix_txsch *txsch;
	u16 map_func, map_flags;
	struct nix_hw *nix_hw;
	int blkaddr, idx, err;

	/* Bound-check level and register count from the request */
	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
		return NIX_AF_INVAL_TXSCHQ_CFG;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		return NIX_AF_ERR_AF_LF_INVALID;

	txsch = &nix_hw->txsch[req->lvl];
	pfvf_map = txsch->pfvf_map;

	/* VF is only allowed to trigger
	 * setting default cfg on TL1
	 */
	if (pcifunc & RVU_PFVF_FUNC_MASK &&
	    req->lvl == NIX_TXSCH_LVL_TL1) {
		return nix_tl1_default_cfg(rvu, pcifunc);

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		regval = req->regval[idx];
		schq_regbase = reg & 0xFFFF;

		if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
					    txsch->lvl, reg, regval))
			return NIX_AF_INVAL_TXSCHQ_CFG;

		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
			regval &= ~(0x7FULL << 24);
			regval |= ((u64)nixlf << 24);

		/* Mark config as done for TL1 by PF */
		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);

			mutex_lock(&rvu->rsrc_lock);

			map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
			map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);

			map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
			pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
			mutex_unlock(&rvu->rsrc_lock);

		rvu_write64(rvu, blkaddr, reg, regval);

		/* Check for SMQ flush, if so, poll for its completion */
		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
		    (regval & BIT_ULL(49))) {
			err = rvu_poll_reg(rvu, blkaddr,
					   reg, BIT_ULL(49), true);
				return NIX_AF_SMQ_FLUSH_FAILED;
/* Program one RX VTAG type entry for @nixlf.
 *
 * Builds the register value from the requested vtag size plus optional
 * capture (bit 5) and strip (bit 4) flags, then writes it to
 * NIX_AF_LFX_RX_VTAG_TYPEX. vtag_type must be 0..7 and vtag_size no
 * larger than VTAGSIZE_T8.
 */
static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
			   struct nix_vtag_config *req)
	u64 regval = req->vtag_size;

	if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)

	if (req->rx.capture_vtag)
		regval |= BIT_ULL(5);
	if (req->rx.strip_vtag)
		regval |= BIT_ULL(4);

	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
/* Mbox handler: configure VLAN tag handling for the requester's NIXLF.
 *
 * Only the RX direction is implemented (cfg_type set); TX vtag
 * configuration is still a TODO below.
 */
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
				  struct nix_vtag_config *req,
				  struct msg_rsp *rsp)
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
		return NIX_AF_ERR_AF_LF_INVALID;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		return NIX_AF_ERR_AF_LF_INVALID;

	if (req->cfg_type) {
		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
			return NIX_AF_ERR_PARAM;

		/* TODO: handle tx vtag configuration */
/* Submit one MCE (multicast entry) AQ instruction.
 *
 * @mce:     MCE table index to operate on
 * @op:      AQ instruction op (INIT/WRITE)
 * @pcifunc: PF/VF whose RQ0 will receive replicated packets
 * @next:    index of next MCE in the replication chain
 * @eol:     true for the last entry in the chain
 */
static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
			 u16 pcifunc, int next, bool eol)
	struct nix_aq_enq_req aq_req;

	aq_req.hdr.pcifunc = 0;		/* AF-originated request */
	aq_req.ctype = NIX_AQ_CTYPE_MCE;

	/* Forward bcast pkts to RQ0, RSS not needed */
	aq_req.mce.index = 0;
	aq_req.mce.eol = eol;
	aq_req.mce.pf_func = pcifunc;
	aq_req.mce.next = next;

	/* All fields valid */
	*(u64 *)(&aq_req.mce_mask) = ~0ULL;

	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
/* Add or remove @pcifunc's entry in a software MCE list.
 *
 * On add: allocates a new struct mce and inserts it at the head (empty
 * list) or behind the current tail. On remove: unlinks and frees the
 * matching entry. Caller holds the mcast mce_lock.
 */
static int nix_update_mce_list(struct nix_mce_list *mce_list,
			       u16 pcifunc, int idx, bool add)
	struct mce *mce, *tail = NULL;
	bool delete = false;

	/* Scan through the current list */
	hlist_for_each_entry(mce, &mce_list->head, node) {
		/* If already exists, then delete */
		if (mce->pcifunc == pcifunc && !add) {

	hlist_del(&mce->node);

	/* Add a new one to the list, at the tail */
	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
	mce->pcifunc = pcifunc;
	hlist_add_head(&mce->node, &mce_list->head);
	hlist_add_behind(&mce->node, &tail->node);
/* Update the broadcast replication list for @pcifunc and mirror it to HW.
 *
 * The PF's bcast MCE list holds one slot per function (idx 0 = PF,
 * then its VFs). After updating the SW list, the whole chain is
 * re-written to HW with EOL set on the last entry.
 */
static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
	int err = 0, idx, next_idx, count;
	struct nix_mce_list *mce_list;
	struct mce *mce, *next_mce;
	struct nix_mcast *mcast;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;

	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
	if (is_afvf(pcifunc))

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	nix_hw = get_nix_hw(rvu->hw, blkaddr);

	mcast = &nix_hw->mcast;

	/* Get this PF/VF func's MCE index */
	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);

	mce_list = &pfvf->bcast_mce_list;
	/* NOTE(review): valid slots are [bcast_mce_idx, bcast_mce_idx + max - 1],
	 * so this bound looks off-by-one; '>=' would also reject idx == base + max.
	 * Confirm against the list allocation in nix_setup_bcast_tables().
	 */
	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
		dev_err(rvu->dev,
			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
			__func__, idx, mce_list->max,
			pcifunc >> RVU_PFVF_PF_SHIFT);

	mutex_lock(&mcast->mce_lock);

	err = nix_update_mce_list(mce_list, pcifunc, idx, add);

	/* Disable MCAM entry in NPC */

	if (!mce_list->count)
	count = mce_list->count;

	/* Dump the updated list to HW */
	hlist_for_each_entry(mce, &mce_list->head, node) {
		next_mce = hlist_entry(mce->node.next,
		next_idx = next_mce->idx;

		/* EOL should be set in last MCE */
		err = nix_setup_mce(rvu, mce->idx,
				    NIX_AQ_INSTOP_WRITE, mce->pcifunc,
				    next_idx, count ? false : true);

	mutex_unlock(&mcast->mce_lock);
/* Reserve and pre-initialize broadcast MCE lists for each enabled PF.
 *
 * For every CGX-mapped PF (PF0/AF skipped), allocates numVFs + 1
 * contiguous MCE slots, records the start index in pfvf->bcast_mce_idx,
 * and writes placeholder entries so later updates can always use
 * AQ WRITE instead of deciding between INIT/WRITE.
 */
static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
	struct nix_mcast *mcast = &nix_hw->mcast;
	int err, pf, numvfs, idx;
	struct rvu_pfvf *pfvf;

	/* Skip PF0 (i.e AF) */
	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))

		/* Get numVFs attached to this PF */
		numvfs = (cfg >> 12) & 0xFF;

		pfvf = &rvu->pf[pf];
		/* Save the start MCE */
		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);

		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);

		for (idx = 0; idx < (numvfs + 1); idx++) {
			/* idx-0 is for PF, followed by VFs */
			pcifunc = (pf << RVU_PFVF_PF_SHIFT);

			/* Add dummy entries now, so that we don't have to check
			 * for whether AQ_OP should be INIT/WRITE later on.
			 * Will be updated when a NIXLF is attached/detached to
			 */
			err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
/* One-time NIX multicast/mirror setup.
 *
 * Allocates the MCE context table and replication buffers, programs
 * their IOVAs and sizes into the NIX AF registers, reserves a pkind for
 * internal multicast/mirror replay, then builds the per-PF broadcast
 * tables via nix_setup_bcast_tables().
 */
static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
	struct nix_mcast *mcast = &nix_hw->mcast;
	struct rvu_hwinfo *hw = rvu->hw;

	/* MCE context size, encoded as a power of two in CONST3 */
	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
	size = (1ULL << size);

	/* Alloc memory for multicast/mirror replication entries */
	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
			 (256UL << MC_TBL_SIZE), size);

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
		    (u64)mcast->mce_ctx->iova);

	/* Set max list length equal to max no of VFs per PF + PF itself */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);

	/* Alloc memory for multicast replication buffers */
	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
			 (8UL << MC_BUF_CNT), size);

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
		    (u64)mcast->mcast_buf->iova);

	/* Alloc pkind for NIX internal RX multicast/mirror replay */
	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
		    BIT_ULL(20) | MC_BUF_CNT);

	mutex_init(&mcast->mce_lock);

	return nix_setup_bcast_tables(rvu, nix_hw);
/* Discover TX scheduler queue counts and set up per-level bookkeeping.
 *
 * For each scheduler level, reads the HW *_CONST register to learn the
 * queue count, allocates the free-queue bitmap and the schq -> PF/VF
 * ownership map (pfvf_map, initialized to all-ones = unowned).
 */
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
	struct nix_txsch *txsch;

	/* Get scheduler queue count of each type and alloc
	 * bitmap for each for alloc/free/attach operations.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		/* Pick the HW CONST register that reports this level's count */
		case NIX_TXSCH_LVL_SMQ:
			reg = NIX_AF_MDQ_CONST;
		case NIX_TXSCH_LVL_TL4:
			reg = NIX_AF_TL4_CONST;
		case NIX_TXSCH_LVL_TL3:
			reg = NIX_AF_TL3_CONST;
		case NIX_TXSCH_LVL_TL2:
			reg = NIX_AF_TL2_CONST;
		case NIX_TXSCH_LVL_TL1:
			reg = NIX_AF_TL1_CONST;
		cfg = rvu_read64(rvu, blkaddr, reg);
		txsch->schq.max = cfg & 0xFFFF;
		err = rvu_alloc_bitmap(&txsch->schq);

		/* Allocate memory for scheduler queues to
		 * PF/VF pcifunc mapping info.
		 */
		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
					       sizeof(u32), GFP_KERNEL);
		if (!txsch->pfvf_map)
		/* All-ones marks a queue as unowned */
		memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
/* Find or allocate a packet-marking format entry for @cfg.
 *
 * Returns the index of an existing matching format, or programs @cfg
 * into the next free NIX_AF_MARK_FORMATX_CTL slot. Fails when all
 * 'total' slots are in use.
 */
int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
				int blkaddr, u32 cfg)
	/* Reuse an already-programmed format if the config matches */
	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)

	if (fmt_idx >= nix_hw->mark_format.total)

	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
	nix_hw->mark_format.cfg[fmt_idx] = cfg;
	nix_hw->mark_format.in_use++;
/* Pre-reserve the default set of packet mark formats.
 *
 * Reads the supported format count from NIX_AF_PSE_CONST, allocates the
 * tracking array, then reserves one slot for each of the standard
 * DSCP/ECN/VLAN-DEI red/yellow marking configs listed below.
 */
static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
		/* Default marking configurations, indexed by enum nix_makr_fmt_indexes */
		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,

	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
	nix_hw->mark_format.total = (u8)total;
	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
	if (!nix_hw->mark_format.cfg)
	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
/* Mbox handler: zero all TX and RX statistics of the requester's NIXLF.
 *
 * Stat counts are read from NIX_AF_CONST1 (TX count in bits 31:24,
 * RX count in bits 39:32 per the shifts below).
 */
int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int i, nixlf, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
		return NIX_AF_ERR_AF_LF_INVALID;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Get stats count supported by HW */
	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	/* Reset tx stats */
	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);

	/* Reset rx stats */
	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
/* Returns the ALG index to be set into NPC_RX_ACTION */
static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
	/* Scan over existing algo entries to find a match */
	for (i = 0; i < nix_hw->flowkey.in_use; i++)
		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
/* Translate a NIX_FLOW_KEY_TYPE_* bitmask into up to FIELDS_PER_ALG
 * flow key field descriptors for one RSS algorithm.
 *
 * Walks flow_cfg bit by bit and fills 'alg' with field definitions
 * (layer id, ltype match/mask, header offset, byte count, key offset).
 * TCP/UDP/SCTP share one field ("group"); see the state-machine notes
 * in the comment below. Fails with NIX_AF_ERR_RSS_NOSPC_FIELD when the
 * requested combination does not fit in FIELDS_PER_ALG fields or
 * MAX_KEY_OFF key bytes.
 */
static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
	int idx, nr_field, key_off, field_marker, keyoff_marker;
	int max_key_off, max_bit_pos, group_member;
	struct nix_rx_flowkey_alg *field;
	struct nix_rx_flowkey_alg tmp;
	u32 key_type, valid_key;

#define FIELDS_PER_ALG	5
#define MAX_KEY_OFF	40
	/* Clear all fields */
	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);

	/* Each of the 32 possible flow key algorithm definitions should
	 * fall into above incremental config (except ALG0). Otherwise a
	 * single NPC MCAM entry is not sufficient for supporting RSS.
	 *
	 * If a different definition or combination needed then NPC MCAM
	 * has to be programmed to filter such pkts and it's action should
	 * point to this definition to calculate flowtag or hash.
	 *
	 * The `for loop` goes over _all_ protocol field and the following
	 * variables depicts the state machine forward progress logic.
	 *
	 * keyoff_marker - Enabled when hash byte length needs to be accounted
	 * in field->key_offset update.
	 * field_marker - Enabled when a new field needs to be selected.
	 * group_member - Enabled when protocol is part of a group.
	 */
	keyoff_marker = 0; max_key_off = 0; group_member = 0;
	nr_field = 0; key_off = 0; field_marker = 1;
	field = &tmp; max_bit_pos = fls(flow_cfg);
	    idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
	    key_off < MAX_KEY_OFF; idx++) {
		key_type = BIT(idx);
		valid_key = flow_cfg & key_type;
		/* Found a field marker, reset the field values */
			memset(&tmp, 0, sizeof(tmp));

		case NIX_FLOW_KEY_TYPE_PORT:
			field->sel_chan = true;
			/* This should be set to 1, when SEL_CHAN is set */
			field_marker = true;
			keyoff_marker = true;
		case NIX_FLOW_KEY_TYPE_IPV4:
			field->lid = NPC_LID_LC;
			field->ltype_match = NPC_LT_LC_IP;
			field->hdr_offset = 12; /* SIP offset */
			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
			field->ltype_mask = 0xF; /* Match only IPv4 */
			field_marker = true;
			keyoff_marker = false;
		case NIX_FLOW_KEY_TYPE_IPV6:
			field->lid = NPC_LID_LC;
			field->ltype_match = NPC_LT_LC_IP6;
			field->hdr_offset = 8; /* SIP offset */
			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
			field->ltype_mask = 0xF; /* Match only IPv6 */
			field_marker = true;
			keyoff_marker = true;
		case NIX_FLOW_KEY_TYPE_TCP:
		case NIX_FLOW_KEY_TYPE_UDP:
		case NIX_FLOW_KEY_TYPE_SCTP:
			field->lid = NPC_LID_LD;
			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
			/* L4 protocols share one field; accumulate ltype bits */
			if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) {
				field->ltype_match |= NPC_LT_LD_TCP;
				group_member = true;
			} else if (key_type == NIX_FLOW_KEY_TYPE_UDP &&
				field->ltype_match |= NPC_LT_LD_UDP;
				group_member = true;
			} else if (key_type == NIX_FLOW_KEY_TYPE_SCTP &&
				field->ltype_match |= NPC_LT_LD_SCTP;
				group_member = true;
			field->ltype_mask = ~field->ltype_match;
			if (key_type == NIX_FLOW_KEY_TYPE_SCTP) {
				/* Handle the case where any of the group item
				 * is enabled in the group but not the final one
				 */
				group_member = false;
				field_marker = true;
				keyoff_marker = true;
				field_marker = false;
				keyoff_marker = false;

		/* Found a valid flow key type */
			field->key_offset = key_off;
			memcpy(&alg[nr_field], field, sizeof(*field));
			max_key_off = max(max_key_off, field->bytesm1 + 1);

		/* Found a field marker, get the next field */

		/* Found a keyoff marker, update the new key_off */
		if (keyoff_marker) {
			key_off += max_key_off;

	/* Processed all the flow key types */
	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)

	return NIX_AF_ERR_RSS_NOSPC_FIELD;
/* Reserve the next free RSS flow key algorithm slot for @flow_cfg.
 *
 * Generates the field descriptors via set_flowkey_fields(), programs
 * them into NIX_AF_RX_FLOW_KEY_ALGX_FIELDX, records flow_cfg for future
 * lookups and returns the reserved algorithm index.
 */
static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
	u64 field[FIELDS_PER_ALG];

	hw = get_nix_hw(rvu->hw, blkaddr);

	/* No room to add new flow hash algorithm */
	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
		return NIX_AF_ERR_RSS_NOSPC_ALGO;

	/* Generate algo fields for the given flow_cfg */
	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);

	/* Update ALGX_FIELDX register with generated fields */
	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,

	/* Store the flow_cfg for further lookup */
	rc = hw->flowkey.in_use;
	hw->flowkey.flowkey[rc] = flow_cfg;
	hw->flowkey.in_use++;
/* Mbox handler: select (or reserve) an RSS flow key algorithm and bind
 * it to the requester's NPC RX action.
 *
 * Tries to reuse an existing algorithm matching req->flowkey_cfg,
 * reserving a new slot only when no match exists; the chosen index is
 * returned in rsp->alg_idx.
 */
int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
					 struct nix_rss_flowkey_cfg *req,
					 struct nix_rss_flowkey_cfg_rsp *rsp)
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int alg_idx, nixlf, blkaddr;
	struct nix_hw *nix_hw;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
		return NIX_AF_ERR_AF_LF_INVALID;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);

	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
	/* Failed to get algo index from the existing list, reserve new */
		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,

	rsp->alg_idx = alg_idx;
	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
				       alg_idx, req->mcam_index);
/* Pre-reserve the standard set of RSS flow key algorithms at init.
 *
 * Clears every ALGX_FIELDX register, then reserves algorithms for the
 * common combinations: 2-tuple IP, and 4-tuple with each subset of
 * TCP/UDP/SCTP layered on top of the IP 2-tuple base (minkey_cfg).
 */
static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
	u32 flowkey_cfg, minkey_cfg;

	/* Disable all flow key algx fieldx */
	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
			rvu_write64(rvu, blkaddr,
				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),

	/* IPv4/IPv6 SIP/DIPs */
	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);

	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	minkey_cfg = flowkey_cfg;
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);

	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);

	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);

	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
		      NIX_FLOW_KEY_TYPE_UDP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);

	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
		      NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);

	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
		      NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);

	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
/* Mbox handler: set the requester's MAC address.
 *
 * Stores the address in the pfvf state, reinstalls the unicast MCAM
 * entry for the LF's RX channel, and refreshes the rxvlan entry.
 */
int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
				      struct nix_set_mac_addr *req,
				      struct msg_rsp *rsp)
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		return NIX_AF_ERR_AF_LF_INVALID;

	ether_addr_copy(pfvf->mac_addr, req->mac_addr);

	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, req->mac_addr);

	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
/* Mbox handler: set RX mode (promisc / allmulti / unicast-only).
 *
 * Installs or removes the promiscuous MCAM entry for the LF's RX
 * channel as requested, then refreshes the rxvlan entry.
 */
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
				     struct msg_rsp *rsp)
	bool allmulti = false, disable_promisc = false;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Decode requested mode; neither flag means unicast only */
	if (req->mode & NIX_RX_MODE_PROMISC)
	else if (req->mode & NIX_RX_MODE_ALLMULTI)
		disable_promisc = true;

	if (disable_promisc)
		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);

		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base, allmulti);

	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
/* Compute effective link min/max frame sizes for a frame-size request.
 *
 * An RX link is shared by a PF and all its VFs, so the link must use
 * the largest maxlen and the smallest non-zero minlen across the
 * requester, all its sibling VFs and the PF itself. The result is
 * written back into @req->maxlen / @req->minlen.
 */
static void nix_find_link_frs(struct rvu *rvu,
			      struct nix_frs_cfg *req, u16 pcifunc)
	int pf = rvu_get_pf(pcifunc);
	struct rvu_pfvf *pfvf;

	/* Update with requester's min/max lengths */
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	pfvf->maxlen = req->maxlen;
	if (req->update_minlen)
		pfvf->minlen = req->minlen;

	maxlen = req->maxlen;
	minlen = req->update_minlen ? req->minlen : 0;

	/* Get this PF's numVFs and starting hwvf */
	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

	/* For each VF, compare requested max/minlen */
	for (vf = 0; vf < numvfs; vf++) {
		pfvf = &rvu->hwvf[hwvf + vf];
		if (pfvf->maxlen > maxlen)
			maxlen = pfvf->maxlen;
		if (req->update_minlen &&
		    pfvf->minlen && pfvf->minlen < minlen)
			minlen = pfvf->minlen;

	/* Compare requested max/minlen with PF's max/minlen */
	pfvf = &rvu->pf[pf];
	if (pfvf->maxlen > maxlen)
		maxlen = pfvf->maxlen;
	if (req->update_minlen &&
	    pfvf->minlen && pfvf->minlen < minlen)
		minlen = pfvf->minlen;

	/* Update the request with max/min PF's and it's VF's max/min */
	req->maxlen = maxlen;
	if (req->update_minlen)
		req->minlen = minlen;
/* Mbox handler: configure HW min/max frame sizes.
 *
 * Optionally updates every SMQ owned by the requester, then updates
 * the RX link config (SDP, CGX-mapped or LBK link as appropriate,
 * using the aggregated lengths from nix_find_link_frs()) and finally
 * recomputes TX credits on CGX links based on LMAC FIFO length.
 */
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
				    struct msg_rsp *rsp)
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	int blkaddr, schq, link = -1;
	struct nix_txsch *txsch;
	u64 cfg, lmac_fifo_len;
	struct nix_hw *nix_hw;
	u8 cgx = 0, lmac = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);

	if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
		return NIX_AF_ERR_FRS_INVALID;

	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
		return NIX_AF_ERR_FRS_INVALID;

	/* Check if requester wants to update SMQ's */
	if (!req->update_smq)

	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	mutex_lock(&rvu->rsrc_lock);
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
		/* maxlen lives in bits 23:8, minlen in bits 6:0 */
		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
		if (req->update_minlen)
			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
	mutex_unlock(&rvu->rsrc_lock);

	/* Check if config is for SDP link */
	if (req->sdp_link) {
			return NIX_AF_ERR_RX_LINK_INVALID;
		link = hw->cgx_links + hw->lbk_links;

	/* Check if the request is from CGX mapped RVU PF */
	if (is_pf_cgxmapped(rvu, pf)) {
		/* Get CGX and LMAC to which this PF is mapped and find link */
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
		link = (cgx * hw->lmac_per_cgx) + lmac;
	} else if (pf == 0) {
		/* For VFs of PF0 ingress is LBK port, so config LBK link */
		link = hw->cgx_links;

		return NIX_AF_ERR_RX_LINK_INVALID;

	nix_find_link_frs(rvu, req, pcifunc);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
	if (req->update_minlen)
		cfg = (cfg & ~0xFFFFULL) | req->minlen;
	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);

	if (req->sdp_link || pf == 0)

	/* Update transmit credits for CGX links */
		CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
	cfg &= ~(0xFFFFFULL << 12);
	cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
/* Mbox handler: allocate an MCAM entry for RX VLAN offload.
 *
 * Allocates one NPC MCAM entry for the requester, records its index in
 * pfvf->rxvlan_index and enables the offload via
 * rvu_npc_update_rxvlan(). On any failure after allocation, the entry
 * is freed again and pfvf->rxvlan is cleared. LBK (AF's) VFs are
 * skipped as they have no separate unicast MCAM entry.
 */
int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
				      struct msg_rsp *rsp)
	struct npc_mcam_alloc_entry_req alloc_req = { };
	struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
	struct npc_mcam_free_entry_req free_req = { };
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	/* LBK VFs do not have separate MCAM UCAST entry hence
	 * skip allocating rxvlan for them
	 */
	if (is_afvf(pcifunc))

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* alloc new mcam entry */
	alloc_req.hdr.pcifunc = pcifunc;
	alloc_req.count = 1;

	err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,

	/* update entry to enable rxvlan offload */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
		err = NIX_AF_ERR_AF_LF_INVALID;

	nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
		err = NIX_AF_ERR_AF_LF_INVALID;

	pfvf->rxvlan_index = alloc_rsp.entry_list[0];
	/* all it means is that rxvlan_index is valid */
	pfvf->rxvlan = true;

	err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);

	/* Error path: return the MCAM entry and disable the offload */
	free_req.hdr.pcifunc = pcifunc;
	free_req.entry = alloc_rsp.entry_list[0];
	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
	pfvf->rxvlan = false;
/* Mbox handler: set per-LF RX config flags.
 *
 * Read-modify-writes NIX_AF_LFX_RX_CFG: len_verify bit 0 / bit 1
 * control length-check bits 41 / 40, csum_verify bit 0 controls
 * checksum-check bit 37 (see the clears below).
 */
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
				    struct msg_rsp *rsp)
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
		return NIX_AF_ERR_AF_LF_INVALID;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
	/* Set the interface configuration */
	if (req->len_verify & BIT(0))
		cfg &= ~BIT_ULL(41);

	if (req->len_verify & BIT(1))
		cfg &= ~BIT_ULL(40);

	if (req->csum_verify & BIT(0))
		cfg &= ~BIT_ULL(37);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
/* Initialize default frame sizes and TX credits on all NIX links.
 *
 * Sets min/max packet lengths on every CGX/LBK (and SDP) RX link, then
 * programs TX credits per link assuming the maximum frame size; these
 * credits are later refined when PF/VF MTUs are configured.
 */
static void nix_link_config(struct rvu *rvu, int blkaddr)
	struct rvu_hwinfo *hw = rvu->hw;
	int cgx, lmac_cnt, slink, link;

	/* Set default min/max packet lengths allowed on NIX Rx links.
	 *
	 * With HW reset minlen value of 60byte, HW will treat ARP pkts
	 * as undersize and report them to SW as error pkts, hence
	 * setting it to 40 bytes.
	 */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);

	if (hw->sdp_links) {
		link = hw->cgx_links + hw->lbk_links;
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);

	/* Set credits for Tx links assuming max packet length allowed.
	 * This will be reconfigured based on MTU set for PF/VF.
	 */
	for (cgx = 0; cgx < hw->cgx; cgx++) {
		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
		/* FIFO is shared across the CGX's LMACs; 16-byte credit units */
		tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
		/* Enable credits and set credit pkt count to max allowed */
		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
		slink = cgx * hw->lmac_per_cgx;
		for (link = slink; link < (slink + lmac_cnt); link++) {
			rvu_write64(rvu, blkaddr,
				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
			rvu_write64(rvu, blkaddr,
				    NIX_AF_TX_LINKX_EXPR_CREDIT(link),

	/* Set Tx credits for LBK link */
	slink = hw->cgx_links;
	for (link = slink; link < (slink + hw->lbk_links); link++) {
		tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
		/* Enable credits and set credit pkt count to max allowed */
		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
/* Run X2P bus calibration and verify that all CGX ports and the LBK
 * block responded. Sets then clears the calibrate bit (bit 9) in
 * NIX_AF_CFG, polling NIX_AF_STATUS bit 10 for completion; per-port
 * readiness is reported in status bits 16+ (CGX) and 19 (LBK).
 * Returns the poll/calibration error code (0 on success).
 */
2604 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
2609 /* Start X2P bus calibration */
2610 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2611 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
2612 /* Wait for calibration to complete */
2613 err = rvu_poll_reg(rvu, blkaddr,
2614 NIX_AF_STATUS, BIT_ULL(10), false);
2616 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
2620 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
2621 /* Check if CGX devices are ready */
2622 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
2623 /* Skip when cgx port is not available */
2624 if (!rvu_cgx_pdata(idx, rvu) ||
2625 (status & (BIT_ULL(16 + idx))))
2628 "CGX%d didn't respond to NIX X2P calibration\n", idx);
2632 /* Check if LBK is ready */
2633 if (!(status & BIT_ULL(19))) {
2635 "LBK didn't respond to NIX X2P calibration\n");
2639 /* Clear 'calibrate_x2p' bit */
2640 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2641 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
/* Low status bits carry per-lane calibration failure flags */
2642 if (err || (status & 0x3FFULL))
2644 "NIX X2P calibration failed, status 0x%llx\n", status);
/* Initialize the NIX admin queue (AQ): configure endianness and NDC
 * caching in NIX_AF_CFG/NIX_AF_NDC_CFG, allocate instruction/result
 * queue memory, then program the AQ size and base IOVA registers.
 * Returns 0 on success or the rvu_aq_alloc() error code.
 */
2650 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
2655 /* Set admin queue endianness */
2656 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
/* NOTE(review): the big/little-endian #ifdef branches that modify
 * 'cfg' between these two writes are elided in this view.
 */
2659 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2662 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2665 /* Do not bypass NDC cache */
2666 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
2668 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
2670 /* Result structure can be followed by RQ/SQ/CQ context at
2671 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
2672 * operation type. Alloc sufficient result memory for all operations.
 */
2674 err = rvu_aq_alloc(rvu, &block->aq,
2675 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
2676 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
/* Program queue size and DMA base address of the instruction ring */
2680 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
2681 rvu_write64(rvu, block->addr,
2682 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
/* Top-level NIX block initialization, called once at AF probe:
 * errata workaround, X2P calibration, link-count discovery, admin
 * queue setup, LSO formats, tx scheduler/mark-format/multicast setup
 * (NIX0 only), NPC layer-info defaults for the HW protocol checker,
 * RSS flowkey algorithms, and finally link credit/length config.
 * Returns 0 on success or the first failing sub-step's error code.
 */
2686 int rvu_nix_init(struct rvu *rvu)
2688 struct rvu_hwinfo *hw = rvu->hw;
2689 struct rvu_block *block;
2693 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
2696 block = &hw->block[blkaddr];
2698 /* As per a HW errata in 9xxx A0 silicon, NIX may corrupt
2699 * internal state when conditional clocks are turned off.
2700 * Hence enable them.
 */
2702 if (is_rvu_9xxx_A0(rvu))
2703 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2704 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
2706 /* Calibrate X2P bus to check if CGX/LBK links are fine */
2707 err = nix_calibrate_x2p(rvu, blkaddr);
2711 /* Set num of links of each type */
2712 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
2713 hw->cgx = (cfg >> 12) & 0xF;
2714 hw->lmac_per_cgx = (cfg >> 8) & 0xF;
2715 hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
2719 /* Initialize admin queue */
2720 err = nix_aq_init(rvu, block);
2724 /* Restore CINT timer delay to HW reset values */
2725 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
2727 /* Configure segmentation offload formats */
2728 nix_setup_lso(rvu, blkaddr);
/* Per-block SW state (txschq, mark formats, mcast) is kept only for
 * NIX0 in this silicon generation.
 */
2730 if (blkaddr == BLKADDR_NIX0) {
2731 hw->nix0 = devm_kzalloc(rvu->dev,
2732 sizeof(struct nix_hw), GFP_KERNEL);
2736 err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
2740 err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
2744 err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
2748 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
2749 * This helps HW protocol checker to identify headers
2750 * and validate length and checksums.
 */
2752 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
2753 (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
2754 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
2755 (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
2756 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
2757 (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
2758 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
2759 (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
2760 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
2761 (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F);
2762 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
2763 (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
2764 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
2765 (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F);
2766 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
2767 (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
2768 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
2769 (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F);
2770 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
2771 (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
2772 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
2773 (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) |
/* Configure RSS flow key algorithms */
2776 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
2780 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
2781 nix_link_config(rvu, blkaddr);
/* Teardown counterpart of rvu_nix_init(): free the admin queue, the
 * per-level tx scheduler bitmaps, and the multicast context/buffer
 * queues, and destroy the mcast lock. Only NIX0 keeps SW state, so
 * the per-nix_hw cleanup is gated on BLKADDR_NIX0.
 * (hw->nix0 itself is devm-allocated and freed by the driver core.)
 */
2786 void rvu_nix_freemem(struct rvu *rvu)
2788 struct rvu_hwinfo *hw = rvu->hw;
2789 struct rvu_block *block;
2790 struct nix_txsch *txsch;
2791 struct nix_mcast *mcast;
2792 struct nix_hw *nix_hw;
2795 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
2799 block = &hw->block[blkaddr];
2800 rvu_aq_free(rvu, block->aq);
2802 if (blkaddr == BLKADDR_NIX0) {
2803 nix_hw = get_nix_hw(rvu->hw, blkaddr);
/* Free the scheduler-queue allocation bitmap of every level */
2807 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2808 txsch = &nix_hw->txsch[lvl];
2809 kfree(txsch->schq.bmap);
/* Release broadcast-replication MCE context and packet buffers */
2812 mcast = &nix_hw->mcast;
2813 qmem_free(rvu->dev, mcast->mce_ctx);
2814 qmem_free(rvu->dev, mcast->mcast_buf);
2815 mutex_destroy(&mcast->mce_lock);
/* Look up the NIX LF id attached to @pcifunc and store it in *@nixlf.
 * Returns 0 on success, NIX_AF_ERR_AF_LF_INVALID if the PF/VF has no
 * NIX LF attached or the lookup fails.
 */
2819 static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
2821 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
2822 struct rvu_hwinfo *hw = rvu->hw;
2825 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2826 if (!pfvf->nixlf || blkaddr < 0)
2827 return NIX_AF_ERR_AF_LF_INVALID;
2829 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
/* NOTE(review): the check on rvu_get_lf()'s result (likely '< 0')
 * preceding this return is elided in this view.
 */
2831 return NIX_AF_ERR_AF_LF_INVALID;
/* Mbox handler: enable packet reception for the caller's NIX LF by
 * (re-)enabling its default NPC MCAM entries (ucast/bcast/promisc).
 * Returns NIX_AF_ERR_AF_LF_INVALID if no NIX LF is attached.
 */
2836 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
2837 struct msg_rsp *rsp)
2839 u16 pcifunc = req->hdr.pcifunc;
2842 err = nix_get_nixlf(rvu, pcifunc, &nixlf);
2846 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
/* Mbox handler: stop packet reception for the caller's NIX LF by
 * disabling its default NPC MCAM entries. Mirror of
 * rvu_mbox_handler_nix_lf_start_rx().
 * Returns NIX_AF_ERR_AF_LF_INVALID if no NIX LF is attached.
 */
2850 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
2851 struct msg_rsp *rsp)
2853 u16 pcifunc = req->hdr.pcifunc;
2856 err = nix_get_nixlf(rvu, pcifunc, &nixlf);
2860 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
/* Full teardown of one NIX LF when its PF/VF is detached or reset:
 * remove NPC MCAM entries, drain in-flight Rx traffic, free Tx
 * scheduler queues, disable all SQ/RQ/CQ hardware contexts via the
 * admin queue, and release the LF's context memory.
 * Disable failures are logged but teardown continues — best effort.
 */
2864 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
2866 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
2867 struct hwctx_disable_req ctx_req;
2870 ctx_req.hdr.pcifunc = pcifunc;
2872 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
2873 nix_interface_deinit(rvu, pcifunc, nixlf);
2874 nix_rx_sync(rvu, blkaddr);
2875 nix_txschq_free(rvu, pcifunc);
/* Disable each context type in turn: SQ, then RQ, then CQ */
2878 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
2879 err = nix_lf_hwctx_disable(rvu, &ctx_req);
2881 dev_err(rvu->dev, "SQ ctx disable failed\n");
2885 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
2886 err = nix_lf_hwctx_disable(rvu, &ctx_req);
2888 dev_err(rvu->dev, "RQ ctx disable failed\n");
2892 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
2893 err = nix_lf_hwctx_disable(rvu, &ctx_req);
2895 dev_err(rvu->dev, "CQ ctx disable failed\n");
/* Finally release the LF's queue-context memory */
2898 nix_ctx_free(rvu, pfvf);