
bnxt_en: Reset device on RX buffer errors.
[android-x86/kernel.git] drivers/net/ethernet/broadcom/bnxt/bnxt.c
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2018 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/if.h>
35 #include <linux/if_vlan.h>
36 #include <linux/if_bridge.h>
37 #include <linux/rtc.h>
38 #include <linux/bpf.h>
39 #include <net/ip.h>
40 #include <net/tcp.h>
41 #include <net/udp.h>
42 #include <net/checksum.h>
43 #include <net/ip6_checksum.h>
44 #include <net/udp_tunnel.h>
45 #include <linux/workqueue.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/log2.h>
49 #include <linux/aer.h>
50 #include <linux/bitmap.h>
51 #include <linux/cpu_rmap.h>
52 #include <linux/cpumask.h>
53 #include <net/pkt_cls.h>
54 #include <linux/hwmon.h>
55 #include <linux/hwmon-sysfs.h>
56
57 #include "bnxt_hsi.h"
58 #include "bnxt.h"
59 #include "bnxt_ulp.h"
60 #include "bnxt_sriov.h"
61 #include "bnxt_ethtool.h"
62 #include "bnxt_dcb.h"
63 #include "bnxt_xdp.h"
64 #include "bnxt_vfr.h"
65 #include "bnxt_tc.h"
66 #include "bnxt_devlink.h"
67 #include "bnxt_debugfs.h"
68
69 #define BNXT_TX_TIMEOUT         (5 * HZ)
70
71 static const char version[] =
72         "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
73
74 MODULE_LICENSE("GPL");
75 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
76 MODULE_VERSION(DRV_MODULE_VERSION);
77
78 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
79 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
80 #define BNXT_RX_COPY_THRESH 256
81
82 #define BNXT_TX_PUSH_THRESH 164
83
84 enum board_idx {
85         BCM57301,
86         BCM57302,
87         BCM57304,
88         BCM57417_NPAR,
89         BCM58700,
90         BCM57311,
91         BCM57312,
92         BCM57402,
93         BCM57404,
94         BCM57406,
95         BCM57402_NPAR,
96         BCM57407,
97         BCM57412,
98         BCM57414,
99         BCM57416,
100         BCM57417,
101         BCM57412_NPAR,
102         BCM57314,
103         BCM57417_SFP,
104         BCM57416_SFP,
105         BCM57404_NPAR,
106         BCM57406_NPAR,
107         BCM57407_SFP,
108         BCM57407_NPAR,
109         BCM57414_NPAR,
110         BCM57416_NPAR,
111         BCM57452,
112         BCM57454,
113         BCM5745x_NPAR,
114         BCM58802,
115         BCM58804,
116         BCM58808,
117         NETXTREME_E_VF,
118         NETXTREME_C_VF,
119         NETXTREME_S_VF,
120 };
121
122 /* indexed by enum above */
123 static const struct {
124         char *name;
125 } board_info[] = {
126         [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
127         [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
128         [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
129         [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
130         [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
131         [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
132         [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
133         [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
134         [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
135         [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
136         [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
137         [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
138         [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
139         [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
140         [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
141         [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
142         [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
143         [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
144         [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
145         [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
146         [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
147         [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
148         [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
149         [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
150         [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
151         [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
152         [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
153         [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
154         [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
155         [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
156         [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
157         [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
158         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
159         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
160         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
161 };
162
163 static const struct pci_device_id bnxt_pci_tbl[] = {
164         { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
165         { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
166         { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
167         { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
168         { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
169         { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
170         { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
171         { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
172         { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
173         { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
174         { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
175         { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
176         { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
177         { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
178         { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
179         { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
180         { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
181         { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
182         { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
183         { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
184         { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
185         { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
186         { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
187         { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
188         { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
189         { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
190         { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
191         { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
192         { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
193         { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
194         { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
195         { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
196         { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
197         { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
198         { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
199         { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
200         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
201 #ifdef CONFIG_BNXT_SRIOV
202         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
203         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
204         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
205         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
206         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
207         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
208         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
209         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
210         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
211 #endif
212         { 0 }
213 };
214
215 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
216
217 static const u16 bnxt_vf_req_snif[] = {
218         HWRM_FUNC_CFG,
219         HWRM_FUNC_VF_CFG,
220         HWRM_PORT_PHY_QCFG,
221         HWRM_CFA_L2_FILTER_ALLOC,
222 };
223
224 static const u16 bnxt_async_events_arr[] = {
225         ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
226         ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
227         ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
228         ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
229         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
230 };
231
232 static struct workqueue_struct *bnxt_pf_wq;
233
234 static bool bnxt_vf_pciid(enum board_idx idx)
235 {
236         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
237                 idx == NETXTREME_S_VF);
238 }
239
240 #define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
241 #define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
242 #define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)
243
244 #define BNXT_CP_DB_REARM(db, raw_cons)                                  \
245                 writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
246
247 #define BNXT_CP_DB(db, raw_cons)                                        \
248                 writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
249
250 #define BNXT_CP_DB_IRQ_DIS(db)                                          \
251                 writel(DB_CP_IRQ_DIS_FLAGS, db)
252
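/* TX buffer length hints, indexed by the packet length divided by 512
 * (see bnxt_start_xmit()).  Packets of 2KB and above all map to the same
 * 2048_AND_LARGER hint.
 */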
253 const u16 bnxt_lhint_arr[] = {
254         TX_BD_FLAGS_LHINT_512_AND_SMALLER,
255         TX_BD_FLAGS_LHINT_512_TO_1023,
256         TX_BD_FLAGS_LHINT_1024_TO_2047,
257         TX_BD_FLAGS_LHINT_1024_TO_2047,
258         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
259         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
260         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
261         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
262         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
263         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
264         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
265         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
266         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
267         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
268         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
269         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
270         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
271         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
272         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
273 };
274
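/* Return the CFA action (switch port id) carried in the skb's HW port mux
 * metadata dst, or 0 if no such metadata is attached.
 */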
275 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
276 {
277         struct metadata_dst *md_dst = skb_metadata_dst(skb);
278
279         if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
280                 return 0;
281
282         return md_dst->u.port_info.port_id;
283 }
284
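/* Main transmit routine.  Small packets (length <= tx_push_thresh) sent on
 * an otherwise empty ring are copied into the push buffer and written
 * directly through the doorbell; everything else is DMA-mapped into a long
 * TX BD plus one BD per fragment, and the doorbell is rung unless more
 * packets are expected (xmit_more).
 */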
285 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
286 {
287         struct bnxt *bp = netdev_priv(dev);
288         struct tx_bd *txbd;
289         struct tx_bd_ext *txbd1;
290         struct netdev_queue *txq;
291         int i;
292         dma_addr_t mapping;
293         unsigned int length, pad = 0;
294         u32 len, free_size, vlan_tag_flags, cfa_action, flags;
295         u16 prod, last_frag;
296         struct pci_dev *pdev = bp->pdev;
297         struct bnxt_tx_ring_info *txr;
298         struct bnxt_sw_tx_bd *tx_buf;
299
300         i = skb_get_queue_mapping(skb);
301         if (unlikely(i >= bp->tx_nr_rings)) {
302                 dev_kfree_skb_any(skb);
303                 return NETDEV_TX_OK;
304         }
305
306         txq = netdev_get_tx_queue(dev, i);
307         txr = &bp->tx_ring[bp->tx_ring_map[i]];
308         prod = txr->tx_prod;
309
310         free_size = bnxt_tx_avail(bp, txr);
311         if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
312                 netif_tx_stop_queue(txq);
313                 return NETDEV_TX_BUSY;
314         }
315
316         length = skb->len;
317         len = skb_headlen(skb);
318         last_frag = skb_shinfo(skb)->nr_frags;
319
320         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
321
322         txbd->tx_bd_opaque = prod;
323
324         tx_buf = &txr->tx_buf_ring[prod];
325         tx_buf->skb = skb;
326         tx_buf->nr_frags = last_frag;
327
328         vlan_tag_flags = 0;
329         cfa_action = bnxt_xmit_get_cfa_action(skb);
330         if (skb_vlan_tag_present(skb)) {
331                 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
332                                  skb_vlan_tag_get(skb);
333                 /* Currently supports 802.1Q and 802.1ad VLAN offloads;
334                  * the QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
335                  */
336                 if (skb->vlan_proto == htons(ETH_P_8021Q))
337                         vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
338         }
339
340         if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
341                 struct tx_push_buffer *tx_push_buf = txr->tx_push;
342                 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
343                 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
344                 void *pdata = tx_push_buf->data;
345                 u64 *end;
346                 int j, push_len;
347
348                 /* Set COAL_NOW to be ready quickly for the next push */
349                 tx_push->tx_bd_len_flags_type =
350                         cpu_to_le32((length << TX_BD_LEN_SHIFT) |
351                                         TX_BD_TYPE_LONG_TX_BD |
352                                         TX_BD_FLAGS_LHINT_512_AND_SMALLER |
353                                         TX_BD_FLAGS_COAL_NOW |
354                                         TX_BD_FLAGS_PACKET_END |
355                                         (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
356
357                 if (skb->ip_summed == CHECKSUM_PARTIAL)
358                         tx_push1->tx_bd_hsize_lflags =
359                                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
360                 else
361                         tx_push1->tx_bd_hsize_lflags = 0;
362
363                 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
364                 tx_push1->tx_bd_cfa_action =
365                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
366
367                 end = pdata + length;
368                 end = PTR_ALIGN(end, 8) - 1;
369                 *end = 0;
370
371                 skb_copy_from_linear_data(skb, pdata, len);
372                 pdata += len;
373                 for (j = 0; j < last_frag; j++) {
374                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
375                         void *fptr;
376
377                         fptr = skb_frag_address_safe(frag);
378                         if (!fptr)
379                                 goto normal_tx;
380
381                         memcpy(pdata, fptr, skb_frag_size(frag));
382                         pdata += skb_frag_size(frag);
383                 }
384
385                 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
386                 txbd->tx_bd_haddr = txr->data_mapping;
387                 prod = NEXT_TX(prod);
388                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
389                 memcpy(txbd, tx_push1, sizeof(*txbd));
390                 prod = NEXT_TX(prod);
391                 tx_push->doorbell =
392                         cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
393                 txr->tx_prod = prod;
394
395                 tx_buf->is_push = 1;
396                 netdev_tx_sent_queue(txq, skb->len);
397                 wmb();  /* Sync is_push and byte queue before pushing data */
398
399                 push_len = (length + sizeof(*tx_push) + 7) / 8;
400                 if (push_len > 16) {
401                         __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
402                         __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
403                                          (push_len - 16) << 1);
404                 } else {
405                         __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
406                                          push_len);
407                 }
408
409                 goto tx_done;
410         }
411
412 normal_tx:
413         if (length < BNXT_MIN_PKT_SIZE) {
414                 pad = BNXT_MIN_PKT_SIZE - length;
415                 if (skb_pad(skb, pad)) {
416                         /* SKB already freed. */
417                         tx_buf->skb = NULL;
418                         return NETDEV_TX_OK;
419                 }
420                 length = BNXT_MIN_PKT_SIZE;
421         }
422
423         mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
424
425         if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
426                 dev_kfree_skb_any(skb);
427                 tx_buf->skb = NULL;
428                 return NETDEV_TX_OK;
429         }
430
431         dma_unmap_addr_set(tx_buf, mapping, mapping);
432         flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
433                 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
434
435         txbd->tx_bd_haddr = cpu_to_le64(mapping);
436
437         prod = NEXT_TX(prod);
438         txbd1 = (struct tx_bd_ext *)
439                 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
440
441         txbd1->tx_bd_hsize_lflags = 0;
442         if (skb_is_gso(skb)) {
443                 u32 hdr_len;
444
445                 if (skb->encapsulation)
446                         hdr_len = skb_inner_network_offset(skb) +
447                                 skb_inner_network_header_len(skb) +
448                                 inner_tcp_hdrlen(skb);
449                 else
450                         hdr_len = skb_transport_offset(skb) +
451                                 tcp_hdrlen(skb);
452
453                 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
454                                         TX_BD_FLAGS_T_IPID |
455                                         (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
456                 length = skb_shinfo(skb)->gso_size;
457                 txbd1->tx_bd_mss = cpu_to_le32(length);
458                 length += hdr_len;
459         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
460                 txbd1->tx_bd_hsize_lflags =
461                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
462                 txbd1->tx_bd_mss = 0;
463         }
464
465         length >>= 9;
466         if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
467                 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
468                                      skb->len);
469                 i = 0;
470                 goto tx_dma_error;
471         }
472         flags |= bnxt_lhint_arr[length];
473         txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
474
475         txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
476         txbd1->tx_bd_cfa_action =
477                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
478         for (i = 0; i < last_frag; i++) {
479                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
480
481                 prod = NEXT_TX(prod);
482                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
483
484                 len = skb_frag_size(frag);
485                 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
486                                            DMA_TO_DEVICE);
487
488                 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
489                         goto tx_dma_error;
490
491                 tx_buf = &txr->tx_buf_ring[prod];
492                 dma_unmap_addr_set(tx_buf, mapping, mapping);
493
494                 txbd->tx_bd_haddr = cpu_to_le64(mapping);
495
496                 flags = len << TX_BD_LEN_SHIFT;
497                 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
498         }
499
500         flags &= ~TX_BD_LEN;
501         txbd->tx_bd_len_flags_type =
502                 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
503                             TX_BD_FLAGS_PACKET_END);
504
505         netdev_tx_sent_queue(txq, skb->len);
506
507         /* Sync BD data before updating doorbell */
508         wmb();
509
510         prod = NEXT_TX(prod);
511         txr->tx_prod = prod;
512
513         if (!skb->xmit_more || netif_xmit_stopped(txq))
514                 bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
515
516 tx_done:
517
518         mmiowb();
519
520         if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
521                 if (skb->xmit_more && !tx_buf->is_push)
522                         bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
523
524                 netif_tx_stop_queue(txq);
525
526                 /* netif_tx_stop_queue() must be done before checking
527                  * tx index in bnxt_tx_avail() below, because in
528                  * bnxt_tx_int(), we update tx index before checking for
529                  * netif_tx_queue_stopped().
530                  */
531                 smp_mb();
532                 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
533                         netif_tx_wake_queue(txq);
534         }
535         return NETDEV_TX_OK;
536
537 tx_dma_error:
538         last_frag = i;
539
540         /* start back at beginning and unmap skb */
541         prod = txr->tx_prod;
542         tx_buf = &txr->tx_buf_ring[prod];
543         tx_buf->skb = NULL;
544         dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
545                          skb_headlen(skb), PCI_DMA_TODEVICE);
546         prod = NEXT_TX(prod);
547
548         /* unmap remaining mapped pages */
549         for (i = 0; i < last_frag; i++) {
550                 prod = NEXT_TX(prod);
551                 tx_buf = &txr->tx_buf_ring[prod];
552                 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
553                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
554                                PCI_DMA_TODEVICE);
555         }
556
557         dev_kfree_skb_any(skb);
558         return NETDEV_TX_OK;
559 }
560
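/* TX completion: unmap and free nr_pkts completed packets starting at
 * tx_cons, report the completed bytes to BQL, and re-wake the queue if it
 * was stopped and enough descriptors are now free.
 */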
561 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
562 {
563         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
564         struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
565         u16 cons = txr->tx_cons;
566         struct pci_dev *pdev = bp->pdev;
567         int i;
568         unsigned int tx_bytes = 0;
569
570         for (i = 0; i < nr_pkts; i++) {
571                 struct bnxt_sw_tx_bd *tx_buf;
572                 struct sk_buff *skb;
573                 int j, last;
574
575                 tx_buf = &txr->tx_buf_ring[cons];
576                 cons = NEXT_TX(cons);
577                 skb = tx_buf->skb;
578                 tx_buf->skb = NULL;
579
580                 if (tx_buf->is_push) {
581                         tx_buf->is_push = 0;
582                         goto next_tx_int;
583                 }
584
585                 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
586                                  skb_headlen(skb), PCI_DMA_TODEVICE);
587                 last = tx_buf->nr_frags;
588
589                 for (j = 0; j < last; j++) {
590                         cons = NEXT_TX(cons);
591                         tx_buf = &txr->tx_buf_ring[cons];
592                         dma_unmap_page(
593                                 &pdev->dev,
594                                 dma_unmap_addr(tx_buf, mapping),
595                                 skb_frag_size(&skb_shinfo(skb)->frags[j]),
596                                 PCI_DMA_TODEVICE);
597                 }
598
599 next_tx_int:
600                 cons = NEXT_TX(cons);
601
602                 tx_bytes += skb->len;
603                 dev_kfree_skb_any(skb);
604         }
605
606         netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
607         txr->tx_cons = cons;
608
609         /* Need to make the tx_cons update visible to bnxt_start_xmit()
610          * before checking for netif_tx_queue_stopped().  Without the
611          * memory barrier, there is a small possibility that bnxt_start_xmit()
612          * will miss it and cause the queue to be stopped forever.
613          */
614         smp_mb();
615
616         if (unlikely(netif_tx_queue_stopped(txq)) &&
617             (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
618                 __netif_tx_lock(txq, smp_processor_id());
619                 if (netif_tx_queue_stopped(txq) &&
620                     bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
621                     txr->dev_state != BNXT_DEV_STATE_CLOSING)
622                         netif_tx_wake_queue(txq);
623                 __netif_tx_unlock(txq);
624         }
625 }
626
627 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
628                                          gfp_t gfp)
629 {
630         struct device *dev = &bp->pdev->dev;
631         struct page *page;
632
633         page = alloc_page(gfp);
634         if (!page)
635                 return NULL;
636
637         *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
638                                       DMA_ATTR_WEAK_ORDERING);
639         if (dma_mapping_error(dev, *mapping)) {
640                 __free_page(page);
641                 return NULL;
642         }
643         *mapping += bp->rx_dma_offset;
644         return page;
645 }
646
647 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
648                                        gfp_t gfp)
649 {
650         u8 *data;
651         struct pci_dev *pdev = bp->pdev;
652
653         data = kmalloc(bp->rx_buf_size, gfp);
654         if (!data)
655                 return NULL;
656
657         *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
658                                         bp->rx_buf_use_size, bp->rx_dir,
659                                         DMA_ATTR_WEAK_ORDERING);
660
661         if (dma_mapping_error(&pdev->dev, *mapping)) {
662                 kfree(data);
663                 data = NULL;
664         }
665         return data;
666 }
667
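/* Fill the RX BD at 'prod' with a freshly allocated buffer: a full page in
 * page mode, otherwise a kmalloc'ed data buffer mapped for DMA.
 */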
668 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
669                        u16 prod, gfp_t gfp)
670 {
671         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
672         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
673         dma_addr_t mapping;
674
675         if (BNXT_RX_PAGE_MODE(bp)) {
676                 struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
677
678                 if (!page)
679                         return -ENOMEM;
680
681                 rx_buf->data = page;
682                 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
683         } else {
684                 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
685
686                 if (!data)
687                         return -ENOMEM;
688
689                 rx_buf->data = data;
690                 rx_buf->data_ptr = data + bp->rx_offset;
691         }
692         rx_buf->mapping = mapping;
693
694         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
695         return 0;
696 }
697
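/* Recycle the buffer at 'cons' back onto the RX ring at the current
 * producer index by copying its data pointer, DMA mapping and BD address.
 */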
698 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
699 {
700         u16 prod = rxr->rx_prod;
701         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
702         struct rx_bd *cons_bd, *prod_bd;
703
704         prod_rx_buf = &rxr->rx_buf_ring[prod];
705         cons_rx_buf = &rxr->rx_buf_ring[cons];
706
707         prod_rx_buf->data = data;
708         prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
709
710         prod_rx_buf->mapping = cons_rx_buf->mapping;
711
712         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
713         cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
714
715         prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
716 }
717
718 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
719 {
720         u16 next, max = rxr->rx_agg_bmap_size;
721
722         next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
723         if (next >= max)
724                 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
725         return next;
726 }
727
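/* Allocate and map one aggregation ring buffer at 'prod'.  When the system
 * page size is larger than BNXT_RX_PAGE_SIZE, a single page is carved into
 * BNXT_RX_PAGE_SIZE chunks that are handed out one at a time.
 */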
728 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
729                                      struct bnxt_rx_ring_info *rxr,
730                                      u16 prod, gfp_t gfp)
731 {
732         struct rx_bd *rxbd =
733                 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
734         struct bnxt_sw_rx_agg_bd *rx_agg_buf;
735         struct pci_dev *pdev = bp->pdev;
736         struct page *page;
737         dma_addr_t mapping;
738         u16 sw_prod = rxr->rx_sw_agg_prod;
739         unsigned int offset = 0;
740
741         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
742                 page = rxr->rx_page;
743                 if (!page) {
744                         page = alloc_page(gfp);
745                         if (!page)
746                                 return -ENOMEM;
747                         rxr->rx_page = page;
748                         rxr->rx_page_offset = 0;
749                 }
750                 offset = rxr->rx_page_offset;
751                 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
752                 if (rxr->rx_page_offset == PAGE_SIZE)
753                         rxr->rx_page = NULL;
754                 else
755                         get_page(page);
756         } else {
757                 page = alloc_page(gfp);
758                 if (!page)
759                         return -ENOMEM;
760         }
761
762         mapping = dma_map_page_attrs(&pdev->dev, page, offset,
763                                      BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
764                                      DMA_ATTR_WEAK_ORDERING);
765         if (dma_mapping_error(&pdev->dev, mapping)) {
766                 __free_page(page);
767                 return -EIO;
768         }
769
770         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
771                 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
772
773         __set_bit(sw_prod, rxr->rx_agg_bmap);
774         rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
775         rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
776
777         rx_agg_buf->page = page;
778         rx_agg_buf->offset = offset;
779         rx_agg_buf->mapping = mapping;
780         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
781         rxbd->rx_bd_opaque = sw_prod;
782         return 0;
783 }
784
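/* Return 'agg_bufs' aggregation buffers described by the completion ring
 * entries starting at 'cp_cons' to the aggregation ring for reuse.
 */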
785 static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
786                                    u32 agg_bufs)
787 {
788         struct bnxt *bp = bnapi->bp;
789         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
790         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
791         u16 prod = rxr->rx_agg_prod;
792         u16 sw_prod = rxr->rx_sw_agg_prod;
793         u32 i;
794
795         for (i = 0; i < agg_bufs; i++) {
796                 u16 cons;
797                 struct rx_agg_cmp *agg;
798                 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
799                 struct rx_bd *prod_bd;
800                 struct page *page;
801
802                 agg = (struct rx_agg_cmp *)
803                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
804                 cons = agg->rx_agg_cmp_opaque;
805                 __clear_bit(cons, rxr->rx_agg_bmap);
806
807                 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
808                         sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
809
810                 __set_bit(sw_prod, rxr->rx_agg_bmap);
811                 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
812                 cons_rx_buf = &rxr->rx_agg_ring[cons];
813
814                 /* It is possible for sw_prod to be equal to cons, so
815                  * set cons_rx_buf->page to NULL first.
816                  */
817                 page = cons_rx_buf->page;
818                 cons_rx_buf->page = NULL;
819                 prod_rx_buf->page = page;
820                 prod_rx_buf->offset = cons_rx_buf->offset;
821
822                 prod_rx_buf->mapping = cons_rx_buf->mapping;
823
824                 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
825
826                 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
827                 prod_bd->rx_bd_opaque = sw_prod;
828
829                 prod = NEXT_RX_AGG(prod);
830                 sw_prod = NEXT_RX_AGG(sw_prod);
831                 cp_cons = NEXT_CMP(cp_cons);
832         }
833         rxr->rx_agg_prod = prod;
834         rxr->rx_sw_agg_prod = sw_prod;
835 }
836
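/* Build an skb for a page mode RX buffer.  The headers (up to 'payload'
 * bytes) are copied into the skb linear area; the remaining data stays in
 * the page, attached as the first fragment.
 */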
837 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
838                                         struct bnxt_rx_ring_info *rxr,
839                                         u16 cons, void *data, u8 *data_ptr,
840                                         dma_addr_t dma_addr,
841                                         unsigned int offset_and_len)
842 {
843         unsigned int payload = offset_and_len >> 16;
844         unsigned int len = offset_and_len & 0xffff;
845         struct skb_frag_struct *frag;
846         struct page *page = data;
847         u16 prod = rxr->rx_prod;
848         struct sk_buff *skb;
849         int off, err;
850
851         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
852         if (unlikely(err)) {
853                 bnxt_reuse_rx_data(rxr, cons, data);
854                 return NULL;
855         }
856         dma_addr -= bp->rx_dma_offset;
857         dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
858                              DMA_ATTR_WEAK_ORDERING);
859
860         if (unlikely(!payload))
861                 payload = eth_get_headlen(data_ptr, len);
862
863         skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
864         if (!skb) {
865                 __free_page(page);
866                 return NULL;
867         }
868
869         off = (void *)data_ptr - page_address(page);
870         skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
871         memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
872                payload + NET_IP_ALIGN);
873
874         frag = &skb_shinfo(skb)->frags[0];
875         skb_frag_size_sub(frag, payload);
876         frag->page_offset += payload;
877         skb->data_len -= payload;
878         skb->tail += payload;
879
880         return skb;
881 }
882
883 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
884                                    struct bnxt_rx_ring_info *rxr, u16 cons,
885                                    void *data, u8 *data_ptr,
886                                    dma_addr_t dma_addr,
887                                    unsigned int offset_and_len)
888 {
889         u16 prod = rxr->rx_prod;
890         struct sk_buff *skb;
891         int err;
892
893         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
894         if (unlikely(err)) {
895                 bnxt_reuse_rx_data(rxr, cons, data);
896                 return NULL;
897         }
898
899         skb = build_skb(data, 0);
900         dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
901                                bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
902         if (!skb) {
903                 kfree(data);
904                 return NULL;
905         }
906
907         skb_reserve(skb, bp->rx_offset);
908         skb_put(skb, offset_and_len & 0xffff);
909         return skb;
910 }
911
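/* Attach 'agg_bufs' aggregation pages from the completion ring to the skb
 * as page fragments.  If replenishing the aggregation ring fails, the
 * remaining buffers are recycled and the skb is dropped.
 */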
912 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
913                                      struct sk_buff *skb, u16 cp_cons,
914                                      u32 agg_bufs)
915 {
916         struct pci_dev *pdev = bp->pdev;
917         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
918         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
919         u16 prod = rxr->rx_agg_prod;
920         u32 i;
921
922         for (i = 0; i < agg_bufs; i++) {
923                 u16 cons, frag_len;
924                 struct rx_agg_cmp *agg;
925                 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
926                 struct page *page;
927                 dma_addr_t mapping;
928
929                 agg = (struct rx_agg_cmp *)
930                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
931                 cons = agg->rx_agg_cmp_opaque;
932                 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
933                             RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
934
935                 cons_rx_buf = &rxr->rx_agg_ring[cons];
936                 skb_fill_page_desc(skb, i, cons_rx_buf->page,
937                                    cons_rx_buf->offset, frag_len);
938                 __clear_bit(cons, rxr->rx_agg_bmap);
939
940                 /* It is possible for bnxt_alloc_rx_page() to allocate
941                  * a sw_prod index that equals the cons index, so we
942                  * need to clear the cons entry now.
943                  */
944                 mapping = cons_rx_buf->mapping;
945                 page = cons_rx_buf->page;
946                 cons_rx_buf->page = NULL;
947
948                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
949                         struct skb_shared_info *shinfo;
950                         unsigned int nr_frags;
951
952                         shinfo = skb_shinfo(skb);
953                         nr_frags = --shinfo->nr_frags;
954                         __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
955
956                         dev_kfree_skb(skb);
957
958                         cons_rx_buf->page = page;
959
960                         /* Update prod since possibly some pages have been
961                          * allocated already.
962                          */
963                         rxr->rx_agg_prod = prod;
964                         bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
965                         return NULL;
966                 }
967
968                 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
969                                      PCI_DMA_FROMDEVICE,
970                                      DMA_ATTR_WEAK_ORDERING);
971
972                 skb->data_len += frag_len;
973                 skb->len += frag_len;
974                 skb->truesize += PAGE_SIZE;
975
976                 prod = NEXT_RX_AGG(prod);
977                 cp_cons = NEXT_CMP(cp_cons);
978         }
979         rxr->rx_agg_prod = prod;
980         return skb;
981 }
982
983 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
984                                u8 agg_bufs, u32 *raw_cons)
985 {
986         u16 last;
987         struct rx_agg_cmp *agg;
988
989         *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
990         last = RING_CMP(*raw_cons);
991         agg = (struct rx_agg_cmp *)
992                 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
993         return RX_AGG_CMP_VALID(agg, *raw_cons);
994 }
995
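/* Copy a small received packet (len <= rx_copy_thresh) into a newly
 * allocated skb so that the original RX buffer can be reused in place.
 */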
996 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
997                                             unsigned int len,
998                                             dma_addr_t mapping)
999 {
1000         struct bnxt *bp = bnapi->bp;
1001         struct pci_dev *pdev = bp->pdev;
1002         struct sk_buff *skb;
1003
1004         skb = napi_alloc_skb(&bnapi->napi, len);
1005         if (!skb)
1006                 return NULL;
1007
1008         dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1009                                 bp->rx_dir);
1010
1011         memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1012                len + NET_IP_ALIGN);
1013
1014         dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1015                                    bp->rx_dir);
1016
1017         skb_put(skb, len);
1018         return skb;
1019 }
1020
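/* Skip over an RX (or TPA end) completion and any aggregation completions
 * that follow it without processing the packet.  Returns -EBUSY if the
 * aggregation completions have not all been written yet.
 */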
1021 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
1022                            u32 *raw_cons, void *cmp)
1023 {
1024         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1025         struct rx_cmp *rxcmp = cmp;
1026         u32 tmp_raw_cons = *raw_cons;
1027         u8 cmp_type, agg_bufs = 0;
1028
1029         cmp_type = RX_CMP_TYPE(rxcmp);
1030
1031         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1032                 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1033                             RX_CMP_AGG_BUFS) >>
1034                            RX_CMP_AGG_BUFS_SHIFT;
1035         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1036                 struct rx_tpa_end_cmp *tpa_end = cmp;
1037
1038                 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1039                             RX_TPA_END_CMP_AGG_BUFS) >>
1040                            RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1041         }
1042
1043         if (agg_bufs) {
1044                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1045                         return -EBUSY;
1046         }
1047         *raw_cons = tmp_raw_cons;
1048         return 0;
1049 }
1050
1051 static void bnxt_queue_sp_work(struct bnxt *bp)
1052 {
1053         if (BNXT_PF(bp))
1054                 queue_work(bnxt_pf_wq, &bp->sp_task);
1055         else
1056                 schedule_work(&bp->sp_task);
1057 }
1058
1059 static void bnxt_cancel_sp_work(struct bnxt *bp)
1060 {
1061         if (BNXT_PF(bp))
1062                 flush_workqueue(bnxt_pf_wq);
1063         else
1064                 cancel_work_sync(&bp->sp_task);
1065 }
1066
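/* Schedule a reset from the slow path task and mark the RX ring so that
 * subsequent completions are discarded until the reset completes.
 */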
1067 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1068 {
1069         if (!rxr->bnapi->in_reset) {
1070                 rxr->bnapi->in_reset = true;
1071                 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1072                 bnxt_queue_sp_work(bp);
1073         }
1074         rxr->rx_next_cons = 0xffff;
1075 }
1076
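/* Start of a TPA aggregation: swap the buffer at 'cons' into rx_tpa[agg_id]
 * (the previous TPA buffer is placed back on the RX ring at the producer
 * index) and record the hash, metadata and cfa_code from the two start
 * completion records for use at TPA end time.
 */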
1077 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1078                            struct rx_tpa_start_cmp *tpa_start,
1079                            struct rx_tpa_start_cmp_ext *tpa_start1)
1080 {
1081         u8 agg_id = TPA_START_AGG_ID(tpa_start);
1082         u16 cons, prod;
1083         struct bnxt_tpa_info *tpa_info;
1084         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1085         struct rx_bd *prod_bd;
1086         dma_addr_t mapping;
1087
1088         cons = tpa_start->rx_tpa_start_cmp_opaque;
1089         prod = rxr->rx_prod;
1090         cons_rx_buf = &rxr->rx_buf_ring[cons];
1091         prod_rx_buf = &rxr->rx_buf_ring[prod];
1092         tpa_info = &rxr->rx_tpa[agg_id];
1093
1094         if (unlikely(cons != rxr->rx_next_cons)) {
1095                 netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
1096                             cons, rxr->rx_next_cons);
1097                 bnxt_sched_reset(bp, rxr);
1098                 return;
1099         }
1100         /* Store cfa_code in tpa_info to use in tpa_end
1101          * completion processing.
1102          */
1103         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1104         prod_rx_buf->data = tpa_info->data;
1105         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1106
1107         mapping = tpa_info->mapping;
1108         prod_rx_buf->mapping = mapping;
1109
1110         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1111
1112         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1113
1114         tpa_info->data = cons_rx_buf->data;
1115         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1116         cons_rx_buf->data = NULL;
1117         tpa_info->mapping = cons_rx_buf->mapping;
1118
1119         tpa_info->len =
1120                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1121                                 RX_TPA_START_CMP_LEN_SHIFT;
1122         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1123                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1124
1125                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1126                 tpa_info->gso_type = SKB_GSO_TCPV4;
1127                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1128                 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1129                         tpa_info->gso_type = SKB_GSO_TCPV6;
1130                 tpa_info->rss_hash =
1131                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1132         } else {
1133                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1134                 tpa_info->gso_type = 0;
1135                 if (netif_msg_rx_err(bp))
1136                         netdev_warn(bp->dev, "TPA packet without valid hash\n");
1137         }
1138         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1139         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1140         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1141
1142         rxr->rx_prod = NEXT_RX(prod);
1143         cons = NEXT_RX(cons);
1144         rxr->rx_next_cons = NEXT_RX(cons);
1145         cons_rx_buf = &rxr->rx_buf_ring[cons];
1146
1147         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1148         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1149         cons_rx_buf->data = NULL;
1150 }
1151
1152 static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
1153                            u16 cp_cons, u32 agg_bufs)
1154 {
1155         if (agg_bufs)
1156                 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1157 }
1158
1159 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1160                                            int payload_off, int tcp_ts,
1161                                            struct sk_buff *skb)
1162 {
1163 #ifdef CONFIG_INET
1164         struct tcphdr *th;
1165         int len, nw_off;
1166         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1167         u32 hdr_info = tpa_info->hdr_info;
1168         bool loopback = false;
1169
1170         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1171         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1172         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1173
1174         /* If the packet is an internal loopback packet, the offsets will
1175          * have an extra 4 bytes.
1176          */
1177         if (inner_mac_off == 4) {
1178                 loopback = true;
1179         } else if (inner_mac_off > 4) {
1180                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1181                                             ETH_HLEN - 2));
1182
1183                 /* We only support inner IPv4/IPv6.  If we don't see the
1184                  * correct protocol ID, it must be a loopback packet where
1185                  * the offsets are off by 4.
1186                  */
1187                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1188                         loopback = true;
1189         }
1190         if (loopback) {
1191                 /* internal loopback packet, subtract all offsets by 4 */
1192                 inner_ip_off -= 4;
1193                 inner_mac_off -= 4;
1194                 outer_ip_off -= 4;
1195         }
1196
1197         nw_off = inner_ip_off - ETH_HLEN;
1198         skb_set_network_header(skb, nw_off);
1199         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1200                 struct ipv6hdr *iph = ipv6_hdr(skb);
1201
1202                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1203                 len = skb->len - skb_transport_offset(skb);
1204                 th = tcp_hdr(skb);
1205                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1206         } else {
1207                 struct iphdr *iph = ip_hdr(skb);
1208
1209                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1210                 len = skb->len - skb_transport_offset(skb);
1211                 th = tcp_hdr(skb);
1212                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1213         }
1214
1215         if (inner_mac_off) { /* tunnel */
1216                 struct udphdr *uh = NULL;
1217                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1218                                             ETH_HLEN - 2));
1219
1220                 if (proto == htons(ETH_P_IP)) {
1221                         struct iphdr *iph = (struct iphdr *)skb->data;
1222
1223                         if (iph->protocol == IPPROTO_UDP)
1224                                 uh = (struct udphdr *)(iph + 1);
1225                 } else {
1226                         struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1227
1228                         if (iph->nexthdr == IPPROTO_UDP)
1229                                 uh = (struct udphdr *)(iph + 1);
1230                 }
1231                 if (uh) {
1232                         if (uh->check)
1233                                 skb_shinfo(skb)->gso_type |=
1234                                         SKB_GSO_UDP_TUNNEL_CSUM;
1235                         else
1236                                 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1237                 }
1238         }
1239 #endif
1240         return skb;
1241 }
1242
1243 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1244 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1245
1246 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1247                                            int payload_off, int tcp_ts,
1248                                            struct sk_buff *skb)
1249 {
1250 #ifdef CONFIG_INET
1251         struct tcphdr *th;
1252         int len, nw_off, tcp_opt_len = 0;
1253
1254         if (tcp_ts)
1255                 tcp_opt_len = 12;
1256
1257         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1258                 struct iphdr *iph;
1259
1260                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1261                          ETH_HLEN;
1262                 skb_set_network_header(skb, nw_off);
1263                 iph = ip_hdr(skb);
1264                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1265                 len = skb->len - skb_transport_offset(skb);
1266                 th = tcp_hdr(skb);
1267                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1268         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1269                 struct ipv6hdr *iph;
1270
1271                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1272                          ETH_HLEN;
1273                 skb_set_network_header(skb, nw_off);
1274                 iph = ipv6_hdr(skb);
1275                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1276                 len = skb->len - skb_transport_offset(skb);
1277                 th = tcp_hdr(skb);
1278                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1279         } else {
1280                 dev_kfree_skb_any(skb);
1281                 return NULL;
1282         }
1283
1284         if (nw_off) { /* tunnel */
1285                 struct udphdr *uh = NULL;
1286
1287                 if (skb->protocol == htons(ETH_P_IP)) {
1288                         struct iphdr *iph = (struct iphdr *)skb->data;
1289
1290                         if (iph->protocol == IPPROTO_UDP)
1291                                 uh = (struct udphdr *)(iph + 1);
1292                 } else {
1293                         struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1294
1295                         if (iph->nexthdr == IPPROTO_UDP)
1296                                 uh = (struct udphdr *)(iph + 1);
1297                 }
1298                 if (uh) {
1299                         if (uh->check)
1300                                 skb_shinfo(skb)->gso_type |=
1301                                         SKB_GSO_UDP_TUNNEL_CSUM;
1302                         else
1303                                 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1304                 }
1305         }
1306 #endif
1307         return skb;
1308 }
1309
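/* Finish GRO processing of an aggregated packet: set the segment count,
 * gso_size and gso_type, let the chip specific gro_func fix up the TCP/IP
 * headers, then call tcp_gro_complete().
 */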
1310 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1311                                            struct bnxt_tpa_info *tpa_info,
1312                                            struct rx_tpa_end_cmp *tpa_end,
1313                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1314                                            struct sk_buff *skb)
1315 {
1316 #ifdef CONFIG_INET
1317         int payload_off;
1318         u16 segs;
1319
1320         segs = TPA_END_TPA_SEGS(tpa_end);
1321         if (segs == 1)
1322                 return skb;
1323
1324         NAPI_GRO_CB(skb)->count = segs;
1325         skb_shinfo(skb)->gso_size =
1326                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1327         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1328         payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1329                        RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
1330                       RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
1331         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1332         if (likely(skb))
1333                 tcp_gro_complete(skb);
1334 #endif
1335         return skb;
1336 }
1337
1338 /* Given the cfa_code of a received packet determine which
1339  * netdev (vf-rep or PF) the packet is destined to.
1340  */
1341 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1342 {
1343         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1344
1345         /* if vf-rep dev is NULL, the packet must belong to the PF */
1346         return dev ? dev : bp->dev;
1347 }
1348
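/* Handle a TPA end completion: build the skb for the aggregated packet
 * (copying small packets, otherwise taking over the TPA buffer and
 * allocating a replacement), then attach any aggregation pages and fill in
 * the protocol and RSS hash fields.
 */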
1349 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1350                                            struct bnxt_napi *bnapi,
1351                                            u32 *raw_cons,
1352                                            struct rx_tpa_end_cmp *tpa_end,
1353                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1354                                            u8 *event)
1355 {
1356         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1357         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1358         u8 agg_id = TPA_END_AGG_ID(tpa_end);
1359         u8 *data_ptr, agg_bufs;
1360         u16 cp_cons = RING_CMP(*raw_cons);
1361         unsigned int len;
1362         struct bnxt_tpa_info *tpa_info;
1363         dma_addr_t mapping;
1364         struct sk_buff *skb;
1365         void *data;
1366
1367         if (unlikely(bnapi->in_reset)) {
1368                 int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
1369
1370                 if (rc < 0)
1371                         return ERR_PTR(-EBUSY);
1372                 return NULL;
1373         }
1374
1375         tpa_info = &rxr->rx_tpa[agg_id];
1376         data = tpa_info->data;
1377         data_ptr = tpa_info->data_ptr;
1378         prefetch(data_ptr);
1379         len = tpa_info->len;
1380         mapping = tpa_info->mapping;
1381
1382         agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1383                     RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1384
1385         if (agg_bufs) {
1386                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1387                         return ERR_PTR(-EBUSY);
1388
1389                 *event |= BNXT_AGG_EVENT;
1390                 cp_cons = NEXT_CMP(cp_cons);
1391         }
1392
1393         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1394                 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1395                 if (agg_bufs > MAX_SKB_FRAGS)
1396                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1397                                     agg_bufs, (int)MAX_SKB_FRAGS);
1398                 return NULL;
1399         }
1400
1401         if (len <= bp->rx_copy_thresh) {
1402                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1403                 if (!skb) {
1404                         bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1405                         return NULL;
1406                 }
1407         } else {
1408                 u8 *new_data;
1409                 dma_addr_t new_mapping;
1410
1411                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1412                 if (!new_data) {
1413                         bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1414                         return NULL;
1415                 }
1416
1417                 tpa_info->data = new_data;
1418                 tpa_info->data_ptr = new_data + bp->rx_offset;
1419                 tpa_info->mapping = new_mapping;
1420
1421                 skb = build_skb(data, 0);
1422                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1423                                        bp->rx_buf_use_size, bp->rx_dir,
1424                                        DMA_ATTR_WEAK_ORDERING);
1425
1426                 if (!skb) {
1427                         kfree(data);
1428                         bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1429                         return NULL;
1430                 }
1431                 skb_reserve(skb, bp->rx_offset);
1432                 skb_put(skb, len);
1433         }
1434
1435         if (agg_bufs) {
1436                 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1437                 if (!skb) {
1438                         /* Page reuse already handled by bnxt_rx_pages(). */
1439                         return NULL;
1440                 }
1441         }
1442
1443         skb->protocol =
1444                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1445
1446         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1447                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1448
1449         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1450             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1451                 u16 vlan_proto = tpa_info->metadata >>
1452                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1453                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1454
1455                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1456         }
1457
1458         skb_checksum_none_assert(skb);
1459         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1460                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1461                 skb->csum_level =
1462                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1463         }
1464
1465         if (TPA_END_GRO(tpa_end))
1466                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1467
1468         return skb;
1469 }
1470
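/* Deliver a completed skb either to its VF representor or, for packets
 * destined to the PF, to the NAPI GRO receive path.
 */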
1471 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1472                              struct sk_buff *skb)
1473 {
1474         if (skb->dev != bp->dev) {
1475                 /* this packet belongs to a vf-rep */
1476                 bnxt_vf_rep_rx(bp, skb);
1477                 return;
1478         }
1479         skb_record_rx_queue(skb, bnapi->index);
1480         napi_gro_receive(&bnapi->napi, skb);
1481 }
1482
1483 /* returns the following:
1484  * 1       - 1 packet successfully received
1485  * 0       - successful TPA_START, packet not completed yet
1486  * -EBUSY  - completion ring does not have all the agg buffers yet
1487  * -ENOMEM - packet aborted due to out of memory
1488  * -EIO    - packet aborted due to hw error indicated in BD
1489  */
1490 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1491                        u8 *event)
1492 {
1493         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1494         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1495         struct net_device *dev = bp->dev;
1496         struct rx_cmp *rxcmp;
1497         struct rx_cmp_ext *rxcmp1;
1498         u32 tmp_raw_cons = *raw_cons;
1499         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1500         struct bnxt_sw_rx_bd *rx_buf;
1501         unsigned int len;
1502         u8 *data_ptr, agg_bufs, cmp_type;
1503         dma_addr_t dma_addr;
1504         struct sk_buff *skb;
1505         void *data;
1506         int rc = 0;
1507         u32 misc;
1508
1509         rxcmp = (struct rx_cmp *)
1510                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1511
1512         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1513         cp_cons = RING_CMP(tmp_raw_cons);
1514         rxcmp1 = (struct rx_cmp_ext *)
1515                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1516
1517         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1518                 return -EBUSY;
1519
1520         cmp_type = RX_CMP_TYPE(rxcmp);
1521
1522         prod = rxr->rx_prod;
1523
1524         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1525                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1526                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1527
1528                 *event |= BNXT_RX_EVENT;
1529                 goto next_rx_no_prod_no_len;
1530
1531         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1532                 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1533                                    (struct rx_tpa_end_cmp *)rxcmp,
1534                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1535
1536                 if (IS_ERR(skb))
1537                         return -EBUSY;
1538
1539                 rc = -ENOMEM;
1540                 if (likely(skb)) {
1541                         bnxt_deliver_skb(bp, bnapi, skb);
1542                         rc = 1;
1543                 }
1544                 *event |= BNXT_RX_EVENT;
1545                 goto next_rx_no_prod_no_len;
1546         }
1547
1548         cons = rxcmp->rx_cmp_opaque;
1549         if (unlikely(cons != rxr->rx_next_cons)) {
1550                 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1551
1552                 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1553                             cons, rxr->rx_next_cons);
1554                 bnxt_sched_reset(bp, rxr);
1555                 return rc1;
1556         }
1557         rx_buf = &rxr->rx_buf_ring[cons];
1558         data = rx_buf->data;
1559         data_ptr = rx_buf->data_ptr;
1560         prefetch(data_ptr);
1561
1562         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1563         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1564
1565         if (agg_bufs) {
1566                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1567                         return -EBUSY;
1568
1569                 cp_cons = NEXT_CMP(cp_cons);
1570                 *event |= BNXT_AGG_EVENT;
1571         }
1572         *event |= BNXT_RX_EVENT;
1573
1574         rx_buf->data = NULL;
1575         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1576                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1577
1578                 bnxt_reuse_rx_data(rxr, cons, data);
1579                 if (agg_bufs)
1580                         bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1581
1582                 rc = -EIO;
1583                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1584                         netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
1585                         bnxt_sched_reset(bp, rxr);
1586                 }
1587                 goto next_rx;
1588         }
1589
1590         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1591         dma_addr = rx_buf->mapping;
1592
1593         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1594                 rc = 1;
1595                 goto next_rx;
1596         }
1597
1598         if (len <= bp->rx_copy_thresh) {
1599                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1600                 bnxt_reuse_rx_data(rxr, cons, data);
1601                 if (!skb) {
1602                         rc = -ENOMEM;
1603                         goto next_rx;
1604                 }
1605         } else {
1606                 u32 payload;
1607
1608                 if (rx_buf->data_ptr == data_ptr)
1609                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1610                 else
1611                         payload = 0;
1612                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1613                                       payload | len);
1614                 if (!skb) {
1615                         rc = -ENOMEM;
1616                         goto next_rx;
1617                 }
1618         }
1619
1620         if (agg_bufs) {
1621                 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1622                 if (!skb) {
1623                         rc = -ENOMEM;
1624                         goto next_rx;
1625                 }
1626         }
1627
1628         if (RX_CMP_HASH_VALID(rxcmp)) {
1629                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1630                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1631
1632                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1633                 if (hash_type != 1 && hash_type != 3)
1634                         type = PKT_HASH_TYPE_L3;
1635                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1636         }
1637
1638         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1639         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1640
1641         if ((rxcmp1->rx_cmp_flags2 &
1642              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1643             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1644                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1645                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1646                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1647
1648                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1649         }
1650
1651         skb_checksum_none_assert(skb);
1652         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1653                 if (dev->features & NETIF_F_RXCSUM) {
1654                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1655                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1656                 }
1657         } else {
1658                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1659                         if (dev->features & NETIF_F_RXCSUM)
1660                                 cpr->rx_l4_csum_errors++;
1661                 }
1662         }
1663
1664         bnxt_deliver_skb(bp, bnapi, skb);
1665         rc = 1;
1666
1667 next_rx:
1668         rxr->rx_prod = NEXT_RX(prod);
1669         rxr->rx_next_cons = NEXT_RX(cons);
1670
1671         cpr->rx_packets += 1;
1672         cpr->rx_bytes += len;
1673
1674 next_rx_no_prod_no_len:
1675         *raw_cons = tmp_raw_cons;
1676
1677         return rc;
1678 }
1679
1680 /* In netpoll mode, if we are using a combined completion ring, we need to
1681  * discard the rx packets and recycle the buffers.
1682  */
1683 static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
1684                                  u32 *raw_cons, u8 *event)
1685 {
1686         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1687         u32 tmp_raw_cons = *raw_cons;
1688         struct rx_cmp_ext *rxcmp1;
1689         struct rx_cmp *rxcmp;
1690         u16 cp_cons;
1691         u8 cmp_type;
1692
1693         cp_cons = RING_CMP(tmp_raw_cons);
1694         rxcmp = (struct rx_cmp *)
1695                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1696
1697         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1698         cp_cons = RING_CMP(tmp_raw_cons);
1699         rxcmp1 = (struct rx_cmp_ext *)
1700                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1701
1702         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1703                 return -EBUSY;
1704
1705         cmp_type = RX_CMP_TYPE(rxcmp);
1706         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1707                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1708                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1709         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1710                 struct rx_tpa_end_cmp_ext *tpa_end1;
1711
1712                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1713                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1714                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1715         }
1716         return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
1717 }
1718
1719 #define BNXT_GET_EVENT_PORT(data)       \
1720         ((data) &                       \
1721          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1722
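/* Translate a firmware async event completion into the matching
 * sp_event bit(s) and kick the slow-path work for events that need it;
 * every event is also forwarded to the ULP driver via
 * bnxt_ulp_async_events().
 */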
1723 static int bnxt_async_event_process(struct bnxt *bp,
1724                                     struct hwrm_async_event_cmpl *cmpl)
1725 {
1726         u16 event_id = le16_to_cpu(cmpl->event_id);
1727
1728         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1729         switch (event_id) {
1730         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1731                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1732                 struct bnxt_link_info *link_info = &bp->link_info;
1733
1734                 if (BNXT_VF(bp))
1735                         goto async_event_process_exit;
1736
1737                 /* print unsupported speed warning in forced speed mode only */
1738                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1739                     (data1 & 0x20000)) {
1740                         u16 fw_speed = link_info->force_link_speed;
1741                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1742
1743                         if (speed != SPEED_UNKNOWN)
1744                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1745                                             speed);
1746                 }
1747                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1748         }
1749         /* fall through */
1750         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1751                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1752                 break;
1753         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1754                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1755                 break;
1756         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1757                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1758                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1759
1760                 if (BNXT_VF(bp))
1761                         break;
1762
1763                 if (bp->pf.port_id != port_id)
1764                         break;
1765
1766                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1767                 break;
1768         }
1769         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1770                 if (BNXT_PF(bp))
1771                         goto async_event_process_exit;
1772                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1773                 break;
1774         default:
1775                 goto async_event_process_exit;
1776         }
1777         bnxt_queue_sp_work(bp);
1778 async_event_process_exit:
1779         bnxt_ulp_async_events(bp, cmpl);
1780         return 0;
1781 }
1782
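/* Dispatch HWRM-related completions from the completion ring: HWRM_DONE
 * acknowledgements, forwarded VF requests (queued for the PF to execute
 * in the slow path), and firmware async events.
 */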
1783 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1784 {
1785         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1786         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1787         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1788                                 (struct hwrm_fwd_req_cmpl *)txcmp;
1789
1790         switch (cmpl_type) {
1791         case CMPL_BASE_TYPE_HWRM_DONE:
1792                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1793                 if (seq_id == bp->hwrm_intr_seq_id)
1794                         bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1795                 else
1796                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1797                 break;
1798
1799         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1800                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1801
1802                 if ((vf_id < bp->pf.first_vf_id) ||
1803                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1804                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1805                                    vf_id);
1806                         return -EINVAL;
1807                 }
1808
1809                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1810                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1811                 bnxt_queue_sp_work(bp);
1812                 break;
1813
1814         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1815                 bnxt_async_event_process(bp,
1816                                          (struct hwrm_async_event_cmpl *)txcmp);
1817
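        /* fall through */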
1818         default:
1819                 break;
1820         }
1821
1822         return 0;
1823 }
1824
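/* MSI-X interrupt handler: count the event (sampled for interrupt
 * moderation), prefetch the next completion descriptor and schedule
 * NAPI.
 */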
1825 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1826 {
1827         struct bnxt_napi *bnapi = dev_instance;
1828         struct bnxt *bp = bnapi->bp;
1829         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1830         u32 cons = RING_CMP(cpr->cp_raw_cons);
1831
1832         cpr->event_ctr++;
1833         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1834         napi_schedule(&bnapi->napi);
1835         return IRQ_HANDLED;
1836 }
1837
1838 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1839 {
1840         u32 raw_cons = cpr->cp_raw_cons;
1841         u16 cons = RING_CMP(raw_cons);
1842         struct tx_cmp *txcmp;
1843
1844         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1845
1846         return TX_CMP_VALID(txcmp, raw_cons);
1847 }
1848
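/* Legacy INTx interrupt handler.  If no work is pending and the legacy
 * interrupt status register shows the interrupt is not for this ring,
 * it is ignored.  Otherwise the ring IRQ is disabled and NAPI is
 * scheduled, unless interrupts are masked via bp->intr_sem.
 */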
1849 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1850 {
1851         struct bnxt_napi *bnapi = dev_instance;
1852         struct bnxt *bp = bnapi->bp;
1853         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1854         u32 cons = RING_CMP(cpr->cp_raw_cons);
1855         u32 int_status;
1856
1857         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1858
1859         if (!bnxt_has_work(bp, cpr)) {
1860                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1861                 /* return if erroneous interrupt */
1862                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1863                         return IRQ_NONE;
1864         }
1865
1866         /* disable ring IRQ */
1867         BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1868
1869         /* Return here if interrupt is shared and is disabled. */
1870         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1871                 return IRQ_HANDLED;
1872
1873         napi_schedule(&bnapi->napi);
1874         return IRQ_HANDLED;
1875 }
1876
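/* Service the completion ring for one NAPI cycle: count TX completions,
 * receive RX packets (or discard them when called with a zero budget
 * from netpoll), and hand HWRM completions to bnxt_hwrm_handler().
 * Doorbells for the TX, completion and RX/agg rings are then updated as
 * needed, and the number of RX packets processed is returned.
 */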
1877 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1878 {
1879         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1880         u32 raw_cons = cpr->cp_raw_cons;
1881         u32 cons;
1882         int tx_pkts = 0;
1883         int rx_pkts = 0;
1884         u8 event = 0;
1885         struct tx_cmp *txcmp;
1886
1887         while (1) {
1888                 int rc;
1889
1890                 cons = RING_CMP(raw_cons);
1891                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1892
1893                 if (!TX_CMP_VALID(txcmp, raw_cons))
1894                         break;
1895
1896                 /* The entry's valid bit must be checked before any of its
1897                  * other fields are read.
1898                  */
1899                 dma_rmb();
1900                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1901                         tx_pkts++;
1902                         /* Return the full budget so NAPI keeps polling and finishes the remaining TX work next cycle. */
1903                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
1904                                 rx_pkts = budget;
1905                                 raw_cons = NEXT_RAW_CMP(raw_cons);
1906                                 break;
1907                         }
1908                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1909                         if (likely(budget))
1910                                 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
1911                         else
1912                                 rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
1913                                                            &event);
1914                         if (likely(rc >= 0))
1915                                 rx_pkts += rc;
1916                         /* Increment rx_pkts when rc is -ENOMEM to count towards
1917                          * the NAPI budget.  Otherwise, we may potentially loop
1918                          * here forever if we consistently cannot allocate
1919                          * buffers.
1920                          */
1921                         else if (rc == -ENOMEM && budget)
1922                                 rx_pkts++;
1923                         else if (rc == -EBUSY)  /* partial completion */
1924                                 break;
1925                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1926                                      CMPL_BASE_TYPE_HWRM_DONE) ||
1927                                     (TX_CMP_TYPE(txcmp) ==
1928                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1929                                     (TX_CMP_TYPE(txcmp) ==
1930                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1931                         bnxt_hwrm_handler(bp, txcmp);
1932                 }
1933                 raw_cons = NEXT_RAW_CMP(raw_cons);
1934
1935                 if (rx_pkts && rx_pkts == budget)
1936                         break;
1937         }
1938
1939         if (event & BNXT_TX_EVENT) {
1940                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
1941                 void __iomem *db = txr->tx_doorbell;
1942                 u16 prod = txr->tx_prod;
1943
1944                 /* Sync BD data before updating doorbell */
1945                 wmb();
1946
1947                 bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod);
1948         }
1949
1950         cpr->cp_raw_cons = raw_cons;
1951         /* ACK completion ring before freeing tx ring and producing new
1952          * buffers in rx/agg rings to prevent overflowing the completion
1953          * ring.
1954          */
1955         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1956
1957         if (tx_pkts)
1958                 bnapi->tx_int(bp, bnapi, tx_pkts);
1959
1960         if (event & BNXT_RX_EVENT) {
1961                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1962
1963                 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
1964                 if (event & BNXT_AGG_EVENT)
1965                         bnxt_db_write(bp, rxr->rx_agg_doorbell,
1966                                       DB_KEY_RX | rxr->rx_agg_prod);
1967         }
1968         return rx_pkts;
1969 }
1970
1971 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1972 {
1973         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1974         struct bnxt *bp = bnapi->bp;
1975         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1976         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1977         struct tx_cmp *txcmp;
1978         struct rx_cmp_ext *rxcmp1;
1979         u32 cp_cons, tmp_raw_cons;
1980         u32 raw_cons = cpr->cp_raw_cons;
1981         u32 rx_pkts = 0;
1982         u8 event = 0;
1983
1984         while (1) {
1985                 int rc;
1986
1987                 cp_cons = RING_CMP(raw_cons);
1988                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1989
1990                 if (!TX_CMP_VALID(txcmp, raw_cons))
1991                         break;
1992
1993                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1994                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
1995                         cp_cons = RING_CMP(tmp_raw_cons);
1996                         rxcmp1 = (struct rx_cmp_ext *)
1997                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1998
1999                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2000                                 break;
2001
2002                         /* force an error to recycle the buffer */
2003                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2004                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2005
2006                         rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
2007                         if (likely(rc == -EIO) && budget)
2008                                 rx_pkts++;
2009                         else if (rc == -EBUSY)  /* partial completion */
2010                                 break;
2011                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2012                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2013                         bnxt_hwrm_handler(bp, txcmp);
2014                 } else {
2015                         netdev_err(bp->dev,
2016                                    "Invalid completion received on special ring\n");
2017                 }
2018                 raw_cons = NEXT_RAW_CMP(raw_cons);
2019
2020                 if (rx_pkts == budget)
2021                         break;
2022         }
2023
2024         cpr->cp_raw_cons = raw_cons;
2025         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
2026         bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
2027
2028         if (event & BNXT_AGG_EVENT)
2029                 bnxt_db_write(bp, rxr->rx_agg_doorbell,
2030                               DB_KEY_RX | rxr->rx_agg_prod);
2031
2032         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2033                 napi_complete_done(napi, rx_pkts);
2034                 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
2035         }
2036         return rx_pkts;
2037 }
2038
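/* Standard NAPI poll handler: run bnxt_poll_work() until the budget is
 * exhausted or no work remains, then complete NAPI and re-arm the
 * completion ring doorbell.  When DIM is enabled, the event and traffic
 * counters are fed to net_dim() for dynamic interrupt moderation.
 */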
2039 static int bnxt_poll(struct napi_struct *napi, int budget)
2040 {
2041         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2042         struct bnxt *bp = bnapi->bp;
2043         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2044         int work_done = 0;
2045
2046         while (1) {
2047                 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
2048
2049                 if (work_done >= budget) {
2050                         if (!budget)
2051                                 BNXT_CP_DB_REARM(cpr->cp_doorbell,
2052                                                  cpr->cp_raw_cons);
2053                         break;
2054                 }
2055
2056                 if (!bnxt_has_work(bp, cpr)) {
2057                         if (napi_complete_done(napi, work_done))
2058                                 BNXT_CP_DB_REARM(cpr->cp_doorbell,
2059                                                  cpr->cp_raw_cons);
2060                         break;
2061                 }
2062         }
2063         if (bp->flags & BNXT_FLAG_DIM) {
2064                 struct net_dim_sample dim_sample;
2065
2066                 net_dim_sample(cpr->event_ctr,
2067                                cpr->rx_packets,
2068                                cpr->rx_bytes,
2069                                &dim_sample);
2070                 net_dim(&cpr->dim, dim_sample);
2071         }
2072         mmiowb();
2073         return work_done;
2074 }
2075
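/* Unmap and free every pending TX skb on all TX rings and reset the
 * corresponding netdev TX queues.
 */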
2076 static void bnxt_free_tx_skbs(struct bnxt *bp)
2077 {
2078         int i, max_idx;
2079         struct pci_dev *pdev = bp->pdev;
2080
2081         if (!bp->tx_ring)
2082                 return;
2083
2084         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2085         for (i = 0; i < bp->tx_nr_rings; i++) {
2086                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2087                 int j;
2088
2089                 for (j = 0; j < max_idx;) {
2090                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2091                         struct sk_buff *skb = tx_buf->skb;
2092                         int k, last;
2093
2094                         if (!skb) {
2095                                 j++;
2096                                 continue;
2097                         }
2098
2099                         tx_buf->skb = NULL;
2100
2101                         if (tx_buf->is_push) {
2102                                 dev_kfree_skb(skb);
2103                                 j += 2;
2104                                 continue;
2105                         }
2106
2107                         dma_unmap_single(&pdev->dev,
2108                                          dma_unmap_addr(tx_buf, mapping),
2109                                          skb_headlen(skb),
2110                                          PCI_DMA_TODEVICE);
2111
2112                         last = tx_buf->nr_frags;
2113                         j += 2;
2114                         for (k = 0; k < last; k++, j++) {
2115                                 int ring_idx = j & bp->tx_ring_mask;
2116                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2117
2118                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2119                                 dma_unmap_page(
2120                                         &pdev->dev,
2121                                         dma_unmap_addr(tx_buf, mapping),
2122                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
2123                         }
2124                         dev_kfree_skb(skb);
2125                 }
2126                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2127         }
2128 }
2129
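/* Unmap and free all RX, aggregation and TPA buffers on every RX ring.
 */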
2130 static void bnxt_free_rx_skbs(struct bnxt *bp)
2131 {
2132         int i, max_idx, max_agg_idx;
2133         struct pci_dev *pdev = bp->pdev;
2134
2135         if (!bp->rx_ring)
2136                 return;
2137
2138         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2139         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2140         for (i = 0; i < bp->rx_nr_rings; i++) {
2141                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2142                 int j;
2143
2144                 if (rxr->rx_tpa) {
2145                         for (j = 0; j < MAX_TPA; j++) {
2146                                 struct bnxt_tpa_info *tpa_info =
2147                                                         &rxr->rx_tpa[j];
2148                                 u8 *data = tpa_info->data;
2149
2150                                 if (!data)
2151                                         continue;
2152
2153                                 dma_unmap_single_attrs(&pdev->dev,
2154                                                        tpa_info->mapping,
2155                                                        bp->rx_buf_use_size,
2156                                                        bp->rx_dir,
2157                                                        DMA_ATTR_WEAK_ORDERING);
2158
2159                                 tpa_info->data = NULL;
2160
2161                                 kfree(data);
2162                         }
2163                 }
2164
2165                 for (j = 0; j < max_idx; j++) {
2166                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2167                         dma_addr_t mapping = rx_buf->mapping;
2168                         void *data = rx_buf->data;
2169
2170                         if (!data)
2171                                 continue;
2172
2173                         rx_buf->data = NULL;
2174
2175                         if (BNXT_RX_PAGE_MODE(bp)) {
2176                                 mapping -= bp->rx_dma_offset;
2177                                 dma_unmap_page_attrs(&pdev->dev, mapping,
2178                                                      PAGE_SIZE, bp->rx_dir,
2179                                                      DMA_ATTR_WEAK_ORDERING);
2180                                 __free_page(data);
2181                         } else {
2182                                 dma_unmap_single_attrs(&pdev->dev, mapping,
2183                                                        bp->rx_buf_use_size,
2184                                                        bp->rx_dir,
2185                                                        DMA_ATTR_WEAK_ORDERING);
2186                                 kfree(data);
2187                         }
2188                 }
2189
2190                 for (j = 0; j < max_agg_idx; j++) {
2191                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2192                                 &rxr->rx_agg_ring[j];
2193                         struct page *page = rx_agg_buf->page;
2194
2195                         if (!page)
2196                                 continue;
2197
2198                         dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2199                                              BNXT_RX_PAGE_SIZE,
2200                                              PCI_DMA_FROMDEVICE,
2201                                              DMA_ATTR_WEAK_ORDERING);
2202
2203                         rx_agg_buf->page = NULL;
2204                         __clear_bit(j, rxr->rx_agg_bmap);
2205
2206                         __free_page(page);
2207                 }
2208                 if (rxr->rx_page) {
2209                         __free_page(rxr->rx_page);
2210                         rxr->rx_page = NULL;
2211                 }
2212         }
2213 }
2214
2215 static void bnxt_free_skbs(struct bnxt *bp)
2216 {
2217         bnxt_free_tx_skbs(bp);
2218         bnxt_free_rx_skbs(bp);
2219 }
2220
2221 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2222 {
2223         struct pci_dev *pdev = bp->pdev;
2224         int i;
2225
2226         for (i = 0; i < ring->nr_pages; i++) {
2227                 if (!ring->pg_arr[i])
2228                         continue;
2229
2230                 dma_free_coherent(&pdev->dev, ring->page_size,
2231                                   ring->pg_arr[i], ring->dma_arr[i]);
2232
2233                 ring->pg_arr[i] = NULL;
2234         }
2235         if (ring->pg_tbl) {
2236                 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
2237                                   ring->pg_tbl, ring->pg_tbl_map);
2238                 ring->pg_tbl = NULL;
2239         }
2240         if (ring->vmem_size && *ring->vmem) {
2241                 vfree(*ring->vmem);
2242                 *ring->vmem = NULL;
2243         }
2244 }
2245
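/* Allocate the DMA-coherent descriptor pages for a ring, a page table
 * when the ring spans more than one page, and the software ring area
 * (vmem) if the ring uses one.
 */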
2246 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2247 {
2248         int i;
2249         struct pci_dev *pdev = bp->pdev;
2250
2251         if (ring->nr_pages > 1) {
2252                 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
2253                                                   ring->nr_pages * 8,
2254                                                   &ring->pg_tbl_map,
2255                                                   GFP_KERNEL);
2256                 if (!ring->pg_tbl)
2257                         return -ENOMEM;
2258         }
2259
2260         for (i = 0; i < ring->nr_pages; i++) {
2261                 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2262                                                      ring->page_size,
2263                                                      &ring->dma_arr[i],
2264                                                      GFP_KERNEL);
2265                 if (!ring->pg_arr[i])
2266                         return -ENOMEM;
2267
2268                 if (ring->nr_pages > 1)
2269                         ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
2270         }
2271
2272         if (ring->vmem_size) {
2273                 *ring->vmem = vzalloc(ring->vmem_size);
2274                 if (!(*ring->vmem))
2275                         return -ENOMEM;
2276         }
2277         return 0;
2278 }
2279
2280 static void bnxt_free_rx_rings(struct bnxt *bp)
2281 {
2282         int i;
2283
2284         if (!bp->rx_ring)
2285                 return;
2286
2287         for (i = 0; i < bp->rx_nr_rings; i++) {
2288                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2289                 struct bnxt_ring_struct *ring;
2290
2291                 if (rxr->xdp_prog)
2292                         bpf_prog_put(rxr->xdp_prog);
2293
2294                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2295                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2296
2297                 kfree(rxr->rx_tpa);
2298                 rxr->rx_tpa = NULL;
2299
2300                 kfree(rxr->rx_agg_bmap);
2301                 rxr->rx_agg_bmap = NULL;
2302
2303                 ring = &rxr->rx_ring_struct;
2304                 bnxt_free_ring(bp, ring);
2305
2306                 ring = &rxr->rx_agg_ring_struct;
2307                 bnxt_free_ring(bp, ring);
2308         }
2309 }
2310
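/* Allocate the RX rings.  Each ring is registered with the XDP rxq_info
 * framework; when aggregation rings are used, the agg ring and its
 * bitmap are allocated as well, plus the TPA info array when TPA is
 * enabled.
 */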
2311 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2312 {
2313         int i, rc, agg_rings = 0, tpa_rings = 0;
2314
2315         if (!bp->rx_ring)
2316                 return -ENOMEM;
2317
2318         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2319                 agg_rings = 1;
2320
2321         if (bp->flags & BNXT_FLAG_TPA)
2322                 tpa_rings = 1;
2323
2324         for (i = 0; i < bp->rx_nr_rings; i++) {
2325                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2326                 struct bnxt_ring_struct *ring;
2327
2328                 ring = &rxr->rx_ring_struct;
2329
2330                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2331                 if (rc < 0)
2332                         return rc;
2333
2334                 rc = bnxt_alloc_ring(bp, ring);
2335                 if (rc)
2336                         return rc;
2337
2338                 if (agg_rings) {
2339                         u16 mem_size;
2340
2341                         ring = &rxr->rx_agg_ring_struct;
2342                         rc = bnxt_alloc_ring(bp, ring);
2343                         if (rc)
2344                                 return rc;
2345
2346                         ring->grp_idx = i;
2347                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2348                         mem_size = rxr->rx_agg_bmap_size / 8;
2349                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2350                         if (!rxr->rx_agg_bmap)
2351                                 return -ENOMEM;
2352
2353                         if (tpa_rings) {
2354                                 rxr->rx_tpa = kcalloc(MAX_TPA,
2355                                                 sizeof(struct bnxt_tpa_info),
2356                                                 GFP_KERNEL);
2357                                 if (!rxr->rx_tpa)
2358                                         return -ENOMEM;
2359                         }
2360                 }
2361         }
2362         return 0;
2363 }
2364
2365 static void bnxt_free_tx_rings(struct bnxt *bp)
2366 {
2367         int i;
2368         struct pci_dev *pdev = bp->pdev;
2369
2370         if (!bp->tx_ring)
2371                 return;
2372
2373         for (i = 0; i < bp->tx_nr_rings; i++) {
2374                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2375                 struct bnxt_ring_struct *ring;
2376
2377                 if (txr->tx_push) {
2378                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2379                                           txr->tx_push, txr->tx_push_mapping);
2380                         txr->tx_push = NULL;
2381                 }
2382
2383                 ring = &txr->tx_ring_struct;
2384
2385                 bnxt_free_ring(bp, ring);
2386         }
2387 }
2388
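/* Allocate the TX rings.  The TX push buffer size is the cache-aligned
 * push BD header plus the push threshold; if that exceeds 256 bytes,
 * push mode is disabled.  Each ring also gets its hardware queue id
 * from the TC-to-queue mapping.
 */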
2389 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2390 {
2391         int i, j, rc;
2392         struct pci_dev *pdev = bp->pdev;
2393
2394         bp->tx_push_size = 0;
2395         if (bp->tx_push_thresh) {
2396                 int push_size;
2397
2398                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2399                                         bp->tx_push_thresh);
2400
2401                 if (push_size > 256) {
2402                         push_size = 0;
2403                         bp->tx_push_thresh = 0;
2404                 }
2405
2406                 bp->tx_push_size = push_size;
2407         }
2408
2409         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2410                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2411                 struct bnxt_ring_struct *ring;
2412                 u8 qidx;
2413
2414                 ring = &txr->tx_ring_struct;
2415
2416                 rc = bnxt_alloc_ring(bp, ring);
2417                 if (rc)
2418                         return rc;
2419
2420                 ring->grp_idx = txr->bnapi->index;
2421                 if (bp->tx_push_size) {
2422                         dma_addr_t mapping;
2423
2424                         /* One pre-allocated DMA buffer to back up the
2425                          * TX push operation
2426                          */
2427                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2428                                                 bp->tx_push_size,
2429                                                 &txr->tx_push_mapping,
2430                                                 GFP_KERNEL);
2431
2432                         if (!txr->tx_push)
2433                                 return -ENOMEM;
2434
2435                         mapping = txr->tx_push_mapping +
2436                                 sizeof(struct tx_push_bd);
2437                         txr->data_mapping = cpu_to_le64(mapping);
2438
2439                         memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2440                 }
2441                 qidx = bp->tc_to_qidx[j];
2442                 ring->queue_id = bp->q_info[qidx].queue_id;
2443                 if (i < bp->tx_nr_rings_xdp)
2444                         continue;
2445                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2446                         j++;
2447         }
2448         return 0;
2449 }
2450
2451 static void bnxt_free_cp_rings(struct bnxt *bp)
2452 {
2453         int i;
2454
2455         if (!bp->bnapi)
2456                 return;
2457
2458         for (i = 0; i < bp->cp_nr_rings; i++) {
2459                 struct bnxt_napi *bnapi = bp->bnapi[i];
2460                 struct bnxt_cp_ring_info *cpr;
2461                 struct bnxt_ring_struct *ring;
2462
2463                 if (!bnapi)
2464                         continue;
2465
2466                 cpr = &bnapi->cp_ring;
2467                 ring = &cpr->cp_ring_struct;
2468
2469                 bnxt_free_ring(bp, ring);
2470         }
2471 }
2472
2473 static int bnxt_alloc_cp_rings(struct bnxt *bp)
2474 {
2475         int i, rc, ulp_base_vec, ulp_msix;
2476
2477         ulp_msix = bnxt_get_ulp_msix_num(bp);
2478         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
2479         for (i = 0; i < bp->cp_nr_rings; i++) {
2480                 struct bnxt_napi *bnapi = bp->bnapi[i];
2481                 struct bnxt_cp_ring_info *cpr;
2482                 struct bnxt_ring_struct *ring;
2483
2484                 if (!bnapi)
2485                         continue;
2486
2487                 cpr = &bnapi->cp_ring;
2488                 ring = &cpr->cp_ring_struct;
2489
2490                 rc = bnxt_alloc_ring(bp, ring);
2491                 if (rc)
2492                         return rc;
2493
2494                 if (ulp_msix && i >= ulp_base_vec)
2495                         ring->map_idx = i + ulp_msix;
2496                 else
2497                         ring->map_idx = i;
2498         }
2499         return 0;
2500 }
2501
2502 static void bnxt_init_ring_struct(struct bnxt *bp)
2503 {
2504         int i;
2505
2506         for (i = 0; i < bp->cp_nr_rings; i++) {
2507                 struct bnxt_napi *bnapi = bp->bnapi[i];
2508                 struct bnxt_cp_ring_info *cpr;
2509                 struct bnxt_rx_ring_info *rxr;
2510                 struct bnxt_tx_ring_info *txr;
2511                 struct bnxt_ring_struct *ring;
2512
2513                 if (!bnapi)
2514                         continue;
2515
2516                 cpr = &bnapi->cp_ring;
2517                 ring = &cpr->cp_ring_struct;
2518                 ring->nr_pages = bp->cp_nr_pages;
2519                 ring->page_size = HW_CMPD_RING_SIZE;
2520                 ring->pg_arr = (void **)cpr->cp_desc_ring;
2521                 ring->dma_arr = cpr->cp_desc_mapping;
2522                 ring->vmem_size = 0;
2523
2524                 rxr = bnapi->rx_ring;
2525                 if (!rxr)
2526                         goto skip_rx;
2527
2528                 ring = &rxr->rx_ring_struct;
2529                 ring->nr_pages = bp->rx_nr_pages;
2530                 ring->page_size = HW_RXBD_RING_SIZE;
2531                 ring->pg_arr = (void **)rxr->rx_desc_ring;
2532                 ring->dma_arr = rxr->rx_desc_mapping;
2533                 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2534                 ring->vmem = (void **)&rxr->rx_buf_ring;
2535
2536                 ring = &rxr->rx_agg_ring_struct;
2537                 ring->nr_pages = bp->rx_agg_nr_pages;
2538                 ring->page_size = HW_RXBD_RING_SIZE;
2539                 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
2540                 ring->dma_arr = rxr->rx_agg_desc_mapping;
2541                 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2542                 ring->vmem = (void **)&rxr->rx_agg_ring;
2543
2544 skip_rx:
2545                 txr = bnapi->tx_ring;
2546                 if (!txr)
2547                         continue;
2548
2549                 ring = &txr->tx_ring_struct;
2550                 ring->nr_pages = bp->tx_nr_pages;
2551                 ring->page_size = HW_RXBD_RING_SIZE;
2552                 ring->pg_arr = (void **)txr->tx_desc_ring;
2553                 ring->dma_arr = txr->tx_desc_mapping;
2554                 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2555                 ring->vmem = (void **)&txr->tx_buf_ring;
2556         }
2557 }
2558
2559 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2560 {
2561         int i;
2562         u32 prod;
2563         struct rx_bd **rx_buf_ring;
2564
2565         rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2566         for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2567                 int j;
2568                 struct rx_bd *rxbd;
2569
2570                 rxbd = rx_buf_ring[i];
2571                 if (!rxbd)
2572                         continue;
2573
2574                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2575                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2576                         rxbd->rx_bd_opaque = prod;
2577                 }
2578         }
2579 }
2580
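/* Initialize one RX ring: stamp the BD type/flags into the RX (and agg)
 * descriptor pages, take a reference on the XDP program in page mode,
 * fill the rings with buffers, and allocate the per-ring TPA buffers
 * when TPA is enabled.
 */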
2581 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2582 {
2583         struct net_device *dev = bp->dev;
2584         struct bnxt_rx_ring_info *rxr;
2585         struct bnxt_ring_struct *ring;
2586         u32 prod, type;
2587         int i;
2588
2589         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2590                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2591
2592         if (NET_IP_ALIGN == 2)
2593                 type |= RX_BD_FLAGS_SOP;
2594
2595         rxr = &bp->rx_ring[ring_nr];
2596         ring = &rxr->rx_ring_struct;
2597         bnxt_init_rxbd_pages(ring, type);
2598
2599         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2600                 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2601                 if (IS_ERR(rxr->xdp_prog)) {
2602                         int rc = PTR_ERR(rxr->xdp_prog);
2603
2604                         rxr->xdp_prog = NULL;
2605                         return rc;
2606                 }
2607         }
2608         prod = rxr->rx_prod;
2609         for (i = 0; i < bp->rx_ring_size; i++) {
2610                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2611                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2612                                     ring_nr, i, bp->rx_ring_size);
2613                         break;
2614                 }
2615                 prod = NEXT_RX(prod);
2616         }
2617         rxr->rx_prod = prod;
2618         ring->fw_ring_id = INVALID_HW_RING_ID;
2619
2620         ring = &rxr->rx_agg_ring_struct;
2621         ring->fw_ring_id = INVALID_HW_RING_ID;
2622
2623         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2624                 return 0;
2625
2626         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2627                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2628
2629         bnxt_init_rxbd_pages(ring, type);
2630
2631         prod = rxr->rx_agg_prod;
2632         for (i = 0; i < bp->rx_agg_ring_size; i++) {
2633                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2634                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2635                                     ring_nr, i, bp->rx_agg_ring_size);
2636                         break;
2637                 }
2638                 prod = NEXT_RX_AGG(prod);
2639         }
2640         rxr->rx_agg_prod = prod;
2641
2642         if (bp->flags & BNXT_FLAG_TPA) {
2643                 if (rxr->rx_tpa) {
2644                         u8 *data;
2645                         dma_addr_t mapping;
2646
2647                         for (i = 0; i < MAX_TPA; i++) {
2648                                 data = __bnxt_alloc_rx_data(bp, &mapping,
2649                                                             GFP_KERNEL);
2650                                 if (!data)
2651                                         return -ENOMEM;
2652
2653                                 rxr->rx_tpa[i].data = data;
2654                                 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
2655                                 rxr->rx_tpa[i].mapping = mapping;
2656                         }
2657                 } else {
2658                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2659                         return -ENOMEM;
2660                 }
2661         }
2662
2663         return 0;
2664 }
2665
2666 static void bnxt_init_cp_rings(struct bnxt *bp)
2667 {
2668         int i;
2669
2670         for (i = 0; i < bp->cp_nr_rings; i++) {
2671                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2672                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2673
2674                 ring->fw_ring_id = INVALID_HW_RING_ID;
2675                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2676                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2677         }
2678 }
2679
2680 static int bnxt_init_rx_rings(struct bnxt *bp)
2681 {
2682         int i, rc = 0;
2683
2684         if (BNXT_RX_PAGE_MODE(bp)) {
2685                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2686                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
2687         } else {
2688                 bp->rx_offset = BNXT_RX_OFFSET;
2689                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2690         }
2691
2692         for (i = 0; i < bp->rx_nr_rings; i++) {
2693                 rc = bnxt_init_one_rx_ring(bp, i);
2694                 if (rc)
2695                         break;
2696         }
2697
2698         return rc;
2699 }
2700
2701 static int bnxt_init_tx_rings(struct bnxt *bp)
2702 {
2703         u16 i;
2704
2705         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2706                                    MAX_SKB_FRAGS + 1);
2707
2708         for (i = 0; i < bp->tx_nr_rings; i++) {
2709                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2710                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2711
2712                 ring->fw_ring_id = INVALID_HW_RING_ID;
2713         }
2714
2715         return 0;
2716 }
2717
2718 static void bnxt_free_ring_grps(struct bnxt *bp)
2719 {
2720         kfree(bp->grp_info);
2721         bp->grp_info = NULL;
2722 }
2723
2724 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2725 {
2726         int i;
2727
2728         if (irq_re_init) {
2729                 bp->grp_info = kcalloc(bp->cp_nr_rings,
2730                                        sizeof(struct bnxt_ring_grp_info),
2731                                        GFP_KERNEL);
2732                 if (!bp->grp_info)
2733                         return -ENOMEM;
2734         }
2735         for (i = 0; i < bp->cp_nr_rings; i++) {
2736                 if (irq_re_init)
2737                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2738                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2739                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2740                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2741                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2742         }
2743         return 0;
2744 }
2745
2746 static void bnxt_free_vnics(struct bnxt *bp)
2747 {
2748         kfree(bp->vnic_info);
2749         bp->vnic_info = NULL;
2750         bp->nr_vnics = 0;
2751 }
2752
2753 static int bnxt_alloc_vnics(struct bnxt *bp)
2754 {
2755         int num_vnics = 1;
2756
2757 #ifdef CONFIG_RFS_ACCEL
2758         if (bp->flags & BNXT_FLAG_RFS)
2759                 num_vnics += bp->rx_nr_rings;
2760 #endif
2761
2762         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2763                 num_vnics++;
2764
2765         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2766                                 GFP_KERNEL);
2767         if (!bp->vnic_info)
2768                 return -ENOMEM;
2769
2770         bp->nr_vnics = num_vnics;
2771         return 0;
2772 }
2773
2774 static void bnxt_init_vnics(struct bnxt *bp)
2775 {
2776         int i;
2777
2778         for (i = 0; i < bp->nr_vnics; i++) {
2779                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2780
2781                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2782                 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
2783                 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
2784                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2785
2786                 if (bp->vnic_info[i].rss_hash_key) {
2787                         if (i == 0)
2788                                 prandom_bytes(vnic->rss_hash_key,
2789                                               HW_HASH_KEY_SIZE);
2790                         else
2791                                 memcpy(vnic->rss_hash_key,
2792                                        bp->vnic_info[0].rss_hash_key,
2793                                        HW_HASH_KEY_SIZE);
2794                 }
2795         }
2796 }
2797
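/* Convert a ring size (in descriptors) into a page count: take
 * ring_size / desc_per_pg, add one, and round up to the next power of
 * two, returning at least one page.  As an illustrative example (values
 * not taken from the code): ring_size = 200 and desc_per_pg = 64 gives
 * 200 / 64 = 3, plus one is 4, which is already a power of two, so 4
 * pages are used.
 */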
2798 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2799 {
2800         int pages;
2801
2802         pages = ring_size / desc_per_pg;
2803
2804         if (!pages)
2805                 return 1;
2806
2807         pages++;
2808
2809         while (pages & (pages - 1))
2810                 pages++;
2811
2812         return pages;
2813 }
2814
2815 void bnxt_set_tpa_flags(struct bnxt *bp)
2816 {
2817         bp->flags &= ~BNXT_FLAG_TPA;
2818         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
2819                 return;
2820         if (bp->dev->features & NETIF_F_LRO)
2821                 bp->flags |= BNXT_FLAG_LRO;
2822         else if (bp->dev->features & NETIF_F_GRO_HW)
2823                 bp->flags |= BNXT_FLAG_GRO;
2824 }
2825
2826 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2827  * be set on entry.
2828  */
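/* Sizing notes (a rough sketch, assuming a 4K BNXT_RX_PAGE_SIZE): with TPA
 * enabled, agg_factor = min(4, 65536 / 4096) = 4, so the aggregation ring
 * is sized to four times the RX ring.  If a buffer (data plus
 * skb_shared_info overhead) would exceed one page, the JUMBO flag is set
 * and the aggregation factor is raised to cover the MTU; once aggregation
 * rings are in use, the RX buffers themselves shrink to the copy-break
 * size (BNXT_RX_COPY_THRESH).
 */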
2829 void bnxt_set_ring_params(struct bnxt *bp)
2830 {
2831         u32 ring_size, rx_size, rx_space;
2832         u32 agg_factor = 0, agg_ring_size = 0;
2833
2834         /* 8 for CRC and VLAN */
2835         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2836
2837         rx_space = rx_size + NET_SKB_PAD +
2838                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2839
2840         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2841         ring_size = bp->rx_ring_size;
2842         bp->rx_agg_ring_size = 0;
2843         bp->rx_agg_nr_pages = 0;
2844
2845         if (bp->flags & BNXT_FLAG_TPA)
2846                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
2847
2848         bp->flags &= ~BNXT_FLAG_JUMBO;
2849         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
2850                 u32 jumbo_factor;
2851
2852                 bp->flags |= BNXT_FLAG_JUMBO;
2853                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2854                 if (jumbo_factor > agg_factor)
2855                         agg_factor = jumbo_factor;
2856         }
2857         agg_ring_size = ring_size * agg_factor;
2858
2859         if (agg_ring_size) {
2860                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2861                                                         RX_DESC_CNT);
2862                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2863                         u32 tmp = agg_ring_size;
2864
2865                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2866                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2867                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2868                                     tmp, agg_ring_size);
2869                 }
2870                 bp->rx_agg_ring_size = agg_ring_size;
2871                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2872                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2873                 rx_space = rx_size + NET_SKB_PAD +
2874                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2875         }
2876
2877         bp->rx_buf_use_size = rx_size;
2878         bp->rx_buf_size = rx_space;
2879
2880         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2881         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2882
2883         ring_size = bp->tx_ring_size;
2884         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2885         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2886
2887         ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2888         bp->cp_ring_size = ring_size;
2889
2890         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2891         if (bp->cp_nr_pages > MAX_CP_PAGES) {
2892                 bp->cp_nr_pages = MAX_CP_PAGES;
2893                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2894                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2895                             ring_size, bp->cp_ring_size);
2896         }
2897         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2898         bp->cp_ring_mask = bp->cp_bit - 1;
2899 }
2900
2901 /* Changing allocation mode of RX rings.
2902  * TODO: Update when extending xdp_rxq_info to support allocation modes.
2903  */
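/* In page mode (used by XDP), the MTU is capped at BNXT_MAX_PAGE_MODE_MTU,
 * aggregation rings are disabled, RX buffers are mapped DMA_BIDIRECTIONAL,
 * and bnxt_rx_page_skb() builds the skb.  Otherwise the normal skb path is
 * restored with DMA_FROM_DEVICE mappings and bnxt_rx_skb().
 */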
2904 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
2905 {
2906         if (page_mode) {
2907                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
2908                         return -EOPNOTSUPP;
2909                 bp->dev->max_mtu =
2910                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
2911                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
2912                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
2913                 bp->rx_dir = DMA_BIDIRECTIONAL;
2914                 bp->rx_skb_func = bnxt_rx_page_skb;
2915                 /* Disable LRO or GRO_HW */
2916                 netdev_update_features(bp->dev);
2917         } else {
2918                 bp->dev->max_mtu = bp->max_mtu;
2919                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
2920                 bp->rx_dir = DMA_FROM_DEVICE;
2921                 bp->rx_skb_func = bnxt_rx_skb;
2922         }
2923         return 0;
2924 }
2925
2926 static void bnxt_free_vnic_attributes(struct bnxt *bp)
2927 {
2928         int i;
2929         struct bnxt_vnic_info *vnic;
2930         struct pci_dev *pdev = bp->pdev;
2931
2932         if (!bp->vnic_info)
2933                 return;
2934
2935         for (i = 0; i < bp->nr_vnics; i++) {
2936                 vnic = &bp->vnic_info[i];
2937
2938                 kfree(vnic->fw_grp_ids);
2939                 vnic->fw_grp_ids = NULL;
2940
2941                 kfree(vnic->uc_list);
2942                 vnic->uc_list = NULL;
2943
2944                 if (vnic->mc_list) {
2945                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2946                                           vnic->mc_list, vnic->mc_list_mapping);
2947                         vnic->mc_list = NULL;
2948                 }
2949
2950                 if (vnic->rss_table) {
2951                         dma_free_coherent(&pdev->dev, PAGE_SIZE,
2952                                           vnic->rss_table,
2953                                           vnic->rss_table_dma_addr);
2954                         vnic->rss_table = NULL;
2955                 }
2956
2957                 vnic->rss_hash_key = NULL;
2958                 vnic->flags = 0;
2959         }
2960 }
2961
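/* Per-VNIC allocations: a kmalloc'ed unicast address list, a coherent DMA
 * buffer for the multicast list, the fw_grp_ids array (one entry per RX
 * ring for RSS-capable VNICs), and one DMA page that holds both the RSS
 * indirection table and, cache-line aligned after it, the RSS hash key.
 */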
2962 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2963 {
2964         int i, rc = 0, size;
2965         struct bnxt_vnic_info *vnic;
2966         struct pci_dev *pdev = bp->pdev;
2967         int max_rings;
2968
2969         for (i = 0; i < bp->nr_vnics; i++) {
2970                 vnic = &bp->vnic_info[i];
2971
2972                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2973                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2974
2975                         if (mem_size > 0) {
2976                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2977                                 if (!vnic->uc_list) {
2978                                         rc = -ENOMEM;
2979                                         goto out;
2980                                 }
2981                         }
2982                 }
2983
2984                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2985                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2986                         vnic->mc_list =
2987                                 dma_alloc_coherent(&pdev->dev,
2988                                                    vnic->mc_list_size,
2989                                                    &vnic->mc_list_mapping,
2990                                                    GFP_KERNEL);
2991                         if (!vnic->mc_list) {
2992                                 rc = -ENOMEM;
2993                                 goto out;
2994                         }
2995                 }
2996
2997                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2998                         max_rings = bp->rx_nr_rings;
2999                 else
3000                         max_rings = 1;
3001
3002                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3003                 if (!vnic->fw_grp_ids) {
3004                         rc = -ENOMEM;
3005                         goto out;
3006                 }
3007
3008                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3009                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3010                         continue;
3011
3012                 /* Allocate rss table and hash key */
3013                 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3014                                                      &vnic->rss_table_dma_addr,
3015                                                      GFP_KERNEL);
3016                 if (!vnic->rss_table) {
3017                         rc = -ENOMEM;
3018                         goto out;
3019                 }
3020
3021                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3022
3023                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3024                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3025         }
3026         return 0;
3027
3028 out:
3029         return rc;
3030 }
3031
3032 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3033 {
3034         struct pci_dev *pdev = bp->pdev;
3035
3036         if (bp->hwrm_cmd_resp_addr) {
3037                 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3038                                   bp->hwrm_cmd_resp_dma_addr);
3039                 bp->hwrm_cmd_resp_addr = NULL;
3040         }
3041 }
3042
3043 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3044 {
3045         struct pci_dev *pdev = bp->pdev;
3046
3047         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3048                                                    &bp->hwrm_cmd_resp_dma_addr,
3049                                                    GFP_KERNEL);
3050         if (!bp->hwrm_cmd_resp_addr)
3051                 return -ENOMEM;
3052
3053         return 0;
3054 }
3055
3056 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3057 {
3058         if (bp->hwrm_short_cmd_req_addr) {
3059                 struct pci_dev *pdev = bp->pdev;
3060
3061                 dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
3062                                   bp->hwrm_short_cmd_req_addr,
3063                                   bp->hwrm_short_cmd_req_dma_addr);
3064                 bp->hwrm_short_cmd_req_addr = NULL;
3065         }
3066 }
3067
3068 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3069 {
3070         struct pci_dev *pdev = bp->pdev;
3071
3072         bp->hwrm_short_cmd_req_addr =
3073                 dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
3074                                    &bp->hwrm_short_cmd_req_dma_addr,
3075                                    GFP_KERNEL);
3076         if (!bp->hwrm_short_cmd_req_addr)
3077                 return -ENOMEM;
3078
3079         return 0;
3080 }
3081
3082 static void bnxt_free_stats(struct bnxt *bp)
3083 {
3084         u32 size, i;
3085         struct pci_dev *pdev = bp->pdev;
3086
3087         bp->flags &= ~BNXT_FLAG_PORT_STATS;
3088         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3089
3090         if (bp->hw_rx_port_stats) {
3091                 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3092                                   bp->hw_rx_port_stats,
3093                                   bp->hw_rx_port_stats_map);
3094                 bp->hw_rx_port_stats = NULL;
3095         }
3096
3097         if (bp->hw_rx_port_stats_ext) {
3098                 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3099                                   bp->hw_rx_port_stats_ext,
3100                                   bp->hw_rx_port_stats_ext_map);
3101                 bp->hw_rx_port_stats_ext = NULL;
3102         }
3103
3104         if (!bp->bnapi)
3105                 return;
3106
3107         size = sizeof(struct ctx_hw_stats);
3108
3109         for (i = 0; i < bp->cp_nr_rings; i++) {
3110                 struct bnxt_napi *bnapi = bp->bnapi[i];
3111                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3112
3113                 if (cpr->hw_stats) {
3114                         dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3115                                           cpr->hw_stats_map);
3116                         cpr->hw_stats = NULL;
3117                 }
3118         }
3119 }
3120
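/* Each completion ring gets its own ctx_hw_stats DMA block.  On the PF
 * (except the Nitro-based 58700), a single coherent buffer holds the RX
 * port stats with the TX port stats placed 512 bytes after them, and
 * extended RX port stats are only allocated when the firmware spec is at
 * least 0x10804 and not 0x10900.
 */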
3121 static int bnxt_alloc_stats(struct bnxt *bp)
3122 {
3123         u32 size, i;
3124         struct pci_dev *pdev = bp->pdev;
3125
3126         size = sizeof(struct ctx_hw_stats);
3127
3128         for (i = 0; i < bp->cp_nr_rings; i++) {
3129                 struct bnxt_napi *bnapi = bp->bnapi[i];
3130                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3131
3132                 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3133                                                    &cpr->hw_stats_map,
3134                                                    GFP_KERNEL);
3135                 if (!cpr->hw_stats)
3136                         return -ENOMEM;
3137
3138                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3139         }
3140
3141         if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
3142                 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3143                                          sizeof(struct tx_port_stats) + 1024;
3144
3145                 bp->hw_rx_port_stats =
3146                         dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3147                                            &bp->hw_rx_port_stats_map,
3148                                            GFP_KERNEL);
3149                 if (!bp->hw_rx_port_stats)
3150                         return -ENOMEM;
3151
3152                 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3153                                        512;
3154                 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3155                                            sizeof(struct rx_port_stats) + 512;
3156                 bp->flags |= BNXT_FLAG_PORT_STATS;
3157
3158                 /* Display extended statistics only if the FW supports them */
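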
3159                 if (bp->hwrm_spec_code < 0x10804 ||
3160                     bp->hwrm_spec_code == 0x10900)
3161                         return 0;
3162
3163                 bp->hw_rx_port_stats_ext =
3164                         dma_zalloc_coherent(&pdev->dev,
3165                                             sizeof(struct rx_port_stats_ext),
3166                                             &bp->hw_rx_port_stats_ext_map,
3167                                             GFP_KERNEL);
3168                 if (!bp->hw_rx_port_stats_ext)
3169                         return 0;
3170
3171                 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3172         }
3173         return 0;
3174 }
3175
3176 static void bnxt_clear_ring_indices(struct bnxt *bp)
3177 {
3178         int i;
3179
3180         if (!bp->bnapi)
3181                 return;
3182
3183         for (i = 0; i < bp->cp_nr_rings; i++) {
3184                 struct bnxt_napi *bnapi = bp->bnapi[i];
3185                 struct bnxt_cp_ring_info *cpr;
3186                 struct bnxt_rx_ring_info *rxr;
3187                 struct bnxt_tx_ring_info *txr;
3188
3189                 if (!bnapi)
3190                         continue;
3191
3192                 cpr = &bnapi->cp_ring;
3193                 cpr->cp_raw_cons = 0;
3194
3195                 txr = bnapi->tx_ring;
3196                 if (txr) {
3197                         txr->tx_prod = 0;
3198                         txr->tx_cons = 0;
3199                 }
3200
3201                 rxr = bnapi->rx_ring;
3202                 if (rxr) {
3203                         rxr->rx_prod = 0;
3204                         rxr->rx_agg_prod = 0;
3205                         rxr->rx_sw_agg_prod = 0;
3206                         rxr->rx_next_cons = 0;
3207                 }
3208         }
3209 }
3210
3211 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3212 {
3213 #ifdef CONFIG_RFS_ACCEL
3214         int i;
3215
3216         /* We are under rtnl_lock and all our NAPIs have been disabled,
3217          * so it is safe to delete the hash table.
3218          */
3219         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3220                 struct hlist_head *head;
3221                 struct hlist_node *tmp;
3222                 struct bnxt_ntuple_filter *fltr;
3223
3224                 head = &bp->ntp_fltr_hash_tbl[i];
3225                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3226                         hlist_del(&fltr->hash);
3227                         kfree(fltr);
3228                 }
3229         }
3230         if (irq_reinit) {
3231                 kfree(bp->ntp_fltr_bmap);
3232                 bp->ntp_fltr_bmap = NULL;
3233         }
3234         bp->ntp_fltr_count = 0;
3235 #endif
3236 }
3237
3238 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3239 {
3240 #ifdef CONFIG_RFS_ACCEL
3241         int i, rc = 0;
3242
3243         if (!(bp->flags & BNXT_FLAG_RFS))
3244                 return 0;
3245
3246         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3247                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3248
3249         bp->ntp_fltr_count = 0;
3250         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3251                                     sizeof(long),
3252                                     GFP_KERNEL);
3253
3254         if (!bp->ntp_fltr_bmap)
3255                 rc = -ENOMEM;
3256
3257         return rc;
3258 #else
3259         return 0;
3260 #endif
3261 }
3262
3263 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3264 {
3265         bnxt_free_vnic_attributes(bp);
3266         bnxt_free_tx_rings(bp);
3267         bnxt_free_rx_rings(bp);
3268         bnxt_free_cp_rings(bp);
3269         bnxt_free_ntp_fltrs(bp, irq_re_init);
3270         if (irq_re_init) {
3271                 bnxt_free_stats(bp);
3272                 bnxt_free_ring_grps(bp);
3273                 bnxt_free_vnics(bp);
3274                 kfree(bp->tx_ring_map);
3275                 bp->tx_ring_map = NULL;
3276                 kfree(bp->tx_ring);
3277                 bp->tx_ring = NULL;
3278                 kfree(bp->rx_ring);
3279                 bp->rx_ring = NULL;
3280                 kfree(bp->bnapi);
3281                 bp->bnapi = NULL;
3282         } else {
3283                 bnxt_clear_ring_indices(bp);
3284         }
3285 }
3286
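/* On a full (re)init, the bnxt_napi pointer array and all per-queue
 * bnxt_napi structs are carved out of a single kzalloc'ed block.  RX ring
 * i always maps to bnapi[i]; TX rings share those bnapi entries when
 * BNXT_FLAG_SHARED_RINGS is set, and otherwise start at bnapi[rx_nr_rings].
 * XDP TX rings come first in tx_ring[] and are flagged BNXT_NAPI_FLAG_XDP.
 */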
3287 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3288 {
3289         int i, j, rc, size, arr_size;
3290         void *bnapi;
3291
3292         if (irq_re_init) {
3293                 /* Allocate bnapi mem pointer array and mem block for
3294                  * all queues
3295                  */
3296                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3297                                 bp->cp_nr_rings);
3298                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3299                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3300                 if (!bnapi)
3301                         return -ENOMEM;
3302
3303                 bp->bnapi = bnapi;
3304                 bnapi += arr_size;
3305                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3306                         bp->bnapi[i] = bnapi;
3307                         bp->bnapi[i]->index = i;
3308                         bp->bnapi[i]->bp = bp;
3309                 }
3310
3311                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3312                                       sizeof(struct bnxt_rx_ring_info),
3313                                       GFP_KERNEL);
3314                 if (!bp->rx_ring)
3315                         return -ENOMEM;
3316
3317                 for (i = 0; i < bp->rx_nr_rings; i++) {
3318                         bp->rx_ring[i].bnapi = bp->bnapi[i];
3319                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3320                 }
3321
3322                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3323                                       sizeof(struct bnxt_tx_ring_info),
3324                                       GFP_KERNEL);
3325                 if (!bp->tx_ring)
3326                         return -ENOMEM;
3327
3328                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3329                                           GFP_KERNEL);
3330
3331                 if (!bp->tx_ring_map)
3332                         return -ENOMEM;
3333
3334                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3335                         j = 0;
3336                 else
3337                         j = bp->rx_nr_rings;
3338
3339                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3340                         bp->tx_ring[i].bnapi = bp->bnapi[j];
3341                         bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
3342                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
3343                         if (i >= bp->tx_nr_rings_xdp) {
3344                                 bp->tx_ring[i].txq_index = i -
3345                                         bp->tx_nr_rings_xdp;
3346                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
3347                         } else {
3348                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
3349                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3350                         }
3351                 }
3352
3353                 rc = bnxt_alloc_stats(bp);
3354                 if (rc)
3355                         goto alloc_mem_err;
3356
3357                 rc = bnxt_alloc_ntp_fltrs(bp);
3358                 if (rc)
3359                         goto alloc_mem_err;
3360
3361                 rc = bnxt_alloc_vnics(bp);
3362                 if (rc)
3363                         goto alloc_mem_err;
3364         }
3365
3366         bnxt_init_ring_struct(bp);
3367
3368         rc = bnxt_alloc_rx_rings(bp);
3369         if (rc)
3370                 goto alloc_mem_err;
3371
3372         rc = bnxt_alloc_tx_rings(bp);
3373         if (rc)
3374                 goto alloc_mem_err;
3375
3376         rc = bnxt_alloc_cp_rings(bp);
3377         if (rc)
3378                 goto alloc_mem_err;
3379
3380         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3381                                   BNXT_VNIC_UCAST_FLAG;
3382         rc = bnxt_alloc_vnic_attributes(bp);
3383         if (rc)
3384                 goto alloc_mem_err;
3385         return 0;
3386
3387 alloc_mem_err:
3388         bnxt_free_mem(bp, true);
3389         return rc;
3390 }
3391
3392 static void bnxt_disable_int(struct bnxt *bp)
3393 {
3394         int i;
3395
3396         if (!bp->bnapi)
3397                 return;
3398
3399         for (i = 0; i < bp->cp_nr_rings; i++) {
3400                 struct bnxt_napi *bnapi = bp->bnapi[i];
3401                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3402                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3403
3404                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
3405                         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3406         }
3407 }
3408
3409 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
3410 {
3411         struct bnxt_napi *bnapi = bp->bnapi[n];
3412         struct bnxt_cp_ring_info *cpr;
3413
3414         cpr = &bnapi->cp_ring;
3415         return cpr->cp_ring_struct.map_idx;
3416 }
3417
3418 static void bnxt_disable_int_sync(struct bnxt *bp)
3419 {
3420         int i;
3421
3422         atomic_inc(&bp->intr_sem);
3423
3424         bnxt_disable_int(bp);
3425         for (i = 0; i < bp->cp_nr_rings; i++) {
3426                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
3427
3428                 synchronize_irq(bp->irq_tbl[map_idx].vector);
3429         }
3430 }
3431
3432 static void bnxt_enable_int(struct bnxt *bp)
3433 {
3434         int i;
3435
3436         atomic_set(&bp->intr_sem, 0);
3437         for (i = 0; i < bp->cp_nr_rings; i++) {
3438                 struct bnxt_napi *bnapi = bp->bnapi[i];
3439                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3440
3441                 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
3442         }
3443 }
3444
3445 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3446                             u16 cmpl_ring, u16 target_id)
3447 {
3448         struct input *req = request;
3449
3450         req->req_type = cpu_to_le16(req_type);
3451         req->cmpl_ring = cpu_to_le16(cmpl_ring);
3452         req->target_id = cpu_to_le16(target_id);
3453         req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3454 }
3455
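/* Core HWRM mailbox send: the request (or, when BNXT_FW_CAP_SHORT_CMD is
 * set, a short_input descriptor pointing at a DMA copy of it) is written
 * into BAR0 and the doorbell at offset 0x100 is rung.  Completion is then
 * detected either via the HWRM completion interrupt (when cmpl_ring is
 * valid) or by polling the DMA response buffer for a non-zero length and
 * a set valid byte.  The poll starts with short sleeps and falls back to
 * longer ones, bounded by the caller-supplied or default timeout.
 */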
3456 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3457                                  int timeout, bool silent)
3458 {
3459         int i, intr_process, rc, tmo_count;
3460         struct input *req = msg;
3461         u32 *data = msg;
3462         __le32 *resp_len;
3463         u8 *valid;
3464         u16 cp_ring_id, len = 0;
3465         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3466         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
3467         struct hwrm_short_input short_input = {0};
3468
3469         req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
3470         memset(resp, 0, PAGE_SIZE);
3471         cp_ring_id = le16_to_cpu(req->cmpl_ring);
3472         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3473
3474         if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
3475                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
3476
3477                 memcpy(short_cmd_req, req, msg_len);
3478                 memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
3479                                                    msg_len);
3480
3481                 short_input.req_type = req->req_type;
3482                 short_input.signature =
3483                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3484                 short_input.size = cpu_to_le16(msg_len);
3485                 short_input.req_addr =
3486                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3487
3488                 data = (u32 *)&short_input;
3489                 msg_len = sizeof(short_input);
3490
3491                 /* Sync memory write before updating doorbell */
3492                 wmb();
3493
3494                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3495         }
3496
3497         /* Write request msg to hwrm channel */
3498         __iowrite32_copy(bp->bar0, data, msg_len / 4);
3499
3500         for (i = msg_len; i < max_req_len; i += 4)
3501                 writel(0, bp->bar0 + i);
3502
3503         /* currently supports only one outstanding message */
3504         if (intr_process)
3505                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
3506
3507         /* Ring channel doorbell */
3508         writel(1, bp->bar0 + 0x100);
3509
3510         if (!timeout)
3511                 timeout = DFLT_HWRM_CMD_TIMEOUT;
3512         /* convert timeout to usec */
3513         timeout *= 1000;
3514
3515         i = 0;
3516         /* Short timeout for the first few iterations:
3517          * number of loops = number of loops for short timeout +
3518          * number of loops for standard timeout.
3519          */
3520         tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
3521         timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
3522         tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
3523         resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
3524         if (intr_process) {
3525                 /* Wait until hwrm response cmpl interrupt is processed */
3526                 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
3527                        i++ < tmo_count) {
3528                         /* on first few passes, just barely sleep */
3529                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3530                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3531                                              HWRM_SHORT_MAX_TIMEOUT);
3532                         else
3533                                 usleep_range(HWRM_MIN_TIMEOUT,
3534                                              HWRM_MAX_TIMEOUT);
3535                 }
3536
3537                 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
3538                         netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
3539                                    le16_to_cpu(req->req_type));
3540                         return -1;
3541                 }
3542                 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3543                       HWRM_RESP_LEN_SFT;
3544                 valid = bp->hwrm_cmd_resp_addr + len - 1;
3545         } else {
3546                 int j;
3547
3548                 /* Check if response len is updated */
3549                 for (i = 0; i < tmo_count; i++) {
3550                         len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3551                               HWRM_RESP_LEN_SFT;
3552                         if (len)
3553                                 break;
3554                         /* on first few passes, just barely sleep */
3555                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3556                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3557                                              HWRM_SHORT_MAX_TIMEOUT);
3558                         else
3559                                 usleep_range(HWRM_MIN_TIMEOUT,
3560                                              HWRM_MAX_TIMEOUT);
3561                 }
3562
3563                 if (i >= tmo_count) {
3564                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
3565                                    HWRM_TOTAL_TIMEOUT(i),
3566                                    le16_to_cpu(req->req_type),
3567                                    le16_to_cpu(req->seq_id), len);
3568                         return -1;
3569                 }
3570
3571                 /* Last byte of resp contains valid bit */
3572                 valid = bp->hwrm_cmd_resp_addr + len - 1;
3573                 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
3574                         /* make sure we read from updated DMA memory */
3575                         dma_rmb();
3576                         if (*valid)
3577                                 break;
3578                         usleep_range(1, 5);
3579                 }
3580
3581                 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
3582                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
3583                                    HWRM_TOTAL_TIMEOUT(i),
3584                                    le16_to_cpu(req->req_type),
3585                                    le16_to_cpu(req->seq_id), len, *valid);
3586                         return -1;
3587                 }
3588         }
3589
3590         /* Zero the valid bit for compatibility.  The valid bit in an older
3591          * spec may become a new field in a newer spec.  We must make sure
3592          * that a new field not implemented by the old spec reads as zero.
3593          */
3594         *valid = 0;
3595         rc = le16_to_cpu(resp->error_code);
3596         if (rc && !silent)
3597                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3598                            le16_to_cpu(resp->req_type),
3599                            le16_to_cpu(resp->seq_id), rc);
3600         return rc;
3601 }
3602
3603 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3604 {
3605         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3606 }
3607
3608 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3609                               int timeout)
3610 {
3611         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3612 }
3613
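/* Locked wrappers around bnxt_hwrm_do_send_msg().  A typical caller looks
 * like bnxt_hwrm_func_drv_unrgtr() below:
 *
 *	struct hwrm_func_drv_unrgtr_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
 *	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *
 * Callers that need to read the response buffer take bp->hwrm_cmd_lock
 * themselves and use _hwrm_send_message() instead.
 */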
3614 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3615 {
3616         int rc;
3617
3618         mutex_lock(&bp->hwrm_cmd_lock);
3619         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3620         mutex_unlock(&bp->hwrm_cmd_lock);
3621         return rc;
3622 }
3623
3624 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3625                              int timeout)
3626 {
3627         int rc;
3628
3629         mutex_lock(&bp->hwrm_cmd_lock);
3630         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3631         mutex_unlock(&bp->hwrm_cmd_lock);
3632         return rc;
3633 }
3634
3635 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3636                                      int bmap_size)
3637 {
3638         struct hwrm_func_drv_rgtr_input req = {0};
3639         DECLARE_BITMAP(async_events_bmap, 256);
3640         u32 *events = (u32 *)async_events_bmap;
3641         int i;
3642
3643         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3644
3645         req.enables =
3646                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
3647
3648         memset(async_events_bmap, 0, sizeof(async_events_bmap));
3649         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
3650                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
3651
3652         if (bmap && bmap_size) {
3653                 for (i = 0; i < bmap_size; i++) {
3654                         if (test_bit(i, bmap))
3655                                 __set_bit(i, async_events_bmap);
3656                 }
3657         }
3658
3659         for (i = 0; i < 8; i++)
3660                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
3661
3662         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3663 }
3664
3665 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
3666 {
3667         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3668         struct hwrm_func_drv_rgtr_input req = {0};
3669         int rc;
3670
3671         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3672
3673         req.enables =
3674                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
3675                             FUNC_DRV_RGTR_REQ_ENABLES_VER);
3676
3677         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
3678         req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
3679         req.ver_maj_8b = DRV_VER_MAJ;
3680         req.ver_min_8b = DRV_VER_MIN;
3681         req.ver_upd_8b = DRV_VER_UPD;
3682         req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
3683         req.ver_min = cpu_to_le16(DRV_VER_MIN);
3684         req.ver_upd = cpu_to_le16(DRV_VER_UPD);
3685
3686         if (BNXT_PF(bp)) {
3687                 u32 data[8];
3688                 int i;
3689
3690                 memset(data, 0, sizeof(data));
3691                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
3692                         u16 cmd = bnxt_vf_req_snif[i];
3693                         unsigned int bit, idx;
3694
3695                         idx = cmd / 32;
3696                         bit = cmd % 32;
3697                         data[idx] |= 1 << bit;
3698                 }
3699
3700                 for (i = 0; i < 8; i++)
3701                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
3702
3703                 req.enables |=
3704                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
3705         }
3706
3707         mutex_lock(&bp->hwrm_cmd_lock);
3708         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3709         if (rc)
3710                 rc = -EIO;
3711         else if (resp->flags &
3712                  cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
3713                 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
3714         mutex_unlock(&bp->hwrm_cmd_lock);
3715         return rc;
3716 }
3717
3718 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
3719 {
3720         struct hwrm_func_drv_unrgtr_input req = {0};
3721
3722         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
3723         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3724 }
3725
3726 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
3727 {
3728         u32 rc = 0;
3729         struct hwrm_tunnel_dst_port_free_input req = {0};
3730
3731         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
3732         req.tunnel_type = tunnel_type;
3733
3734         switch (tunnel_type) {
3735         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
3736                 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
3737                 break;
3738         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
3739                 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
3740                 break;
3741         default:
3742                 break;
3743         }
3744
3745         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3746         if (rc)
3747                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
3748                            rc);
3749         return rc;
3750 }
3751
3752 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
3753                                            u8 tunnel_type)
3754 {
3755         u32 rc = 0;
3756         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3757         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3758
3759         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
3760
3761         req.tunnel_type = tunnel_type;
3762         req.tunnel_dst_port_val = port;
3763
3764         mutex_lock(&bp->hwrm_cmd_lock);
3765         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3766         if (rc) {
3767                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
3768                            rc);
3769                 goto err_out;
3770         }
3771
3772         switch (tunnel_type) {
3773         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
3774                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3775                 break;
3776         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
3777                 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
3778                 break;
3779         default:
3780                 break;
3781         }
3782
3783 err_out:
3784         mutex_unlock(&bp->hwrm_cmd_lock);
3785         return rc;
3786 }
3787
3788 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
3789 {
3790         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3791         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3792
3793         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
3794         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3795
3796         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
3797         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
3798         req.mask = cpu_to_le32(vnic->rx_mask);
3799         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3800 }
3801
3802 #ifdef CONFIG_RFS_ACCEL
3803 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
3804                                             struct bnxt_ntuple_filter *fltr)
3805 {
3806         struct hwrm_cfa_ntuple_filter_free_input req = {0};
3807
3808         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
3809         req.ntuple_filter_id = fltr->filter_id;
3810         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3811 }
3812
3813 #define BNXT_NTP_FLTR_FLAGS                                     \
3814         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
3815          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
3816          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
3817          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
3818          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
3819          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
3820          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
3821          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
3822          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
3823          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
3824          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
3825          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
3826          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
3827          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
3828
3829 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
3830                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
3831
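/* Allocate an ntuple filter in the NIC for an aRFS flow: the filter
 * matches the exact source/destination addresses, ports and IP protocol
 * (IPv4 or IPv6), hangs off the VNIC 0 L2 filter, and steers matches to
 * the per-ring VNIC for the target RX queue (vnic_info[rxq + 1]).
 */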
3832 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
3833                                              struct bnxt_ntuple_filter *fltr)
3834 {
3835         int rc = 0;
3836         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
3837         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3838                 bp->hwrm_cmd_resp_addr;
3839         struct flow_keys *keys = &fltr->fkeys;
3840         struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
3841
3842         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
3843         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
3844
3845         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
3846
3847         req.ethertype = htons(ETH_P_IP);
3848         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
3849         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
3850         req.ip_protocol = keys->basic.ip_proto;
3851
3852         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
3853                 int i;
3854
3855                 req.ethertype = htons(ETH_P_IPV6);
3856                 req.ip_addr_type =
3857                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
3858                 *(struct in6_addr *)&req.src_ipaddr[0] =
3859                         keys->addrs.v6addrs.src;
3860                 *(struct in6_addr *)&req.dst_ipaddr[0] =
3861                         keys->addrs.v6addrs.dst;
3862                 for (i = 0; i < 4; i++) {
3863                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3864                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3865                 }
3866         } else {
3867                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
3868                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3869                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
3870                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3871         }
3872         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
3873                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
3874                 req.tunnel_type =
3875                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
3876         }
3877
3878         req.src_port = keys->ports.src;
3879         req.src_port_mask = cpu_to_be16(0xffff);
3880         req.dst_port = keys->ports.dst;
3881         req.dst_port_mask = cpu_to_be16(0xffff);
3882
3883         req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
3884         mutex_lock(&bp->hwrm_cmd_lock);
3885         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3886         if (!rc)
3887                 fltr->filter_id = resp->ntuple_filter_id;
3888         mutex_unlock(&bp->hwrm_cmd_lock);
3889         return rc;
3890 }
3891 #endif
3892
3893 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
3894                                      u8 *mac_addr)
3895 {
3896         u32 rc = 0;
3897         struct hwrm_cfa_l2_filter_alloc_input req = {0};
3898         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3899
3900         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
3901         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
3902         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
3903                 req.flags |=
3904                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
3905         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
3906         req.enables =
3907                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
3908                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
3909                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
3910         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
3911         req.l2_addr_mask[0] = 0xff;
3912         req.l2_addr_mask[1] = 0xff;
3913         req.l2_addr_mask[2] = 0xff;
3914         req.l2_addr_mask[3] = 0xff;
3915         req.l2_addr_mask[4] = 0xff;
3916         req.l2_addr_mask[5] = 0xff;
3917
3918         mutex_lock(&bp->hwrm_cmd_lock);
3919         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3920         if (!rc)
3921                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
3922                                                         resp->l2_filter_id;
3923         mutex_unlock(&bp->hwrm_cmd_lock);
3924         return rc;
3925 }
3926
3927 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
3928 {
3929         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
3930         int rc = 0;
3931
3932         /* Any associated ntuple filters will also be cleared by firmware. */
3933         mutex_lock(&bp->hwrm_cmd_lock);
3934         for (i = 0; i < num_of_vnics; i++) {
3935                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3936
3937                 for (j = 0; j < vnic->uc_filter_count; j++) {
3938                         struct hwrm_cfa_l2_filter_free_input req = {0};
3939
3940                         bnxt_hwrm_cmd_hdr_init(bp, &req,
3941                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
3942
3943                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
3944
3945                         rc = _hwrm_send_message(bp, &req, sizeof(req),
3946                                                 HWRM_CMD_TIMEOUT);
3947                 }
3948                 vnic->uc_filter_count = 0;
3949         }
3950         mutex_unlock(&bp->hwrm_cmd_lock);
3951
3952         return rc;
3953 }
3954
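/* Configure TPA (LRO/GRO_HW aggregation) for a VNIC.  max_agg_segs is a
 * log2 count of the segments that may be aggregated beyond the first
 * packet.  As a rough worked example (assuming a 4K BNXT_RX_PAGE_SIZE and
 * the usual MAX_SKB_FRAGS of 17): MTU 1500 gives mss = 1460,
 * n = 4096 / 1460 = 2, nsegs = (17 - 1) * 2 = 32, so
 * max_agg_segs = ilog2(32) = 5.
 */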
3955 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3956 {
3957         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3958         struct hwrm_vnic_tpa_cfg_input req = {0};
3959
3960         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
3961                 return 0;
3962
3963         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3964
3965         if (tpa_flags) {
3966                 u16 mss = bp->dev->mtu - 40;
3967                 u32 nsegs, n, segs = 0, flags;
3968
3969                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
3970                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
3971                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
3972                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
3973                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3974                 if (tpa_flags & BNXT_FLAG_GRO)
3975                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
3976
3977                 req.flags = cpu_to_le32(flags);
3978
3979                 req.enables =
3980                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
3981                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
3982                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
3983
3984                 /* The number of segs is in log2 units, and the first
3985                  * packet is not counted as part of these units.
3986                  */
3987                 if (mss <= BNXT_RX_PAGE_SIZE) {
3988                         n = BNXT_RX_PAGE_SIZE / mss;
3989                         nsegs = (MAX_SKB_FRAGS - 1) * n;
3990                 } else {
3991                         n = mss / BNXT_RX_PAGE_SIZE;
3992                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
3993                                 n++;
3994                         nsegs = (MAX_SKB_FRAGS - n) / n;
3995                 }
3996
3997                 segs = ilog2(nsegs);
3998                 req.max_agg_segs = cpu_to_le16(segs);
3999                 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
4000
4001                 req.min_agg_len = cpu_to_le32(512);
4002         }
4003         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4004
4005         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4006 }
4007
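/* Program the RSS hash type, the HW_HASH_INDEX_SIZE-entry indirection
 * table (filled round-robin with the VNIC's ring group IDs) and the hash
 * key for a VNIC.  On Nitro A0 the last RX ring is left out of the
 * distribution.
 */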
4008 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4009 {
4010         u32 i, j, max_rings;
4011         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4012         struct hwrm_vnic_rss_cfg_input req = {0};
4013
4014         if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
4015                 return 0;
4016
4017         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4018         if (set_rss) {
4019                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4020                 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4021                 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4022                         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4023                                 max_rings = bp->rx_nr_rings - 1;
4024                         else
4025                                 max_rings = bp->rx_nr_rings;
4026                 } else {
4027                         max_rings = 1;
4028                 }
4029
4030                 /* Fill the RSS indirection table with ring group ids */
4031                 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4032                         if (j == max_rings)
4033                                 j = 0;
4034                         vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4035                 }
4036
4037                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4038                 req.hash_key_tbl_addr =
4039                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
4040         }
4041         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4042         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4043 }
4044
4045 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4046 {
4047         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4048         struct hwrm_vnic_plcmodes_cfg_input req = {0};
4049
4050         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4051         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4052                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4053                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4054         req.enables =
4055                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4056                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4057         /* thresholds not implemented in firmware yet */
4058         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4059         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4060         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4061         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4062 }
4063
4064 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4065                                         u16 ctx_idx)
4066 {
4067         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4068
4069         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4070         req.rss_cos_lb_ctx_id =
4071                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
4072
4073         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4074         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
4075 }
4076
4077 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4078 {
4079         int i, j;
4080
4081         for (i = 0; i < bp->nr_vnics; i++) {
4082                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4083
4084                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4085                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4086                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4087                 }
4088         }
4089         bp->rsscos_nr_ctxs = 0;
4090 }
4091
4092 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
4093 {
4094         int rc;
4095         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4096         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4097                                                 bp->hwrm_cmd_resp_addr;
4098
4099         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4100                                -1);
4101
4102         mutex_lock(&bp->hwrm_cmd_lock);
4103         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4104         if (!rc)
4105                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
4106                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
4107         mutex_unlock(&bp->hwrm_cmd_lock);
4108
4109         return rc;
4110 }
4111
4112 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4113 {
4114         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4115                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4116         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4117 }
4118
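/* Configure a VNIC: pick its RSS/COS rules, choose the default ring group
 * (ring 0 for the RSS VNIC, ring vnic_id - 1 for the per-ring RFS VNICs,
 * the last RX ring for the special Nitro A0 VNIC), and set the MRU to the
 * MTU plus Ethernet header, FCS and VLAN tag.  VLAN stripping is enabled
 * when the driver strips VLANs or a VF default VLAN is set.
 */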
4119 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
4120 {
4121         unsigned int ring = 0, grp_idx;
4122         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4123         struct hwrm_vnic_cfg_input req = {0};
4124         u16 def_vlan = 0;
4125
4126         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
4127
4128         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
4129         /* Only RSS is supported for now; COS & LB are TBD */
4130         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
4131                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4132                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4133                                            VNIC_CFG_REQ_ENABLES_MRU);
4134         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
4135                 req.rss_rule =
4136                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
4137                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4138                                            VNIC_CFG_REQ_ENABLES_MRU);
4139                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
4140         } else {
4141                 req.rss_rule = cpu_to_le16(0xffff);
4142         }
4143
4144         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
4145             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
4146                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
4147                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
4148         } else {
4149                 req.cos_rule = cpu_to_le16(0xffff);
4150         }
4151
4152         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4153                 ring = 0;
4154         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
4155                 ring = vnic_id - 1;
4156         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
4157                 ring = bp->rx_nr_rings - 1;
4158
4159         grp_idx = bp->rx_ring[ring].bnapi->index;
4160         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4161         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
4162
4163         req.lb_rule = cpu_to_le16(0xffff);
4164         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
4165                               VLAN_HLEN);
4166
4167 #ifdef CONFIG_BNXT_SRIOV
4168         if (BNXT_VF(bp))
4169                 def_vlan = bp->vf.vlan;
4170 #endif
4171         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
4172                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
4173         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
4174                 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
4175
4176         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4177 }
4178
4179 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
4180 {
4181         u32 rc = 0;
4182
4183         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
4184                 struct hwrm_vnic_free_input req = {0};
4185
4186                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
4187                 req.vnic_id =
4188                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
4189
4190                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4191                 if (rc)
4192                         return rc;
4193                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
4194         }
4195         return rc;
4196 }
4197
4198 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
4199 {
4200         u16 i;
4201
4202         for (i = 0; i < bp->nr_vnics; i++)
4203                 bnxt_hwrm_vnic_free_one(bp, i);
4204 }
4205
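/* Allocate a VNIC in firmware and map nr_rings ring groups, starting at
 * start_rx_ring_idx, to it.  The returned firmware VNIC ID is stored in
 * bp->vnic_info[vnic_id].fw_vnic_id.
 */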
4206 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
4207                                 unsigned int start_rx_ring_idx,
4208                                 unsigned int nr_rings)
4209 {
4210         int rc = 0;
4211         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
4212         struct hwrm_vnic_alloc_input req = {0};
4213         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4214
4215         /* map ring groups to this vnic */
4216         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
4217                 grp_idx = bp->rx_ring[i].bnapi->index;
4218                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
4219                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
4220                                    j, nr_rings);
4221                         break;
4222                 }
4223                 bp->vnic_info[vnic_id].fw_grp_ids[j] =
4224                                         bp->grp_info[grp_idx].fw_grp_id;
4225         }
4226
4227         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
4228         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
4229         if (vnic_id == 0)
4230                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
4231
4232         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
4233
4234         mutex_lock(&bp->hwrm_cmd_lock);
4235         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4236         if (!rc)
4237                 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
4238         mutex_unlock(&bp->hwrm_cmd_lock);
4239         return rc;
4240 }
4241
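/* Query VNIC capabilities (HWRM spec 1.6.0 or newer) and cache the new RSS
 * and RoCE mirroring capability bits in bp->flags.
 */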
4242 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
4243 {
4244         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4245         struct hwrm_vnic_qcaps_input req = {0};
4246         int rc;
4247
4248         if (bp->hwrm_spec_code < 0x10600)
4249                 return 0;
4250
4251         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4252         mutex_lock(&bp->hwrm_cmd_lock);
4253         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4254         if (!rc) {
4255                 u32 flags = le32_to_cpu(resp->flags);
4256
4257                 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)
4258                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
4259                 if (flags &
4260                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
4261                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
4262         }
4263         mutex_unlock(&bp->hwrm_cmd_lock);
4264         return rc;
4265 }
4266
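/* Allocate one hardware ring group per RX ring, tying together that ring's
 * completion, RX, aggregation and stats contexts.
 */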
4267 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
4268 {
4269         u16 i;
4270         u32 rc = 0;
4271
4272         mutex_lock(&bp->hwrm_cmd_lock);
4273         for (i = 0; i < bp->rx_nr_rings; i++) {
4274                 struct hwrm_ring_grp_alloc_input req = {0};
4275                 struct hwrm_ring_grp_alloc_output *resp =
4276                                         bp->hwrm_cmd_resp_addr;
4277                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
4278
4279                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
4280
4281                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
4282                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
4283                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
4284                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
4285
4286                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4287                                         HWRM_CMD_TIMEOUT);
4288                 if (rc)
4289                         break;
4290
4291                 bp->grp_info[grp_idx].fw_grp_id =
4292                         le32_to_cpu(resp->ring_group_id);
4293         }
4294         mutex_unlock(&bp->hwrm_cmd_lock);
4295         return rc;
4296 }
4297
4298 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
4299 {
4300         u16 i;
4301         u32 rc = 0;
4302         struct hwrm_ring_grp_free_input req = {0};
4303
4304         if (!bp->grp_info)
4305                 return 0;
4306
4307         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
4308
4309         mutex_lock(&bp->hwrm_cmd_lock);
4310         for (i = 0; i < bp->cp_nr_rings; i++) {
4311                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
4312                         continue;
4313                 req.ring_group_id =
4314                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
4315
4316                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4317                                         HWRM_CMD_TIMEOUT);
4318                 if (rc)
4319                         break;
4320                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4321         }
4322         mutex_unlock(&bp->hwrm_cmd_lock);
4323         return rc;
4324 }
4325
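/* Common helper to allocate a single TX, RX, aggregation or completion ring
 * in firmware.  On success the firmware ring ID is saved in ring->fw_ring_id;
 * any firmware error code is mapped to -EIO.
 */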
4326 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4327                                     struct bnxt_ring_struct *ring,
4328                                     u32 ring_type, u32 map_index)
4329 {
4330         int rc = 0, err = 0;
4331         struct hwrm_ring_alloc_input req = {0};
4332         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4333         struct bnxt_ring_grp_info *grp_info;
4334         u16 ring_id;
4335
4336         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
4337
4338         req.enables = 0;
4339         if (ring->nr_pages > 1) {
4340                 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
4341                 /* Page size is in log2 units */
4342                 req.page_size = BNXT_PAGE_SHIFT;
4343                 req.page_tbl_depth = 1;
4344         } else {
4345                 req.page_tbl_addr =  cpu_to_le64(ring->dma_arr[0]);
4346         }
4347         req.fbo = 0;
4348         /* Association of ring index with doorbell index and MSIX number */
4349         req.logical_id = cpu_to_le16(map_index);
4350
4351         switch (ring_type) {
4352         case HWRM_RING_ALLOC_TX:
4353                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
4354                 /* Association of transmit ring with completion ring */
4355                 grp_info = &bp->grp_info[ring->grp_idx];
4356                 req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
4357                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
4358                 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4359                 req.queue_id = cpu_to_le16(ring->queue_id);
4360                 break;
4361         case HWRM_RING_ALLOC_RX:
4362                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4363                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
4364                 break;
4365         case HWRM_RING_ALLOC_AGG:
4366                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4367                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
4368                 break;
4369         case HWRM_RING_ALLOC_CMPL:
4370                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
4371                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4372                 if (bp->flags & BNXT_FLAG_USING_MSIX)
4373                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4374                 break;
4375         default:
4376                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
4377                            ring_type);
4378                 return -1;
4379         }
4380
4381         mutex_lock(&bp->hwrm_cmd_lock);
4382         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4383         err = le16_to_cpu(resp->error_code);
4384         ring_id = le16_to_cpu(resp->ring_id);
4385         mutex_unlock(&bp->hwrm_cmd_lock);
4386
4387         if (rc || err) {
4388                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
4389                            ring_type, rc, err);
4390                 return -EIO;
4391         }
4392         ring->fw_ring_id = ring_id;
4393         return rc;
4394 }
4395
4396 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
4397 {
4398         int rc;
4399
4400         if (BNXT_PF(bp)) {
4401                 struct hwrm_func_cfg_input req = {0};
4402
4403                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4404                 req.fid = cpu_to_le16(0xffff);
4405                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4406                 req.async_event_cr = cpu_to_le16(idx);
4407                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4408         } else {
4409                 struct hwrm_func_vf_cfg_input req = {0};
4410
4411                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
4412                 req.enables =
4413                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4414                 req.async_event_cr = cpu_to_le16(idx);
4415                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4416         }
4417         return rc;
4418 }
4419
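/* Allocate all firmware rings: completion rings first (completion ring 0 is
 * also registered as the async event ring), then TX, RX and, if aggregation
 * rings are enabled, the AGG rings.  Doorbell addresses are derived from
 * BAR1 at a 0x80 stride.
 */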
4420 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4421 {
4422         int i, rc = 0;
4423
4424         for (i = 0; i < bp->cp_nr_rings; i++) {
4425                 struct bnxt_napi *bnapi = bp->bnapi[i];
4426                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4427                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4428                 u32 map_idx = ring->map_idx;
4429
4430                 cpr->cp_doorbell = bp->bar1 + map_idx * 0x80;
4431                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL,
4432                                               map_idx);
4433                 if (rc)
4434                         goto err_out;
4435                 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
4436                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
4437
4438                 if (!i) {
4439                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
4440                         if (rc)
4441                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
4442                 }
4443         }
4444
4445         for (i = 0; i < bp->tx_nr_rings; i++) {
4446                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4447                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4448                 u32 map_idx = i;
4449
4450                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
4451                                               map_idx);
4452                 if (rc)
4453                         goto err_out;
4454                 txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
4455         }
4456
4457         for (i = 0; i < bp->rx_nr_rings; i++) {
4458                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4459                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
4460                 u32 map_idx = rxr->bnapi->index;
4461
4462                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
4463                                               map_idx);
4464                 if (rc)
4465                         goto err_out;
4466                 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
4467                 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
4468                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
4469         }
4470
4471         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4472                 for (i = 0; i < bp->rx_nr_rings; i++) {
4473                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4474                         struct bnxt_ring_struct *ring =
4475                                                 &rxr->rx_agg_ring_struct;
4476                         u32 grp_idx = ring->grp_idx;
4477                         u32 map_idx = grp_idx + bp->rx_nr_rings;
4478
4479                         rc = hwrm_ring_alloc_send_msg(bp, ring,
4480                                                       HWRM_RING_ALLOC_AGG,
4481                                                       map_idx);
4482                         if (rc)
4483                                 goto err_out;
4484
4485                         rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
4486                         writel(DB_KEY_RX | rxr->rx_agg_prod,
4487                                rxr->rx_agg_doorbell);
4488                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
4489                 }
4490         }
4491 err_out:
4492         return rc;
4493 }
4494
4495 static int hwrm_ring_free_send_msg(struct bnxt *bp,
4496                                    struct bnxt_ring_struct *ring,
4497                                    u32 ring_type, int cmpl_ring_id)
4498 {
4499         int rc;
4500         struct hwrm_ring_free_input req = {0};
4501         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
4502         u16 error_code;
4503
4504         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
4505         req.ring_type = ring_type;
4506         req.ring_id = cpu_to_le16(ring->fw_ring_id);
4507
4508         mutex_lock(&bp->hwrm_cmd_lock);
4509         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4510         error_code = le16_to_cpu(resp->error_code);
4511         mutex_unlock(&bp->hwrm_cmd_lock);
4512
4513         if (rc || error_code) {
4514                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
4515                            ring_type, rc, error_code);
4516                 return -EIO;
4517         }
4518         return 0;
4519 }
4520
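/* Free all firmware rings: TX, RX and AGG rings first, then the completion
 * rings after interrupts have been disabled.  When close_path is set, the
 * associated completion ring ID is passed with each free request (likely so
 * that firmware can finish any pending work on that path).
 */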
4521 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
4522 {
4523         int i;
4524
4525         if (!bp->bnapi)
4526                 return;
4527
4528         for (i = 0; i < bp->tx_nr_rings; i++) {
4529                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4530                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4531                 u32 grp_idx = txr->bnapi->index;
4532                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4533
4534                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4535                         hwrm_ring_free_send_msg(bp, ring,
4536                                                 RING_FREE_REQ_RING_TYPE_TX,
4537                                                 close_path ? cmpl_ring_id :
4538                                                 INVALID_HW_RING_ID);
4539                         ring->fw_ring_id = INVALID_HW_RING_ID;
4540                 }
4541         }
4542
4543         for (i = 0; i < bp->rx_nr_rings; i++) {
4544                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4545                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
4546                 u32 grp_idx = rxr->bnapi->index;
4547                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4548
4549                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4550                         hwrm_ring_free_send_msg(bp, ring,
4551                                                 RING_FREE_REQ_RING_TYPE_RX,
4552                                                 close_path ? cmpl_ring_id :
4553                                                 INVALID_HW_RING_ID);
4554                         ring->fw_ring_id = INVALID_HW_RING_ID;
4555                         bp->grp_info[grp_idx].rx_fw_ring_id =
4556                                 INVALID_HW_RING_ID;
4557                 }
4558         }
4559
4560         for (i = 0; i < bp->rx_nr_rings; i++) {
4561                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4562                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
4563                 u32 grp_idx = rxr->bnapi->index;
4564                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4565
4566                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4567                         hwrm_ring_free_send_msg(bp, ring,
4568                                                 RING_FREE_REQ_RING_TYPE_RX,
4569                                                 close_path ? cmpl_ring_id :
4570                                                 INVALID_HW_RING_ID);
4571                         ring->fw_ring_id = INVALID_HW_RING_ID;
4572                         bp->grp_info[grp_idx].agg_fw_ring_id =
4573                                 INVALID_HW_RING_ID;
4574                 }
4575         }
4576
4577         /* The completion rings are about to be freed.  After that the
4578          * IRQ doorbell will no longer work, so we must disable the IRQ
4579          * here.
4580          */
4581         bnxt_disable_int_sync(bp);
4582
4583         for (i = 0; i < bp->cp_nr_rings; i++) {
4584                 struct bnxt_napi *bnapi = bp->bnapi[i];
4585                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4586                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4587
4588                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4589                         hwrm_ring_free_send_msg(bp, ring,
4590                                                 RING_FREE_REQ_RING_TYPE_L2_CMPL,
4591                                                 INVALID_HW_RING_ID);
4592                         ring->fw_ring_id = INVALID_HW_RING_ID;
4593                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4594                 }
4595         }
4596 }
4597
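/* Query the resources currently reserved for this function via HWRM_FUNC_QCFG
 * and update bp->hw_resc.  Beyond TX rings, the fields are only meaningful
 * with the new resource manager (BNXT_NEW_RM).
 */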
4598 static int bnxt_hwrm_get_rings(struct bnxt *bp)
4599 {
4600         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4601         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
4602         struct hwrm_func_qcfg_input req = {0};
4603         int rc;
4604
4605         if (bp->hwrm_spec_code < 0x10601)
4606                 return 0;
4607
4608         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4609         req.fid = cpu_to_le16(0xffff);
4610         mutex_lock(&bp->hwrm_cmd_lock);
4611         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4612         if (rc) {
4613                 mutex_unlock(&bp->hwrm_cmd_lock);
4614                 return -EIO;
4615         }
4616
4617         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
4618         if (BNXT_NEW_RM(bp)) {
4619                 u16 cp, stats;
4620
4621                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
4622                 hw_resc->resv_hw_ring_grps =
4623                         le32_to_cpu(resp->alloc_hw_ring_grps);
4624                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
4625                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
4626                 stats = le16_to_cpu(resp->alloc_stat_ctx);
4627                 cp = min_t(u16, cp, stats);
4628                 hw_resc->resv_cp_rings = cp;
4629         }
4630         mutex_unlock(&bp->hwrm_cmd_lock);
4631         return 0;
4632 }
4633
4634 /* Caller must hold bp->hwrm_cmd_lock */
4635 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
4636 {
4637         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4638         struct hwrm_func_qcfg_input req = {0};
4639         int rc;
4640
4641         if (bp->hwrm_spec_code < 0x10601)
4642                 return 0;
4643
4644         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4645         req.fid = cpu_to_le16(fid);
4646         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4647         if (!rc)
4648                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
4649
4650         return rc;
4651 }
4652
4653 static void
4654 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
4655                              int tx_rings, int rx_rings, int ring_grps,
4656                              int cp_rings, int vnics)
4657 {
4658         u32 enables = 0;
4659
4660         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
4661         req->fid = cpu_to_le16(0xffff);
4662         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4663         req->num_tx_rings = cpu_to_le16(tx_rings);
4664         if (BNXT_NEW_RM(bp)) {
4665                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
4666                 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4667                                       FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
4668                 enables |= ring_grps ?
4669                            FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
4670                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
4671
4672                 req->num_rx_rings = cpu_to_le16(rx_rings);
4673                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
4674                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
4675                 req->num_stat_ctxs = req->num_cmpl_rings;
4676                 req->num_vnics = cpu_to_le16(vnics);
4677         }
4678         req->enables = cpu_to_le32(enables);
4679 }
4680
4681 static void
4682 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
4683                              struct hwrm_func_vf_cfg_input *req, int tx_rings,
4684                              int rx_rings, int ring_grps, int cp_rings,
4685                              int vnics)
4686 {
4687         u32 enables = 0;
4688
4689         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
4690         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4691         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
4692         enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4693                               FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
4694         enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
4695         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
4696
4697         req->num_tx_rings = cpu_to_le16(tx_rings);
4698         req->num_rx_rings = cpu_to_le16(rx_rings);
4699         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
4700         req->num_cmpl_rings = cpu_to_le16(cp_rings);
4701         req->num_stat_ctxs = req->num_cmpl_rings;
4702         req->num_vnics = cpu_to_le16(vnics);
4703
4704         req->enables = cpu_to_le32(enables);
4705 }
4706
4707 static int
4708 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4709                            int ring_grps, int cp_rings, int vnics)
4710 {
4711         struct hwrm_func_cfg_input req = {0};
4712         int rc;
4713
4714         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4715                                      cp_rings, vnics);
4716         if (!req.enables)
4717                 return 0;
4718
4719         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4720         if (rc)
4721                 return -ENOMEM;
4722
4723         if (bp->hwrm_spec_code < 0x10601)
4724                 bp->hw_resc.resv_tx_rings = tx_rings;
4725
4726         rc = bnxt_hwrm_get_rings(bp);
4727         return rc;
4728 }
4729
4730 static int
4731 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4732                            int ring_grps, int cp_rings, int vnics)
4733 {
4734         struct hwrm_func_vf_cfg_input req = {0};
4735         int rc;
4736
4737         if (!BNXT_NEW_RM(bp)) {
4738                 bp->hw_resc.resv_tx_rings = tx_rings;
4739                 return 0;
4740         }
4741
4742         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4743                                      cp_rings, vnics);
4744         req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
4745                                    FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS);
4746         req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
4747         req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
4748         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4749         if (rc)
4750                 return -ENOMEM;
4751
4752         rc = bnxt_hwrm_get_rings(bp);
4753         return rc;
4754 }
4755
4756 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
4757                                    int cp, int vnic)
4758 {
4759         if (BNXT_PF(bp))
4760                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
4761         else
4762                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
4763 }
4764
4765 static int bnxt_cp_rings_in_use(struct bnxt *bp)
4766 {
4767         int cp = bp->cp_nr_rings;
4768         int ulp_msix, ulp_base;
4769
4770         ulp_msix = bnxt_get_ulp_msix_num(bp);
4771         if (ulp_msix) {
4772                 ulp_base = bnxt_get_ulp_msix_base(bp);
4773                 cp += ulp_msix;
4774                 if ((ulp_base + ulp_msix) > cp)
4775                         cp = ulp_base + ulp_msix;
4776         }
4777         return cp;
4778 }
4779
4780 static bool bnxt_need_reserve_rings(struct bnxt *bp)
4781 {
4782         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
4783         int cp = bnxt_cp_rings_in_use(bp);
4784         int rx = bp->rx_nr_rings;
4785         int vnic = 1, grp = rx;
4786
4787         if (bp->hwrm_spec_code < 0x10601)
4788                 return false;
4789
4790         if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
4791                 return true;
4792
4793         if (bp->flags & BNXT_FLAG_RFS)
4794                 vnic = rx + 1;
4795         if (bp->flags & BNXT_FLAG_AGG_RINGS)
4796                 rx <<= 1;
4797         if (BNXT_NEW_RM(bp) &&
4798             (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
4799              hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
4800                 return true;
4801         return false;
4802 }
4803
4804 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
4805                            bool shared);
4806
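/* Reserve rings with firmware and then trim the driver's ring counts to what
 * was actually granted.  If too few RX rings were granted to keep aggregation
 * rings, LRO/aggregation is disabled instead of failing, unless the device is
 * already running.
 */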
4807 static int __bnxt_reserve_rings(struct bnxt *bp)
4808 {
4809         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
4810         int cp = bnxt_cp_rings_in_use(bp);
4811         int tx = bp->tx_nr_rings;
4812         int rx = bp->rx_nr_rings;
4813         int grp, rx_rings, rc;
4814         bool sh = false;
4815         int vnic = 1;
4816
4817         if (!bnxt_need_reserve_rings(bp))
4818                 return 0;
4819
4820         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4821                 sh = true;
4822         if (bp->flags & BNXT_FLAG_RFS)
4823                 vnic = rx + 1;
4824         if (bp->flags & BNXT_FLAG_AGG_RINGS)
4825                 rx <<= 1;
4826         grp = bp->rx_nr_rings;
4827
4828         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
4829         if (rc)
4830                 return rc;
4831
4832         tx = hw_resc->resv_tx_rings;
4833         if (BNXT_NEW_RM(bp)) {
4834                 rx = hw_resc->resv_rx_rings;
4835                 cp = hw_resc->resv_cp_rings;
4836                 grp = hw_resc->resv_hw_ring_grps;
4837                 vnic = hw_resc->resv_vnics;
4838         }
4839
4840         rx_rings = rx;
4841         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4842                 if (rx >= 2) {
4843                         rx_rings = rx >> 1;
4844                 } else {
4845                         if (netif_running(bp->dev))
4846                                 return -ENOMEM;
4847
4848                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
4849                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4850                         bp->dev->hw_features &= ~NETIF_F_LRO;
4851                         bp->dev->features &= ~NETIF_F_LRO;
4852                         bnxt_set_ring_params(bp);
4853                 }
4854         }
4855         rx_rings = min_t(int, rx_rings, grp);
4856         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
4857         if (bp->flags & BNXT_FLAG_AGG_RINGS)
4858                 rx = rx_rings << 1;
4859         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
4860         bp->tx_nr_rings = tx;
4861         bp->rx_nr_rings = rx_rings;
4862         bp->cp_nr_rings = cp;
4863
4864         if (!tx || !rx || !cp || !grp || !vnic)
4865                 return -ENOMEM;
4866
4867         return rc;
4868 }
4869
4870 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4871                                     int ring_grps, int cp_rings, int vnics)
4872 {
4873         struct hwrm_func_vf_cfg_input req = {0};
4874         u32 flags;
4875         int rc;
4876
4877         if (!BNXT_NEW_RM(bp))
4878                 return 0;
4879
4880         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4881                                      cp_rings, vnics);
4882         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
4883                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
4884                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
4885                 FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
4886                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
4887                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
4888
4889         req.flags = cpu_to_le32(flags);
4890         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4891         if (rc)
4892                 return -ENOMEM;
4893         return 0;
4894 }
4895
4896 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4897                                     int ring_grps, int cp_rings, int vnics)
4898 {
4899         struct hwrm_func_cfg_input req = {0};
4900         u32 flags;
4901         int rc;
4902
4903         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4904                                      cp_rings, vnics);
4905         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
4906         if (BNXT_NEW_RM(bp))
4907                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
4908                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
4909                          FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
4910                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
4911                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
4912
4913         req.flags = cpu_to_le32(flags);
4914         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4915         if (rc)
4916                 return -ENOMEM;
4917         return 0;
4918 }
4919
4920 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4921                                  int ring_grps, int cp_rings, int vnics)
4922 {
4923         if (bp->hwrm_spec_code < 0x10801)
4924                 return 0;
4925
4926         if (BNXT_PF(bp))
4927                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
4928                                                 ring_grps, cp_rings, vnics);
4929
4930         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
4931                                         cp_rings, vnics);
4932 }
4933
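/* Translate the driver's coalescing parameters (ticks and buffers) into the
 * fields of a HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS request.  The min timer
 * is derived as 1/2 and the DMA buffer timer as 1/4 of the interrupt timer.
 */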
4934 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4935         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4936 {
4937         u16 val, tmr, max, flags;
4938
4939         max = hw_coal->bufs_per_record * 128;
4940         if (hw_coal->budget)
4941                 max = hw_coal->bufs_per_record * hw_coal->budget;
4942
4943         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
4944         req->num_cmpl_aggr_int = cpu_to_le16(val);
4945
4946                 /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4947         val = min_t(u16, val, 63);
4948         req->num_cmpl_dma_aggr = cpu_to_le16(val);
4949
4950                 /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4951         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 63);
4952         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
4953
4954         tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks);
4955         tmr = max_t(u16, tmr, 1);
4956         req->int_lat_tmr_max = cpu_to_le16(tmr);
4957
4958         /* the min timer is set to 1/2 of the interrupt timer */
4959         val = tmr / 2;
4960         req->int_lat_tmr_min = cpu_to_le16(val);
4961
4962         /* the buf timer is set to 1/4 of the interrupt timer */
4963         val = max_t(u16, tmr / 4, 1);
4964         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
4965
4966         tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq);
4967         tmr = max_t(u16, tmr, 1);
4968         req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
4969
4970         flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4971         if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
4972                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
4973         req->flags = cpu_to_le16(flags);
4974 }
4975
4976 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
4977 {
4978         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
4979         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4980         struct bnxt_coal coal;
4981         unsigned int grp_idx;
4982
4983         /* Tick values are in microseconds.
4984          * 1 coal_buf x bufs_per_record = 1 completion record.
4985          */
4986         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
4987
4988         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
4989         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
4990
4991         if (!bnapi->rx_ring)
4992                 return -ENODEV;
4993
4994         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
4995                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4996
4997         bnxt_hwrm_set_coal_params(&coal, &req_rx);
4998
4999         grp_idx = bnapi->index;
5000         req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5001
5002         return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
5003                                  HWRM_CMD_TIMEOUT);
5004 }
5005
5006 int bnxt_hwrm_set_coal(struct bnxt *bp)
5007 {
5008         int i, rc = 0;
5009         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
5010                                                            req_tx = {0}, *req;
5011
5012         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5013                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5014         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
5015                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5016
5017         bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx);
5018         bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx);
5019
5020         mutex_lock(&bp->hwrm_cmd_lock);
5021         for (i = 0; i < bp->cp_nr_rings; i++) {
5022                 struct bnxt_napi *bnapi = bp->bnapi[i];
5023
5024                 req = &req_rx;
5025                 if (!bnapi->rx_ring)
5026                         req = &req_tx;
5027                 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
5028
5029                 rc = _hwrm_send_message(bp, req, sizeof(*req),
5030                                         HWRM_CMD_TIMEOUT);
5031                 if (rc)
5032                         break;
5033         }
5034         mutex_unlock(&bp->hwrm_cmd_lock);
5035         return rc;
5036 }
5037
5038 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
5039 {
5040         int rc = 0, i;
5041         struct hwrm_stat_ctx_free_input req = {0};
5042
5043         if (!bp->bnapi)
5044                 return 0;
5045
5046         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5047                 return 0;
5048
5049         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
5050
5051         mutex_lock(&bp->hwrm_cmd_lock);
5052         for (i = 0; i < bp->cp_nr_rings; i++) {
5053                 struct bnxt_napi *bnapi = bp->bnapi[i];
5054                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5055
5056                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
5057                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
5058
5059                         rc = _hwrm_send_message(bp, &req, sizeof(req),
5060                                                 HWRM_CMD_TIMEOUT);
5061                         if (rc)
5062                                 break;
5063
5064                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5065                 }
5066         }
5067         mutex_unlock(&bp->hwrm_cmd_lock);
5068         return rc;
5069 }
5070
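/* Allocate one statistics context per completion ring and point it at the
 * ring's DMA-mapped stats block; the returned context ID is saved in both
 * the completion ring and the ring group info.
 */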
5071 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
5072 {
5073         int rc = 0, i;
5074         struct hwrm_stat_ctx_alloc_input req = {0};
5075         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5076
5077         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5078                 return 0;
5079
5080         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
5081
5082         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
5083
5084         mutex_lock(&bp->hwrm_cmd_lock);
5085         for (i = 0; i < bp->cp_nr_rings; i++) {
5086                 struct bnxt_napi *bnapi = bp->bnapi[i];
5087                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5088
5089                 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
5090
5091                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5092                                         HWRM_CMD_TIMEOUT);
5093                 if (rc)
5094                         break;
5095
5096                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
5097
5098                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
5099         }
5100         mutex_unlock(&bp->hwrm_cmd_lock);
5101         return rc;
5102 }
5103
5104 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
5105 {
5106         struct hwrm_func_qcfg_input req = {0};
5107         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5108         u16 flags;
5109         int rc;
5110
5111         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5112         req.fid = cpu_to_le16(0xffff);
5113         mutex_lock(&bp->hwrm_cmd_lock);
5114         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5115         if (rc)
5116                 goto func_qcfg_exit;
5117
5118 #ifdef CONFIG_BNXT_SRIOV
5119         if (BNXT_VF(bp)) {
5120                 struct bnxt_vf_info *vf = &bp->vf;
5121
5122                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
5123         }
5124 #endif
5125         flags = le16_to_cpu(resp->flags);
5126         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
5127                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
5128                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
5129                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
5130                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
5131         }
5132         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
5133                 bp->flags |= BNXT_FLAG_MULTI_HOST;
5134
5135         switch (resp->port_partition_type) {
5136         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
5137         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
5138         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
5139                 bp->port_partition_type = resp->port_partition_type;
5140                 break;
5141         }
5142         if (bp->hwrm_spec_code < 0x10707 ||
5143             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
5144                 bp->br_mode = BRIDGE_MODE_VEB;
5145         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
5146                 bp->br_mode = BRIDGE_MODE_VEPA;
5147         else
5148                 bp->br_mode = BRIDGE_MODE_UNDEF;
5149
5150         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
5151         if (!bp->max_mtu)
5152                 bp->max_mtu = BNXT_MAX_MTU;
5153
5154 func_qcfg_exit:
5155         mutex_unlock(&bp->hwrm_cmd_lock);
5156         return rc;
5157 }
5158
5159 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
5160 {
5161         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5162         struct hwrm_func_resource_qcaps_input req = {0};
5163         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5164         int rc;
5165
5166         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
5167         req.fid = cpu_to_le16(0xffff);
5168
5169         mutex_lock(&bp->hwrm_cmd_lock);
5170         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5171         if (rc) {
5172                 rc = -EIO;
5173                 goto hwrm_func_resc_qcaps_exit;
5174         }
5175
5176         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
5177         if (!all)
5178                 goto hwrm_func_resc_qcaps_exit;
5179
5180         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
5181         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
5182         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
5183         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
5184         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
5185         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
5186         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
5187         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
5188         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
5189         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
5190         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
5191         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
5192         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
5193         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
5194         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
5195         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
5196
5197         if (BNXT_PF(bp)) {
5198                 struct bnxt_pf_info *pf = &bp->pf;
5199
5200                 pf->vf_resv_strategy =
5201                         le16_to_cpu(resp->vf_reservation_strategy);
5202                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
5203                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
5204         }
5205 hwrm_func_resc_qcaps_exit:
5206         mutex_unlock(&bp->hwrm_cmd_lock);
5207         return rc;
5208 }
5209
5210 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
5211 {
5212         int rc = 0;
5213         struct hwrm_func_qcaps_input req = {0};
5214         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5215         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5216         u32 flags;
5217
5218         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
5219         req.fid = cpu_to_le16(0xffff);
5220
5221         mutex_lock(&bp->hwrm_cmd_lock);
5222         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5223         if (rc)
5224                 goto hwrm_func_qcaps_exit;
5225
5226         flags = le32_to_cpu(resp->flags);
5227         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
5228                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
5229         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
5230                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
5231
5232         bp->tx_push_thresh = 0;
5233         if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
5234                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
5235
5236         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
5237         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
5238         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
5239         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
5240         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
5241         if (!hw_resc->max_hw_ring_grps)
5242                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
5243         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
5244         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
5245         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
5246
5247         if (BNXT_PF(bp)) {
5248                 struct bnxt_pf_info *pf = &bp->pf;
5249
5250                 pf->fw_fid = le16_to_cpu(resp->fid);
5251                 pf->port_id = le16_to_cpu(resp->port_id);
5252                 bp->dev->dev_port = pf->port_id;
5253                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
5254                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
5255                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
5256                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
5257                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
5258                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
5259                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
5260                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
5261                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
5262                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
5263                         bp->flags |= BNXT_FLAG_WOL_CAP;
5264         } else {
5265 #ifdef CONFIG_BNXT_SRIOV
5266                 struct bnxt_vf_info *vf = &bp->vf;
5267
5268                 vf->fw_fid = le16_to_cpu(resp->fid);
5269                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
5270 #endif
5271         }
5272
5273 hwrm_func_qcaps_exit:
5274         mutex_unlock(&bp->hwrm_cmd_lock);
5275         return rc;
5276 }
5277
5278 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
5279 {
5280         int rc;
5281
5282         rc = __bnxt_hwrm_func_qcaps(bp);
5283         if (rc)
5284                 return rc;
5285         if (bp->hwrm_spec_code >= 0x10803) {
5286                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
5287                 if (!rc)
5288                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
5289         }
5290         return 0;
5291 }
5292
5293 static int bnxt_hwrm_func_reset(struct bnxt *bp)
5294 {
5295         struct hwrm_func_reset_input req = {0};
5296
5297         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
5298         req.enables = 0;
5299
5300         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
5301 }
5302
5303 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
5304 {
5305         int rc = 0;
5306         struct hwrm_queue_qportcfg_input req = {0};
5307         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
5308         u8 i, j, *qptr;
5309         bool no_rdma;
5310
5311         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
5312
5313         mutex_lock(&bp->hwrm_cmd_lock);
5314         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5315         if (rc)
5316                 goto qportcfg_exit;
5317
5318         if (!resp->max_configurable_queues) {
5319                 rc = -EINVAL;
5320                 goto qportcfg_exit;
5321         }
5322         bp->max_tc = resp->max_configurable_queues;
5323         bp->max_lltc = resp->max_configurable_lossless_queues;
5324         if (bp->max_tc > BNXT_MAX_QUEUE)
5325                 bp->max_tc = BNXT_MAX_QUEUE;
5326
5327         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
5328         qptr = &resp->queue_id0;
5329         for (i = 0, j = 0; i < bp->max_tc; i++) {
5330                 bp->q_info[j].queue_id = *qptr++;
5331                 bp->q_info[j].queue_profile = *qptr++;
5332                 bp->tc_to_qidx[j] = j;
5333                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
5334                     (no_rdma && BNXT_PF(bp)))
5335                         j++;
5336         }
5337         bp->max_tc = max_t(u8, j, 1);
5338
5339         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
5340                 bp->max_tc = 1;
5341
5342         if (bp->max_lltc > bp->max_tc)
5343                 bp->max_lltc = bp->max_tc;
5344
5345 qportcfg_exit:
5346         mutex_unlock(&bp->hwrm_cmd_lock);
5347         return rc;
5348 }
5349
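/* Query the firmware/HWRM version.  The response determines the HWRM spec
 * level (bp->hwrm_spec_code), command timeout, maximum request length, chip
 * identification and whether the short command format must be used.
 */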
5350 static int bnxt_hwrm_ver_get(struct bnxt *bp)
5351 {
5352         int rc;
5353         struct hwrm_ver_get_input req = {0};
5354         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
5355         u32 dev_caps_cfg;
5356
5357         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
5358         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
5359         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
5360         req.hwrm_intf_min = HWRM_VERSION_MINOR;
5361         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
5362         mutex_lock(&bp->hwrm_cmd_lock);
5363         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5364         if (rc)
5365                 goto hwrm_ver_get_exit;
5366
5367         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
5368
5369         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
5370                              resp->hwrm_intf_min_8b << 8 |
5371                              resp->hwrm_intf_upd_8b;
5372         if (resp->hwrm_intf_maj_8b < 1) {
5373                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
5374                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
5375                             resp->hwrm_intf_upd_8b);
5376                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
5377         }
5378         snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
5379                  resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
5380                  resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
5381
5382         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
5383         if (!bp->hwrm_cmd_timeout)
5384                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
5385
5386         if (resp->hwrm_intf_maj_8b >= 1)
5387                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
5388
5389         bp->chip_num = le16_to_cpu(resp->chip_num);
5390         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
5391             !resp->chip_metal)
5392                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
5393
5394         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
5395         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
5396             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
5397                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
5398
5399 hwrm_ver_get_exit:
5400         mutex_unlock(&bp->hwrm_cmd_lock);
5401         return rc;
5402 }
5403
5404 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
5405 {
5406         struct hwrm_fw_set_time_input req = {0};
5407         struct tm tm;
5408         time64_t now = ktime_get_real_seconds();
5409
5410         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
5411             bp->hwrm_spec_code < 0x10400)
5412                 return -EOPNOTSUPP;
5413
5414         time64_to_tm(now, 0, &tm);
5415         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
5416         req.year = cpu_to_le16(1900 + tm.tm_year);
5417         req.month = 1 + tm.tm_mon;
5418         req.day = tm.tm_mday;
5419         req.hour = tm.tm_hour;
5420         req.minute = tm.tm_min;
5421         req.second = tm.tm_sec;
5422         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5423 }
5424
5425 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
5426 {
5427         int rc;
5428         struct bnxt_pf_info *pf = &bp->pf;
5429         struct hwrm_port_qstats_input req = {0};
5430
5431         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
5432                 return 0;
5433
5434         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
5435         req.port_id = cpu_to_le16(pf->port_id);
5436         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
5437         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
5438         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5439         return rc;
5440 }
5441
5442 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
5443 {
5444         struct hwrm_port_qstats_ext_input req = {0};
5445         struct bnxt_pf_info *pf = &bp->pf;
5446
5447         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
5448                 return 0;
5449
5450         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
5451         req.port_id = cpu_to_le16(pf->port_id);
5452         req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
5453         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
5454         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5455 }
5456
5457 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
5458 {
5459         if (bp->vxlan_port_cnt) {
5460                 bnxt_hwrm_tunnel_dst_port_free(
5461                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5462         }
5463         bp->vxlan_port_cnt = 0;
5464         if (bp->nge_port_cnt) {
5465                 bnxt_hwrm_tunnel_dst_port_free(
5466                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
5467         }
5468         bp->nge_port_cnt = 0;
5469 }
5470
5471 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
5472 {
5473         int rc, i;
5474         u32 tpa_flags = 0;
5475
5476         if (set_tpa)
5477                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
5478         for (i = 0; i < bp->nr_vnics; i++) {
5479                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
5480                 if (rc) {
5481                         netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
5482                                    i, rc);
5483                         return rc;
5484                 }
5485         }
5486         return 0;
5487 }
5488
5489 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
5490 {
5491         int i;
5492
5493         for (i = 0; i < bp->nr_vnics; i++)
5494                 bnxt_hwrm_vnic_set_rss(bp, i, false);
5495 }
5496
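/* Release firmware resources in dependency order: L2 filters, RSS, VNIC
 * contexts, TPA settings and VNICs first, then rings and ring groups, and
 * finally stats contexts and tunnel ports when interrupts are being torn
 * down as well.
 */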
5497 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
5498                                     bool irq_re_init)
5499 {
5500         if (bp->vnic_info) {
5501                 bnxt_hwrm_clear_vnic_filter(bp);
5502                 /* clear all RSS settings before freeing the VNIC contexts */
5503                 bnxt_hwrm_clear_vnic_rss(bp);
5504                 bnxt_hwrm_vnic_ctx_free(bp);
5505                 /* before freeing the vnic, undo the vnic TPA settings */
5506                 if (bp->flags & BNXT_FLAG_TPA)
5507                         bnxt_set_tpa(bp, false);
5508                 bnxt_hwrm_vnic_free(bp);
5509         }
5510         bnxt_hwrm_ring_free(bp, close_path);
5511         bnxt_hwrm_ring_grp_free(bp);
5512         if (irq_re_init) {
5513                 bnxt_hwrm_stat_ctx_free(bp);
5514                 bnxt_hwrm_free_tunnel_ports(bp);
5515         }
5516 }
5517
5518 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
5519 {
5520         struct hwrm_func_cfg_input req = {0};
5521         int rc;
5522
5523         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5524         req.fid = cpu_to_le16(0xffff);
5525         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
5526         if (br_mode == BRIDGE_MODE_VEB)
5527                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
5528         else if (br_mode == BRIDGE_MODE_VEPA)
5529                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
5530         else
5531                 return -EINVAL;
5532         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5533         if (rc)
5534                 rc = -EIO;
5535         return rc;
5536 }
5537
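/* Hint the firmware about the host cache line size (64 or 128 bytes).
 * Silently skipped on VFs and on firmware older than HWRM spec 0x10803.
 */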
5538 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
5539 {
5540         struct hwrm_func_cfg_input req = {0};
5541         int rc;
5542
5543         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
5544                 return 0;
5545
5546         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5547         req.fid = cpu_to_le16(0xffff);
5548         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
5549         req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
5550         if (size == 128)
5551                 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
5552
5553         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5554         if (rc)
5555                 rc = -EIO;
5556         return rc;
5557 }
5558
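/* Set up one vnic: allocate its RSS/COS context(s) (skipped when reusing a
 * new-RSS context), configure the vnic and ring group, enable RSS hashing
 * and, when aggregation rings are in use, header-data split.
 */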
5559 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
5560 {
5561         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5562         int rc;
5563
5564         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
5565                 goto skip_rss_ctx;
5566
5567         /* allocate context for vnic */
5568         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
5569         if (rc) {
5570                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
5571                            vnic_id, rc);
5572                 goto vnic_setup_err;
5573         }
5574         bp->rsscos_nr_ctxs++;
5575
5576         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5577                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
5578                 if (rc) {
5579                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
5580                                    vnic_id, rc);
5581                         goto vnic_setup_err;
5582                 }
5583                 bp->rsscos_nr_ctxs++;
5584         }
5585
5586 skip_rss_ctx:
5587         /* configure default vnic, ring grp */
5588         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
5589         if (rc) {
5590                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
5591                            vnic_id, rc);
5592                 goto vnic_setup_err;
5593         }
5594
5595         /* Enable RSS hashing on vnic */
5596         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
5597         if (rc) {
5598                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
5599                            vnic_id, rc);
5600                 goto vnic_setup_err;
5601         }
5602
5603         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5604                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
5605                 if (rc) {
5606                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
5607                                    vnic_id, rc);
5608                 }
5609         }
5610
5611 vnic_setup_err:
5612         return rc;
5613 }
5614
5615 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
5616 {
5617 #ifdef CONFIG_RFS_ACCEL
5618         int i, rc = 0;
5619
5620         for (i = 0; i < bp->rx_nr_rings; i++) {
5621                 struct bnxt_vnic_info *vnic;
5622                 u16 vnic_id = i + 1;
5623                 u16 ring_id = i;
5624
5625                 if (vnic_id >= bp->nr_vnics)
5626                         break;
5627
5628                 vnic = &bp->vnic_info[vnic_id];
5629                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
5630                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
5631                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
5632                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
5633                 if (rc) {
5634                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
5635                                    vnic_id, rc);
5636                         break;
5637                 }
5638                 rc = bnxt_setup_vnic(bp, vnic_id);
5639                 if (rc)
5640                         break;
5641         }
5642         return rc;
5643 #else
5644         return 0;
5645 #endif
5646 }
5647
5648 /* Allow PF and VF with default VLAN to be in promiscuous mode */
5649 static bool bnxt_promisc_ok(struct bnxt *bp)
5650 {
5651 #ifdef CONFIG_BNXT_SRIOV
5652         if (BNXT_VF(bp) && !bp->vf.vlan)
5653                 return false;
5654 #endif
5655         return true;
5656 }
5657
5658 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
5659 {
5660         int rc = 0;
5661
5662         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
5663         if (rc) {
5664                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
5665                            rc);
5666                 return rc;
5667         }
5668
5669         rc = bnxt_hwrm_vnic_cfg(bp, 1);
5670         if (rc) {
5671                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
5672                            rc);
5673                 return rc;
5674         }
5675         return rc;
5676 }
5677
5678 static int bnxt_cfg_rx_mode(struct bnxt *);
5679 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
5680
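/* Bring up the data path in firmware: allocate stat contexts (on IRQ
 * re-init), rings and ring groups, set up the default vnic 0 and any RFS
 * vnics, re-apply TPA, program the unicast MAC filter and rx_mask for
 * vnic 0, and set interrupt coalescing.
 */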
5681 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
5682 {
5683         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5684         int rc = 0;
5685         unsigned int rx_nr_rings = bp->rx_nr_rings;
5686
5687         if (irq_re_init) {
5688                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
5689                 if (rc) {
5690                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
5691                                    rc);
5692                         goto err_out;
5693                 }
5694         }
5695
5696         rc = bnxt_hwrm_ring_alloc(bp);
5697         if (rc) {
5698                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
5699                 goto err_out;
5700         }
5701
5702         rc = bnxt_hwrm_ring_grp_alloc(bp);
5703         if (rc) {
5704                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
5705                 goto err_out;
5706         }
5707
5708         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5709                 rx_nr_rings--;
5710
5711         /* default vnic 0 */
5712         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
5713         if (rc) {
5714                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
5715                 goto err_out;
5716         }
5717
5718         rc = bnxt_setup_vnic(bp, 0);
5719         if (rc)
5720                 goto err_out;
5721
5722         if (bp->flags & BNXT_FLAG_RFS) {
5723                 rc = bnxt_alloc_rfs_vnics(bp);
5724                 if (rc)
5725                         goto err_out;
5726         }
5727
5728         if (bp->flags & BNXT_FLAG_TPA) {
5729                 rc = bnxt_set_tpa(bp, true);
5730                 if (rc)
5731                         goto err_out;
5732         }
5733
5734         if (BNXT_VF(bp))
5735                 bnxt_update_vf_mac(bp);
5736
5737         /* Filter for default vnic 0 */
5738         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
5739         if (rc) {
5740                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
5741                 goto err_out;
5742         }
5743         vnic->uc_filter_count = 1;
5744
5745         vnic->rx_mask = 0;
5746         if (bp->dev->flags & IFF_BROADCAST)
5747                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
5748
5749         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
5750                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5751
5752         if (bp->dev->flags & IFF_ALLMULTI) {
5753                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5754                 vnic->mc_list_count = 0;
5755         } else {
5756                 u32 mask = 0;
5757
5758                 bnxt_mc_list_updated(bp, &mask);
5759                 vnic->rx_mask |= mask;
5760         }
5761
5762         rc = bnxt_cfg_rx_mode(bp);
5763         if (rc)
5764                 goto err_out;
5765
5766         rc = bnxt_hwrm_set_coal(bp);
5767         if (rc)
5768                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
5769                                 rc);
5770
5771         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5772                 rc = bnxt_setup_nitroa0_vnic(bp);
5773                 if (rc)
5774                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
5775                                    rc);
5776         }
5777
5778         if (BNXT_VF(bp)) {
5779                 bnxt_hwrm_func_qcfg(bp);
5780                 netdev_update_features(bp->dev);
5781         }
5782
5783         return 0;
5784
5785 err_out:
5786         bnxt_hwrm_resource_free(bp, 0, true);
5787
5788         return rc;
5789 }
5790
5791 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
5792 {
5793         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
5794         return 0;
5795 }
5796
5797 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
5798 {
5799         bnxt_init_cp_rings(bp);
5800         bnxt_init_rx_rings(bp);
5801         bnxt_init_tx_rings(bp);
5802         bnxt_init_ring_grps(bp, irq_re_init);
5803         bnxt_init_vnics(bp);
5804
5805         return bnxt_init_chip(bp, irq_re_init);
5806 }
5807
5808 static int bnxt_set_real_num_queues(struct bnxt *bp)
5809 {
5810         int rc;
5811         struct net_device *dev = bp->dev;
5812
5813         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
5814                                           bp->tx_nr_rings_xdp);
5815         if (rc)
5816                 return rc;
5817
5818         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
5819         if (rc)
5820                 return rc;
5821
5822 #ifdef CONFIG_RFS_ACCEL
5823         if (bp->flags & BNXT_FLAG_RFS)
5824                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
5825 #endif
5826
5827         return rc;
5828 }
5829
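/* Trim the requested RX/TX ring counts to fit within max completion rings.
 * With shared rings each count is simply capped at max; otherwise the
 * larger count is decremented until rx + tx <= max.  For example, max = 5
 * with rx = 4 and tx = 3 requested (non-shared) trims down to rx = 3,
 * tx = 2.
 */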
5830 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5831                            bool shared)
5832 {
5833         int _rx = *rx, _tx = *tx;
5834
5835         if (shared) {
5836                 *rx = min_t(int, _rx, max);
5837                 *tx = min_t(int, _tx, max);
5838         } else {
5839                 if (max < 2)
5840                         return -ENOMEM;
5841
5842                 while (_rx + _tx > max) {
5843                         if (_rx > _tx && _rx > 1)
5844                                 _rx--;
5845                         else if (_tx > 1)
5846                                 _tx--;
5847                 }
5848                 *rx = _rx;
5849                 *tx = _tx;
5850         }
5851         return 0;
5852 }
5853
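/* Program the TC queue offsets and name the MSI-X vectors.  Vector names
 * follow "<netdev>-<TxRx|rx|tx>-<index>", e.g. "eth0-TxRx-0" when rings
 * are shared on a device named eth0.
 */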
5854 static void bnxt_setup_msix(struct bnxt *bp)
5855 {
5856         const int len = sizeof(bp->irq_tbl[0].name);
5857         struct net_device *dev = bp->dev;
5858         int tcs, i;
5859
5860         tcs = netdev_get_num_tc(dev);
5861         if (tcs > 1) {
5862                 int i, off, count;
5863
5864                 for (i = 0; i < tcs; i++) {
5865                         count = bp->tx_nr_rings_per_tc;
5866                         off = i * count;
5867                         netdev_set_tc_queue(dev, i, count, off);
5868                 }
5869         }
5870
5871         for (i = 0; i < bp->cp_nr_rings; i++) {
5872                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5873                 char *attr;
5874
5875                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5876                         attr = "TxRx";
5877                 else if (i < bp->rx_nr_rings)
5878                         attr = "rx";
5879                 else
5880                         attr = "tx";
5881
5882                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
5883                          attr, i);
5884                 bp->irq_tbl[map_idx].handler = bnxt_msix;
5885         }
5886 }
5887
5888 static void bnxt_setup_inta(struct bnxt *bp)
5889 {
5890         const int len = sizeof(bp->irq_tbl[0].name);
5891
5892         if (netdev_get_num_tc(bp->dev))
5893                 netdev_reset_tc(bp->dev);
5894
5895         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
5896                  0);
5897         bp->irq_tbl[0].handler = bnxt_inta;
5898 }
5899
5900 static int bnxt_setup_int_mode(struct bnxt *bp)
5901 {
5902         int rc;
5903
5904         if (bp->flags & BNXT_FLAG_USING_MSIX)
5905                 bnxt_setup_msix(bp);
5906         else
5907                 bnxt_setup_inta(bp);
5908
5909         rc = bnxt_set_real_num_queues(bp);
5910         return rc;
5911 }
5912
5913 #ifdef CONFIG_RFS_ACCEL
5914 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
5915 {
5916         return bp->hw_resc.max_rsscos_ctxs;
5917 }
5918
5919 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
5920 {
5921         return bp->hw_resc.max_vnics;
5922 }
5923 #endif
5924
5925 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
5926 {
5927         return bp->hw_resc.max_stat_ctxs;
5928 }
5929
5930 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
5931 {
5932         bp->hw_resc.max_stat_ctxs = max;
5933 }
5934
5935 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
5936 {
5937         return bp->hw_resc.max_cp_rings;
5938 }
5939
5940 unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
5941 {
5942         return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
5943 }
5944
5945 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
5946 {
5947         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5948
5949         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
5950 }
5951
5952 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
5953 {
5954         bp->hw_resc.max_irqs = max_irqs;
5955 }
5956
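/* Return how many extra MSI-X vectors (at most num) a ULP such as the
 * RDMA driver can claim beyond the L2 completion rings.  For example,
 * with total_irqs = 16 but only 12 usable completion rings/IRQs and
 * cp_nr_rings = 8, a request for num = 6 is trimmed to 12 - 8 = 4.
 */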
5957 int bnxt_get_avail_msix(struct bnxt *bp, int num)
5958 {
5959         int max_cp = bnxt_get_max_func_cp_rings(bp);
5960         int max_irq = bnxt_get_max_func_irqs(bp);
5961         int total_req = bp->cp_nr_rings + num;
5962         int max_idx, avail_msix;
5963
5964         max_idx = min_t(int, bp->total_irqs, max_cp);
5965         avail_msix = max_idx - bp->cp_nr_rings;
5966         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
5967                 return avail_msix;
5968
5969         if (max_irq < total_req) {
5970                 num = max_irq - bp->cp_nr_rings;
5971                 if (num <= 0)
5972                         return 0;
5973         }
5974         return num;
5975 }
5976
5977 static int bnxt_get_num_msix(struct bnxt *bp)
5978 {
5979         if (!BNXT_NEW_RM(bp))
5980                 return bnxt_get_max_func_irqs(bp);
5981
5982         return bnxt_cp_rings_in_use(bp);
5983 }
5984
5985 static int bnxt_init_msix(struct bnxt *bp)
5986 {
5987         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
5988         struct msix_entry *msix_ent;
5989
5990         total_vecs = bnxt_get_num_msix(bp);
5991         max = bnxt_get_max_func_irqs(bp);
5992         if (total_vecs > max)
5993                 total_vecs = max;
5994
5995         if (!total_vecs)
5996                 return 0;
5997
5998         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
5999         if (!msix_ent)
6000                 return -ENOMEM;
6001
6002         for (i = 0; i < total_vecs; i++) {
6003                 msix_ent[i].entry = i;
6004                 msix_ent[i].vector = 0;
6005         }
6006
6007         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
6008                 min = 2;
6009
6010         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
6011         ulp_msix = bnxt_get_ulp_msix_num(bp);
6012         if (total_vecs < 0 || total_vecs < ulp_msix) {
6013                 rc = -ENODEV;
6014                 goto msix_setup_exit;
6015         }
6016
6017         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
6018         if (bp->irq_tbl) {
6019                 for (i = 0; i < total_vecs; i++)
6020                         bp->irq_tbl[i].vector = msix_ent[i].vector;
6021
6022                 bp->total_irqs = total_vecs;
6023                 /* Trim rings based on the number of vectors allocated */
6024                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
6025                                      total_vecs - ulp_msix, min == 1);
6026                 if (rc)
6027                         goto msix_setup_exit;
6028
6029                 bp->cp_nr_rings = (min == 1) ?
6030                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
6031                                   bp->tx_nr_rings + bp->rx_nr_rings;
6032
6033         } else {
6034                 rc = -ENOMEM;
6035                 goto msix_setup_exit;
6036         }
6037         bp->flags |= BNXT_FLAG_USING_MSIX;
6038         kfree(msix_ent);
6039         return 0;
6040
6041 msix_setup_exit:
6042         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
6043         kfree(bp->irq_tbl);
6044         bp->irq_tbl = NULL;
6045         pci_disable_msix(bp->pdev);
6046         kfree(msix_ent);
6047         return rc;
6048 }
6049
6050 static int bnxt_init_inta(struct bnxt *bp)
6051 {
6052         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
6053         if (!bp->irq_tbl)
6054                 return -ENOMEM;
6055
6056         bp->total_irqs = 1;
6057         bp->rx_nr_rings = 1;
6058         bp->tx_nr_rings = 1;
6059         bp->cp_nr_rings = 1;
6060         bp->flags |= BNXT_FLAG_SHARED_RINGS;
6061         bp->irq_tbl[0].vector = bp->pdev->irq;
6062         return 0;
6063 }
6064
6065 static int bnxt_init_int_mode(struct bnxt *bp)
6066 {
6067         int rc = 0;
6068
6069         if (bp->flags & BNXT_FLAG_MSIX_CAP)
6070                 rc = bnxt_init_msix(bp);
6071
6072         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
6073                 /* fall back to INTA */
6074                 rc = bnxt_init_inta(bp);
6075         }
6076         return rc;
6077 }
6078
6079 static void bnxt_clear_int_mode(struct bnxt *bp)
6080 {
6081         if (bp->flags & BNXT_FLAG_USING_MSIX)
6082                 pci_disable_msix(bp->pdev);
6083
6084         kfree(bp->irq_tbl);
6085         bp->irq_tbl = NULL;
6086         bp->flags &= ~BNXT_FLAG_USING_MSIX;
6087 }
6088
6089 int bnxt_reserve_rings(struct bnxt *bp)
6090 {
6091         int tcs = netdev_get_num_tc(bp->dev);
6092         bool reinit_irq = false;
6093         int rc;
6094
6095         if (!bnxt_need_reserve_rings(bp))
6096                 return 0;
6097
6098         if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
6099                 bnxt_ulp_irq_stop(bp);
6100                 bnxt_clear_int_mode(bp);
6101                 reinit_irq = true;
6102         }
6103         rc = __bnxt_reserve_rings(bp);
6104         if (reinit_irq) {
6105                 if (!rc)
6106                         rc = bnxt_init_int_mode(bp);
6107                 bnxt_ulp_irq_restart(bp, rc);
6108         }
6109         if (rc) {
6110                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
6111                 return rc;
6112         }
6113         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
6114                 netdev_err(bp->dev, "tx ring reservation failure\n");
6115                 netdev_reset_tc(bp->dev);
6116                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
6117                 return -ENOMEM;
6118         }
6119         bp->num_stat_ctxs = bp->cp_nr_rings;
6120         return 0;
6121 }
6122
6123 static void bnxt_free_irq(struct bnxt *bp)
6124 {
6125         struct bnxt_irq *irq;
6126         int i;
6127
6128 #ifdef CONFIG_RFS_ACCEL
6129         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
6130         bp->dev->rx_cpu_rmap = NULL;
6131 #endif
6132         if (!bp->irq_tbl || !bp->bnapi)
6133                 return;
6134
6135         for (i = 0; i < bp->cp_nr_rings; i++) {
6136                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
6137
6138                 irq = &bp->irq_tbl[map_idx];
6139                 if (irq->requested) {
6140                         if (irq->have_cpumask) {
6141                                 irq_set_affinity_hint(irq->vector, NULL);
6142                                 free_cpumask_var(irq->cpu_mask);
6143                                 irq->have_cpumask = 0;
6144                         }
6145                         free_irq(irq->vector, bp->bnapi[i]);
6146                 }
6147
6148                 irq->requested = 0;
6149         }
6150 }
6151
6152 static int bnxt_request_irq(struct bnxt *bp)
6153 {
6154         int i, j, rc = 0;
6155         unsigned long flags = 0;
6156 #ifdef CONFIG_RFS_ACCEL
6157         struct cpu_rmap *rmap;
6158 #endif
6159
6160         rc = bnxt_setup_int_mode(bp);
6161         if (rc) {
6162                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
6163                            rc);
6164                 return rc;
6165         }
6166 #ifdef CONFIG_RFS_ACCEL
6167         rmap = bp->dev->rx_cpu_rmap;
6168 #endif
6169         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
6170                 flags = IRQF_SHARED;
6171
6172         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
6173                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
6174                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
6175
6176 #ifdef CONFIG_RFS_ACCEL
6177                 if (rmap && bp->bnapi[i]->rx_ring) {
6178                         rc = irq_cpu_rmap_add(rmap, irq->vector);
6179                         if (rc)
6180                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
6181                                             j);
6182                         j++;
6183                 }
6184 #endif
6185                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6186                                  bp->bnapi[i]);
6187                 if (rc)
6188                         break;
6189
6190                 irq->requested = 1;
6191
6192                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
6193                         int numa_node = dev_to_node(&bp->pdev->dev);
6194
6195                         irq->have_cpumask = 1;
6196                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
6197                                         irq->cpu_mask);
6198                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
6199                         if (rc) {
6200                                 netdev_warn(bp->dev,
6201                                             "Set affinity failed, IRQ = %d\n",
6202                                             irq->vector);
6203                                 break;
6204                         }
6205                 }
6206         }
6207         return rc;
6208 }
6209
6210 static void bnxt_del_napi(struct bnxt *bp)
6211 {
6212         int i;
6213
6214         if (!bp->bnapi)
6215                 return;
6216
6217         for (i = 0; i < bp->cp_nr_rings; i++) {
6218                 struct bnxt_napi *bnapi = bp->bnapi[i];
6219
6220                 napi_hash_del(&bnapi->napi);
6221                 netif_napi_del(&bnapi->napi);
6222         }
6223         /* We called napi_hash_del() before netif_napi_del(), so we need
6224          * to respect an RCU grace period before freeing the napi structures.
6225          */
6226         synchronize_net();
6227 }
6228
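/* Register one NAPI context per completion ring.  On Nitro A0 the last
 * ring gets the dedicated bnxt_poll_nitroa0 handler; without MSI-X a
 * single shared NAPI context is used.
 */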
6229 static void bnxt_init_napi(struct bnxt *bp)
6230 {
6231         int i;
6232         unsigned int cp_nr_rings = bp->cp_nr_rings;
6233         struct bnxt_napi *bnapi;
6234
6235         if (bp->flags & BNXT_FLAG_USING_MSIX) {
6236                 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6237                         cp_nr_rings--;
6238                 for (i = 0; i < cp_nr_rings; i++) {
6239                         bnapi = bp->bnapi[i];
6240                         netif_napi_add(bp->dev, &bnapi->napi,
6241                                        bnxt_poll, 64);
6242                 }
6243                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6244                         bnapi = bp->bnapi[cp_nr_rings];
6245                         netif_napi_add(bp->dev, &bnapi->napi,
6246                                        bnxt_poll_nitroa0, 64);
6247                 }
6248         } else {
6249                 bnapi = bp->bnapi[0];
6250                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
6251         }
6252 }
6253
6254 static void bnxt_disable_napi(struct bnxt *bp)
6255 {
6256         int i;
6257
6258         if (!bp->bnapi)
6259                 return;
6260
6261         for (i = 0; i < bp->cp_nr_rings; i++) {
6262                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
6263
6264                 if (bp->bnapi[i]->rx_ring)
6265                         cancel_work_sync(&cpr->dim.work);
6266
6267                 napi_disable(&bp->bnapi[i]->napi);
6268         }
6269 }
6270
6271 static void bnxt_enable_napi(struct bnxt *bp)
6272 {
6273         int i;
6274
6275         for (i = 0; i < bp->cp_nr_rings; i++) {
6276                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
6277                 bp->bnapi[i]->in_reset = false;
6278
6279                 if (bp->bnapi[i]->rx_ring) {
6280                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
6281                         cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6282                 }
6283                 napi_enable(&bp->bnapi[i]->napi);
6284         }
6285 }
6286
6287 void bnxt_tx_disable(struct bnxt *bp)
6288 {
6289         int i;
6290         struct bnxt_tx_ring_info *txr;
6291
6292         if (bp->tx_ring) {
6293                 for (i = 0; i < bp->tx_nr_rings; i++) {
6294                         txr = &bp->tx_ring[i];
6295                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
6296                 }
6297         }
6298         /* Stop all TX queues */
6299         netif_tx_disable(bp->dev);
6300         netif_carrier_off(bp->dev);
6301 }
6302
6303 void bnxt_tx_enable(struct bnxt *bp)
6304 {
6305         int i;
6306         struct bnxt_tx_ring_info *txr;
6307
6308         for (i = 0; i < bp->tx_nr_rings; i++) {
6309                 txr = &bp->tx_ring[i];
6310                 txr->dev_state = 0;
6311         }
6312         netif_tx_wake_all_queues(bp->dev);
6313         if (bp->link_info.link_up)
6314                 netif_carrier_on(bp->dev);
6315 }
6316
6317 static void bnxt_report_link(struct bnxt *bp)
6318 {
6319         if (bp->link_info.link_up) {
6320                 const char *duplex;
6321                 const char *flow_ctrl;
6322                 u32 speed;
6323                 u16 fec;
6324
6325                 netif_carrier_on(bp->dev);
6326                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
6327                         duplex = "full";
6328                 else
6329                         duplex = "half";
6330                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
6331                         flow_ctrl = "ON - receive & transmit";
6332                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
6333                         flow_ctrl = "ON - transmit";
6334                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
6335                         flow_ctrl = "ON - receive";
6336                 else
6337                         flow_ctrl = "none";
6338                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
6339                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
6340                             speed, duplex, flow_ctrl);
6341                 if (bp->flags & BNXT_FLAG_EEE_CAP)
6342                         netdev_info(bp->dev, "EEE is %s\n",
6343                                     bp->eee.eee_active ? "active" :
6344                                                          "not active");
6345                 fec = bp->link_info.fec_cfg;
6346                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
6347                         netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
6348                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
6349                                     (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
6350                                      (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
6351         } else {
6352                 netif_carrier_off(bp->dev);
6353                 netdev_err(bp->dev, "NIC Link is Down\n");
6354         }
6355 }
6356
6357 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
6358 {
6359         int rc = 0;
6360         struct hwrm_port_phy_qcaps_input req = {0};
6361         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6362         struct bnxt_link_info *link_info = &bp->link_info;
6363
6364         if (bp->hwrm_spec_code < 0x10201)
6365                 return 0;
6366
6367         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
6368
6369         mutex_lock(&bp->hwrm_cmd_lock);
6370         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6371         if (rc)
6372                 goto hwrm_phy_qcaps_exit;
6373
6374         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
6375                 struct ethtool_eee *eee = &bp->eee;
6376                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
6377
6378                 bp->flags |= BNXT_FLAG_EEE_CAP;
6379                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
6380                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
6381                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
6382                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
6383                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
6384         }
6385         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
6386                 if (bp->test_info)
6387                         bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
6388         }
6389         if (resp->supported_speeds_auto_mode)
6390                 link_info->support_auto_speeds =
6391                         le16_to_cpu(resp->supported_speeds_auto_mode);
6392
6393         bp->port_count = resp->port_cnt;
6394
6395 hwrm_phy_qcaps_exit:
6396         mutex_unlock(&bp->hwrm_cmd_lock);
6397         return rc;
6398 }
6399
6400 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
6401 {
6402         int rc = 0;
6403         struct bnxt_link_info *link_info = &bp->link_info;
6404         struct hwrm_port_phy_qcfg_input req = {0};
6405         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6406         u8 link_up = link_info->link_up;
6407         u16 diff;
6408
6409         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
6410
6411         mutex_lock(&bp->hwrm_cmd_lock);
6412         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6413         if (rc) {
6414                 mutex_unlock(&bp->hwrm_cmd_lock);
6415                 return rc;
6416         }
6417
6418         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
6419         link_info->phy_link_status = resp->link;
6420         link_info->duplex = resp->duplex_cfg;
6421         if (bp->hwrm_spec_code >= 0x10800)
6422                 link_info->duplex = resp->duplex_state;
6423         link_info->pause = resp->pause;
6424         link_info->auto_mode = resp->auto_mode;
6425         link_info->auto_pause_setting = resp->auto_pause;
6426         link_info->lp_pause = resp->link_partner_adv_pause;
6427         link_info->force_pause_setting = resp->force_pause;
6428         link_info->duplex_setting = resp->duplex_cfg;
6429         if (link_info->phy_link_status == BNXT_LINK_LINK)
6430                 link_info->link_speed = le16_to_cpu(resp->link_speed);
6431         else
6432                 link_info->link_speed = 0;
6433         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
6434         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
6435         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
6436         link_info->lp_auto_link_speeds =
6437                 le16_to_cpu(resp->link_partner_adv_speeds);
6438         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
6439         link_info->phy_ver[0] = resp->phy_maj;
6440         link_info->phy_ver[1] = resp->phy_min;
6441         link_info->phy_ver[2] = resp->phy_bld;
6442         link_info->media_type = resp->media_type;
6443         link_info->phy_type = resp->phy_type;
6444         link_info->transceiver = resp->xcvr_pkg_type;
6445         link_info->phy_addr = resp->eee_config_phy_addr &
6446                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
6447         link_info->module_status = resp->module_status;
6448
6449         if (bp->flags & BNXT_FLAG_EEE_CAP) {
6450                 struct ethtool_eee *eee = &bp->eee;
6451                 u16 fw_speeds;
6452
6453                 eee->eee_active = 0;
6454                 if (resp->eee_config_phy_addr &
6455                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
6456                         eee->eee_active = 1;
6457                         fw_speeds = le16_to_cpu(
6458                                 resp->link_partner_adv_eee_link_speed_mask);
6459                         eee->lp_advertised =
6460                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
6461                 }
6462
6463                 /* Pull initial EEE config */
6464                 if (!chng_link_state) {
6465                         if (resp->eee_config_phy_addr &
6466                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
6467                                 eee->eee_enabled = 1;
6468
6469                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
6470                         eee->advertised =
6471                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
6472
6473                         if (resp->eee_config_phy_addr &
6474                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
6475                                 __le32 tmr;
6476
6477                                 eee->tx_lpi_enabled = 1;
6478                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
6479                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
6480                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
6481                         }
6482                 }
6483         }
6484
6485         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
6486         if (bp->hwrm_spec_code >= 0x10504)
6487                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
6488
6489         /* TODO: need to add more logic to report VF link */
6490         if (chng_link_state) {
6491                 if (link_info->phy_link_status == BNXT_LINK_LINK)
6492                         link_info->link_up = 1;
6493                 else
6494                         link_info->link_up = 0;
6495                 if (link_up != link_info->link_up)
6496                         bnxt_report_link(bp);
6497         } else {
6498                 /* always link down if not required to update link state */
6499                 link_info->link_up = 0;
6500         }
6501         mutex_unlock(&bp->hwrm_cmd_lock);
6502
6503         if (!BNXT_SINGLE_PF(bp))
6504                 return 0;
6505
6506         diff = link_info->support_auto_speeds ^ link_info->advertising;
6507         if ((link_info->support_auto_speeds | diff) !=
6508             link_info->support_auto_speeds) {
6509                 /* An advertised speed is no longer supported, so we need to
6510                  * update the advertisement settings.  Caller holds RTNL
6511                  * so we can modify link settings.
6512                  */
6513                 link_info->advertising = link_info->support_auto_speeds;
6514                 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
6515                         bnxt_hwrm_set_link_setting(bp, true, false);
6516         }
6517         return 0;
6518 }
6519
6520 static void bnxt_get_port_module_status(struct bnxt *bp)
6521 {
6522         struct bnxt_link_info *link_info = &bp->link_info;
6523         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
6524         u8 module_status;
6525
6526         if (bnxt_update_link(bp, true))
6527                 return;
6528
6529         module_status = link_info->module_status;
6530         switch (module_status) {
6531         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
6532         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
6533         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
6534                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
6535                             bp->pf.port_id);
6536                 if (bp->hwrm_spec_code >= 0x10201) {
6537                         netdev_warn(bp->dev, "Module part number %s\n",
6538                                     resp->phy_vendor_partnumber);
6539                 }
6540                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
6541                         netdev_warn(bp->dev, "TX is disabled\n");
6542                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
6543                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
6544         }
6545 }
6546
6547 static void
6548 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
6549 {
6550         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
6551                 if (bp->hwrm_spec_code >= 0x10201)
6552                         req->auto_pause =
6553                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
6554                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
6555                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
6556                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
6557                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
6558                 req->enables |=
6559                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
6560         } else {
6561                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
6562                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
6563                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
6564                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
6565                 req->enables |=
6566                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
6567                 if (bp->hwrm_spec_code >= 0x10201) {
6568                         req->auto_pause = req->force_pause;
6569                         req->enables |= cpu_to_le32(
6570                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
6571                 }
6572         }
6573 }
6574
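/* Fill in the speed portion of a PORT_PHY_CFG request: either advertise
 * the autoneg speed mask and restart autoneg, or force the requested
 * link speed.  The PHY is reset so the change takes effect immediately.
 */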
6575 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
6576                                       struct hwrm_port_phy_cfg_input *req)
6577 {
6578         u8 autoneg = bp->link_info.autoneg;
6579         u16 fw_link_speed = bp->link_info.req_link_speed;
6580         u16 advertising = bp->link_info.advertising;
6581
6582         if (autoneg & BNXT_AUTONEG_SPEED) {
6583                 req->auto_mode |=
6584                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
6585
6586                 req->enables |= cpu_to_le32(
6587                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
6588                 req->auto_link_speed_mask = cpu_to_le16(advertising);
6589
6590                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
6591                 req->flags |=
6592                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
6593         } else {
6594                 req->force_link_speed = cpu_to_le16(fw_link_speed);
6595                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
6596         }
6597
6598         /* tell chimp that the setting takes effect immediately */
6599         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
6600 }
6601
6602 int bnxt_hwrm_set_pause(struct bnxt *bp)
6603 {
6604         struct hwrm_port_phy_cfg_input req = {0};
6605         int rc;
6606
6607         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
6608         bnxt_hwrm_set_pause_common(bp, &req);
6609
6610         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
6611             bp->link_info.force_link_chng)
6612                 bnxt_hwrm_set_link_common(bp, &req);
6613
6614         mutex_lock(&bp->hwrm_cmd_lock);
6615         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6616         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
6617                 /* since changing the pause setting doesn't trigger any link
6618                  * change event, the driver needs to update the current pause
6619                  * result upon successful return of the phy_cfg command
6620                  */
6621                 bp->link_info.pause =
6622                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
6623                 bp->link_info.auto_pause_setting = 0;
6624                 if (!bp->link_info.force_link_chng)
6625                         bnxt_report_link(bp);
6626         }
6627         bp->link_info.force_link_chng = false;
6628         mutex_unlock(&bp->hwrm_cmd_lock);
6629         return rc;
6630 }
6631
6632 static void bnxt_hwrm_set_eee(struct bnxt *bp,
6633                               struct hwrm_port_phy_cfg_input *req)
6634 {
6635         struct ethtool_eee *eee = &bp->eee;
6636
6637         if (eee->eee_enabled) {
6638                 u16 eee_speeds;
6639                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
6640
6641                 if (eee->tx_lpi_enabled)
6642                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
6643                 else
6644                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
6645
6646                 req->flags |= cpu_to_le32(flags);
6647                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
6648                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
6649                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
6650         } else {
6651                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
6652         }
6653 }
6654
6655 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
6656 {
6657         struct hwrm_port_phy_cfg_input req = {0};
6658
6659         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
6660         if (set_pause)
6661                 bnxt_hwrm_set_pause_common(bp, &req);
6662
6663         bnxt_hwrm_set_link_common(bp, &req);
6664
6665         if (set_eee)
6666                 bnxt_hwrm_set_eee(bp, &req);
6667         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6668 }
6669
6670 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
6671 {
6672         struct hwrm_port_phy_cfg_input req = {0};
6673
6674         if (!BNXT_SINGLE_PF(bp))
6675                 return 0;
6676
6677         if (pci_num_vf(bp->pdev))
6678                 return 0;
6679
6680         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
6681         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
6682         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6683 }
6684
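/* Tell firmware the driver interface is about to go up or down.  If
 * firmware reports that resources changed while the interface was down,
 * re-query the resource caps and clear the cached reservations so that
 * everything is re-reserved on the way up.
 */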
6685 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
6686 {
6687         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
6688         struct hwrm_func_drv_if_change_input req = {0};
6689         bool resc_reinit = false;
6690         int rc;
6691
6692         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
6693                 return 0;
6694
6695         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
6696         if (up)
6697                 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
6698         mutex_lock(&bp->hwrm_cmd_lock);
6699         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6700         if (!rc && (resp->flags &
6701                     cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
6702                 resc_reinit = true;
6703         mutex_unlock(&bp->hwrm_cmd_lock);
6704
6705         if (up && resc_reinit && BNXT_NEW_RM(bp)) {
6706                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6707
6708                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
6709                 hw_resc->resv_cp_rings = 0;
6710                 hw_resc->resv_tx_rings = 0;
6711                 hw_resc->resv_rx_rings = 0;
6712                 hw_resc->resv_hw_ring_grps = 0;
6713                 hw_resc->resv_vnics = 0;
6714                 bp->tx_nr_rings = 0;
6715                 bp->rx_nr_rings = 0;
6716         }
6717         return rc;
6718 }
6719
6720 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
6721 {
6722         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6723         struct hwrm_port_led_qcaps_input req = {0};
6724         struct bnxt_pf_info *pf = &bp->pf;
6725         int rc;
6726
6727         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
6728                 return 0;
6729
6730         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
6731         req.port_id = cpu_to_le16(pf->port_id);
6732         mutex_lock(&bp->hwrm_cmd_lock);
6733         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6734         if (rc) {
6735                 mutex_unlock(&bp->hwrm_cmd_lock);
6736                 return rc;
6737         }
6738         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
6739                 int i;
6740
6741                 bp->num_leds = resp->num_leds;
6742                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
6743                                                  bp->num_leds);
6744                 for (i = 0; i < bp->num_leds; i++) {
6745                         struct bnxt_led_info *led = &bp->leds[i];
6746                         __le16 caps = led->led_state_caps;
6747
6748                         if (!led->led_group_id ||
6749                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
6750                                 bp->num_leds = 0;
6751                                 break;
6752                         }
6753                 }
6754         }
6755         mutex_unlock(&bp->hwrm_cmd_lock);
6756         return 0;
6757 }
6758
6759 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
6760 {
6761         struct hwrm_wol_filter_alloc_input req = {0};
6762         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6763         int rc;
6764
6765         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
6766         req.port_id = cpu_to_le16(bp->pf.port_id);
6767         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
6768         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
6769         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
6770         mutex_lock(&bp->hwrm_cmd_lock);
6771         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6772         if (!rc)
6773                 bp->wol_filter_id = resp->wol_filter_id;
6774         mutex_unlock(&bp->hwrm_cmd_lock);
6775         return rc;
6776 }
6777
6778 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
6779 {
6780         struct hwrm_wol_filter_free_input req = {0};
6781         int rc;
6782
6783         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
6784         req.port_id = cpu_to_le16(bp->pf.port_id);
6785         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
6786         req.wol_filter_id = bp->wol_filter_id;
6787         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6788         return rc;
6789 }
6790
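/* Query one Wake-on-LAN filter by handle, remember the magic-packet
 * filter id if one is found, and return the next handle so the caller
 * can walk the whole list (a handle of 0 or 0xffff ends the walk).
 */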
6791 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
6792 {
6793         struct hwrm_wol_filter_qcfg_input req = {0};
6794         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6795         u16 next_handle = 0;
6796         int rc;
6797
6798         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
6799         req.port_id = cpu_to_le16(bp->pf.port_id);
6800         req.handle = cpu_to_le16(handle);
6801         mutex_lock(&bp->hwrm_cmd_lock);
6802         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6803         if (!rc) {
6804                 next_handle = le16_to_cpu(resp->next_handle);
6805                 if (next_handle != 0) {
6806                         if (resp->wol_type ==
6807                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
6808                                 bp->wol = 1;
6809                                 bp->wol_filter_id = resp->wol_filter_id;
6810                         }
6811                 }
6812         }
6813         mutex_unlock(&bp->hwrm_cmd_lock);
6814         return next_handle;
6815 }
6816
6817 static void bnxt_get_wol_settings(struct bnxt *bp)
6818 {
6819         u16 handle = 0;
6820
6821         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
6822                 return;
6823
6824         do {
6825                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
6826         } while (handle && handle != 0xffff);
6827 }
6828
6829 #ifdef CONFIG_BNXT_HWMON
6830 static ssize_t bnxt_show_temp(struct device *dev,
6831                               struct device_attribute *devattr, char *buf)
6832 {
6833         struct hwrm_temp_monitor_query_input req = {0};
6834         struct hwrm_temp_monitor_query_output *resp;
6835         struct bnxt *bp = dev_get_drvdata(dev);
6836         u32 temp = 0;
6837
6838         resp = bp->hwrm_cmd_resp_addr;
6839         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
6840         mutex_lock(&bp->hwrm_cmd_lock);
6841         if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
6842                 temp = resp->temp * 1000; /* display millidegree */
6843         mutex_unlock(&bp->hwrm_cmd_lock);
6844
6845         return sprintf(buf, "%u\n", temp);
6846 }
6847 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
6848
6849 static struct attribute *bnxt_attrs[] = {
6850         &sensor_dev_attr_temp1_input.dev_attr.attr,
6851         NULL
6852 };
6853 ATTRIBUTE_GROUPS(bnxt);
6854
6855 static void bnxt_hwmon_close(struct bnxt *bp)
6856 {
6857         if (bp->hwmon_dev) {
6858                 hwmon_device_unregister(bp->hwmon_dev);
6859                 bp->hwmon_dev = NULL;
6860         }
6861 }
6862
6863 static void bnxt_hwmon_open(struct bnxt *bp)
6864 {
6865         struct pci_dev *pdev = bp->pdev;
6866
6867         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
6868                                                           DRV_MODULE_NAME, bp,
6869                                                           bnxt_groups);
6870         if (IS_ERR(bp->hwmon_dev)) {
6871                 bp->hwmon_dev = NULL;
6872                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
6873         }
6874 }
6875 #else
6876 static void bnxt_hwmon_close(struct bnxt *bp)
6877 {
6878 }
6879
6880 static void bnxt_hwmon_open(struct bnxt *bp)
6881 {
6882 }
6883 #endif
6884
6885 static bool bnxt_eee_config_ok(struct bnxt *bp)
6886 {
6887         struct ethtool_eee *eee = &bp->eee;
6888         struct bnxt_link_info *link_info = &bp->link_info;
6889
6890         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
6891                 return true;
6892
6893         if (eee->eee_enabled) {
6894                 u32 advertising =
6895                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
6896
6897                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6898                         eee->eee_enabled = 0;
6899                         return false;
6900                 }
6901                 if (eee->advertised & ~advertising) {
6902                         eee->advertised = advertising & eee->supported;
6903                         return false;
6904                 }
6905         }
6906         return true;
6907 }
6908
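/* Re-read the link state and compare it against the requested pause,
 * speed and EEE settings; issue PORT_PHY_CFG only when something needs
 * to change (or to bring the link back up after a previous close).
 */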
6909 static int bnxt_update_phy_setting(struct bnxt *bp)
6910 {
6911         int rc;
6912         bool update_link = false;
6913         bool update_pause = false;
6914         bool update_eee = false;
6915         struct bnxt_link_info *link_info = &bp->link_info;
6916
6917         rc = bnxt_update_link(bp, true);
6918         if (rc) {
6919                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
6920                            rc);
6921                 return rc;
6922         }
6923         if (!BNXT_SINGLE_PF(bp))
6924                 return 0;
6925
6926         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
6927             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
6928             link_info->req_flow_ctrl)
6929                 update_pause = true;
6930         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
6931             link_info->force_pause_setting != link_info->req_flow_ctrl)
6932                 update_pause = true;
6933         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6934                 if (BNXT_AUTO_MODE(link_info->auto_mode))
6935                         update_link = true;
6936                 if (link_info->req_link_speed != link_info->force_link_speed)
6937                         update_link = true;
6938                 if (link_info->req_duplex != link_info->duplex_setting)
6939                         update_link = true;
6940         } else {
6941                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
6942                         update_link = true;
6943                 if (link_info->advertising != link_info->auto_link_speeds)
6944                         update_link = true;
6945         }
6946
6947         /* The last close may have shut down the link, so we need to call
6948          * PHY_CFG to bring it back up.
6949          */
6950         if (!netif_carrier_ok(bp->dev))
6951                 update_link = true;
6952
6953         if (!bnxt_eee_config_ok(bp))
6954                 update_eee = true;
6955
6956         if (update_link)
6957                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
6958         else if (update_pause)
6959                 rc = bnxt_hwrm_set_pause(bp);
6960         if (rc) {
6961                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
6962                            rc);
6963                 return rc;
6964         }
6965
6966         return rc;
6967 }
6968
6969 /* Common routine to pre-map certain register blocks to different GRC windows.
6970  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
6971  * in the PF and 3 windows in the VF can be customized to map different
6972  * register blocks.
6973  */
6974 static void bnxt_preset_reg_win(struct bnxt *bp)
6975 {
6976         if (BNXT_PF(bp)) {
6977                 /* CAG registers map to GRC window #4 */
6978                 writel(BNXT_CAG_REG_BASE,
6979                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
6980         }
6981 }
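/* Illustrative sketch (not part of the driver): the "+ 12" above selects GRC
 * window #4, which is consistent with one 4-byte window-select register per
 * window, starting with window #1 at BNXT_GRCPF_REG_WINDOW_BASE_OUT.  Under
 * that assumption, a generalized helper could look like this:
 */
static inline void bnxt_map_reg_win_sketch(struct bnxt *bp, u32 reg_base,
					   int window)
{
	/* window #1 -> offset 0, window #4 -> offset 12 */
	writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
	       (window - 1) * 4);
}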
6982
6983 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
6984
6985 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6986 {
6987         int rc = 0;
6988
6989         bnxt_preset_reg_win(bp);
6990         netif_carrier_off(bp->dev);
6991         if (irq_re_init) {
6992                 /* Reserve rings now if none were reserved at driver probe. */
6993                 rc = bnxt_init_dflt_ring_mode(bp);
6994                 if (rc) {
6995                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
6996                         return rc;
6997                 }
6998                 rc = bnxt_reserve_rings(bp);
6999                 if (rc)
7000                         return rc;
7001         }
7002         if ((bp->flags & BNXT_FLAG_RFS) &&
7003             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
7004                 /* disable RFS if falling back to INTA */
7005                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
7006                 bp->flags &= ~BNXT_FLAG_RFS;
7007         }
7008
7009         rc = bnxt_alloc_mem(bp, irq_re_init);
7010         if (rc) {
7011                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
7012                 goto open_err_free_mem;
7013         }
7014
7015         if (irq_re_init) {
7016                 bnxt_init_napi(bp);
7017                 rc = bnxt_request_irq(bp);
7018                 if (rc) {
7019                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
7020                         goto open_err_irq;
7021                 }
7022         }
7023
7024         bnxt_enable_napi(bp);
7025         bnxt_debug_dev_init(bp);
7026
7027         rc = bnxt_init_nic(bp, irq_re_init);
7028         if (rc) {
7029                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
7030                 goto open_err;
7031         }
7032
7033         if (link_re_init) {
7034                 mutex_lock(&bp->link_lock);
7035                 rc = bnxt_update_phy_setting(bp);
7036                 mutex_unlock(&bp->link_lock);
7037                 if (rc) {
7038                         netdev_warn(bp->dev, "failed to update phy settings\n");
7039                         if (BNXT_SINGLE_PF(bp)) {
7040                                 bp->link_info.phy_retry = true;
7041                                 bp->link_info.phy_retry_expires =
7042                                         jiffies + 5 * HZ;
7043                         }
7044                 }
7045         }
7046
7047         if (irq_re_init)
7048                 udp_tunnel_get_rx_info(bp->dev);
7049
7050         set_bit(BNXT_STATE_OPEN, &bp->state);
7051         bnxt_enable_int(bp);
7052         /* Enable TX queues */
7053         bnxt_tx_enable(bp);
7054         mod_timer(&bp->timer, jiffies + bp->current_interval);
7055         /* Poll link status and check for SFP+ module status */
7056         bnxt_get_port_module_status(bp);
7057
7058         /* VF-reps may need to be re-opened after the PF is re-opened */
7059         if (BNXT_PF(bp))
7060                 bnxt_vf_reps_open(bp);
7061         return 0;
7062
7063 open_err:
7064         bnxt_debug_dev_exit(bp);
7065         bnxt_disable_napi(bp);
7066
7067 open_err_irq:
7068         bnxt_del_napi(bp);
7069
7070 open_err_free_mem:
7071         bnxt_free_skbs(bp);
7072         bnxt_free_irq(bp);
7073         bnxt_free_mem(bp, true);
7074         return rc;
7075 }
7076
7077 /* rtnl_lock held */
7078 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
7079 {
7080         int rc = 0;
7081
7082         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
7083         if (rc) {
7084                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
7085                 dev_close(bp->dev);
7086         }
7087         return rc;
7088 }
7089
7090 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
7091  * without enabling NAPI, IRQ, and TX.  This is mainly used for offline
7092  * self-tests.
7093  */
7094 int bnxt_half_open_nic(struct bnxt *bp)
7095 {
7096         int rc = 0;
7097
7098         rc = bnxt_alloc_mem(bp, false);
7099         if (rc) {
7100                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
7101                 goto half_open_err;
7102         }
7103         rc = bnxt_init_nic(bp, false);
7104         if (rc) {
7105                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
7106                 goto half_open_err;
7107         }
7108         return 0;
7109
7110 half_open_err:
7111         bnxt_free_skbs(bp);
7112         bnxt_free_mem(bp, false);
7113         dev_close(bp->dev);
7114         return rc;
7115 }
7116
7117 /* rtnl_lock held, this call can only be made after a previous successful
7118  * call to bnxt_half_open_nic().
7119  */
7120 void bnxt_half_close_nic(struct bnxt *bp)
7121 {
7122         bnxt_hwrm_resource_free(bp, false, false);
7123         bnxt_free_skbs(bp);
7124         bnxt_free_mem(bp, false);
7125 }
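/* Illustrative usage sketch (not part of the driver): the half-open/half-close
 * pair above is meant to bracket offline self-tests.  The caller is assumed to
 * already hold rtnl_lock, as required by both functions.
 */
static inline int bnxt_offline_test_sketch(struct bnxt *bp)
{
	int rc;

	rc = bnxt_half_open_nic(bp);
	if (rc)
		return rc;	/* bnxt_half_open_nic() already closed the dev */

	/* ... run register/loopback tests with NAPI, IRQ and TX disabled ... */

	bnxt_half_close_nic(bp);
	return 0;
}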
7126
7127 static int bnxt_open(struct net_device *dev)
7128 {
7129         struct bnxt *bp = netdev_priv(dev);
7130         int rc;
7131
7132         bnxt_hwrm_if_change(bp, true);
7133         rc = __bnxt_open_nic(bp, true, true);
7134         if (rc)
7135                 bnxt_hwrm_if_change(bp, false);
7136
7137         bnxt_hwmon_open(bp);
7138
7139         return rc;
7140 }
7141
7142 static bool bnxt_drv_busy(struct bnxt *bp)
7143 {
7144         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
7145                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
7146 }
7147
7148 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
7149                              bool link_re_init)
7150 {
7151         /* Close the VF-reps before closing PF */
7152         if (BNXT_PF(bp))
7153                 bnxt_vf_reps_close(bp);
7154
7155         /* Change device state to avoid TX queue wake-ups */
7156         bnxt_tx_disable(bp);
7157
7158         clear_bit(BNXT_STATE_OPEN, &bp->state);
7159         smp_mb__after_atomic();
7160         while (bnxt_drv_busy(bp))
7161                 msleep(20);
7162
7163         /* Flush rings and disable interrupts */
7164         bnxt_shutdown_nic(bp, irq_re_init);
7165
7166         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
7167
7168         bnxt_debug_dev_exit(bp);
7169         bnxt_disable_napi(bp);
7170         del_timer_sync(&bp->timer);
7171         bnxt_free_skbs(bp);
7172
7173         if (irq_re_init) {
7174                 bnxt_free_irq(bp);
7175                 bnxt_del_napi(bp);
7176         }
7177         bnxt_free_mem(bp, irq_re_init);
7178 }
7179
7180 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
7181 {
7182         int rc = 0;
7183
7184 #ifdef CONFIG_BNXT_SRIOV
7185         if (bp->sriov_cfg) {
7186                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
7187                                                       !bp->sriov_cfg,
7188                                                       BNXT_SRIOV_CFG_WAIT_TMO);
7189                 if (rc)
7190                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
7191         }
7192 #endif
7193         __bnxt_close_nic(bp, irq_re_init, link_re_init);
7194         return rc;
7195 }
7196
7197 static int bnxt_close(struct net_device *dev)
7198 {
7199         struct bnxt *bp = netdev_priv(dev);
7200
7201         bnxt_hwmon_close(bp);
7202         bnxt_close_nic(bp, true, true);
7203         bnxt_hwrm_shutdown_link(bp);
7204         bnxt_hwrm_if_change(bp, false);
7205         return 0;
7206 }
7207
7208 /* rtnl_lock held */
7209 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7210 {
7211         switch (cmd) {
7212         case SIOCGMIIPHY:
7213                 /* fallthru */
7214         case SIOCGMIIREG: {
7215                 if (!netif_running(dev))
7216                         return -EAGAIN;
7217
7218                 return 0;
7219         }
7220
7221         case SIOCSMIIREG:
7222                 if (!netif_running(dev))
7223                         return -EAGAIN;
7224
7225                 return 0;
7226
7227         default:
7228                 /* do nothing */
7229                 break;
7230         }
7231         return -EOPNOTSUPP;
7232 }
7233
7234 static void
7235 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7236 {
7237         u32 i;
7238         struct bnxt *bp = netdev_priv(dev);
7239
7240         set_bit(BNXT_STATE_READ_STATS, &bp->state);
7241         /* Make sure bnxt_close_nic() sees that we are reading stats before
7242          * we check the BNXT_STATE_OPEN flag.
7243          */
7244         smp_mb__after_atomic();
7245         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
7246                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
7247                 return;
7248         }
7249
7250         /* TODO check if we need to synchronize with bnxt_close path */
7251         for (i = 0; i < bp->cp_nr_rings; i++) {
7252                 struct bnxt_napi *bnapi = bp->bnapi[i];
7253                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7254                 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
7255
7256                 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
7257                 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
7258                 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
7259
7260                 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
7261                 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
7262                 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
7263
7264                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
7265                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
7266                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
7267
7268                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
7269                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
7270                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
7271
7272                 stats->rx_missed_errors +=
7273                         le64_to_cpu(hw_stats->rx_discard_pkts);
7274
7275                 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
7276
7277                 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
7278         }
7279
7280         if (bp->flags & BNXT_FLAG_PORT_STATS) {
7281                 struct rx_port_stats *rx = bp->hw_rx_port_stats;
7282                 struct tx_port_stats *tx = bp->hw_tx_port_stats;
7283
7284                 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
7285                 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
7286                 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
7287                                           le64_to_cpu(rx->rx_ovrsz_frames) +
7288                                           le64_to_cpu(rx->rx_runt_frames);
7289                 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
7290                                    le64_to_cpu(rx->rx_jbr_frames);
7291                 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
7292                 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
7293                 stats->tx_errors = le64_to_cpu(tx->tx_err);
7294         }
7295         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
7296 }
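/* Editorial note (illustrative, not part of the driver): the READ_STATS /
 * STATE_OPEN handshake above pairs with __bnxt_close_nic().  Sketch of the
 * intended interleaving:
 *
 *	bnxt_get_stats64()			__bnxt_close_nic()
 *	------------------			------------------
 *	set_bit(READ_STATS)			clear_bit(OPEN)
 *	smp_mb__after_atomic()			smp_mb__after_atomic()
 *	if (!test_bit(OPEN)) bail out		while (bnxt_drv_busy())
 *	... walk per-ring hw_stats ...			msleep(20);
 *	clear_bit(READ_STATS)			... free rings ...
 *
 * With both barriers in place, the close path cannot free the rings while a
 * stats reader that observed OPEN is still walking them.
 */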
7297
7298 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
7299 {
7300         struct net_device *dev = bp->dev;
7301         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7302         struct netdev_hw_addr *ha;
7303         u8 *haddr;
7304         int mc_count = 0;
7305         bool update = false;
7306         int off = 0;
7307
7308         netdev_for_each_mc_addr(ha, dev) {
7309                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
7310                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7311                         vnic->mc_list_count = 0;
7312                         return false;
7313                 }
7314                 haddr = ha->addr;
7315                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
7316                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
7317                         update = true;
7318                 }
7319                 off += ETH_ALEN;
7320                 mc_count++;
7321         }
7322         if (mc_count)
7323                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
7324
7325         if (mc_count != vnic->mc_list_count) {
7326                 vnic->mc_list_count = mc_count;
7327                 update = true;
7328         }
7329         return update;
7330 }
7331
7332 static bool bnxt_uc_list_updated(struct bnxt *bp)
7333 {
7334         struct net_device *dev = bp->dev;
7335         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7336         struct netdev_hw_addr *ha;
7337         int off = 0;
7338
7339         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
7340                 return true;
7341
7342         netdev_for_each_uc_addr(ha, dev) {
7343                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
7344                         return true;
7345
7346                 off += ETH_ALEN;
7347         }
7348         return false;
7349 }
7350
7351 static void bnxt_set_rx_mode(struct net_device *dev)
7352 {
7353         struct bnxt *bp = netdev_priv(dev);
7354         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7355         u32 mask = vnic->rx_mask;
7356         bool mc_update = false;
7357         bool uc_update;
7358
7359         if (!netif_running(dev))
7360                 return;
7361
7362         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
7363                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
7364                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
7365                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
7366
7367         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7368                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7369
7370         uc_update = bnxt_uc_list_updated(bp);
7371
7372         if (dev->flags & IFF_BROADCAST)
7373                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7374         if (dev->flags & IFF_ALLMULTI) {
7375                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7376                 vnic->mc_list_count = 0;
7377         } else {
7378                 mc_update = bnxt_mc_list_updated(bp, &mask);
7379         }
7380
7381         if (mask != vnic->rx_mask || uc_update || mc_update) {
7382                 vnic->rx_mask = mask;
7383
7384                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
7385                 bnxt_queue_sp_work(bp);
7386         }
7387 }
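/* Editorial note (illustrative, not part of the driver): .ndo_set_rx_mode is
 * called in atomic context with the device address lists locked, so it cannot
 * issue sleeping HWRM calls directly.  It therefore only records the desired
 * mask and defers the firmware update:
 *
 *	set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
 *	bnxt_queue_sp_work(bp);
 *
 * bnxt_sp_task() later runs in process context and calls bnxt_cfg_rx_mode()
 * below, which may sleep on hwrm_cmd_lock.
 */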
7388
7389 static int bnxt_cfg_rx_mode(struct bnxt *bp)
7390 {
7391         struct net_device *dev = bp->dev;
7392         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7393         struct netdev_hw_addr *ha;
7394         int i, off = 0, rc;
7395         bool uc_update;
7396
7397         netif_addr_lock_bh(dev);
7398         uc_update = bnxt_uc_list_updated(bp);
7399         netif_addr_unlock_bh(dev);
7400
7401         if (!uc_update)
7402                 goto skip_uc;
7403
7404         mutex_lock(&bp->hwrm_cmd_lock);
7405         for (i = 1; i < vnic->uc_filter_count; i++) {
7406                 struct hwrm_cfa_l2_filter_free_input req = {0};
7407
7408                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
7409                                        -1);
7410
7411                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
7412
7413                 rc = _hwrm_send_message(bp, &req, sizeof(req),
7414                                         HWRM_CMD_TIMEOUT);
7415         }
7416         mutex_unlock(&bp->hwrm_cmd_lock);
7417
7418         vnic->uc_filter_count = 1;
7419
7420         netif_addr_lock_bh(dev);
7421         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
7422                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7423         } else {
7424                 netdev_for_each_uc_addr(ha, dev) {
7425                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
7426                         off += ETH_ALEN;
7427                         vnic->uc_filter_count++;
7428                 }
7429         }
7430         netif_addr_unlock_bh(dev);
7431
7432         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
7433                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
7434                 if (rc) {
7435                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
7436                                    rc);
7437                         vnic->uc_filter_count = i;
7438                         return rc;
7439                 }
7440         }
7441
7442 skip_uc:
7443         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
7444         if (rc)
7445                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
7446                            rc);
7447
7448         return rc;
7449 }
7450
7451 static bool bnxt_can_reserve_rings(struct bnxt *bp)
7452 {
7453 #ifdef CONFIG_BNXT_SRIOV
7454         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
7455                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7456
7457                 /* If no minimum rings were provisioned by the PF, don't
7458                  * reserve rings by default when the device is down.
7459                  */
7460                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
7461                         return true;
7462
7463                 if (!netif_running(bp->dev))
7464                         return false;
7465         }
7466 #endif
7467         return true;
7468 }
7469
7470 /* If the chip and firmware support RFS */
7471 static bool bnxt_rfs_supported(struct bnxt *bp)
7472 {
7473         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
7474                 return true;
7475         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7476                 return true;
7477         return false;
7478 }
7479
7480 /* If runtime conditions support RFS */
7481 static bool bnxt_rfs_capable(struct bnxt *bp)
7482 {
7483 #ifdef CONFIG_RFS_ACCEL
7484         int vnics, max_vnics, max_rss_ctxs;
7485
7486         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
7487                 return false;
7488
7489         vnics = 1 + bp->rx_nr_rings;
7490         max_vnics = bnxt_get_max_func_vnics(bp);
7491         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
7492
7493         /* RSS contexts not a limiting factor */
7494         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7495                 max_rss_ctxs = max_vnics;
7496         if (vnics > max_vnics || vnics > max_rss_ctxs) {
7497                 if (bp->rx_nr_rings > 1)
7498                         netdev_warn(bp->dev,
7499                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
7500                                     min(max_rss_ctxs - 1, max_vnics - 1));
7501                 return false;
7502         }
7503
7504         if (!BNXT_NEW_RM(bp))
7505                 return true;
7506
7507         if (vnics == bp->hw_resc.resv_vnics)
7508                 return true;
7509
7510         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
7511         if (vnics <= bp->hw_resc.resv_vnics)
7512                 return true;
7513
7514         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
7515         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
7516         return false;
7517 #else
7518         return false;
7519 #endif
7520 }
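/* Worked example (illustrative, not part of the driver): with 8 RX rings,
 * vnics = 1 + 8 = 9, so NTUPLE filtering needs at least 9 VNICs and, unless
 * BNXT_FLAG_NEW_RSS_CAP is set, 9 RSS contexts.  If max_vnics and max_rss_ctxs
 * are both 8, the warning above reports support for at most
 * min(8 - 1, 8 - 1) = 7 RX rings and RFS is reported as not capable.
 */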
7521
7522 static netdev_features_t bnxt_fix_features(struct net_device *dev,
7523                                            netdev_features_t features)
7524 {
7525         struct bnxt *bp = netdev_priv(dev);
7526
7527         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
7528                 features &= ~NETIF_F_NTUPLE;
7529
7530         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
7531                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
7532
7533         if (!(features & NETIF_F_GRO))
7534                 features &= ~NETIF_F_GRO_HW;
7535
7536         if (features & NETIF_F_GRO_HW)
7537                 features &= ~NETIF_F_LRO;
7538
7539         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
7540          * turned on or off together.
7541          */
7542         if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
7543             (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
7544                 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
7545                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
7546                                       NETIF_F_HW_VLAN_STAG_RX);
7547                 else
7548                         features |= NETIF_F_HW_VLAN_CTAG_RX |
7549                                     NETIF_F_HW_VLAN_STAG_RX;
7550         }
7551 #ifdef CONFIG_BNXT_SRIOV
7552         if (BNXT_VF(bp)) {
7553                 if (bp->vf.vlan) {
7554                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
7555                                       NETIF_F_HW_VLAN_STAG_RX);
7556                 }
7557         }
7558 #endif
7559         return features;
7560 }
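/* Worked examples (illustrative, not part of the driver) for the CTAG/STAG
 * coupling above: if CTAG RX is currently enabled and a request clears only
 * STAG RX, both are cleared together; if CTAG RX is currently disabled and a
 * request sets only one of the two, both are turned on.  Similarly, GRO_HW
 * and LRO are mutually exclusive here, with GRO_HW taking precedence.
 */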
7561
7562 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
7563 {
7564         struct bnxt *bp = netdev_priv(dev);
7565         u32 flags = bp->flags;
7566         u32 changes;
7567         int rc = 0;
7568         bool re_init = false;
7569         bool update_tpa = false;
7570
7571         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
7572         if (features & NETIF_F_GRO_HW)
7573                 flags |= BNXT_FLAG_GRO;
7574         else if (features & NETIF_F_LRO)
7575                 flags |= BNXT_FLAG_LRO;
7576
7577         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
7578                 flags &= ~BNXT_FLAG_TPA;
7579
7580         if (features & NETIF_F_HW_VLAN_CTAG_RX)
7581                 flags |= BNXT_FLAG_STRIP_VLAN;
7582
7583         if (features & NETIF_F_NTUPLE)
7584                 flags |= BNXT_FLAG_RFS;
7585
7586         changes = flags ^ bp->flags;
7587         if (changes & BNXT_FLAG_TPA) {
7588                 update_tpa = true;
7589                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
7590                     (flags & BNXT_FLAG_TPA) == 0)
7591                         re_init = true;
7592         }
7593
7594         if (changes & ~BNXT_FLAG_TPA)
7595                 re_init = true;
7596
7597         if (flags != bp->flags) {
7598                 u32 old_flags = bp->flags;
7599
7600                 bp->flags = flags;
7601
7602                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
7603                         if (update_tpa)
7604                                 bnxt_set_ring_params(bp);
7605                         return rc;
7606                 }
7607
7608                 if (re_init) {
7609                         bnxt_close_nic(bp, false, false);
7610                         if (update_tpa)
7611                                 bnxt_set_ring_params(bp);
7612
7613                         return bnxt_open_nic(bp, false, false);
7614                 }
7615                 if (update_tpa) {
7616                         rc = bnxt_set_tpa(bp,
7617                                           (flags & BNXT_FLAG_TPA) ?
7618                                           true : false);
7619                         if (rc)
7620                                 bp->flags = old_flags;
7621                 }
7622         }
7623         return rc;
7624 }
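/* Editorial note (illustrative, not part of the driver): examples of how the
 * change mask above is handled.  Turning LRO on while TPA was previously off
 * toggles BNXT_FLAG_TPA across the on/off boundary, so the NIC is closed and
 * re-opened (without re-initializing IRQs).  Toggling a non-TPA flag such as
 * NETIF_F_NTUPLE also forces the close/open cycle, while a TPA change that
 * stays on (e.g. switching between LRO and GRO_HW) is applied with
 * bnxt_set_tpa() alone.
 */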
7625
7626 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
7627 {
7628         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
7629         int i = bnapi->index;
7630
7631         if (!txr)
7632                 return;
7633
7634         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
7635                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
7636                     txr->tx_cons);
7637 }
7638
7639 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
7640 {
7641         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
7642         int i = bnapi->index;
7643
7644         if (!rxr)
7645                 return;
7646
7647         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
7648                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
7649                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
7650                     rxr->rx_sw_agg_prod);
7651 }
7652
7653 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
7654 {
7655         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7656         int i = bnapi->index;
7657
7658         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
7659                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
7660 }
7661
7662 static void bnxt_dbg_dump_states(struct bnxt *bp)
7663 {
7664         int i;
7665         struct bnxt_napi *bnapi;
7666
7667         for (i = 0; i < bp->cp_nr_rings; i++) {
7668                 bnapi = bp->bnapi[i];
7669                 if (netif_msg_drv(bp)) {
7670                         bnxt_dump_tx_sw_state(bnapi);
7671                         bnxt_dump_rx_sw_state(bnapi);
7672                         bnxt_dump_cp_sw_state(bnapi);
7673                 }
7674         }
7675 }
7676
7677 static void bnxt_reset_task(struct bnxt *bp, bool silent)
7678 {
7679         if (!silent)
7680                 bnxt_dbg_dump_states(bp);
7681         if (netif_running(bp->dev)) {
7682                 int rc;
7683
7684                 if (!silent)
7685                         bnxt_ulp_stop(bp);
7686                 bnxt_close_nic(bp, false, false);
7687                 rc = bnxt_open_nic(bp, false, false);
7688                 if (!silent && !rc)
7689                         bnxt_ulp_start(bp);
7690         }
7691 }
7692
7693 static void bnxt_tx_timeout(struct net_device *dev)
7694 {
7695         struct bnxt *bp = netdev_priv(dev);
7696
7697         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
7698         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
7699         bnxt_queue_sp_work(bp);
7700 }
7701
7702 static void bnxt_timer(struct timer_list *t)
7703 {
7704         struct bnxt *bp = from_timer(bp, t, timer);
7705         struct net_device *dev = bp->dev;
7706
7707         if (!netif_running(dev))
7708                 return;
7709
7710         if (atomic_read(&bp->intr_sem) != 0)
7711                 goto bnxt_restart_timer;
7712
7713         if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
7714             bp->stats_coal_ticks) {
7715                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
7716                 bnxt_queue_sp_work(bp);
7717         }
7718
7719         if (bnxt_tc_flower_enabled(bp)) {
7720                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
7721                 bnxt_queue_sp_work(bp);
7722         }
7723
7724         if (bp->link_info.phy_retry) {
7725                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
7726                         bp->link_info.phy_retry = 0;
7727                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
7728                 } else {
7729                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
7730                         bnxt_queue_sp_work(bp);
7731                 }
7732         }
7733 bnxt_restart_timer:
7734         mod_timer(&bp->timer, jiffies + bp->current_interval);
7735 }
7736
7737 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
7738 {
7739         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
7740          * set.  If the device is being closed, bnxt_close() may be holding
7741          * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So
7742          * we must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
7743          */
7744         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7745         rtnl_lock();
7746 }
7747
7748 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
7749 {
7750         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7751         rtnl_unlock();
7752 }
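/* Editorial note (illustrative, not part of the driver): the deadlock the
 * helpers above avoid, sketched:
 *
 *	bnxt_close() (holds rtnl)		bnxt_sp_task()
 *	-------------------------		--------------
 *	__bnxt_close_nic()			set_bit(IN_SP_TASK)
 *	  while (bnxt_drv_busy())		bnxt_reset() -> rtnl_lock()
 *		msleep(20);  <- spins while		<- would block on rtnl
 *		   IN_SP_TASK stays set
 *
 * Clearing BNXT_STATE_IN_SP_TASK before taking rtnl lets the close path make
 * progress; the bit is set again just before rtnl is released.
 */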
7753
7754 /* Only called from bnxt_sp_task() */
7755 static void bnxt_reset(struct bnxt *bp, bool silent)
7756 {
7757         bnxt_rtnl_lock_sp(bp);
7758         if (test_bit(BNXT_STATE_OPEN, &bp->state))
7759                 bnxt_reset_task(bp, silent);
7760         bnxt_rtnl_unlock_sp(bp);
7761 }
7762
7763 static void bnxt_cfg_ntp_filters(struct bnxt *);
7764
7765 static void bnxt_sp_task(struct work_struct *work)
7766 {
7767         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
7768
7769         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7770         smp_mb__after_atomic();
7771         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
7772                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7773                 return;
7774         }
7775
7776         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
7777                 bnxt_cfg_rx_mode(bp);
7778
7779         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
7780                 bnxt_cfg_ntp_filters(bp);
7781         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
7782                 bnxt_hwrm_exec_fwd_req(bp);
7783         if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
7784                 bnxt_hwrm_tunnel_dst_port_alloc(
7785                         bp, bp->vxlan_port,
7786                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7787         }
7788         if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
7789                 bnxt_hwrm_tunnel_dst_port_free(
7790                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7791         }
7792         if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
7793                 bnxt_hwrm_tunnel_dst_port_alloc(
7794                         bp, bp->nge_port,
7795                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7796         }
7797         if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
7798                 bnxt_hwrm_tunnel_dst_port_free(
7799                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7800         }
7801         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
7802                 bnxt_hwrm_port_qstats(bp);
7803                 bnxt_hwrm_port_qstats_ext(bp);
7804         }
7805
7806         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
7807                 int rc;
7808
7809                 mutex_lock(&bp->link_lock);
7810                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
7811                                        &bp->sp_event))
7812                         bnxt_hwrm_phy_qcaps(bp);
7813
7814                 rc = bnxt_update_link(bp, true);
7815                 mutex_unlock(&bp->link_lock);
7816                 if (rc)
7817                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
7818                                    rc);
7819         }
7820         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
7821                 int rc;
7822
7823                 mutex_lock(&bp->link_lock);
7824                 rc = bnxt_update_phy_setting(bp);
7825                 mutex_unlock(&bp->link_lock);
7826                 if (rc) {
7827                         netdev_warn(bp->dev, "update phy settings retry failed\n");
7828                 } else {
7829                         bp->link_info.phy_retry = false;
7830                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
7831                 }
7832         }
7833         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
7834                 mutex_lock(&bp->link_lock);
7835                 bnxt_get_port_module_status(bp);
7836                 mutex_unlock(&bp->link_lock);
7837         }
7838
7839         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
7840                 bnxt_tc_flow_stats_work(bp);
7841
7842         /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
7843          * must be the last functions called before exiting.
7844          */
7845         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
7846                 bnxt_reset(bp, false);
7847
7848         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
7849                 bnxt_reset(bp, true);
7850
7851         smp_mb__before_atomic();
7852         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7853 }
7854
7855 /* Under rtnl_lock */
7856 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
7857                      int tx_xdp)
7858 {
7859         int max_rx, max_tx, tx_sets = 1;
7860         int tx_rings_needed;
7861         int rx_rings = rx;
7862         int cp, vnics, rc;
7863
7864         if (tcs)
7865                 tx_sets = tcs;
7866
7867         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
7868         if (rc)
7869                 return rc;
7870
7871         if (max_rx < rx)
7872                 return -ENOMEM;
7873
7874         tx_rings_needed = tx * tx_sets + tx_xdp;
7875         if (max_tx < tx_rings_needed)
7876                 return -ENOMEM;
7877
7878         vnics = 1;
7879         if (bp->flags & BNXT_FLAG_RFS)
7880                 vnics += rx_rings;
7881
7882         if (bp->flags & BNXT_FLAG_AGG_RINGS)
7883                 rx_rings <<= 1;
7884         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
7885         if (BNXT_NEW_RM(bp))
7886                 cp += bnxt_get_ulp_msix_num(bp);
7887         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
7888                                      vnics);
7889 }
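/* Worked example (illustrative, not part of the driver): tx = 4 rings per TC,
 * tcs = 2, tx_xdp = 0 gives tx_rings_needed = 4 * 2 = 8.  With rx = 4 and RFS
 * enabled, vnics = 1 + 4 = 5; with aggregation rings the RX ring count doubles
 * to 8 for the firmware check.  Completion rings: shared mode needs
 * max(8, 4) = 8, otherwise 8 + 4 = 12, plus any ULP MSI-X vectors when the
 * new resource manager is in use.
 */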
7890
7891 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
7892 {
7893         if (bp->bar2) {
7894                 pci_iounmap(pdev, bp->bar2);
7895                 bp->bar2 = NULL;
7896         }
7897
7898         if (bp->bar1) {
7899                 pci_iounmap(pdev, bp->bar1);
7900                 bp->bar1 = NULL;
7901         }
7902
7903         if (bp->bar0) {
7904                 pci_iounmap(pdev, bp->bar0);
7905                 bp->bar0 = NULL;
7906         }
7907 }
7908
7909 static void bnxt_cleanup_pci(struct bnxt *bp)
7910 {
7911         bnxt_unmap_bars(bp, bp->pdev);
7912         pci_release_regions(bp->pdev);
7913         pci_disable_device(bp->pdev);
7914 }
7915
7916 static void bnxt_init_dflt_coal(struct bnxt *bp)
7917 {
7918         struct bnxt_coal *coal;
7919
7920         /* Tick values in microseconds.
7921          * 1 coal_buf x bufs_per_record = 1 completion record.
7922          */
7923         coal = &bp->rx_coal;
7924         coal->coal_ticks = 14;
7925         coal->coal_bufs = 30;
7926         coal->coal_ticks_irq = 1;
7927         coal->coal_bufs_irq = 2;
7928         coal->idle_thresh = 50;
7929         coal->bufs_per_record = 2;
7930         coal->budget = 64;              /* NAPI budget */
7931
7932         coal = &bp->tx_coal;
7933         coal->coal_ticks = 28;
7934         coal->coal_bufs = 30;
7935         coal->coal_ticks_irq = 2;
7936         coal->coal_bufs_irq = 2;
7937         coal->bufs_per_record = 1;
7938
7939         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
7940 }
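/* Editorial note (illustrative, not part of the driver): with the RX defaults
 * above, an interrupt appears to be coalesced for at most ~14 usecs or 30
 * buffers, whichever is reached first; with bufs_per_record = 2, those 30
 * buffers correspond to roughly 15 completion records.  The TX side uses a
 * longer tick of 28 usecs.
 */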
7941
7942 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
7943 {
7944         int rc;
7945         struct bnxt *bp = netdev_priv(dev);
7946
7947         SET_NETDEV_DEV(dev, &pdev->dev);
7948
7949         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7950         rc = pci_enable_device(pdev);
7951         if (rc) {
7952                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7953                 goto init_err;
7954         }
7955
7956         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7957                 dev_err(&pdev->dev,
7958                         "Cannot find PCI device base address, aborting\n");
7959                 rc = -ENODEV;
7960                 goto init_err_disable;
7961         }
7962
7963         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7964         if (rc) {
7965                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7966                 goto init_err_disable;
7967         }
7968
7969         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
7970             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7971                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                     rc = -EIO;	/* don't return 0 from this error path */
7972                 goto init_err_disable;
7973         }
7974
7975         pci_set_master(pdev);
7976
7977         bp->dev = dev;
7978         bp->pdev = pdev;
7979
7980         bp->bar0 = pci_ioremap_bar(pdev, 0);
7981         if (!bp->bar0) {
7982                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
7983                 rc = -ENOMEM;
7984                 goto init_err_release;
7985         }
7986
7987         bp->bar1 = pci_ioremap_bar(pdev, 2);
7988         if (!bp->bar1) {
7989                 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
7990                 rc = -ENOMEM;
7991                 goto init_err_release;
7992         }
7993
7994         bp->bar2 = pci_ioremap_bar(pdev, 4);
7995         if (!bp->bar2) {
7996                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
7997                 rc = -ENOMEM;
7998                 goto init_err_release;
7999         }
8000
8001         pci_enable_pcie_error_reporting(pdev);
8002
8003         INIT_WORK(&bp->sp_task, bnxt_sp_task);
8004
8005         spin_lock_init(&bp->ntp_fltr_lock);
8006
8007         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
8008         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
8009
8010         bnxt_init_dflt_coal(bp);
8011
8012         timer_setup(&bp->timer, bnxt_timer, 0);
8013         bp->current_interval = BNXT_TIMER_INTERVAL;
8014
8015         clear_bit(BNXT_STATE_OPEN, &bp->state);
8016         return 0;
8017
8018 init_err_release:
8019         bnxt_unmap_bars(bp, pdev);
8020         pci_release_regions(pdev);
8021
8022 init_err_disable:
8023         pci_disable_device(pdev);
8024
8025 init_err:
8026         return rc;
8027 }
8028
8029 /* rtnl_lock held */
8030 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
8031 {
8032         struct sockaddr *addr = p;
8033         struct bnxt *bp = netdev_priv(dev);
8034         int rc = 0;
8035
8036         if (!is_valid_ether_addr(addr->sa_data))
8037                 return -EADDRNOTAVAIL;
8038
8039         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
8040                 return 0;
8041
8042         rc = bnxt_approve_mac(bp, addr->sa_data, true);
8043         if (rc)
8044                 return rc;
8045
8046         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8047         if (netif_running(dev)) {
8048                 bnxt_close_nic(bp, false, false);
8049                 rc = bnxt_open_nic(bp, false, false);
8050         }
8051
8052         return rc;
8053 }
8054
8055 /* rtnl_lock held */
8056 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
8057 {
8058         struct bnxt *bp = netdev_priv(dev);
8059
8060         if (netif_running(dev))
8061                 bnxt_close_nic(bp, false, false);
8062
8063         dev->mtu = new_mtu;
8064         bnxt_set_ring_params(bp);
8065
8066         if (netif_running(dev))
8067                 return bnxt_open_nic(bp, false, false);
8068
8069         return 0;
8070 }
8071
8072 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
8073 {
8074         struct bnxt *bp = netdev_priv(dev);
8075         bool sh = false;
8076         int rc;
8077
8078         if (tc > bp->max_tc) {
8079                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
8080                            tc, bp->max_tc);
8081                 return -EINVAL;
8082         }
8083
8084         if (netdev_get_num_tc(dev) == tc)
8085                 return 0;
8086
8087         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8088                 sh = true;
8089
8090         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
8091                               sh, tc, bp->tx_nr_rings_xdp);
8092         if (rc)
8093                 return rc;
8094
8095         /* Need to close the device and redo the hw resource allocations */
8096         if (netif_running(bp->dev))
8097                 bnxt_close_nic(bp, true, false);
8098
8099         if (tc) {
8100                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
8101                 netdev_set_num_tc(dev, tc);
8102         } else {
8103                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
8104                 netdev_reset_tc(dev);
8105         }
8106         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
8107         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8108                                bp->tx_nr_rings + bp->rx_nr_rings;
8109         bp->num_stat_ctxs = bp->cp_nr_rings;
8110
8111         if (netif_running(bp->dev))
8112                 return bnxt_open_nic(bp, true, false);
8113
8114         return 0;
8115 }
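/* Worked example (illustrative, not part of the driver): with
 * tx_nr_rings_per_tc = 4, rx_nr_rings = 4 and no XDP rings, requesting tc = 2
 * gives tx_nr_rings = 4 * 2 = 8.  With shared rings, cp_nr_rings =
 * max(8, 4) = 8; otherwise cp_nr_rings = 8 + 4 = 12.  num_stat_ctxs follows
 * cp_nr_rings.  Requesting tc = 0 reverts tx_nr_rings to tx_nr_rings_per_tc
 * and clears the TC configuration on the netdev.
 */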
8116
8117 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8118                                   void *cb_priv)
8119 {
8120         struct bnxt *bp = cb_priv;
8121
8122         if (!bnxt_tc_flower_enabled(bp) ||
8123             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
8124                 return -EOPNOTSUPP;
8125
8126         switch (type) {
8127         case TC_SETUP_CLSFLOWER:
8128                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
8129         default:
8130                 return -EOPNOTSUPP;
8131         }
8132 }
8133
8134 static int bnxt_setup_tc_block(struct net_device *dev,
8135                                struct tc_block_offload *f)
8136 {
8137         struct bnxt *bp = netdev_priv(dev);
8138
8139         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
8140                 return -EOPNOTSUPP;
8141
8142         switch (f->command) {
8143         case TC_BLOCK_BIND:
8144                 return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
8145                                              bp, bp, f->extack);
8146         case TC_BLOCK_UNBIND:
8147                 tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
8148                 return 0;
8149         default:
8150                 return -EOPNOTSUPP;
8151         }
8152 }
8153
8154 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
8155                          void *type_data)
8156 {
8157         switch (type) {
8158         case TC_SETUP_BLOCK:
8159                 return bnxt_setup_tc_block(dev, type_data);
8160         case TC_SETUP_QDISC_MQPRIO: {
8161                 struct tc_mqprio_qopt *mqprio = type_data;
8162
8163                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
8164
8165                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
8166         }
8167         default:
8168                 return -EOPNOTSUPP;
8169         }
8170 }
8171
8172 #ifdef CONFIG_RFS_ACCEL
8173 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
8174                             struct bnxt_ntuple_filter *f2)
8175 {
8176         struct flow_keys *keys1 = &f1->fkeys;
8177         struct flow_keys *keys2 = &f2->fkeys;
8178
8179         if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
8180             keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
8181             keys1->ports.ports == keys2->ports.ports &&
8182             keys1->basic.ip_proto == keys2->basic.ip_proto &&
8183             keys1->basic.n_proto == keys2->basic.n_proto &&
8184             keys1->control.flags == keys2->control.flags &&
8185             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
8186             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
8187                 return true;
8188
8189         return false;
8190 }
8191
8192 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
8193                               u16 rxq_index, u32 flow_id)
8194 {
8195         struct bnxt *bp = netdev_priv(dev);
8196         struct bnxt_ntuple_filter *fltr, *new_fltr;
8197         struct flow_keys *fkeys;
8198         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
8199         int rc = 0, idx, bit_id, l2_idx = 0;
8200         struct hlist_head *head;
8201
8202         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
8203                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8204                 int off = 0, j;
8205
8206                 netif_addr_lock_bh(dev);
8207                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
8208                         if (ether_addr_equal(eth->h_dest,
8209                                              vnic->uc_list + off)) {
8210                                 l2_idx = j + 1;
8211                                 break;
8212                         }
8213                 }
8214                 netif_addr_unlock_bh(dev);
8215                 if (!l2_idx)
8216                         return -EINVAL;
8217         }
8218         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
8219         if (!new_fltr)
8220                 return -ENOMEM;
8221
8222         fkeys = &new_fltr->fkeys;
8223         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
8224                 rc = -EPROTONOSUPPORT;
8225                 goto err_free;
8226         }
8227
8228         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
8229              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
8230             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
8231              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
8232                 rc = -EPROTONOSUPPORT;
8233                 goto err_free;
8234         }
8235         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
8236             bp->hwrm_spec_code < 0x10601) {
8237                 rc = -EPROTONOSUPPORT;
8238                 goto err_free;
8239         }
8240         if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
8241             bp->hwrm_spec_code < 0x10601) {
8242                 rc = -EPROTONOSUPPORT;
8243                 goto err_free;
8244         }
8245
8246         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
8247         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
8248
8249         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
8250         head = &bp->ntp_fltr_hash_tbl[idx];
8251         rcu_read_lock();
8252         hlist_for_each_entry_rcu(fltr, head, hash) {
8253                 if (bnxt_fltr_match(fltr, new_fltr)) {
8254                         rcu_read_unlock();
8255                         rc = 0;
8256                         goto err_free;
8257                 }
8258         }
8259         rcu_read_unlock();
8260
8261         spin_lock_bh(&bp->ntp_fltr_lock);
8262         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
8263                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
8264         if (bit_id < 0) {
8265                 spin_unlock_bh(&bp->ntp_fltr_lock);
8266                 rc = -ENOMEM;
8267                 goto err_free;
8268         }
8269
8270         new_fltr->sw_id = (u16)bit_id;
8271         new_fltr->flow_id = flow_id;
8272         new_fltr->l2_fltr_idx = l2_idx;
8273         new_fltr->rxq = rxq_index;
8274         hlist_add_head_rcu(&new_fltr->hash, head);
8275         bp->ntp_fltr_count++;
8276         spin_unlock_bh(&bp->ntp_fltr_lock);
8277
8278         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
8279         bnxt_queue_sp_work(bp);
8280
8281         return new_fltr->sw_id;
8282
8283 err_free:
8284         kfree(new_fltr);
8285         return rc;
8286 }
8287
8288 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
8289 {
8290         int i;
8291
8292         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
8293                 struct hlist_head *head;
8294                 struct hlist_node *tmp;
8295                 struct bnxt_ntuple_filter *fltr;
8296                 int rc;
8297
8298                 head = &bp->ntp_fltr_hash_tbl[i];
8299                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
8300                         bool del = false;
8301
8302                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
8303                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
8304                                                         fltr->flow_id,
8305                                                         fltr->sw_id)) {
8306                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
8307                                                                          fltr);
8308                                         del = true;
8309                                 }
8310                         } else {
8311                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
8312                                                                        fltr);
8313                                 if (rc)
8314                                         del = true;
8315                                 else
8316                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
8317                         }
8318
8319                         if (del) {
8320                                 spin_lock_bh(&bp->ntp_fltr_lock);
8321                                 hlist_del_rcu(&fltr->hash);
8322                                 bp->ntp_fltr_count--;
8323                                 spin_unlock_bh(&bp->ntp_fltr_lock);
8324                                 synchronize_rcu();
8325                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
8326                                 kfree(fltr);
8327                         }
8328                 }
8329         }
8330         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
8331                 netdev_info(bp->dev, "Received PF driver unload event!\n");
8332 }
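/* Editorial note (illustrative, not part of the driver): lifecycle of an RFS
 * filter as implemented above.  bnxt_rx_flow_steer() only adds the filter to
 * the software hash table (bucket = skb_get_hash_raw() & BNXT_NTP_FLTR_HASH_MASK)
 * and schedules bnxt_cfg_ntp_filters(), which then allocates the filter in
 * firmware and sets BNXT_FLTR_VALID.  On later passes, filters that
 * rps_may_expire_flow() reports as stale are freed in firmware, unhashed, and
 * their sw_id bit is returned to ntp_fltr_bmap.
 */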
8333
8334 #else
8335
8336 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
8337 {
8338 }
8339
8340 #endif /* CONFIG_RFS_ACCEL */
8341
8342 static void bnxt_udp_tunnel_add(struct net_device *dev,
8343                                 struct udp_tunnel_info *ti)
8344 {
8345         struct bnxt *bp = netdev_priv(dev);
8346
8347         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
8348                 return;
8349
8350         if (!netif_running(dev))
8351                 return;
8352
8353         switch (ti->type) {
8354         case UDP_TUNNEL_TYPE_VXLAN:
8355                 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
8356                         return;
8357
8358                 bp->vxlan_port_cnt++;
8359                 if (bp->vxlan_port_cnt == 1) {
8360                         bp->vxlan_port = ti->port;
8361                         set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
8362                         bnxt_queue_sp_work(bp);
8363                 }
8364                 break;
8365         case UDP_TUNNEL_TYPE_GENEVE:
8366                 if (bp->nge_port_cnt && bp->nge_port != ti->port)
8367                         return;
8368
8369                 bp->nge_port_cnt++;
8370                 if (bp->nge_port_cnt == 1) {
8371                         bp->nge_port = ti->port;
8372                         set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
8373                 }
8374                 break;
8375         default:
8376                 return;
8377         }
8378
8379         bnxt_queue_sp_work(bp);
8380 }
8381
8382 static void bnxt_udp_tunnel_del(struct net_device *dev,
8383                                 struct udp_tunnel_info *ti)
8384 {
8385         struct bnxt *bp = netdev_priv(dev);
8386
8387         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
8388                 return;
8389
8390         if (!netif_running(dev))
8391                 return;
8392
8393         switch (ti->type) {
8394         case UDP_TUNNEL_TYPE_VXLAN:
8395                 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
8396                         return;
8397                 bp->vxlan_port_cnt--;
8398
8399                 if (bp->vxlan_port_cnt != 0)
8400                         return;
8401
8402                 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
8403                 break;
8404         case UDP_TUNNEL_TYPE_GENEVE:
8405                 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
8406                         return;
8407                 bp->nge_port_cnt--;
8408
8409                 if (bp->nge_port_cnt != 0)
8410                         return;
8411
8412                 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
8413                 break;
8414         default:
8415                 return;
8416         }
8417
8418         bnxt_queue_sp_work(bp);
8419 }
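/* Editorial note (illustrative, not part of the driver): the tunnel port
 * handling above is a simple per-type refcount.  The first add of a given
 * VXLAN (or GENEVE) port records it and schedules a firmware dst-port alloc;
 * further adds of the same port only bump the count, and an add for a
 * different port while one is already programmed is ignored.  The firmware
 * free is only scheduled when the matching delete brings the count back to
 * zero.
 */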
8420
8421 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8422                                struct net_device *dev, u32 filter_mask,
8423                                int nlflags)
8424 {
8425         struct bnxt *bp = netdev_priv(dev);
8426
8427         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
8428                                        nlflags, filter_mask, NULL);
8429 }
8430
8431 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
8432                                u16 flags)
8433 {
8434         struct bnxt *bp = netdev_priv(dev);
8435         struct nlattr *attr, *br_spec;
8436         int rem, rc = 0;
8437
8438         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
8439                 return -EOPNOTSUPP;
8440
8441         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8442         if (!br_spec)
8443                 return -EINVAL;
8444
8445         nla_for_each_nested(attr, br_spec, rem) {
8446                 u16 mode;
8447
8448                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8449                         continue;
8450
8451                 if (nla_len(attr) < sizeof(mode))
8452                         return -EINVAL;
8453
8454                 mode = nla_get_u16(attr);
8455                 if (mode == bp->br_mode)
8456                         break;
8457
8458                 rc = bnxt_hwrm_set_br_mode(bp, mode);
8459                 if (!rc)
8460                         bp->br_mode = mode;
8461                 break;
8462         }
8463         return rc;
8464 }
8465
8466 static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
8467                                    size_t len)
8468 {
8469         struct bnxt *bp = netdev_priv(dev);
8470         int rc;
8471
8472         /* The PF and its VF-reps only support the switchdev framework */
8473         if (!BNXT_PF(bp))
8474                 return -EOPNOTSUPP;
8475
8476         rc = snprintf(buf, len, "p%d", bp->pf.port_id);
8477
8478         if (rc >= len)
8479                 return -EOPNOTSUPP;
8480         return 0;
8481 }
8482
8483 int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
8484 {
8485         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
8486                 return -EOPNOTSUPP;
8487
8488         /* The PF and its VF-reps only support the switchdev framework */
8489         if (!BNXT_PF(bp))
8490                 return -EOPNOTSUPP;
8491
8492         switch (attr->id) {
8493         case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
8494                 attr->u.ppid.id_len = sizeof(bp->switch_id);
8495                 memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
8496                 break;
8497         default:
8498                 return -EOPNOTSUPP;
8499         }
8500         return 0;
8501 }
8502
8503 static int bnxt_swdev_port_attr_get(struct net_device *dev,
8504                                     struct switchdev_attr *attr)
8505 {
8506         return bnxt_port_attr_get(netdev_priv(dev), attr);
8507 }
8508
8509 static const struct switchdev_ops bnxt_switchdev_ops = {
8510         .switchdev_port_attr_get        = bnxt_swdev_port_attr_get
8511 };
8512
8513 static const struct net_device_ops bnxt_netdev_ops = {
8514         .ndo_open               = bnxt_open,
8515         .ndo_start_xmit         = bnxt_start_xmit,
8516         .ndo_stop               = bnxt_close,
8517         .ndo_get_stats64        = bnxt_get_stats64,
8518         .ndo_set_rx_mode        = bnxt_set_rx_mode,
8519         .ndo_do_ioctl           = bnxt_ioctl,
8520         .ndo_validate_addr      = eth_validate_addr,
8521         .ndo_set_mac_address    = bnxt_change_mac_addr,
8522         .ndo_change_mtu         = bnxt_change_mtu,
8523         .ndo_fix_features       = bnxt_fix_features,
8524         .ndo_set_features       = bnxt_set_features,
8525         .ndo_tx_timeout         = bnxt_tx_timeout,
8526 #ifdef CONFIG_BNXT_SRIOV
8527         .ndo_get_vf_config      = bnxt_get_vf_config,
8528         .ndo_set_vf_mac         = bnxt_set_vf_mac,
8529         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
8530         .ndo_set_vf_rate        = bnxt_set_vf_bw,
8531         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
8532         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
8533         .ndo_set_vf_trust       = bnxt_set_vf_trust,
8534 #endif
8535         .ndo_setup_tc           = bnxt_setup_tc,
8536 #ifdef CONFIG_RFS_ACCEL
8537         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
8538 #endif
8539         .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
8540         .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
8541         .ndo_bpf                = bnxt_xdp,
8542         .ndo_bridge_getlink     = bnxt_bridge_getlink,
8543         .ndo_bridge_setlink     = bnxt_bridge_setlink,
8544         .ndo_get_phys_port_name = bnxt_get_phys_port_name
8545 };
8546
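     /* PCI remove callback: disable SR-IOV and devlink on the PF, unregister
      * the netdev, then tear down TC, slow-path work, interrupts and HWRM
      * resources before freeing the netdev.
      */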
8547 static void bnxt_remove_one(struct pci_dev *pdev)
8548 {
8549         struct net_device *dev = pci_get_drvdata(pdev);
8550         struct bnxt *bp = netdev_priv(dev);
8551
8552         if (BNXT_PF(bp)) {
8553                 bnxt_sriov_disable(bp);
8554                 bnxt_dl_unregister(bp);
8555         }
8556
8557         pci_disable_pcie_error_reporting(pdev);
8558         unregister_netdev(dev);
8559         bnxt_shutdown_tc(bp);
8560         bnxt_cancel_sp_work(bp);
8561         bp->sp_event = 0;
8562
8563         bnxt_clear_int_mode(bp);
8564         bnxt_hwrm_func_drv_unrgtr(bp);
8565         bnxt_free_hwrm_resources(bp);
8566         bnxt_free_hwrm_short_cmd_req(bp);
8567         bnxt_ethtool_free(bp);
8568         bnxt_dcb_free(bp);
8569         kfree(bp->edev);
8570         bp->edev = NULL;
8571         bnxt_cleanup_pci(bp);
8572         free_netdev(dev);
8573 }
8574
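     /* Query PHY capabilities and the current link state from firmware and
      * seed the driver's link settings (autoneg, speed, flow control) from
      * the NVM-configured values.
      */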
8575 static int bnxt_probe_phy(struct bnxt *bp)
8576 {
8577         int rc = 0;
8578         struct bnxt_link_info *link_info = &bp->link_info;
8579
8580         rc = bnxt_hwrm_phy_qcaps(bp);
8581         if (rc) {
8582                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
8583                            rc);
8584                 return rc;
8585         }
8586         mutex_init(&bp->link_lock);
8587
8588         rc = bnxt_update_link(bp, false);
8589         if (rc) {
8590                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
8591                            rc);
8592                 return rc;
8593         }
8594
8595         /* Older firmware does not have supported_auto_speeds, so assume
8596          * that all supported speeds can be autonegotiated.
8597          */
8598         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
8599                 link_info->support_auto_speeds = link_info->support_speeds;
8600
8601         /* Initialize the ethtool settings copy with NVM settings */
8602         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
8603                 link_info->autoneg = BNXT_AUTONEG_SPEED;
8604                 if (bp->hwrm_spec_code >= 0x10201) {
8605                         if (link_info->auto_pause_setting &
8606                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
8607                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
8608                 } else {
8609                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
8610                 }
8611                 link_info->advertising = link_info->auto_link_speeds;
8612         } else {
8613                 link_info->req_link_speed = link_info->force_link_speed;
8614                 link_info->req_duplex = link_info->duplex_setting;
8615         }
8616         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
8617                 link_info->req_flow_ctrl =
8618                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
8619         else
8620                 link_info->req_flow_ctrl = link_info->force_pause_setting;
8621         return rc;
8622 }
8623
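     /* Return the number of MSI-X vectors the device supports.  The QSIZE
      * field of the MSI-X control word encodes the table size minus one, so
      * add one back; fall back to a single vector if MSI-X is absent.
      */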
8624 static int bnxt_get_max_irq(struct pci_dev *pdev)
8625 {
8626         u16 ctrl;
8627
8628         if (!pdev->msix_cap)
8629                 return 1;
8630
8631         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
8632         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
8633 }
8634
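     /* Compute the maximum usable RX/TX/completion rings from the resources
      * firmware reports, accounting for MSI-X vectors used by ULPs, stat
      * contexts, aggregation rings and hardware ring groups.
      */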
8635 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
8636                                 int *max_cp)
8637 {
8638         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8639         int max_ring_grps = 0;
8640
8641         *max_tx = hw_resc->max_tx_rings;
8642         *max_rx = hw_resc->max_rx_rings;
8643         *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
8644                         hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
8645         *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
8646         max_ring_grps = hw_resc->max_hw_ring_grps;
8647         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
8648                 *max_cp -= 1;
8649                 *max_rx -= 2;
8650         }
8651         if (bp->flags & BNXT_FLAG_AGG_RINGS)
8652                 *max_rx >>= 1;
8653         *max_rx = min_t(int, *max_rx, max_ring_grps);
8654 }
8655
8656 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
8657 {
8658         int rx, tx, cp;
8659
8660         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
8661         *max_rx = rx;
8662         *max_tx = tx;
8663         if (!rx || !tx || !cp)
8664                 return -ENOMEM;
8665
8666         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
8667 }
8668
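     /* Like bnxt_get_max_rings(), but if not enough rings are available,
      * retry with aggregation rings (and therefore LRO/GRO_HW) disabled, and
      * leave a minimum set of completion rings, IRQs and stat contexts for
      * RoCE when the device has the RoCE capability.
      */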
8669 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
8670                                bool shared)
8671 {
8672         int rc;
8673
8674         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
8675         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
8676                 /* Not enough rings, try disabling agg rings. */
8677                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8678                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
8679                 if (rc) {
8680                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
8681                         bp->flags |= BNXT_FLAG_AGG_RINGS;
8682                         return rc;
8683                 }
8684                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8685                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
8686                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
8687                 bnxt_set_ring_params(bp);
8688         }
8689
8690         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
8691                 int max_cp, max_stat, max_irq;
8692
8693                 /* Reserve minimum resources for RoCE */
8694                 max_cp = bnxt_get_max_func_cp_rings(bp);
8695                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
8696                 max_irq = bnxt_get_max_func_irqs(bp);
8697                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
8698                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
8699                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
8700                         return 0;
8701
8702                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
8703                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
8704                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
8705                 max_cp = min_t(int, max_cp, max_irq);
8706                 max_cp = min_t(int, max_cp, max_stat);
8707                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
8708                 if (rc)
8709                         rc = 0;
8710         }
8711         return rc;
8712 }
8713
8714 /* In the initial default shared ring setting, each shared ring must have an
8715  * RX/TX ring pair.
8716  */
8717 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
8718 {
8719         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
8720         bp->rx_nr_rings = bp->cp_nr_rings;
8721         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
8722         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
8723 }
8724
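     /* Choose and reserve the default RX/TX/completion ring counts from the
      * default RSS queue count, the per-port CPU budget and the resources
      * firmware can grant, then re-reserve if the first pass trimmed them.
      */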
8725 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
8726 {
8727         int dflt_rings, max_rx_rings, max_tx_rings, rc;
8728
8729         if (!bnxt_can_reserve_rings(bp))
8730                 return 0;
8731
8732         if (sh)
8733                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
8734         dflt_rings = netif_get_num_default_rss_queues();
8735         /* Reduce default rings on multi-port cards so that total default
8736          * rings do not exceed CPU count.
8737          */
8738         if (bp->port_count > 1) {
8739                 int max_rings =
8740                         max_t(int, num_online_cpus() / bp->port_count, 1);
8741
8742                 dflt_rings = min_t(int, dflt_rings, max_rings);
8743         }
8744         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
8745         if (rc)
8746                 return rc;
8747         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
8748         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
8749         if (sh)
8750                 bnxt_trim_dflt_sh_rings(bp);
8751         else
8752                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
8753         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
8754
8755         rc = __bnxt_reserve_rings(bp);
8756         if (rc)
8757                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
8758         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8759         if (sh)
8760                 bnxt_trim_dflt_sh_rings(bp);
8761
8762         /* Rings may have been trimmed, re-reserve the trimmed rings. */
8763         if (bnxt_need_reserve_rings(bp)) {
8764                 rc = __bnxt_reserve_rings(bp);
8765                 if (rc)
8766                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
8767                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8768         }
8769         bp->num_stat_ctxs = bp->cp_nr_rings;
8770         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8771                 bp->rx_nr_rings++;
8772                 bp->cp_nr_rings++;
8773         }
8774         return rc;
8775 }
8776
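     /* Set up default rings and interrupt mode if none have been reserved
      * yet (bp->tx_nr_rings == 0), then re-evaluate RFS/NTUPLE support.
      */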
8777 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
8778 {
8779         int rc;
8780
8781         if (bp->tx_nr_rings)
8782                 return 0;
8783
8784         bnxt_ulp_irq_stop(bp);
8785         bnxt_clear_int_mode(bp);
8786         rc = bnxt_set_dflt_rings(bp, true);
8787         if (rc) {
8788                 netdev_err(bp->dev, "Not enough rings available.\n");
8789                 goto init_dflt_ring_err;
8790         }
8791         rc = bnxt_init_int_mode(bp);
8792         if (rc)
8793                 goto init_dflt_ring_err;
8794
8795         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8796         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
8797                 bp->flags |= BNXT_FLAG_RFS;
8798                 bp->dev->features |= NETIF_F_NTUPLE;
8799         }
8800 init_dflt_ring_err:
8801         bnxt_ulp_irq_restart(bp, rc);
8802         return rc;
8803 }
8804
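     /* Re-query function capabilities/resources from firmware and
      * re-initialize the interrupt mode, closing and re-opening the NIC if it
      * is running.  Must be called with RTNL held.
      */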
8805 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
8806 {
8807         int rc;
8808
8809         ASSERT_RTNL();
8810         bnxt_hwrm_func_qcaps(bp);
8811
8812         if (netif_running(bp->dev))
8813                 __bnxt_close_nic(bp, true, false);
8814
8815         bnxt_ulp_irq_stop(bp);
8816         bnxt_clear_int_mode(bp);
8817         rc = bnxt_init_int_mode(bp);
8818         bnxt_ulp_irq_restart(bp, rc);
8819
8820         if (netif_running(bp->dev)) {
8821                 if (rc)
8822                         dev_close(bp->dev);
8823                 else
8824                         rc = bnxt_open_nic(bp, true, false);
8825         }
8826
8827         return rc;
8828 }
8829
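     /* Initialize the netdev MAC address: the PF uses the firmware-provided
      * MAC; a VF uses the administratively assigned MAC if one is set
      * (otherwise a random one) and asks the PF to approve it.
      */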
8830 static int bnxt_init_mac_addr(struct bnxt *bp)
8831 {
8832         int rc = 0;
8833
8834         if (BNXT_PF(bp)) {
8835                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
8836         } else {
8837 #ifdef CONFIG_BNXT_SRIOV
8838                 struct bnxt_vf_info *vf = &bp->vf;
8839                 bool strict_approval = true;
8840
8841                 if (is_valid_ether_addr(vf->mac_addr)) {
8842                         /* overwrite netdev dev_addr with admin VF MAC */
8843                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
8844                         /* Older PF driver or firmware may not approve this
8845                          * correctly.
8846                          */
8847                         strict_approval = false;
8848                 } else {
8849                         eth_hw_addr_random(bp->dev);
8850                 }
8851                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
8852 #endif
8853         }
8854         return rc;
8855 }
8856
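     /* PCI probe callback: allocate the netdev, establish HWRM communication
      * with firmware, query capabilities, configure offload features and
      * default rings, and register the net device.
      */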
8857 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8858 {
8859         static int version_printed;
8860         struct net_device *dev;
8861         struct bnxt *bp;
8862         int rc, max_irqs;
8863
8864         if (pci_is_bridge(pdev))
8865                 return -ENODEV;
8866
8867         if (version_printed++ == 0)
8868                 pr_info("%s", version);
8869
8870         max_irqs = bnxt_get_max_irq(pdev);
8871         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
8872         if (!dev)
8873                 return -ENOMEM;
8874
8875         bp = netdev_priv(dev);
8876
8877         if (bnxt_vf_pciid(ent->driver_data))
8878                 bp->flags |= BNXT_FLAG_VF;
8879
8880         if (pdev->msix_cap)
8881                 bp->flags |= BNXT_FLAG_MSIX_CAP;
8882
8883         rc = bnxt_init_board(pdev, dev);
8884         if (rc < 0)
8885                 goto init_err_free;
8886
8887         dev->netdev_ops = &bnxt_netdev_ops;
8888         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
8889         dev->ethtool_ops = &bnxt_ethtool_ops;
8890         SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
8891         pci_set_drvdata(pdev, dev);
8892
8893         rc = bnxt_alloc_hwrm_resources(bp);
8894         if (rc)
8895                 goto init_err_pci_clean;
8896
8897         mutex_init(&bp->hwrm_cmd_lock);
8898         rc = bnxt_hwrm_ver_get(bp);
8899         if (rc)
8900                 goto init_err_pci_clean;
8901
8902         if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
8903                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
8904                 if (rc)
8905                         goto init_err_pci_clean;
8906         }
8907
8908         rc = bnxt_hwrm_func_reset(bp);
8909         if (rc)
8910                 goto init_err_pci_clean;
8911
8912         bnxt_hwrm_fw_set_time(bp);
8913
8914         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
8915                            NETIF_F_TSO | NETIF_F_TSO6 |
8916                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
8917                            NETIF_F_GSO_IPXIP4 |
8918                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
8919                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
8920                            NETIF_F_RXCSUM | NETIF_F_GRO;
8921
8922         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
8923                 dev->hw_features |= NETIF_F_LRO;
8924
8925         dev->hw_enc_features =
8926                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
8927                         NETIF_F_TSO | NETIF_F_TSO6 |
8928                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
8929                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
8930                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
8931         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
8932                                     NETIF_F_GSO_GRE_CSUM;
8933         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
8934         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
8935                             NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
8936         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
8937                 dev->hw_features |= NETIF_F_GRO_HW;
8938         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
8939         if (dev->features & NETIF_F_GRO_HW)
8940                 dev->features &= ~NETIF_F_LRO;
8941         dev->priv_flags |= IFF_UNICAST_FLT;
8942
8943 #ifdef CONFIG_BNXT_SRIOV
8944         init_waitqueue_head(&bp->sriov_cfg_wait);
8945         mutex_init(&bp->sriov_lock);
8946 #endif
8947         bp->gro_func = bnxt_gro_func_5730x;
8948         if (BNXT_CHIP_P4_PLUS(bp))
8949                 bp->gro_func = bnxt_gro_func_5731x;
8950         else
8951                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
8952
8953         rc = bnxt_hwrm_func_drv_rgtr(bp);
8954         if (rc)
8955                 goto init_err_pci_clean;
8956
8957         rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
8958         if (rc)
8959                 goto init_err_pci_clean;
8960
8961         bp->ulp_probe = bnxt_ulp_probe;
8962
8963         /* Get the MAX capabilities for this function */
8964         rc = bnxt_hwrm_func_qcaps(bp);
8965         if (rc) {
8966                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
8967                            rc);
8968                 rc = -1;
8969                 goto init_err_pci_clean;
8970         }
8971         rc = bnxt_init_mac_addr(bp);
8972         if (rc) {
8973                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
8974                 rc = -EADDRNOTAVAIL;
8975                 goto init_err_pci_clean;
8976         }
8977         rc = bnxt_hwrm_queue_qportcfg(bp);
8978         if (rc) {
8979                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
8980                            rc);
8981                 rc = -1;
8982                 goto init_err_pci_clean;
8983         }
8984
8985         bnxt_hwrm_func_qcfg(bp);
8986         bnxt_hwrm_port_led_qcaps(bp);
8987         bnxt_ethtool_init(bp);
8988         bnxt_dcb_init(bp);
8989
8990         /* MTU range: 60 - FW defined max */
8991         dev->min_mtu = ETH_ZLEN;
8992         dev->max_mtu = bp->max_mtu;
8993
8994         rc = bnxt_probe_phy(bp);
8995         if (rc)
8996                 goto init_err_pci_clean;
8997
8998         bnxt_set_rx_skb_mode(bp, false);
8999         bnxt_set_tpa_flags(bp);
9000         bnxt_set_ring_params(bp);
9001         bnxt_set_max_func_irqs(bp, max_irqs);
9002         rc = bnxt_set_dflt_rings(bp, true);
9003         if (rc) {
9004                 netdev_err(bp->dev, "Not enough rings available.\n");
9005                 rc = -ENOMEM;
9006                 goto init_err_pci_clean;
9007         }
9008
9009         /* Default RSS hash cfg. */
9010         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
9011                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
9012                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
9013                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
9014         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
9015                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
9016                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
9017                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
9018         }
9019
9020         bnxt_hwrm_vnic_qcaps(bp);
9021         if (bnxt_rfs_supported(bp)) {
9022                 dev->hw_features |= NETIF_F_NTUPLE;
9023                 if (bnxt_rfs_capable(bp)) {
9024                         bp->flags |= BNXT_FLAG_RFS;
9025                         dev->features |= NETIF_F_NTUPLE;
9026                 }
9027         }
9028
9029         if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
9030                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
9031
9032         rc = bnxt_init_int_mode(bp);
9033         if (rc)
9034                 goto init_err_pci_clean;
9035
9036         /* No TC has been set yet and rings may have been trimmed due to
9037          * limited MSIX, so we re-initialize the TX rings per TC.
9038          */
9039         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9040
9041         bnxt_get_wol_settings(bp);
9042         if (bp->flags & BNXT_FLAG_WOL_CAP)
9043                 device_set_wakeup_enable(&pdev->dev, bp->wol);
9044         else
9045                 device_set_wakeup_capable(&pdev->dev, false);
9046
9047         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
9048
9049         if (BNXT_PF(bp)) {
9050                 if (!bnxt_pf_wq) {
9051                         bnxt_pf_wq =
9052                                 create_singlethread_workqueue("bnxt_pf_wq");
9053                         if (!bnxt_pf_wq) {
9054                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
9055                                 goto init_err_pci_clean;
9056                         }
9057                 }
9058                 bnxt_init_tc(bp);
9059         }
9060
9061         rc = register_netdev(dev);
9062         if (rc)
9063                 goto init_err_cleanup_tc;
9064
9065         if (BNXT_PF(bp))
9066                 bnxt_dl_register(bp);
9067
9068         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
9069                     board_info[ent->driver_data].name,
9070                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
9071         pcie_print_link_status(pdev);
9072
9073         return 0;
9074
9075 init_err_cleanup_tc:
9076         bnxt_shutdown_tc(bp);
9077         bnxt_clear_int_mode(bp);
9078
9079 init_err_pci_clean:
9080         bnxt_free_hwrm_resources(bp);
9081         bnxt_cleanup_pci(bp);
9082
9083 init_err_free:
9084         free_netdev(dev);
9085         return rc;
9086 }
9087
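     /* PCI shutdown callback: close the device and stop ULPs; on system
      * power-off, also arm wake-on-LAN and put the device into D3hot.
      */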
9088 static void bnxt_shutdown(struct pci_dev *pdev)
9089 {
9090         struct net_device *dev = pci_get_drvdata(pdev);
9091         struct bnxt *bp;
9092
9093         if (!dev)
9094                 return;
9095
9096         rtnl_lock();
9097         bp = netdev_priv(dev);
9098         if (!bp)
9099                 goto shutdown_exit;
9100
9101         if (netif_running(dev))
9102                 dev_close(dev);
9103
9104         bnxt_ulp_shutdown(bp);
9105
9106         if (system_state == SYSTEM_POWER_OFF) {
9107                 bnxt_clear_int_mode(bp);
9108                 pci_wake_from_d3(pdev, bp->wol);
9109                 pci_set_power_state(pdev, PCI_D3hot);
9110         }
9111
9112 shutdown_exit:
9113         rtnl_unlock();
9114 }
9115
9116 #ifdef CONFIG_PM_SLEEP
9117 static int bnxt_suspend(struct device *device)
9118 {
9119         struct pci_dev *pdev = to_pci_dev(device);
9120         struct net_device *dev = pci_get_drvdata(pdev);
9121         struct bnxt *bp = netdev_priv(dev);
9122         int rc = 0;
9123
9124         rtnl_lock();
9125         if (netif_running(dev)) {
9126                 netif_device_detach(dev);
9127                 rc = bnxt_close(dev);
9128         }
9129         bnxt_hwrm_func_drv_unrgtr(bp);
9130         rtnl_unlock();
9131         return rc;
9132 }
9133
9134 static int bnxt_resume(struct device *device)
9135 {
9136         struct pci_dev *pdev = to_pci_dev(device);
9137         struct net_device *dev = pci_get_drvdata(pdev);
9138         struct bnxt *bp = netdev_priv(dev);
9139         int rc = 0;
9140
9141         rtnl_lock();
9142         if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
9143                 rc = -ENODEV;
9144                 goto resume_exit;
9145         }
9146         rc = bnxt_hwrm_func_reset(bp);
9147         if (rc) {
9148                 rc = -EBUSY;
9149                 goto resume_exit;
9150         }
9151         bnxt_get_wol_settings(bp);
9152         if (netif_running(dev)) {
9153                 rc = bnxt_open(dev);
9154                 if (!rc)
9155                         netif_device_attach(dev);
9156         }
9157
9158 resume_exit:
9159         rtnl_unlock();
9160         return rc;
9161 }
9162
9163 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
9164 #define BNXT_PM_OPS (&bnxt_pm_ops)
9165
9166 #else
9167
9168 #define BNXT_PM_OPS NULL
9169
9170 #endif /* CONFIG_PM_SLEEP */
9171
9172 /**
9173  * bnxt_io_error_detected - called when PCI error is detected
9174  * @pdev: Pointer to PCI device
9175  * @state: The current pci connection state
9176  *
9177  * This function is called after a PCI bus error affecting
9178  * this device has been detected.
9179  */
9180 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
9181                                                pci_channel_state_t state)
9182 {
9183         struct net_device *netdev = pci_get_drvdata(pdev);
9184         struct bnxt *bp = netdev_priv(netdev);
9185
9186         netdev_info(netdev, "PCI I/O error detected\n");
9187
9188         rtnl_lock();
9189         netif_device_detach(netdev);
9190
9191         bnxt_ulp_stop(bp);
9192
9193         if (state == pci_channel_io_perm_failure) {
9194                 rtnl_unlock();
9195                 return PCI_ERS_RESULT_DISCONNECT;
9196         }
9197
9198         if (netif_running(netdev))
9199                 bnxt_close(netdev);
9200
9201         pci_disable_device(pdev);
9202         rtnl_unlock();
9203
9204         /* Request a slot reset. */
9205         return PCI_ERS_RESULT_NEED_RESET;
9206 }
9207
9208 /**
9209  * bnxt_io_slot_reset - called after the pci bus has been reset.
9210  * @pdev: Pointer to PCI device
9211  *
9212  * Restart the card from scratch, as if from a cold-boot.
9213  * At this point, the card has experienced a hard reset,
9214  * followed by fixups by BIOS, and has its config space
9215  * set up identically to what it was at cold boot.
9216  */
9217 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
9218 {
9219         struct net_device *netdev = pci_get_drvdata(pdev);
9220         struct bnxt *bp = netdev_priv(netdev);
9221         int err = 0;
9222         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
9223
9224         netdev_info(bp->dev, "PCI Slot Reset\n");
9225
9226         rtnl_lock();
9227
9228         if (pci_enable_device(pdev)) {
9229                 dev_err(&pdev->dev,
9230                         "Cannot re-enable PCI device after reset.\n");
9231         } else {
9232                 pci_set_master(pdev);
9233
9234                 err = bnxt_hwrm_func_reset(bp);
9235                 if (!err && netif_running(netdev))
9236                         err = bnxt_open(netdev);
9237
9238                 if (!err) {
9239                         result = PCI_ERS_RESULT_RECOVERED;
9240                         bnxt_ulp_start(bp);
9241                 }
9242         }
9243
9244         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
9245                 dev_close(netdev);
9246
9247         rtnl_unlock();
9248
9249         err = pci_cleanup_aer_uncorrect_error_status(pdev);
9250         if (err) {
9251                 dev_err(&pdev->dev,
9252                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
9253                          err); /* non-fatal, continue */
9254         }
9255
9256         return result;
9257 }
9258
9259 /**
9260  * bnxt_io_resume - called when traffic can start flowing again.
9261  * @pdev: Pointer to PCI device
9262  *
9263  * This callback is called when the error recovery driver tells
9264  * us that it's OK to resume normal operation.
9265  */
9266 static void bnxt_io_resume(struct pci_dev *pdev)
9267 {
9268         struct net_device *netdev = pci_get_drvdata(pdev);
9269
9270         rtnl_lock();
9271
9272         netif_device_attach(netdev);
9273
9274         rtnl_unlock();
9275 }
9276
9277 static const struct pci_error_handlers bnxt_err_handler = {
9278         .error_detected = bnxt_io_error_detected,
9279         .slot_reset     = bnxt_io_slot_reset,
9280         .resume         = bnxt_io_resume
9281 };
9282
9283 static struct pci_driver bnxt_pci_driver = {
9284         .name           = DRV_MODULE_NAME,
9285         .id_table       = bnxt_pci_tbl,
9286         .probe          = bnxt_init_one,
9287         .remove         = bnxt_remove_one,
9288         .shutdown       = bnxt_shutdown,
9289         .driver.pm      = BNXT_PM_OPS,
9290         .err_handler    = &bnxt_err_handler,
9291 #if defined(CONFIG_BNXT_SRIOV)
9292         .sriov_configure = bnxt_sriov_configure,
9293 #endif
9294 };
9295
9296 static int __init bnxt_init(void)
9297 {
9298         bnxt_debug_init();
9299         return pci_register_driver(&bnxt_pci_driver);
9300 }
9301
9302 static void __exit bnxt_exit(void)
9303 {
9304         pci_unregister_driver(&bnxt_pci_driver);
9305         if (bnxt_pf_wq)
9306                 destroy_workqueue(bnxt_pf_wq);
9307         bnxt_debug_exit();
9308 }
9309
9310 module_init(bnxt_init);
9311 module_exit(bnxt_exit);