/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
        /* Administrative fields for filter.
         */
        u32 valid:1;            /* filter allocated and valid */
        u32 locked:1;           /* filter is administratively locked */

        u32 pending:1;          /* filter action is pending firmware reply */
        u32 smtidx:8;           /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* The filter itself.  Most of this is a straight copy of information
         * provided by the extended ioctl().  Some fields are translated to
         * internal forms -- for instance the Ingress Queue ID passed in from
         * the ioctl() is translated into the Absolute Ingress Queue ID.
         */
        struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
                {PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
                { 0, } \
        }

#include "t4_pci_id_tbl.h"
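
/* For illustration only: with the macros above, including t4_pci_id_tbl.h
 * expands to roughly the following (the device IDs shown here are invented
 * examples, not the actual contents of the generated table):
 *
 *	static const struct pci_device_id cxgb4_pci_tbl[] = {
 *		{PCI_VDEVICE(CHELSIO, 0x4001), 4},
 *		...
 *		{ 0, }
 *	};
 */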

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
                 " parameter");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
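
/* Example (hypothetical invocation): restrict the driver to MSI or INTx,
 * e.g. on a platform with broken MSI-X support:
 *
 *	modprobe cxgb4 msi=1
 */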

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds, deprecated parameter");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters, "
                 "deprecated parameter");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
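
/* With the default offset of 2, a frame's 14-byte Ethernet header ends at
 * byte 16 of the DMA buffer, so the IP header that follows starts on a
 * 4-byte boundary (2 + 14 = 16).  With an offset of 0 it would start at
 * byte 14, which is not 4-byte aligned.
 */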

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
                 "deprecated parameter");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
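
/* Example (hypothetical invocation): instantiate 4 VFs on PF0 and PF1 and
 * none on PF2/PF3:
 *
 *	modprobe cxgb4 num_vf=4,4,0,0
 */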
#endif

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel-provided function (select_queue=0) or the
 * driver's cxgb_select_queue() function (select_queue=1).
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
                 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
                 "deprecated parameter");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case 10000:
                        s = "10Gbps";
                        break;
                case 1000:
                        s = "1000Mbps";
                        break;
                case 100:
                        s = "100Mbps";
                        break;
                case 40000:
                        s = "40Gbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
        int i;

        /* We use a simple mapping of Port TX Queue Index to DCB
         * Priority when we're enabling DCB.
         */
        for (i = 0; i < pi->nqsets; i++, txq++) {
                u32 name, value;
                int err;

                name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                        FW_PARAMS_PARAM_X_V(
                                FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
                        FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
                value = enable ? i : 0xffffffff;

                /* Since we can be called while atomic (from "interrupt
                 * level") we need to issue the Set Parameters Command
                 * without sleeping (timeout < 0).
                 */
                err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
                                            &name, &value,
                                            -FW_CMD_MAX_TIMEOUT);

                if (err)
                        dev_err(adap->pdev_dev,
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
                        txq->dcb_prio = value;
        }
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else {
#ifdef CONFIG_CHELSIO_T4_DCB
                        cxgb4_dcb_state_init(dev);
                        dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
                }

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->pf;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}
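
/* Worked example for the loops above (illustrative): with 10 secondary
 * unicast addresses, the first loop issues two t4_alloc_mac_filt() calls,
 * one with 7 addresses (addr[] is full and free == true, replacing the old
 * exact-match filters) and one with the remaining 3 (free == false).  Any
 * addresses the firmware cannot place in exact filter slots appear to be
 * accumulated into the uhash/mhash bitmaps (the &uhash/&mhash arguments)
 * that t4_set_addr_hash() programs at the end.
 */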

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->pf;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0) {
                local_bh_disable();
                ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
                                          true, CXGB4_DCB_ENABLED);
                local_bh_enable();
        }

        return ret;
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
        struct port_info *pi = netdev_priv(dev);

        if (!pi->dcb.enabled)
                return 0;

        return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
                (pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
        return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
        int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
        struct net_device *dev = adap->port[port];
        int old_dcb_enabled = cxgb4_dcb_enabled(dev);
        int new_dcb_enabled;

        cxgb4_dcb_handle_fw_update(adap, pcmd);
        new_dcb_enabled = cxgb4_dcb_enabled(dev);

        /* If the DCB has become enabled or disabled on the port then we're
         * going to need to set up/tear down DCB Priority parameters for the
         * TX Queues associated with the port.
         */
        if (new_dcb_enabled != old_dcb_enabled)
                dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter have loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int idx = GET_TID(rpl);
        unsigned int nidx = idx - adap->tids.ftid_base;
        unsigned int ret;
        struct filter_entry *f;

        if (idx >= adap->tids.ftid_base && nidx <
           (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
                ret = TCB_COOKIE_G(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                }
        }
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
           ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev,
                                "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
                const struct fw_port_cmd *pcmd = (const void *)p->data;
                unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
                unsigned int action =
                        FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_GET_PORT_INFO) {
                        int port = FW_PORT_CMD_PORTID_G(
                                        be32_to_cpu(pcmd->op_to_portid));
                        struct net_device *dev = q->adap->port[port];
                        int state_input = ((pcmd->u.info.dcbxdis_pkd &
                                            FW_PORT_CMD_DCBXDIS_F)
                                           ? CXGB4_DCB_INPUT_FW_DISABLED
                                           : CXGB4_DCB_INPUT_FW_ENABLED);

                        cxgb4_dcb_state_fsm(dev, state_input);
                }

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_L2_DCB_CFG)
                        dcb_rpl(q->adap, pcmd);
                else
#endif
                        if (p->type == 0)
                                t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
         */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;
        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

        if (v & PFSW_F) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
        }
        if (adap->flags & MASTER_PF)
                t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);

        for_each_rdmaciq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
                         adap->port[0]->name, i);
}
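
/* The resulting MSI-X vector layout, matching the naming above:
 *
 *	vector 0:	non-data interrupts (named after port 0's interface)
 *	vector 1:	firmware event queue ("<port0>-FWeventq")
 *	vector 2...:	one per Ethernet Rx queue of every port ("-Rx<i>"),
 *			then the offload ("-ofld<i>"), RDMA ("-rdma<i>") and
 *			RDMA CIQ ("-rdma-ciq<i>") queues
 *
 * request_msix_queue_irqs() below walks the same order starting at index 2.
 */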

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
        int msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmaciq(s, rdmaciqqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmaciq[rdmaciqqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaciqqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmaciq[rdmaciqqidx].rspq);
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
        for_each_rdmaciq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}

/**
 *      cxgb4_write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 *      Should never be called before the SGE Ethernet Rx queues are set up.
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        struct adapter *adapter = pi->adapter;
        const struct sge_eth_rxq *rxq;

        rxq = &adapter->sge.ethrxq[pi->first_qset];
        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = rxq[*queues].rspq.abs_id;

        err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        /* If Tunnel All Lookup isn't specified in the global RSS
         * Configuration, then we need to specify a default Ingress
         * Queue for any ingress packets which aren't hashed.  We'll
         * use our first ingress queue ...
         */
        if (!err)
                err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
                                       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
                                       rss[0]);
        kfree(rss);
        return err;
}
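
/* A minimal usage sketch, spreading the port's RSS slots evenly across its
 * queue sets before writing the table (this is exactly what setup_rss()
 * below does with the preallocated pi->rss array):
 *
 *	for (i = 0; i < pi->rss_size; i++)
 *		pi->rss[i] = i % pi->nqsets;
 *	err = cxgb4_write_rss(pi, pi->rss);
 */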

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, j, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                /* Fill default values with equal distribution */
                for (j = 0; j < pi->rss_size; j++)
                        pi->rss[j] = j % pi->nqsets;

                err = cxgb4_write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler) {
                        napi_disable(&q->napi);
                        local_bh_disable();
                        while (!cxgb_poll_lock_napi(q))
                                mdelay(1);
                        local_bh_enable();
                }
        }
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
        if (adap->flags & FULL_INIT_DONE) {
                t4_intr_disable(adap);
                if (adap->flags & USING_MSIX) {
                        free_msix_queue_irqs(adap);
                        free_irq(adap->msix_info[0].vec, adap);
                } else {
                        free_irq(adap->pdev->irq, adap);
                }
                quiesce_rx(adap);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler) {
                        cxgb_busy_poll_init_lock(q);
                        napi_enable(&q->napi);
                }
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
                             SEINTARM_V(q->intr_params) |
                             INGRESSQID_V(q->cntxt_id));
        }
}

static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
                           unsigned int nq, unsigned int per_chan, int msi_idx,
                           u16 *ids)
{
        int i, err;

        for (i = 0; i < nq; i++, q++) {
                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false,
                                       adap->port[i / per_chan],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler, 0);
                if (err)
                        return err;
                memset(&q->stats, 0, sizeof(q->stats));
                if (ids)
                        ids[i] = q->rspq.abs_id;
        }
        return 0;
}

/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, s->egr_sz);
        bitmap_zero(s->txq_maperr, s->egr_sz);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL, -1);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
         * don't forget to update the following, which need to be kept
         * synchronized with any changes here:
         *
         * 1. The calculations of MAX_INGQ in cxgb4.h.
         *
         * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
         *    to accommodate any new/deleted Ingress Queues
         *    which need MSI-X Vectors.
         *
         * 3. Update sge_qinfo_show() to include information on the
         *    new/deleted queues.
         */
        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler, -1);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler,
                                               t4_get_mps_bg_map(adap,
                                                                 pi->tx_chan));
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
                                            adap->port[i / j],
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
        err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
        if (err) \
                goto freeout; \
        if (msi_idx > 0) \
                msi_idx += nq; \
} while (0)

        ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
        ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
        j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
        ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);

#undef ALLOC_OFLD_RXQS

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, is_t4(adap->params.chip) ?
                                MPS_TRC_RSS_CONTROL_A :
                                MPS_T5_TRC_RSS_CONTROL_A,
                     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
        return 0;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
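
/* Typical usage sketch for the pair above: callers don't need to know which
 * allocator succeeded, since t4_free_mem() uses is_vmalloc_addr() to pick
 * vfree() or kfree().  (struct foo and nentries are placeholders, not
 * driver symbols.)
 *
 *	struct foo *tab = t4_alloc_mem(nentries * sizeof(*tab));
 *
 *	if (!tab)
 *		return -ENOMEM;
 *	...
 *	t4_free_mem(tab);
 */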

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter->l2t);
                if (f->l2t == NULL) {
                        kfree_skb(skb);
                        return -EAGAIN;
                }
                if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
                                        f->fs.eport, f->fs.dmac)) {
                        cxgb4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        kfree_skb(skb);
                        return -ENOMEM;
                }
        }

        ftid = adapter->tids.ftid_base + fidx;

        fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
        memset(fwr, 0, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
        fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
        fwr->tid_to_iq =
                htonl(FW_FILTER_WR_TID_V(ftid) |
                      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
                      FW_FILTER_WR_NOREPLY_V(0) |
                      FW_FILTER_WR_IQ_V(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
                      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
                      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
                      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
                      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
                      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
                      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
                      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
                      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
                      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
                      FW_FILTER_WR_PRIO_V(f->fs.prio) |
                      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
                 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
                 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
                 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
                 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
                 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                htons(FW_FILTER_WR_RX_CHAN_V(0) |
                      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
                      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
                      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
                      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
                      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
                      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
                      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
                      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);
        if (f->fs.newsmac)
                memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int len, ftid;

        len = sizeof(*fwr);
        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        fwr = (struct fw_filter_wr *)__skb_put(skb, len);
        t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                             void *accel_priv, select_queue_fallback_t fallback)
{
        int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
        /* If Data Center Bridging has been successfully negotiated on this
         * link then we'll use the skb's priority to map it to a TX Queue.
         * The skb's priority is determined via the VLAN Tag Priority Code
         * Point field.
         */
        if (cxgb4_dcb_enabled(dev)) {
                u16 vlan_tci;
                int err;

                err = vlan_get_tag(skb, &vlan_tci);
                if (unlikely(err)) {
                        if (net_ratelimit())
                                netdev_warn(dev,
                                            "TX Packet without VLAN Tag on DCB Link\n");
                        txq = 0;
                } else {
                        txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
                        if (skb->protocol == htons(ETH_P_FCOE))
                                txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
                }
                return txq;
        }
#endif /* CONFIG_CHELSIO_T4_DCB */

        if (select_queue) {
                txq = (skb_rx_queue_recorded(skb)
                        ? skb_get_rx_queue(skb)
                        : smp_processor_id());

                while (unlikely(txq >= dev->real_num_tx_queues))
                        txq -= dev->real_num_tx_queues;

                return txq;
        }

        return fallback(dev, skb) % dev->real_num_tx_queues;
}
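
/* Worked example for the DCB path above: a VLAN TCI of 0x6005 carries
 * Priority Code Point (0x6005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT == 3,
 * so the skb is sent on TX queue 3.  The select_queue=1 path instead reuses
 * the recorded Rx queue (or the current CPU number) and wraps it into the
 * valid TX queue range.
 */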

static int closest_timer(const struct sge *s, int time)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}
1387
1388 /**
1389  *      cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
1390  *      @q: the Rx queue
1391  *      @us: the hold-off time in us, or 0 to disable timer
1392  *      @cnt: the hold-off packet count, or 0 to disable counter
1393  *
1394  *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
1395  *      one of the two needs to be enabled for the queue to generate interrupts.
1396  */
1397 int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
1398                                unsigned int us, unsigned int cnt)
1399 {
1400         struct adapter *adap = q->adap;
1401
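             /* If both the holdoff timer and the packet-count threshold
              * are disabled, fall back to a packet count of one so the
              * queue can still generate interrupts.
              */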
1402         if ((us | cnt) == 0)
1403                 cnt = 1;
1404
1405         if (cnt) {
1406                 int err;
1407                 u32 v, new_idx;
1408
1409                 new_idx = closest_thres(&adap->sge, cnt);
1410                 if (q->desc && q->pktcnt_idx != new_idx) {
1411                         /* the queue has already been created, update it */
1412                         v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1413                             FW_PARAMS_PARAM_X_V(
1414                                         FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1415                             FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
1416                         err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
1417                                             &v, &new_idx);
1418                         if (err)
1419                                 return err;
1420                 }
1421                 q->pktcnt_idx = new_idx;
1422         }
1423
1424         us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1425         q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
1426         return 0;
1427 }
1428
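     /* ndo_set_features handler.  Only a change to Rx VLAN tag stripping
      * is acted on here: push the new setting to the VI's Rx mode and, on
      * failure, restore the previous value of the feature bit.
      */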
1429 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
1430 {
1431         const struct port_info *pi = netdev_priv(dev);
1432         netdev_features_t changed = dev->features ^ features;
1433         int err;
1434
1435         if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
1436                 return 0;
1437
1438         err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
1439                             -1, -1, -1,
1440                             !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
1441         if (unlikely(err))
1442                 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
1443         return err;
1444 }
1445
1446 static int setup_debugfs(struct adapter *adap)
1447 {
1448         if (IS_ERR_OR_NULL(adap->debugfs_root))
1449                 return -1;
1450
1451 #ifdef CONFIG_DEBUG_FS
1452         t4_setup_debugfs(adap);
1453 #endif
1454         return 0;
1455 }
1456
1457 /*
1458  * upper-layer driver support
1459  */
1460
1461 /*
1462  * Allocate an active-open TID and set it to the supplied value.
1463  */
1464 int cxgb4_alloc_atid(struct tid_info *t, void *data)
1465 {
1466         int atid = -1;
1467
1468         spin_lock_bh(&t->atid_lock);
1469         if (t->afree) {
1470                 union aopen_entry *p = t->afree;
1471
1472                 atid = (p - t->atid_tab) + t->atid_base;
1473                 t->afree = p->next;
1474                 p->data = data;
1475                 t->atids_in_use++;
1476         }
1477         spin_unlock_bh(&t->atid_lock);
1478         return atid;
1479 }
1480 EXPORT_SYMBOL(cxgb4_alloc_atid);
1481
1482 /*
1483  * Release an active-open TID.
1484  */
1485 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1486 {
1487         union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
1488
1489         spin_lock_bh(&t->atid_lock);
1490         p->next = t->afree;
1491         t->afree = p;
1492         t->atids_in_use--;
1493         spin_unlock_bh(&t->atid_lock);
1494 }
1495 EXPORT_SYMBOL(cxgb4_free_atid);
1496
1497 /*
1498  * Allocate a server TID and set it to the supplied value.
1499  */
1500 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1501 {
1502         int stid;
1503
1504         spin_lock_bh(&t->stid_lock);
1505         if (family == PF_INET) {
1506                 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1507                 if (stid < t->nstids)
1508                         __set_bit(stid, t->stid_bmap);
1509                 else
1510                         stid = -1;
1511         } else {
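                     /* An IPv6 server occupies a block of 4 consecutive
                      * stids, so grab a naturally aligned order-2 region
                      * of the bitmap.
                      */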
1512                 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
1513                 if (stid < 0)
1514                         stid = -1;
1515         }
1516         if (stid >= 0) {
1517                 t->stid_tab[stid].data = data;
1518                 stid += t->stid_base;
1519                 /* An IPv6 server entry requires a maximum of 520 bits,
1520                  * i.e. 16 TCAM cells, which is equivalent to 4 TIDs.
1521                  * With CLIP enabled it needs only 2 TIDs.
1522                  */
1523                 if (family == PF_INET)
1524                         t->stids_in_use++;
1525                 else
1526                         t->stids_in_use += 4;
1527         }
1528         spin_unlock_bh(&t->stid_lock);
1529         return stid;
1530 }
1531 EXPORT_SYMBOL(cxgb4_alloc_stid);
1532
1533 /* Allocate a server filter TID and set it to the supplied value.
1534  */
1535 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
1536 {
1537         int stid;
1538
1539         spin_lock_bh(&t->stid_lock);
1540         if (family == PF_INET) {
1541                 stid = find_next_zero_bit(t->stid_bmap,
1542                                 t->nstids + t->nsftids, t->nstids);
1543                 if (stid < (t->nstids + t->nsftids))
1544                         __set_bit(stid, t->stid_bmap);
1545                 else
1546                         stid = -1;
1547         } else {
1548                 stid = -1;
1549         }
1550         if (stid >= 0) {
1551                 t->stid_tab[stid].data = data;
1552                 stid -= t->nstids;
1553                 stid += t->sftid_base;
1554                 t->stids_in_use++;
1555         }
1556         spin_unlock_bh(&t->stid_lock);
1557         return stid;
1558 }
1559 EXPORT_SYMBOL(cxgb4_alloc_sftid);
1560
1561 /* Release a server TID.
1562  */
1563 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1564 {
1565         /* Is it a server filter TID? */
1566         if (t->nsftids && (stid >= t->sftid_base)) {
1567                 stid -= t->sftid_base;
1568                 stid += t->nstids;
1569         } else {
1570                 stid -= t->stid_base;
1571         }
1572
1573         spin_lock_bh(&t->stid_lock);
1574         if (family == PF_INET)
1575                 __clear_bit(stid, t->stid_bmap);
1576         else
1577                 bitmap_release_region(t->stid_bmap, stid, 2);
1578         t->stid_tab[stid].data = NULL;
1579         if (family == PF_INET)
1580                 t->stids_in_use--;
1581         else
1582                 t->stids_in_use -= 4;
1583         spin_unlock_bh(&t->stid_lock);
1584 }
1585 EXPORT_SYMBOL(cxgb4_free_stid);
1586
1587 /*
1588  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
1589  */
1590 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1591                            unsigned int tid)
1592 {
1593         struct cpl_tid_release *req;
1594
1595         set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1596         req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
1597         INIT_TP_WR(req, tid);
1598         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1599 }
1600
1601 /*
1602  * Queue a TID release request and if necessary schedule a work queue to
1603  * process it.
1604  */
1605 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1606                                     unsigned int tid)
1607 {
1608         void **p = &t->tid_tab[tid];
1609         struct adapter *adap = container_of(t, struct adapter, tids);
1610
1611         spin_lock_bh(&adap->tid_release_lock);
1612         *p = adap->tid_release_head;
1613         /* Low 2 bits encode the Tx channel number */
1614         adap->tid_release_head = (void **)((uintptr_t)p | chan);
1615         if (!adap->tid_release_task_busy) {
1616                 adap->tid_release_task_busy = true;
1617                 queue_work(adap->workq, &adap->tid_release_task);
1618         }
1619         spin_unlock_bh(&adap->tid_release_lock);
1620 }
1621
1622 /*
1623  * Process the list of pending TID release requests.
1624  */
1625 static void process_tid_release_list(struct work_struct *work)
1626 {
1627         struct sk_buff *skb;
1628         struct adapter *adap;
1629
1630         adap = container_of(work, struct adapter, tid_release_task);
1631
1632         spin_lock_bh(&adap->tid_release_lock);
1633         while (adap->tid_release_head) {
1634                 void **p = adap->tid_release_head;
1635                 unsigned int chan = (uintptr_t)p & 3;
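                     /* Strip the channel tag from the low bits to recover
                      * the real table pointer.
                      */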
1636                 p = (void *)p - chan;
1637
1638                 adap->tid_release_head = *p;
1639                 *p = NULL;
1640                 spin_unlock_bh(&adap->tid_release_lock);
1641
1642                 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1643                                          GFP_KERNEL)))
1644                         schedule_timeout_uninterruptible(1);
1645
1646                 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1647                 t4_ofld_send(adap, skb);
1648                 spin_lock_bh(&adap->tid_release_lock);
1649         }
1650         adap->tid_release_task_busy = false;
1651         spin_unlock_bh(&adap->tid_release_lock);
1652 }
1653
1654 /*
1655  * Release a TID and inform HW.  If we are unable to allocate the release
1656  * message we defer to a work queue.
1657  */
1658 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
1659 {
1660         void *old;
1661         struct sk_buff *skb;
1662         struct adapter *adap = container_of(t, struct adapter, tids);
1663
1664         old = t->tid_tab[tid];
1665         skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
1666         if (likely(skb)) {
1667                 t->tid_tab[tid] = NULL;
1668                 mk_tid_release(skb, chan, tid);
1669                 t4_ofld_send(adap, skb);
1670         } else {
1671                 cxgb4_queue_tid_release(t, chan, tid);
             }
1672         if (old)
1673                 atomic_dec(&t->tids_in_use);
1674 }
1675 EXPORT_SYMBOL(cxgb4_remove_tid);
1676
1677 /*
1678  * Allocate and initialize the TID tables.  Returns 0 on success.
1679  */
1680 static int tid_init(struct tid_info *t)
1681 {
1682         size_t size;
1683         unsigned int stid_bmap_size;
1684         unsigned int natids = t->natids;
1685         struct adapter *adap = container_of(t, struct adapter, tids);
1686
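             /* All of the TID tables are carved out of a single contiguous
              * allocation, laid out in order as: tid_tab, atid_tab,
              * stid_tab (including server filter entries), stid_bmap and
              * ftid_tab (including server filter entries).
              */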
1687         stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
1688         size = t->ntids * sizeof(*t->tid_tab) +
1689                natids * sizeof(*t->atid_tab) +
1690                t->nstids * sizeof(*t->stid_tab) +
1691                t->nsftids * sizeof(*t->stid_tab) +
1692                stid_bmap_size * sizeof(long) +
1693                t->nftids * sizeof(*t->ftid_tab) +
1694                t->nsftids * sizeof(*t->ftid_tab);
1695
1696         t->tid_tab = t4_alloc_mem(size);
1697         if (!t->tid_tab)
1698                 return -ENOMEM;
1699
1700         t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1701         t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
1702         t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
1703         t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1704         spin_lock_init(&t->stid_lock);
1705         spin_lock_init(&t->atid_lock);
1706
1707         t->stids_in_use = 0;
1708         t->afree = NULL;
1709         t->atids_in_use = 0;
1710         atomic_set(&t->tids_in_use, 0);
1711
1712         /* Set up the free list for atid_tab and clear the stid bitmap. */
1713         if (natids) {
1714                 while (--natids)
1715                         t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1716                 t->afree = t->atid_tab;
1717         }
1718         bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
1719         /* Reserve stid 0 for T4/T5 adapters */
1720         if (!t->stid_base &&
1721             (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
1722                 __set_bit(0, t->stid_bmap);
1723
1724         return 0;
1725 }
1726
1727 /**
1728  *      cxgb4_create_server - create an IP server
1729  *      @dev: the device
1730  *      @stid: the server TID
1731  *      @sip: local IP address to bind server to
1732  *      @sport: the server's TCP port
      *      @vlan: VLAN tag to match (presently unused)
1733  *      @queue: queue to direct messages from this server to
1734  *
1735  *      Create an IP server for the given port and address.
1736  *      Returns <0 on error and one of the %NET_XMIT_* values on success.
1737  */
1738 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
1739                         __be32 sip, __be16 sport, __be16 vlan,
1740                         unsigned int queue)
1741 {
1742         unsigned int chan;
1743         struct sk_buff *skb;
1744         struct adapter *adap;
1745         struct cpl_pass_open_req *req;
1746         int ret;
1747
1748         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1749         if (!skb)
1750                 return -ENOMEM;
1751
1752         adap = netdev2adap(dev);
1753         req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
1754         INIT_TP_WR(req, 0);
1755         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
1756         req->local_port = sport;
1757         req->peer_port = htons(0);
1758         req->local_ip = sip;
1759         req->peer_ip = htonl(0);
1760         chan = rxq_to_chan(&adap->sge, queue);
1761         req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1762         req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1763                                 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1764         ret = t4_mgmt_tx(adap, skb);
1765         return net_xmit_eval(ret);
1766 }
1767 EXPORT_SYMBOL(cxgb4_create_server);
1768
1769 /**
      *      cxgb4_create_server6 - create an IPv6 server
1770  *      @dev: the device
1771  *      @stid: the server TID
1772  *      @sip: local IPv6 address to bind server to
1773  *      @sport: the server's TCP port
1774  *      @queue: queue to direct messages from this server to
1775  *
1776  *      Create an IPv6 server for the given port and address.
1777  *      Returns <0 on error and one of the %NET_XMIT_* values on success.
1778  */
1779 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
1780                          const struct in6_addr *sip, __be16 sport,
1781                          unsigned int queue)
1782 {
1783         unsigned int chan;
1784         struct sk_buff *skb;
1785         struct adapter *adap;
1786         struct cpl_pass_open_req6 *req;
1787         int ret;
1788
1789         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1790         if (!skb)
1791                 return -ENOMEM;
1792
1793         adap = netdev2adap(dev);
1794         req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
1795         INIT_TP_WR(req, 0);
1796         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
1797         req->local_port = sport;
1798         req->peer_port = htons(0);
1799         req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1800         req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1801         req->peer_ip_hi = cpu_to_be64(0);
1802         req->peer_ip_lo = cpu_to_be64(0);
1803         chan = rxq_to_chan(&adap->sge, queue);
1804         req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1805         req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1806                                 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1807         ret = t4_mgmt_tx(adap, skb);
1808         return net_xmit_eval(ret);
1809 }
1810 EXPORT_SYMBOL(cxgb4_create_server6);
1811
1812 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
1813                         unsigned int queue, bool ipv6)
1814 {
1815         struct sk_buff *skb;
1816         struct adapter *adap;
1817         struct cpl_close_listsvr_req *req;
1818         int ret;
1819
1820         adap = netdev2adap(dev);
1821
1822         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1823         if (!skb)
1824                 return -ENOMEM;
1825
1826         req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
1827         INIT_TP_WR(req, 0);
1828         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
1829         req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
1830                                 LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
1831         ret = t4_mgmt_tx(adap, skb);
1832         return net_xmit_eval(ret);
1833 }
1834 EXPORT_SYMBOL(cxgb4_remove_server);
1835
1836 /**
1837  *      cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
1838  *      @mtus: the HW MTU table
1839  *      @mtu: the target MTU
1840  *      @idx: index of selected entry in the MTU table
1841  *
1842  *      Returns the index and the value in the HW MTU table that is closest to
1843  *      but does not exceed @mtu, unless @mtu is smaller than any value in the
1844  *      table, in which case that smallest available value is selected.
1845  */
1846 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
1847                             unsigned int *idx)
1848 {
1849         unsigned int i = 0;
1850
1851         while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
1852                 ++i;
1853         if (idx)
1854                 *idx = i;
1855         return mtus[i];
1856 }
1857 EXPORT_SYMBOL(cxgb4_best_mtu);
1858
1859 /**
1860  *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
1861  *     @mtus: the HW MTU table
1862  *     @header_size: Header Size
1863  *     @data_size_max: maximum Data Segment Size
1864  *     @data_size_align: desired Data Segment Size Alignment (2^N)
1865  *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
1866  *
1867  *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
1868  *     MTU Table based solely on a Maximum MTU parameter, we break that
1869  *     parameter up into a Header Size and Maximum Data Segment Size, and
1870  *     provide a desired Data Segment Size Alignment.  If we find an MTU in
1871  *     the Hardware MTU Table which will result in a Data Segment Size with
1872  *     the requested alignment _and_ that MTU isn't "too far" from the
1873  *     closest MTU, then we'll return that rather than the closest MTU.
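      *
      *     For example, assuming the default hardware MTU table: with a
      *     header_size of 40 and a data_size_align of 8, MTU 1500 yields a
      *     1460-byte data segment (not 8-byte aligned) while the nearby
      *     table entry 1488 yields 1448, which is aligned, so 1488 would
      *     be returned in preference to 1500.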
1874  */
1875 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
1876                                     unsigned short header_size,
1877                                     unsigned short data_size_max,
1878                                     unsigned short data_size_align,
1879                                     unsigned int *mtu_idxp)
1880 {
1881         unsigned short max_mtu = header_size + data_size_max;
1882         unsigned short data_size_align_mask = data_size_align - 1;
1883         int mtu_idx, aligned_mtu_idx;
1884
1885         /* Scan the MTU Table till we find an MTU which is larger than our
1886          * Maximum MTU or we reach the end of the table.  Along the way,
1887          * record the last MTU found, if any, which will result in a Data
1888          * Segment Length matching the requested alignment.
1889          */
1890         for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
1891                 unsigned short data_size = mtus[mtu_idx] - header_size;
1892
1893                 /* If this MTU minus the Header Size would result in a
1894                  * Data Segment Size of the desired alignment, remember it.
1895                  */
1896                 if ((data_size & data_size_align_mask) == 0)
1897                         aligned_mtu_idx = mtu_idx;
1898
1899                 /* If we're not at the end of the Hardware MTU Table and the
1900                  * next element is larger than our Maximum MTU, drop out of
1901                  * the loop.
1902                  */
1903                 if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
1904                         break;
1905         }
1906
1907         /* If we fell out of the loop because we ran to the end of the table,
1908          * then we just have to use the last [largest] entry.
1909          */
1910         if (mtu_idx == NMTUS)
1911                 mtu_idx--;
1912
1913         /* If we found an MTU which resulted in the requested Data Segment
1914          * Length alignment and that's "not far" from the largest MTU which is
1915          * less than or equal to the maximum MTU, then use that.
1916          */
1917         if (aligned_mtu_idx >= 0 &&
1918             mtu_idx - aligned_mtu_idx <= 1)
1919                 mtu_idx = aligned_mtu_idx;
1920
1921         /* If the caller has passed in an MTU Index pointer, pass the
1922          * MTU Index back.  Return the MTU value.
1923          */
1924         if (mtu_idxp)
1925                 *mtu_idxp = mtu_idx;
1926         return mtus[mtu_idx];
1927 }
1928 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
1929
1930 /**
1931  *      cxgb4_port_chan - get the HW channel of a port
1932  *      @dev: the net device for the port
1933  *
1934  *      Return the HW Tx channel of the given port.
1935  */
1936 unsigned int cxgb4_port_chan(const struct net_device *dev)
1937 {
1938         return netdev2pinfo(dev)->tx_chan;
1939 }
1940 EXPORT_SYMBOL(cxgb4_port_chan);
1941
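     /* Return the current occupancy of the SGE doorbell FIFO: the
      * low-priority count if @lpfifo is non-zero, otherwise the
      * high-priority count.  T4 packs both counts into the first status
      * register; T5 splits them across two.
      */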
1942 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
1943 {
1944         struct adapter *adap = netdev2adap(dev);
1945         u32 v1, v2, lp_count, hp_count;
1946
1947         v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
1948         v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
1949         if (is_t4(adap->params.chip)) {
1950                 lp_count = LP_COUNT_G(v1);
1951                 hp_count = HP_COUNT_G(v1);
1952         } else {
1953                 lp_count = LP_COUNT_T5_G(v1);
1954                 hp_count = HP_COUNT_T5_G(v2);
1955         }
1956         return lpfifo ? lp_count : hp_count;
1957 }
1958 EXPORT_SYMBOL(cxgb4_dbfifo_count);
1959
1960 /**
1961  *      cxgb4_port_viid - get the VI id of a port
1962  *      @dev: the net device for the port
1963  *
1964  *      Return the VI id of the given port.
1965  */
1966 unsigned int cxgb4_port_viid(const struct net_device *dev)
1967 {
1968         return netdev2pinfo(dev)->viid;
1969 }
1970 EXPORT_SYMBOL(cxgb4_port_viid);
1971
1972 /**
1973  *      cxgb4_port_idx - get the index of a port
1974  *      @dev: the net device for the port
1975  *
1976  *      Return the index of the given port.
1977  */
1978 unsigned int cxgb4_port_idx(const struct net_device *dev)
1979 {
1980         return netdev2pinfo(dev)->port_id;
1981 }
1982 EXPORT_SYMBOL(cxgb4_port_idx);
1983
1984 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
1985                          struct tp_tcp_stats *v6)
1986 {
1987         struct adapter *adap = pci_get_drvdata(pdev);
1988
1989         spin_lock(&adap->stats_lock);
1990         t4_tp_get_tcp_stats(adap, v4, v6);
1991         spin_unlock(&adap->stats_lock);
1992 }
1993 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
1994
1995 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
1996                       const unsigned int *pgsz_order)
1997 {
1998         struct adapter *adap = netdev2adap(dev);
1999
2000         t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
2001         t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
2002                      HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
2003                      HPZ3_V(pgsz_order[3]));
2004 }
2005 EXPORT_SYMBOL(cxgb4_iscsi_init);
2006
2007 int cxgb4_flush_eq_cache(struct net_device *dev)
2008 {
2009         struct adapter *adap = netdev2adap(dev);
2010
2011         return t4_sge_ctxt_flush(adap, adap->mbox);
2012 }
2013 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2014
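     /* Read an egress queue's hardware producer/consumer indices directly
      * from its context image in adapter memory.  The address computation
      * below assumes 24-byte queue contexts with the indices packed into
      * the 8-byte word at offset 8.
      */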
2015 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2016 {
2017         u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
2018         __be64 indices;
2019         int ret;
2020
2021         spin_lock(&adap->win0_lock);
2022         ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
2023                            sizeof(indices), (__be32 *)&indices,
2024                            T4_MEMORY_READ);
2025         spin_unlock(&adap->win0_lock);
2026         if (!ret) {
2027                 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2028                 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
2029         }
2030         return ret;
2031 }
2032
2033 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2034                         u16 size)
2035 {
2036         struct adapter *adap = netdev2adap(dev);
2037         u16 hw_pidx, hw_cidx;
2038         int ret;
2039
2040         ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2041         if (ret)
2042                 goto out;
2043
2044         if (pidx != hw_pidx) {
2045                 u16 delta;
2046                 u32 val;
2047
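                 /* How far the software producer index has advanced past
                  * the hardware's copy, allowing for ring wrap-around.
                  */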
2048                 if (pidx >= hw_pidx)
2049                         delta = pidx - hw_pidx;
2050                 else
2051                         delta = size - hw_pidx + pidx;
2052
2053                 if (is_t4(adap->params.chip))
2054                         val = PIDX_V(delta);
2055                 else
2056                         val = PIDX_T5_V(delta);
2057                 wmb();
2058                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2059                              QID_V(qid) | val);
2060         }
2061 out:
2062         return ret;
2063 }
2064 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
2065
2066 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
2067 {
2068         struct adapter *adap;
2069         u32 offset, memtype, memaddr;
2070         u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
2071         u32 edc0_end, edc1_end, mc0_end, mc1_end;
2072         int ret;
2073
2074         adap = netdev2adap(dev);
2075
2076         offset = ((stag >> 8) * 32) + adap->vres.stag.start;
2077
2078         /* Figure out where the offset lands in the Memory Type/Address scheme.
2079          * This code assumes that the memory is laid out starting at offset 0
2080          * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
2081          * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
2082          * MC0, and some have both MC0 and MC1.
2083          */
2084         size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
2085         edc0_size = EDRAM0_SIZE_G(size) << 20;
2086         size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
2087         edc1_size = EDRAM1_SIZE_G(size) << 20;
2088         size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
2089         mc0_size = EXT_MEM0_SIZE_G(size) << 20;
2090
2091         edc0_end = edc0_size;
2092         edc1_end = edc0_end + edc1_size;
2093         mc0_end = edc1_end + mc0_size;
2094
2095         if (offset < edc0_end) {
2096                 memtype = MEM_EDC0;
2097                 memaddr = offset;
2098         } else if (offset < edc1_end) {
2099                 memtype = MEM_EDC1;
2100                 memaddr = offset - edc0_end;
2101         } else {
2102                 if (offset < mc0_end) {
2103                         memtype = MEM_MC0;
2104                         memaddr = offset - edc1_end;
2105                 } else if (is_t5(adap->params.chip)) {
2106                         size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2107                         mc1_size = EXT_MEM1_SIZE_G(size) << 20;
2108                         mc1_end = mc0_end + mc1_size;
2109                         if (offset < mc1_end) {
2110                                 memtype = MEM_MC1;
2111                                 memaddr = offset - mc0_end;
2112                         } else {
2113                                 /* offset beyond the end of any memory */
2114                                 goto err;
2115                         }
2116                 } else {
2117                         /* T4/T6 cards have only a single memory channel */
2118                         goto err;
2119                 }
2120         }
2121
2122         spin_lock(&adap->win0_lock);
2123         ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
2124         spin_unlock(&adap->win0_lock);
2125         return ret;
2126
2127 err:
2128         dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
2129                 stag, offset);
2130         return -EINVAL;
2131 }
2132 EXPORT_SYMBOL(cxgb4_read_tpte);
2133
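     /* Return the free-running 64-bit SGE timestamp, assembled from the
      * LO/HI register pair.
      */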
2134 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
2135 {
2136         u32 hi, lo;
2137         struct adapter *adap;
2138
2139         adap = netdev2adap(dev);
2140         lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
2141         hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
2142
2143         return ((u64)hi << 32) | (u64)lo;
2144 }
2145 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
2146
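     /* Translate an SGE queue ID and queue type into the BAR2 offset of
      * the queue's doorbell/GTS registers and the BAR2 queue ID to use in
      * doorbell writes.  @user selects offsets suitable for mapping into
      * user space.
      */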
2147 int cxgb4_bar2_sge_qregs(struct net_device *dev,
2148                          unsigned int qid,
2149                          enum cxgb4_bar2_qtype qtype,
2150                          int user,
2151                          u64 *pbar2_qoffset,
2152                          unsigned int *pbar2_qid)
2153 {
2154         return t4_bar2_sge_qregs(netdev2adap(dev),
2155                                  qid,
2156                                  (qtype == CXGB4_BAR2_QTYPE_EGRESS
2157                                   ? T4_BAR2_QTYPE_EGRESS
2158                                   : T4_BAR2_QTYPE_INGRESS),
2159                                  user,
2160                                  pbar2_qoffset,
2161                                  pbar2_qid);
2162 }
2163 EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
2164
2165 static struct pci_driver cxgb4_driver;
2166
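     /* When a neighbour entry changes on a netdev (or on the real device
      * beneath a VLAN) that belongs to one of our adapters, update the
      * corresponding L2T entry.
      */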
2167 static void check_neigh_update(struct neighbour *neigh)
2168 {
2169         const struct device *parent;
2170         const struct net_device *netdev = neigh->dev;
2171
2172         if (netdev->priv_flags & IFF_802_1Q_VLAN)
2173                 netdev = vlan_dev_real_dev(netdev);
2174         parent = netdev->dev.parent;
2175         if (parent && parent->driver == &cxgb4_driver.driver)
2176                 t4_l2t_update(dev_get_drvdata(parent), neigh);
2177 }
2178
2179 static int netevent_cb(struct notifier_block *nb, unsigned long event,
2180                        void *data)
2181 {
2182         switch (event) {
2183         case NETEVENT_NEIGH_UPDATE:
2184                 check_neigh_update(data);
2185                 break;
2186         case NETEVENT_REDIRECT:
2187         default:
2188                 break;
2189         }
2190         return 0;
2191 }
2192
2193 static bool netevent_registered;
2194 static struct notifier_block cxgb4_netevent_nb = {
2195         .notifier_call = netevent_cb
2196 };
2197
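     /* Poll, sleeping roughly @usecs between reads, until both the low-
      * and high-priority doorbell FIFOs have drained.
      */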
2198 static void drain_db_fifo(struct adapter *adap, int usecs)
2199 {
2200         u32 v1, v2, lp_count, hp_count;
2201
2202         do {
2203                 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2204                 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2205                 if (is_t4(adap->params.chip)) {
2206                         lp_count = LP_COUNT_G(v1);
2207                         hp_count = HP_COUNT_G(v1);
2208                 } else {
2209                         lp_count = LP_COUNT_T5_G(v1);
2210                         hp_count = HP_COUNT_T5_G(v2);
2211                 }
2212
2213                 if (lp_count == 0 && hp_count == 0)
2214                         break;
2215                 set_current_state(TASK_UNINTERRUPTIBLE);
2216                 schedule_timeout(usecs_to_jiffies(usecs));
2217         } while (1);
2218 }
2219
2220 static void disable_txq_db(struct sge_txq *q)
2221 {
2222         unsigned long flags;
2223
2224         spin_lock_irqsave(&q->db_lock, flags);
2225         q->db_disabled = 1;
2226         spin_unlock_irqrestore(&q->db_lock, flags);
2227 }
2228
2229 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
2230 {
2231         spin_lock_irq(&q->db_lock);
2232         if (q->db_pidx_inc) {
2233                 /* Make sure that all writes to the TX descriptors
2234                  * are committed before we tell HW about them.
2235                  */
2236                 wmb();
2237                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2238                              QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
2239                 q->db_pidx_inc = 0;
2240         }
2241         q->db_disabled = 0;
2242         spin_unlock_irq(&q->db_lock);
2243 }
2244
2245 static void disable_dbs(struct adapter *adap)
2246 {
2247         int i;
2248
2249         for_each_ethrxq(&adap->sge, i)
2250                 disable_txq_db(&adap->sge.ethtxq[i].q);
2251         for_each_ofldrxq(&adap->sge, i)
2252                 disable_txq_db(&adap->sge.ofldtxq[i].q);
2253         for_each_port(adap, i)
2254                 disable_txq_db(&adap->sge.ctrlq[i].q);
2255 }
2256
2257 static void enable_dbs(struct adapter *adap)
2258 {
2259         int i;
2260
2261         for_each_ethrxq(&adap->sge, i)
2262                 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
2263         for_each_ofldrxq(&adap->sge, i)
2264                 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
2265         for_each_port(adap, i)
2266                 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
2267 }
2268
2269 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2270 {
2271         if (adap->uld_handle[CXGB4_ULD_RDMA])
2272                 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
2273                                 cmd);
2274 }
2275
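     /* Work handler for a doorbell FIFO-full condition: wait for the FIFO
      * to drain, re-enable doorbells, tell the RDMA ULD the FIFO is empty
      * again and re-arm the doorbell FIFO interrupt(s).
      */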
2276 static void process_db_full(struct work_struct *work)
2277 {
2278         struct adapter *adap;
2279
2280         adap = container_of(work, struct adapter, db_full_task);
2281
2282         drain_db_fifo(adap, dbfifo_drain_delay);
2283         enable_dbs(adap);
2284         notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2285         if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2286                 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2287                                  DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
2288                                  DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
2289         else
2290                 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2291                                  DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
2292 }
2293
2294 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2295 {
2296         u16 hw_pidx, hw_cidx;
2297         int ret;
2298
2299         spin_lock_irq(&q->db_lock);
2300         ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2301         if (ret)
2302                 goto out;
2303         if (q->db_pidx != hw_pidx) {
2304                 u16 delta;
2305                 u32 val;
2306
2307                 if (q->db_pidx >= hw_pidx)
2308                         delta = q->db_pidx - hw_pidx;
2309                 else
2310                         delta = q->size - hw_pidx + q->db_pidx;
2311
2312                 if (is_t4(adap->params.chip))
2313                         val = PIDX_V(delta);
2314                 else
2315                         val = PIDX_T5_V(delta);
2316                 wmb();
2317                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2318                              QID_V(q->cntxt_id) | val);
2319         }
2320 out:
2321         q->db_disabled = 0;
2322         q->db_pidx_inc = 0;
2323         spin_unlock_irq(&q->db_lock);
2324         if (ret)
2325                 CH_WARN(adap, "DB drop recovery failed.\n");
2326 }
2327 static void recover_all_queues(struct adapter *adap)
2328 {
2329         int i;
2330
2331         for_each_ethrxq(&adap->sge, i)
2332                 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2333         for_each_ofldrxq(&adap->sge, i)
2334                 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
2335         for_each_port(adap, i)
2336                 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2337 }
2338
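     /* Work handler for a dropped doorbell.  On T4 we drain the FIFO and
      * replay the doorbells of every Tx queue; on T5 the hardware latches
      * which doorbell it dropped, so only that one needs to be re-issued.
      */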
2339 static void process_db_drop(struct work_struct *work)
2340 {
2341         struct adapter *adap;
2342
2343         adap = container_of(work, struct adapter, db_drop_task);
2344
2345         if (is_t4(adap->params.chip)) {
2346                 drain_db_fifo(adap, dbfifo_drain_delay);
2347                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2348                 drain_db_fifo(adap, dbfifo_drain_delay);
2349                 recover_all_queues(adap);
2350                 drain_db_fifo(adap, dbfifo_drain_delay);
2351                 enable_dbs(adap);
2352                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2353         } else if (is_t5(adap->params.chip)) {
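                 /* Recover the queue ID and producer-index increment that
                  * the hardware latched when it dropped the doorbell, then
                  * replay that doorbell through the queue's BAR2 register.
                  */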
2354                 u32 dropped_db = t4_read_reg(adap, 0x010ac);
2355                 u16 qid = (dropped_db >> 15) & 0x1ffff;
2356                 u16 pidx_inc = dropped_db & 0x1fff;
2357                 u64 bar2_qoffset;
2358                 unsigned int bar2_qid;
2359                 int ret;
2360
2361                 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
2362                                         0, &bar2_qoffset, &bar2_qid);
2363                 if (ret)
2364                         dev_err(adap->pdev_dev,
2365                                 "doorbell drop recovery: qid=%d, pidx_inc=%d\n",
                                     qid, pidx_inc);
2366                 else
2367                         writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
2368                                adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2369
2370                 /* Re-enable BAR2 WC */
2371                 t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
2372         }
2373
2374         if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2375                 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
2376 }
2377
2378 void t4_db_full(struct adapter *adap)
2379 {
2380         if (is_t4(adap->params.chip)) {
2381                 disable_dbs(adap);
2382                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2383                 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2384                                  DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
2385                 queue_work(adap->workq, &adap->db_full_task);
2386         }
2387 }
2388
2389 void t4_db_dropped(struct adapter *adap)
2390 {
2391         if (is_t4(adap->params.chip)) {
2392                 disable_dbs(adap);
2393                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2394         }
2395         queue_work(adap->workq, &adap->db_drop_task);
2396 }
2397
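     /* Build the cxgb4_lld_info block describing this adapter and attach
      * one ULD to it via the ULD's add() method; on success record the
      * returned handle and, if the adapter is already fully initialized,
      * notify the ULD that the adapter is up.
      */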
2398 static void uld_attach(struct adapter *adap, unsigned int uld)
2399 {
2400         void *handle;
2401         struct cxgb4_lld_info lli;
2402         unsigned short i;
2403
2404         lli.pdev = adap->pdev;
2405         lli.pf = adap->pf;
2406         lli.l2t = adap->l2t;
2407         lli.tids = &adap->tids;
2408         lli.ports = adap->port;
2409         lli.vr = &adap->vres;
2410         lli.mtus = adap->params.mtus;
2411         if (uld == CXGB4_ULD_RDMA) {
2412                 lli.rxq_ids = adap->sge.rdma_rxq;
2413                 lli.ciq_ids = adap->sge.rdma_ciq;
2414                 lli.nrxq = adap->sge.rdmaqs;
2415                 lli.nciq = adap->sge.rdmaciqs;
2416         } else if (uld == CXGB4_ULD_ISCSI) {
2417                 lli.rxq_ids = adap->sge.ofld_rxq;
2418                 lli.nrxq = adap->sge.ofldqsets;
2419         }
2420         lli.ntxq = adap->sge.ofldqsets;
2421         lli.nchan = adap->params.nports;
2422         lli.nports = adap->params.nports;
2423         lli.wr_cred = adap->params.ofldq_wr_cred;
2424         lli.adapter_type = adap->params.chip;
2425         lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
2426         lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
2427         lli.udb_density = 1 << adap->params.sge.eq_qpp;
2428         lli.ucq_density = 1 << adap->params.sge.iq_qpp;
2429         lli.filt_mode = adap->params.tp.vlan_pri_map;
2430         /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
2431         for (i = 0; i < NCHAN; i++)
2432                 lli.tx_modq[i] = i;
2433         lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
2434         lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
2435         lli.fw_vers = adap->params.fw_vers;
2436         lli.dbfifo_int_thresh = dbfifo_int_thresh;
2437         lli.sge_ingpadboundary = adap->sge.fl_align;
2438         lli.sge_egrstatuspagesize = adap->sge.stat_len;
2439         lli.sge_pktshift = adap->sge.pktshift;
2440         lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
2441         lli.max_ordird_qp = adap->params.max_ordird_qp;
2442         lli.max_ird_adapter = adap->params.max_ird_adapter;
2443         lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
2444         lli.nodeid = dev_to_node(adap->pdev_dev);
2445
2446         handle = ulds[uld].add(&lli);
2447         if (IS_ERR(handle)) {
2448                 dev_warn(adap->pdev_dev,
2449                          "could not attach to the %s driver, error %ld\n",
2450                          uld_str[uld], PTR_ERR(handle));
2451                 return;
2452         }
2453
2454         adap->uld_handle[uld] = handle;
2455
2456         if (!netevent_registered) {
2457                 register_netevent_notifier(&cxgb4_netevent_nb);
2458                 netevent_registered = true;
2459         }
2460
2461         if (adap->flags & FULL_INIT_DONE)
2462                 ulds[uld].state_change(handle, CXGB4_STATE_UP);
2463 }
2464
2465 static void attach_ulds(struct adapter *adap)
2466 {
2467         unsigned int i;
2468
2469         spin_lock(&adap_rcu_lock);
2470         list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
2471         spin_unlock(&adap_rcu_lock);
2472
2473         mutex_lock(&uld_mutex);
2474         list_add_tail(&adap->list_node, &adapter_list);
2475         for (i = 0; i < CXGB4_ULD_MAX; i++)
2476                 if (ulds[i].add)
2477                         uld_attach(adap, i);
2478         mutex_unlock(&uld_mutex);
2479 }
2480
2481 static void detach_ulds(struct adapter *adap)
2482 {
2483         unsigned int i;
2484
2485         mutex_lock(&uld_mutex);
2486         list_del(&adap->list_node);
2487         for (i = 0; i < CXGB4_ULD_MAX; i++)
2488                 if (adap->uld_handle[i]) {
2489                         ulds[i].state_change(adap->uld_handle[i],
2490                                              CXGB4_STATE_DETACH);
2491                         adap->uld_handle[i] = NULL;
2492                 }
2493         if (netevent_registered && list_empty(&adapter_list)) {
2494                 unregister_netevent_notifier(&cxgb4_netevent_nb);
2495                 netevent_registered = false;
2496         }
2497         mutex_unlock(&uld_mutex);
2498
2499         spin_lock(&adap_rcu_lock);
2500         list_del_rcu(&adap->rcu_node);
2501         spin_unlock(&adap_rcu_lock);
2502 }
2503
2504 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2505 {
2506         unsigned int i;
2507
2508         mutex_lock(&uld_mutex);
2509         for (i = 0; i < CXGB4_ULD_MAX; i++)
2510                 if (adap->uld_handle[i])
2511                         ulds[i].state_change(adap->uld_handle[i], new_state);
2512         mutex_unlock(&uld_mutex);
2513 }
2514
2515 /**
2516  *      cxgb4_register_uld - register an upper-layer driver
2517  *      @type: the ULD type
2518  *      @p: the ULD methods
2519  *
2520  *      Registers an upper-layer driver with this driver and notifies the ULD
2521  *      about any presently available devices that support its type.  Returns
2522  *      %-EBUSY if a ULD of the same type is already registered.
2523  */
2524 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2525 {
2526         int ret = 0;
2527         struct adapter *adap;
2528
2529         if (type >= CXGB4_ULD_MAX)
2530                 return -EINVAL;
2531         mutex_lock(&uld_mutex);
2532         if (ulds[type].add) {
2533                 ret = -EBUSY;
2534                 goto out;
2535         }
2536         ulds[type] = *p;
2537         list_for_each_entry(adap, &adapter_list, list_node)
2538                 uld_attach(adap, type);
2539 out:    mutex_unlock(&uld_mutex);
2540         return ret;
2541 }
2542 EXPORT_SYMBOL(cxgb4_register_uld);
2543
2544 /**
2545  *      cxgb4_unregister_uld - unregister an upper-layer driver
2546  *      @type: the ULD type
2547  *
2548  *      Unregisters an existing upper-layer driver.
2549  */
2550 int cxgb4_unregister_uld(enum cxgb4_uld type)
2551 {
2552         struct adapter *adap;
2553
2554         if (type >= CXGB4_ULD_MAX)
2555                 return -EINVAL;
2556         mutex_lock(&uld_mutex);
2557         list_for_each_entry(adap, &adapter_list, list_node)
2558                 adap->uld_handle[type] = NULL;
2559         ulds[type].add = NULL;
2560         mutex_unlock(&uld_mutex);
2561         return 0;
2562 }
2563 EXPORT_SYMBOL(cxgb4_unregister_uld);
2564
2565 #if IS_ENABLED(CONFIG_IPV6)
2566 static int cxgb4_inet6addr_handler(struct notifier_block *this,
2567                                    unsigned long event, void *data)
2568 {
2569         struct inet6_ifaddr *ifa = data;
2570         struct net_device *event_dev = ifa->idev->dev;
2571         const struct device *parent = NULL;
2572 #if IS_ENABLED(CONFIG_BONDING)
2573         struct adapter *adap;
2574 #endif
2575         if (event_dev->priv_flags & IFF_802_1Q_VLAN)
2576                 event_dev = vlan_dev_real_dev(event_dev);
2577 #if IS_ENABLED(CONFIG_BONDING)
2578         if (event_dev->flags & IFF_MASTER) {
2579                 list_for_each_entry(adap, &adapter_list, list_node) {
2580                         switch (event) {
2581                         case NETDEV_UP:
2582                                 cxgb4_clip_get(adap->port[0],
2583                                                (const u32 *)ifa, 1);
2584                                 break;
2585                         case NETDEV_DOWN:
2586                                 cxgb4_clip_release(adap->port[0],
2587                                                    (const u32 *)ifa, 1);
2588                                 break;
2589                         default:
2590                                 break;
2591                         }
2592                 }
2593                 return NOTIFY_OK;
2594         }
2595 #endif
2596
2597         if (event_dev)
2598                 parent = event_dev->dev.parent;
2599
2600         if (parent && parent->driver == &cxgb4_driver.driver) {
2601                 switch (event) {
2602                 case NETDEV_UP:
2603                         cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
2604                         break;
2605                 case NETDEV_DOWN:
2606                         cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
2607                         break;
2608                 default:
2609                         break;
2610                 }
2611         }
2612         return NOTIFY_OK;
2613 }
2614
2615 static bool inet6addr_registered;
2616 static struct notifier_block cxgb4_inet6addr_notifier = {
2617         .notifier_call = cxgb4_inet6addr_handler
2618 };
2619
2620 static void update_clip(const struct adapter *adap)
2621 {
2622         int i;
2623         struct net_device *dev;
2624         int ret;
2625
2626         rcu_read_lock();
2627
2628         for (i = 0; i < MAX_NPORTS; i++) {
2629                 dev = adap->port[i];
2630                 ret = 0;
2631
2632                 if (dev)
2633                         ret = cxgb4_update_root_dev_clip(dev);
2634
2635                 if (ret < 0)
2636                         break;
2637         }
2638         rcu_read_unlock();
2639 }
2640 #endif /* IS_ENABLED(CONFIG_IPV6) */
2641
2642 /**
2643  *      cxgb_up - enable the adapter
2644  *      @adap: adapter being enabled
2645  *
2646  *      Called when the first port is enabled, this function performs the
2647  *      actions necessary to make an adapter operational, such as completing
2648  *      the initialization of HW modules, and enabling interrupts.
2649  *
2650  *      Must be called with the rtnl lock held.
2651  */
2652 static int cxgb_up(struct adapter *adap)
2653 {
2654         int err;
2655
2656         err = setup_sge_queues(adap);
2657         if (err)
2658                 goto out;
2659         err = setup_rss(adap);
2660         if (err)
2661                 goto freeq;
2662
2663         if (adap->flags & USING_MSIX) {
2664                 name_msix_vecs(adap);
2665                 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2666                                   adap->msix_info[0].desc, adap);
2667                 if (err)
2668                         goto irq_err;
2669
2670                 err = request_msix_queue_irqs(adap);
2671                 if (err) {
2672                         free_irq(adap->msix_info[0].vec, adap);
2673                         goto irq_err;
2674                 }
2675         } else {
2676                 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2677                                   (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2678                                   adap->port[0]->name, adap);
2679                 if (err)
2680                         goto irq_err;
2681         }
2682         enable_rx(adap);
2683         t4_sge_start(adap);
2684         t4_intr_enable(adap);
2685         adap->flags |= FULL_INIT_DONE;
2686         notify_ulds(adap, CXGB4_STATE_UP);
2687 #if IS_ENABLED(CONFIG_IPV6)
2688         update_clip(adap);
2689 #endif
2690  out:
2691         return err;
2692  irq_err:
2693         dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2694  freeq:
2695         t4_free_sge_resources(adap);
2696         goto out;
2697 }
2698
2699 static void cxgb_down(struct adapter *adapter)
2700 {
2701         cancel_work_sync(&adapter->tid_release_task);
2702         cancel_work_sync(&adapter->db_full_task);
2703         cancel_work_sync(&adapter->db_drop_task);
2704         adapter->tid_release_task_busy = false;
2705         adapter->tid_release_head = NULL;
2706
2707         t4_sge_stop(adapter);
2708         t4_free_sge_resources(adapter);
2709         adapter->flags &= ~FULL_INIT_DONE;
2710 }
2711
2712 /*
2713  * net_device operations
2714  */
2715 static int cxgb_open(struct net_device *dev)
2716 {
2717         int err;
2718         struct port_info *pi = netdev_priv(dev);
2719         struct adapter *adapter = pi->adapter;
2720
2721         netif_carrier_off(dev);
2722
2723         if (!(adapter->flags & FULL_INIT_DONE)) {
2724                 err = cxgb_up(adapter);
2725                 if (err < 0)
2726                         return err;
2727         }
2728
2729         err = link_start(dev);
2730         if (!err)
2731                 netif_tx_start_all_queues(dev);
2732         return err;
2733 }
2734
2735 static int cxgb_close(struct net_device *dev)
2736 {
2737         struct port_info *pi = netdev_priv(dev);
2738         struct adapter *adapter = pi->adapter;
2739
2740         netif_tx_stop_all_queues(dev);
2741         netif_carrier_off(dev);
2742         return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
2743 }
2744
2745 /* Return an error number if the indicated filter isn't writable ...
2746  */
2747 static int writable_filter(struct filter_entry *f)
2748 {
2749         if (f->locked)
2750                 return -EPERM;
2751         if (f->pending)
2752                 return -EBUSY;
2753
2754         return 0;
2755 }
2756
2757 /* Delete the filter at the specified index (if valid), after checking for
2758  * the common problems with doing this, such as the filter being locked or
2759  * currently pending in another operation.
2760  */
2761 static int delete_filter(struct adapter *adapter, unsigned int fidx)
2762 {
2763         struct filter_entry *f;
2764         int ret;
2765
2766         if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
2767                 return -EINVAL;
2768
2769         f = &adapter->tids.ftid_tab[fidx];
2770         ret = writable_filter(f);
2771         if (ret)
2772                 return ret;
2773         if (f->valid)
2774                 return del_filter_wr(adapter, fidx);
2775
2776         return 0;
2777 }
2778
2779 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2780                 __be32 sip, __be16 sport, __be16 vlan,
2781                 unsigned int queue, unsigned char port, unsigned char mask)
2782 {
2783         int ret;
2784         struct filter_entry *f;
2785         struct adapter *adap;
2786         int i;
2787         u8 *val;
2788
2789         adap = netdev2adap(dev);
2790
2791         /* Adjust stid to correct filter index */
2792         stid -= adap->tids.sftid_base;
2793         stid += adap->tids.nftids;
2794
2795         /* Check to make sure the filter requested is writable ...
2796          */
2797         f = &adap->tids.ftid_tab[stid];
2798         ret = writable_filter(f);
2799         if (ret)
2800                 return ret;
2801
2802         /* Clear out any old resources being used by the filter before
2803          * we start constructing the new filter.
2804          */
2805         if (f->valid)
2806                 clear_filter(adap, f);
2807
2808         /* Clear out filter specifications */
2809         memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2810         f->fs.val.lport = cpu_to_be16(sport);
2811         f->fs.mask.lport  = ~0;
2812         val = (u8 *)&sip;
2813         if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2814                 for (i = 0; i < 4; i++) {
2815                         f->fs.val.lip[i] = val[i];
2816                         f->fs.mask.lip[i] = ~0;
2817                 }
2818                 if (adap->params.tp.vlan_pri_map & PORT_F) {
2819                         f->fs.val.iport = port;
2820                         f->fs.mask.iport = mask;
2821                 }
2822         }
2823
2824         if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2825                 f->fs.val.proto = IPPROTO_TCP;
2826                 f->fs.mask.proto = ~0;
2827         }
2828
2829         f->fs.dirsteer = 1;
2830         f->fs.iq = queue;
2831         /* Mark filter as locked */
2832         f->locked = 1;
2833         f->fs.rpttid = 1;
2834
2835         ret = set_filter_wr(adap, stid);
2836         if (ret) {
2837                 clear_filter(adap, f);
2838                 return ret;
2839         }
2840
2841         return 0;
2842 }
2843 EXPORT_SYMBOL(cxgb4_create_server_filter);
2844
2845 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2846                 unsigned int queue, bool ipv6)
2847 {
2848         int ret;
2849         struct filter_entry *f;
2850         struct adapter *adap;
2851
2852         adap = netdev2adap(dev);
2853
2854         /* Adjust stid to correct filter index */
2855         stid -= adap->tids.sftid_base;
2856         stid += adap->tids.nftids;
2857
2858         f = &adap->tids.ftid_tab[stid];
2859         /* Unlock the filter */
2860         f->locked = 0;
2861
2862         ret = delete_filter(adap, stid);
2863         if (ret)
2864                 return ret;
2865
2866         return 0;
2867 }
2868 EXPORT_SYMBOL(cxgb4_remove_server_filter);
2869
2870 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2871                                                 struct rtnl_link_stats64 *ns)
2872 {
2873         struct port_stats stats;
2874         struct port_info *p = netdev_priv(dev);
2875         struct adapter *adapter = p->adapter;
2876
2877         /* Block retrieving statistics during EEH error
2878          * recovery. Otherwise, the recovery might fail
2879          * and the PCI device will be removed permanently
2880          */
2881         spin_lock(&adapter->stats_lock);
2882         if (!netif_device_present(dev)) {
2883                 spin_unlock(&adapter->stats_lock);
2884                 return ns;
2885         }
2886         t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
2887                                  &p->stats_base);
2888         spin_unlock(&adapter->stats_lock);
2889
2890         ns->tx_bytes   = stats.tx_octets;
2891         ns->tx_packets = stats.tx_frames;
2892         ns->rx_bytes   = stats.rx_octets;
2893         ns->rx_packets = stats.rx_frames;
2894         ns->multicast  = stats.rx_mcast_frames;
2895
2896         /* detailed rx_errors */
2897         ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2898                                stats.rx_runt;
2899         ns->rx_over_errors   = 0;
2900         ns->rx_crc_errors    = stats.rx_fcs_err;
2901         ns->rx_frame_errors  = stats.rx_symbol_err;
2902         ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
2903                                stats.rx_ovflow2 + stats.rx_ovflow3 +
2904                                stats.rx_trunc0 + stats.rx_trunc1 +
2905                                stats.rx_trunc2 + stats.rx_trunc3;
2906         ns->rx_missed_errors = 0;
2907
2908         /* detailed tx_errors */
2909         ns->tx_aborted_errors   = 0;
2910         ns->tx_carrier_errors   = 0;
2911         ns->tx_fifo_errors      = 0;
2912         ns->tx_heartbeat_errors = 0;
2913         ns->tx_window_errors    = 0;
2914
2915         ns->tx_errors = stats.tx_error_frames;
2916         ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2917                 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2918         return ns;
2919 }
2920
2921 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2922 {
2923         unsigned int mbox;
2924         int ret = 0, prtad, devad;
2925         struct port_info *pi = netdev_priv(dev);
2926         struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2927
2928         switch (cmd) {
2929         case SIOCGMIIPHY:
2930                 if (pi->mdio_addr < 0)
2931                         return -EOPNOTSUPP;
2932                 data->phy_id = pi->mdio_addr;
2933                 break;
2934         case SIOCGMIIREG:
2935         case SIOCSMIIREG:
2936                 if (mdio_phy_id_is_c45(data->phy_id)) {
2937                         prtad = mdio_phy_id_prtad(data->phy_id);
2938                         devad = mdio_phy_id_devad(data->phy_id);
2939                 } else if (data->phy_id < 32) {
2940                         prtad = data->phy_id;
2941                         devad = 0;
2942                         data->reg_num &= 0x1f;
2943                 } else
2944                         return -EINVAL;
2945
2946                 mbox = pi->adapter->pf;
2947                 if (cmd == SIOCGMIIREG)
2948                         ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2949                                          data->reg_num, &data->val_out);
2950                 else
2951                         ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2952                                          data->reg_num, data->val_in);
2953                 break;
2954         default:
2955                 return -EOPNOTSUPP;
2956         }
2957         return ret;
2958 }
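/* Illustrative note (not part of the driver): a Clause-45 phy_id packs the
 * port and device addresses as MDIO_PHY_ID_C45 | (prtad << 5) | devad, so a
 * hypothetical prtad = 1 with devad = 1 (PMA/PMD) gives phy_id = 0x8021,
 * which mdio_phy_id_prtad()/mdio_phy_id_devad() above unpack again.
 */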
2959
2960 static void cxgb_set_rxmode(struct net_device *dev)
2961 {
2962         /* unfortunately we can't return errors to the stack */
2963         set_rxmode(dev, -1, false);
2964 }
2965
2966 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2967 {
2968         int ret;
2969         struct port_info *pi = netdev_priv(dev);
2970
2971         if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
2972                 return -EINVAL;
2973         ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
2974                             -1, -1, -1, true);
2975         if (!ret)
2976                 dev->mtu = new_mtu;
2977         return ret;
2978 }
2979
2980 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2981 {
2982         int ret;
2983         struct sockaddr *addr = p;
2984         struct port_info *pi = netdev_priv(dev);
2985
2986         if (!is_valid_ether_addr(addr->sa_data))
2987                 return -EADDRNOTAVAIL;
2988
2989         ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
2990                             pi->xact_addr_filt, addr->sa_data, true, true);
2991         if (ret < 0)
2992                 return ret;
2993
2994         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2995         pi->xact_addr_filt = ret;
2996         return 0;
2997 }
2998
2999 #ifdef CONFIG_NET_POLL_CONTROLLER
3000 static void cxgb_netpoll(struct net_device *dev)
3001 {
3002         struct port_info *pi = netdev_priv(dev);
3003         struct adapter *adap = pi->adapter;
3004
3005         if (adap->flags & USING_MSIX) {
3006                 int i;
3007                 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3008
3009                 for (i = pi->nqsets; i; i--, rx++)
3010                         t4_sge_intr_msix(0, &rx->rspq);
3011         } else
3012                 t4_intr_handler(adap)(0, adap);
3013 }
3014 #endif
3015
3016 static const struct net_device_ops cxgb4_netdev_ops = {
3017         .ndo_open             = cxgb_open,
3018         .ndo_stop             = cxgb_close,
3019         .ndo_start_xmit       = t4_eth_xmit,
3020         .ndo_select_queue     = cxgb_select_queue,
3021         .ndo_get_stats64      = cxgb_get_stats,
3022         .ndo_set_rx_mode      = cxgb_set_rxmode,
3023         .ndo_set_mac_address  = cxgb_set_mac_addr,
3024         .ndo_set_features     = cxgb_set_features,
3025         .ndo_validate_addr    = eth_validate_addr,
3026         .ndo_do_ioctl         = cxgb_ioctl,
3027         .ndo_change_mtu       = cxgb_change_mtu,
3028 #ifdef CONFIG_NET_POLL_CONTROLLER
3029         .ndo_poll_controller  = cxgb_netpoll,
3030 #endif
3031 #ifdef CONFIG_CHELSIO_T4_FCOE
3032         .ndo_fcoe_enable      = cxgb_fcoe_enable,
3033         .ndo_fcoe_disable     = cxgb_fcoe_disable,
3034 #endif /* CONFIG_CHELSIO_T4_FCOE */
3035 #ifdef CONFIG_NET_RX_BUSY_POLL
3036         .ndo_busy_poll        = cxgb_busy_poll,
3037 #endif
3038
3039 };
3040
3041 void t4_fatal_err(struct adapter *adap)
3042 {
3043         t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
3044         t4_intr_disable(adap);
3045         dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3046 }
3047
3048 static void setup_memwin(struct adapter *adap)
3049 {
3050         u32 nic_win_base = t4_get_util_window(adap);
3051
3052         t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3053 }
3054
3055 static void setup_memwin_rdma(struct adapter *adap)
3056 {
3057         if (adap->vres.ocq.size) {
3058                 u32 start;
3059                 unsigned int sz_kb;
3060
3061                 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3062                 start &= PCI_BASE_ADDRESS_MEM_MASK;
3063                 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3064                 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3065                 t4_write_reg(adap,
3066                              PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3067                              start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3068                 t4_write_reg(adap,
3069                              PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3070                              adap->vres.ocq.start);
3071                 t4_read_reg(adap,
3072                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3073         }
3074 }
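/* Illustrative sizing example (hypothetical value, not part of the driver):
 * the memory window size is programmed as log2 of the size in KB.  An
 * ocq.size of 100000 bytes rounds up to 131072, giving sz_kb = 128 and
 * WINDOW_V(ilog2(128)) = WINDOW_V(7).
 */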
3075
3076 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3077 {
3078         u32 v;
3079         int ret;
3080
3081         /* get device capabilities */
3082         memset(c, 0, sizeof(*c));
3083         c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3084                                FW_CMD_REQUEST_F | FW_CMD_READ_F);
3085         c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
3086         ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
3087         if (ret < 0)
3088                 return ret;
3089
3090         /* select capabilities we'll be using */
3091         if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3092                 if (!vf_acls)
3093                         c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3094                 else
3095                         c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
3096         } else if (vf_acls) {
3097                 dev_err(adap->pdev_dev, "virtualization ACLs not supported\n");
3098                 return -EINVAL;
3099         }
3100         c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3101                                FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3102         ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
3103         if (ret < 0)
3104                 return ret;
3105
3106         ret = t4_config_glbl_rss(adap, adap->pf,
3107                                  FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3108                                  FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
3109                                  FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
3110         if (ret < 0)
3111                 return ret;
3112
3113         ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
3114                           MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
3115                           FW_CMD_CAP_PF);
3116         if (ret < 0)
3117                 return ret;
3118
3119         t4_sge_init(adap);
3120
3121         /* tweak some settings */
3122         t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
3123         t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
3124         t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
3125         v = t4_read_reg(adap, TP_PIO_DATA_A);
3126         t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
3127
3128         /* first 4 Tx modulation queues point to consecutive Tx channels */
3129         adap->params.tp.tx_modq_map = 0xE4;
3130         t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
3131                      TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
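        /* Illustrative decoding (not part of the driver): 0xE4 is 11 10 01 00
         * in 2-bit fields, so modulation queues 0..3 map to channels 0, 1, 2
         * and 3 respectively, the identity mapping described above.
         */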
3132
3133         /* associate each Tx modulation queue with consecutive Tx channels */
3134         v = 0x84218421;
3135         t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3136                           &v, 1, TP_TX_SCHED_HDR_A);
3137         t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3138                           &v, 1, TP_TX_SCHED_FIFO_A);
3139         t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3140                           &v, 1, TP_TX_SCHED_PCMD_A);
3141
3142 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
3143         if (is_offload(adap)) {
3144                 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
3145                              TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3146                              TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3147                              TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3148                              TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3149                 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
3150                              TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3151                              TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3152                              TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3153                              TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3154         }
3155
3156         /* get basic stuff going */
3157         return t4_early_init(adap, adap->pf);
3158 }
3159
3160 /*
3161  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
3162  */
3163 #define MAX_ATIDS 8192U
3164
3165 /*
3166  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3167  *
3168  * If the firmware we're dealing with has Configuration File support, then
3169  * we use that to perform all configuration
3170  */
3171
3172 /*
3173  * Tweak configuration based on module parameters, etc.  Most of these have
3174  * defaults assigned to them by Firmware Configuration Files (if we're using
3175  * them) but need to be explicitly set if we're using hard-coded
3176  * initialization.  But even in the case of using Firmware Configuration
3177  * Files, we'd like to expose the ability to change these via module
3178  * parameters so these are essentially common tweaks/settings for
3179  * Configuration Files and hard-coded initialization ...
3180  */
3181 static int adap_init0_tweaks(struct adapter *adapter)
3182 {
3183         /*
3184          * Fix up various Host-Dependent Parameters like Page Size, Cache
3185          * Line Size, etc.  The firmware default is for a 4KB Page Size and
3186          * 64B Cache Line Size ...
3187          */
3188         t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3189
3190         /*
3191          * Process module parameters which affect early initialization.
3192          */
3193         if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3194                 dev_err(&adapter->pdev->dev,
3195                         "Ignoring illegal rx_dma_offset=%d, using 2\n",
3196                         rx_dma_offset);
3197                 rx_dma_offset = 2;
3198         }
3199         t4_set_reg_field(adapter, SGE_CONTROL_A,
3200                          PKTSHIFT_V(PKTSHIFT_M),
3201                          PKTSHIFT_V(rx_dma_offset));
3202
3203         /*
3204          * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3205          * adds the pseudo header itself.
3206          */
3207         t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
3208                                CSUM_HAS_PSEUDO_HDR_F, 0);
3209
3210         return 0;
3211 }
3212
3213 /* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
3214  * unto themselves and they contain their own firmware to perform their
3215  * tasks ...
3216  */
3217 static int phy_aq1202_version(const u8 *phy_fw_data,
3218                               size_t phy_fw_size)
3219 {
3220         int offset;
3221
3222         /* At offset 0x8 you're looking for the primary image's
3223          * starting offset which is 3 Bytes wide
3224          *
3225          * At offset 0xa of the primary image, you look for the offset
3226          * of the DRAM segment which is 3 Bytes wide.
3227          *
3228          * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
3229          * wide
3230          */
3231         #define be16(__p) (((__p)[0] << 8) | (__p)[1])
3232         #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
3233         #define le24(__p) (le16(__p) | ((__p)[2] << 16))
3234
3235         offset = le24(phy_fw_data + 0x8) << 12;
3236         offset = le24(phy_fw_data + offset + 0xa);
3237         return be16(phy_fw_data + offset + 0x27e);
3238
3239         #undef be16
3240         #undef le16
3241         #undef le24
3242 }
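/* Illustrative walk-through (hypothetical image bytes, not part of the
 * driver): if the 24-bit little-endian value at offset 0x8 is 2, the primary
 * image starts at byte offset 2 << 12 = 0x2000.  If the 24-bit value at
 * 0x2000 + 0xa is 0x3000, the DRAM segment starts there, and the big-endian
 * 16-bit firmware version is read from 0x3000 + 0x27e.
 */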
3243
3244 static struct info_10gbt_phy_fw {
3245         unsigned int phy_fw_id;         /* PCI Device ID */
3246         char *phy_fw_file;              /* /lib/firmware/ PHY Firmware file */
3247         int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
3248         int phy_flash;                  /* Has FLASH for PHY Firmware */
3249 } phy_info_array[] = {
3250         {
3251                 PHY_AQ1202_DEVICEID,
3252                 PHY_AQ1202_FIRMWARE,
3253                 phy_aq1202_version,
3254                 1,
3255         },
3256         {
3257                 PHY_BCM84834_DEVICEID,
3258                 PHY_BCM84834_FIRMWARE,
3259                 NULL,
3260                 0,
3261         },
3262         { 0, NULL, NULL },
3263 };
3264
3265 static struct info_10gbt_phy_fw *find_phy_info(int devid)
3266 {
3267         int i;
3268
3269         for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
3270                 if (phy_info_array[i].phy_fw_id == devid)
3271                         return &phy_info_array[i];
3272         }
3273         return NULL;
3274 }
3275
3276 /* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
3277  * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
3278  * we return a negative error number.  If we transfer new firmware we return 1
3279  * (from t4_load_phy_fw()).  If we don't do anything we return 0.
3280  */
3281 static int adap_init0_phy(struct adapter *adap)
3282 {
3283         const struct firmware *phyf;
3284         int ret;
3285         struct info_10gbt_phy_fw *phy_info;
3286
3287         /* Use the device ID to determine which PHY file to flash.
3288          */
3289         phy_info = find_phy_info(adap->pdev->device);
3290         if (!phy_info) {
3291                 dev_warn(adap->pdev_dev,
3292                          "No PHY Firmware file found for this PHY\n");
3293                 return -EOPNOTSUPP;
3294         }
3295
3296         /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
3297          * use that. The adapter firmware provides us with a memory buffer
3298          * where we can load a PHY firmware file from the host if we want to
3299          * override the PHY firmware File in flash.
3300          */
3301         ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
3302                                       adap->pdev_dev);
3303         if (ret < 0) {
3304                 /* For adapters without FLASH attached to PHY for their
3305                  * firmware, it's obviously a fatal error if we can't get the
3306                  * firmware to the adapter.  For adapters with PHY firmware
3307                  * FLASH storage, it's worth a warning if we can't find the
3308                  * PHY Firmware but we'll neuter the error ...
3309                  */
3310                 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
3311                         "/lib/firmware/%s, error %d\n",
3312                         phy_info->phy_fw_file, -ret);
3313                 if (phy_info->phy_flash) {
3314                         int cur_phy_fw_ver = 0;
3315
3316                         t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3317                         dev_warn(adap->pdev_dev, "continuing with on-adapter "
3318                                  "FLASH copy, version %#x\n", cur_phy_fw_ver);
3319                         ret = 0;
3320                 }
3321
3322                 return ret;
3323         }
3324
3325         /* Load PHY Firmware onto adapter.
3326          */
3327         ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
3328                              phy_info->phy_fw_version,
3329                              (u8 *)phyf->data, phyf->size);
3330         if (ret < 0)
3331                 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
3332                         -ret);
3333         else if (ret > 0) {
3334                 int new_phy_fw_ver = 0;
3335
3336                 if (phy_info->phy_fw_version)
3337                         new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
3338                                                                   phyf->size);
3339                 dev_info(adap->pdev_dev, "Successfully transferred PHY "
3340                          "Firmware /lib/firmware/%s, version %#x\n",
3341                          phy_info->phy_fw_file, new_phy_fw_ver);
3342         }
3343
3344         release_firmware(phyf);
3345
3346         return ret;
3347 }
3348
3349 /*
3350  * Attempt to initialize the adapter via a Firmware Configuration File.
3351  */
3352 static int adap_init0_config(struct adapter *adapter, int reset)
3353 {
3354         struct fw_caps_config_cmd caps_cmd;
3355         const struct firmware *cf;
3356         unsigned long mtype = 0, maddr = 0;
3357         u32 finiver, finicsum, cfcsum;
3358         int ret;
3359         int config_issued = 0;
3360         char *fw_config_file, fw_config_file_path[256];
3361         char *config_name = NULL;
3362
3363         /*
3364          * Reset device if necessary.
3365          */
3366         if (reset) {
3367                 ret = t4_fw_reset(adapter, adapter->mbox,
3368                                   PIORSTMODE_F | PIORST_F);
3369                 if (ret < 0)
3370                         goto bye;
3371         }
3372
3373         /* If this is a 10Gb/s-BT adapter make sure the chip-external
3374          * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
3375          * to be performed after any global adapter RESET above since some
3376          * PHYs only have local RAM copies of the PHY firmware.
3377          */
3378         if (is_10gbt_device(adapter->pdev->device)) {
3379                 ret = adap_init0_phy(adapter);
3380                 if (ret < 0)
3381                         goto bye;
3382         }
3383         /*
3384          * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3385          * then use that.  Otherwise, use the configuration file stored
3386          * in the adapter flash ...
3387          */
3388         switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
3389         case CHELSIO_T4:
3390                 fw_config_file = FW4_CFNAME;
3391                 break;
3392         case CHELSIO_T5:
3393                 fw_config_file = FW5_CFNAME;
3394                 break;
3395         case CHELSIO_T6:
3396                 fw_config_file = FW6_CFNAME;
3397                 break;
3398         default:
3399                 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3400                        adapter->pdev->device);
3401                 ret = -EINVAL;
3402                 goto bye;
3403         }
3404
3405         ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
3406         if (ret < 0) {
3407                 config_name = "On FLASH";
3408                 mtype = FW_MEMTYPE_CF_FLASH;
3409                 maddr = t4_flash_cfg_addr(adapter);
3410         } else {
3411                 u32 params[7], val[7];
3412
3413                 sprintf(fw_config_file_path,
3414                         "/lib/firmware/%s", fw_config_file);
3415                 config_name = fw_config_file_path;
3416
3417                 if (cf->size >= FLASH_CFG_MAX_SIZE)
3418                         ret = -ENOMEM;
3419                 else {
3420                         params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3421                              FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3422                         ret = t4_query_params(adapter, adapter->mbox,
3423                                               adapter->pf, 0, 1, params, val);
3424                         if (ret == 0) {
3425                                 /*
3426                                  * For t4_memory_rw() below addresses and
3427                                  * sizes have to be in terms of multiples of 4
3428                                  * bytes.  So, if the Configuration File isn't
3429                                  * a multiple of 4 bytes in length we'll have
3430                                  * to write that out separately since we can't
3431                                  * guarantee that the bytes following the
3432                                  * residual byte in the buffer returned by
3433                                  * request_firmware() are zeroed out ...
3434                                  */
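                                /* Worked example (hypothetical size, not part
                                 * of the driver): a 1030-byte Configuration
                                 * File gives size = 1028 and resid = 2; the
                                 * bulk write covers bytes 0..1027 and the
                                 * final word is built from the two residual
                                 * bytes plus two zero bytes before being
                                 * written at maddr + 1028.
                                 */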
3435                                 size_t resid = cf->size & 0x3;
3436                                 size_t size = cf->size & ~0x3;
3437                                 __be32 *data = (__be32 *)cf->data;
3438
3439                                 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
3440                                 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
3441
3442                                 spin_lock(&adapter->win0_lock);
3443                                 ret = t4_memory_rw(adapter, 0, mtype, maddr,
3444                                                    size, data, T4_MEMORY_WRITE);
3445                                 if (ret == 0 && resid != 0) {
3446                                         union {
3447                                                 __be32 word;
3448                                                 char buf[4];
3449                                         } last;
3450                                         int i;
3451
3452                                         last.word = data[size >> 2];
3453                                         for (i = resid; i < 4; i++)
3454                                                 last.buf[i] = 0;
3455                                         ret = t4_memory_rw(adapter, 0, mtype,
3456                                                            maddr + size,
3457                                                            4, &last.word,
3458                                                            T4_MEMORY_WRITE);
3459                                 }
3460                                 spin_unlock(&adapter->win0_lock);
3461                         }
3462                 }
3463
3464                 release_firmware(cf);
3465                 if (ret)
3466                         goto bye;
3467         }
3468
3469         /*
3470          * Issue a Capability Configuration command to the firmware to get it
3471          * to parse the Configuration File.  We don't use t4_fw_config_file()
3472          * because we want the ability to modify various features after we've
3473          * processed the configuration file ...
3474          */
3475         memset(&caps_cmd, 0, sizeof(caps_cmd));
3476         caps_cmd.op_to_write =
3477                 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3478                       FW_CMD_REQUEST_F |
3479                       FW_CMD_READ_F);
3480         caps_cmd.cfvalid_to_len16 =
3481                 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
3482                       FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
3483                       FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
3484                       FW_LEN16(caps_cmd));
3485         ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3486                          &caps_cmd);
3487
3488         /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
3489          * Configuration File in FLASH), our last gasp effort is to use the
3490          * Firmware Configuration File which is embedded in the firmware.  A
3491          * very few early versions of the firmware didn't have one embedded
3492          * but we can ignore those.
3493          */
3494         if (ret == -ENOENT) {
3495                 memset(&caps_cmd, 0, sizeof(caps_cmd));
3496                 caps_cmd.op_to_write =
3497                         htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3498                                         FW_CMD_REQUEST_F |
3499                                         FW_CMD_READ_F);
3500                 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3501                 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
3502                                 sizeof(caps_cmd), &caps_cmd);
3503                 config_name = "Firmware Default";
3504         }
3505
3506         config_issued = 1;
3507         if (ret < 0)
3508                 goto bye;
3509
3510         finiver = ntohl(caps_cmd.finiver);
3511         finicsum = ntohl(caps_cmd.finicsum);
3512         cfcsum = ntohl(caps_cmd.cfcsum);
3513         if (finicsum != cfcsum)
3514                 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3515                          "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3516                          finicsum, cfcsum);
3517
3518         /*
3519          * And now tell the firmware to use the configuration we just loaded.
3520          */
3521         caps_cmd.op_to_write =
3522                 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3523                       FW_CMD_REQUEST_F |
3524                       FW_CMD_WRITE_F);
3525         caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3526         ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3527                          NULL);
3528         if (ret < 0)
3529                 goto bye;
3530
3531         /*
3532          * Tweak configuration based on system architecture, module
3533          * parameters, etc.
3534          */
3535         ret = adap_init0_tweaks(adapter);
3536         if (ret < 0)
3537                 goto bye;
3538
3539         /*
3540          * And finally tell the firmware to initialize itself using the
3541          * parameters from the Configuration File.
3542          */
3543         ret = t4_fw_initialize(adapter, adapter->mbox);
3544         if (ret < 0)
3545                 goto bye;
3546
3547         /* Emit Firmware Configuration File information and return
3548          * successfully.
3549          */
3550         dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
3551                  "Configuration File \"%s\", version %#x, computed checksum %#x\n",
3552                  config_name, finiver, cfcsum);
3553         return 0;
3554
3555         /*
3556          * Something bad happened.  Return the error ...  (If the "error"
3557          * is that there's no Configuration File on the adapter we don't
3558          * want to issue a warning since this is fairly common.)
3559          */
3560 bye:
3561         if (config_issued && ret != -ENOENT)
3562                 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
3563                          config_name, -ret);
3564         return ret;
3565 }
3566
3567 static struct fw_info fw_info_array[] = {
3568         {
3569                 .chip = CHELSIO_T4,
3570                 .fs_name = FW4_CFNAME,
3571                 .fw_mod_name = FW4_FNAME,
3572                 .fw_hdr = {
3573                         .chip = FW_HDR_CHIP_T4,
3574                         .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
3575                         .intfver_nic = FW_INTFVER(T4, NIC),
3576                         .intfver_vnic = FW_INTFVER(T4, VNIC),
3577                         .intfver_ri = FW_INTFVER(T4, RI),
3578                         .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3579                         .intfver_fcoe = FW_INTFVER(T4, FCOE),
3580                 },
3581         }, {
3582                 .chip = CHELSIO_T5,
3583                 .fs_name = FW5_CFNAME,
3584                 .fw_mod_name = FW5_FNAME,
3585                 .fw_hdr = {
3586                         .chip = FW_HDR_CHIP_T5,
3587                         .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
3588                         .intfver_nic = FW_INTFVER(T5, NIC),
3589                         .intfver_vnic = FW_INTFVER(T5, VNIC),
3590                         .intfver_ri = FW_INTFVER(T5, RI),
3591                         .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3592                         .intfver_fcoe = FW_INTFVER(T5, FCOE),
3593                 },
3594         }, {
3595                 .chip = CHELSIO_T6,
3596                 .fs_name = FW6_CFNAME,
3597                 .fw_mod_name = FW6_FNAME,
3598                 .fw_hdr = {
3599                         .chip = FW_HDR_CHIP_T6,
3600                         .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
3601                         .intfver_nic = FW_INTFVER(T6, NIC),
3602                         .intfver_vnic = FW_INTFVER(T6, VNIC),
3603                         .intfver_ofld = FW_INTFVER(T6, OFLD),
3604                         .intfver_ri = FW_INTFVER(T6, RI),
3605                         .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3606                         .intfver_iscsi = FW_INTFVER(T6, ISCSI),
3607                         .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3608                         .intfver_fcoe = FW_INTFVER(T6, FCOE),
3609                 },
3610         }
3611
3612 };
3613
3614 static struct fw_info *find_fw_info(int chip)
3615 {
3616         int i;
3617
3618         for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
3619                 if (fw_info_array[i].chip == chip)
3620                         return &fw_info_array[i];
3621         }
3622         return NULL;
3623 }
3624
3625 /*
3626  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3627  */
3628 static int adap_init0(struct adapter *adap)
3629 {
3630         int ret;
3631         u32 v, port_vec;
3632         enum dev_state state;
3633         u32 params[7], val[7];
3634         struct fw_caps_config_cmd caps_cmd;
3635         int reset = 1;
3636
3637         /* Grab Firmware Device Log parameters as early as possible so we have
3638          * access to it for debugging, etc.
3639          */
3640         ret = t4_init_devlog_params(adap);
3641         if (ret < 0)
3642                 return ret;
3643
3644         /* Contact FW, advertising Master capability */
3645         ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
3646         if (ret < 0) {
3647                 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3648                         ret);
3649                 return ret;
3650         }
3651         if (ret == adap->mbox)
3652                 adap->flags |= MASTER_PF;
3653
3654         /*
3655          * If we're the Master PF Driver and the device is uninitialized,
3656          * then let's consider upgrading the firmware ...  (We always want
3657          * to check the firmware version number in order to A. get it for
3658          * later reporting and B. to warn if the currently loaded firmware
3659          * is excessively mismatched relative to the driver.)
3660          */
3661         t4_get_fw_version(adap, &adap->params.fw_vers);
3662         t4_get_tp_version(adap, &adap->params.tp_vers);
3663         if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
3664                 struct fw_info *fw_info;
3665                 struct fw_hdr *card_fw;
3666                 const struct firmware *fw;
3667                 const u8 *fw_data = NULL;
3668                 unsigned int fw_size = 0;
3669
3670                 /* This is the firmware whose headers the driver was compiled
3671                  * against
3672                  */
3673                 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
3674                 if (fw_info == NULL) {
3675                         dev_err(adap->pdev_dev,
3676                                 "unable to get firmware info for chip %d.\n",
3677                                 CHELSIO_CHIP_VERSION(adap->params.chip));
3678                         return -EINVAL;
3679                 }
3680
3681                 /* allocate memory to read the header of the firmware on the
3682                  * card
3683                  */
3684                 card_fw = t4_alloc_mem(sizeof(*card_fw));
3685
3686                 /* Get FW from /lib/firmware/ */
3687                 ret = request_firmware(&fw, fw_info->fw_mod_name,
3688                                        adap->pdev_dev);
3689                 if (ret < 0) {
3690                         dev_err(adap->pdev_dev,
3691                                 "unable to load firmware image %s, error %d\n",
3692                                 fw_info->fw_mod_name, ret);
3693                 } else {
3694                         fw_data = fw->data;
3695                         fw_size = fw->size;
3696                 }
3697
3698                 /* upgrade FW logic */
3699                 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
3700                                  state, &reset);
3701
3702                 /* Cleaning up */
3703                 release_firmware(fw);
3704                 t4_free_mem(card_fw);
3705
3706                 if (ret < 0)
3707                         goto bye;
3708         }
3709
3710         /*
3711          * Grab VPD parameters.  This should be done after we establish a
3712          * connection to the firmware since some of the VPD parameters
3713          * (notably the Core Clock frequency) are retrieved via requests to
3714          * the firmware.  On the other hand, we need these fairly early on
3715          * so we do this right after getting ahold of the firmware.
3716          */
3717         ret = t4_get_vpd_params(adap, &adap->params.vpd);
3718         if (ret < 0)
3719                 goto bye;
3720
3721         /*
3722          * Find out what ports are available to us.  Note that we need to do
3723          * this before calling adap_init0_no_config() since it needs nports
3724          * and portvec ...
3725          */
3726         v =
3727             FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3728             FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
3729         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
3730         if (ret < 0)
3731                 goto bye;
3732
3733         adap->params.nports = hweight32(port_vec);
3734         adap->params.portvec = port_vec;
3735
3736         /* If the firmware is initialized already, emit a simple note to that
3737          * effect. Otherwise, it's time to try initializing the adapter.
3738          */
3739         if (state == DEV_STATE_INIT) {
3740                 dev_info(adap->pdev_dev, "Coming up as %s: "\
3741                          "Adapter already initialized\n",
3742                          adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
3743         } else {
3744                 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
3745                          "Initializing adapter\n");
3746
3747                 /* Find out whether we're dealing with a version of the
3748                  * firmware which has configuration file support.
3749                  */
3750                 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3751                              FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3752                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3753                                       params, val);
3754
3755                 /* If the firmware doesn't support Configuration Files,
3756                  * return an error.
3757                  */
3758                 if (ret < 0) {
3759                         dev_err(adap->pdev_dev, "firmware doesn't support "
3760                                 "Firmware Configuration Files\n");
3761                         goto bye;
3762                 }
3763
3764                 /* The firmware provides us with a memory buffer where we can
3765                  * load a Configuration File from the host if we want to
3766                  * override the Configuration File in flash.
3767                  */
3768                 ret = adap_init0_config(adap, reset);
3769                 if (ret == -ENOENT) {
3770                         dev_err(adap->pdev_dev, "no Configuration File "
3771                                 "present on adapter.\n");
3772                         goto bye;
3773                 }
3774                 if (ret < 0) {
3775                         dev_err(adap->pdev_dev, "could not initialize "
3776                                 "adapter, error %d\n", -ret);
3777                         goto bye;
3778                 }
3779         }
3780
3781         /* Give the SGE code a chance to pull in anything that it needs ...
3782          * Note that this must be called after we retrieve our VPD parameters
3783          * in order to know how to convert core ticks to seconds, etc.
3784          */
3785         ret = t4_sge_init(adap);
3786         if (ret < 0)
3787                 goto bye;
3788
3789         if (is_bypass_device(adap->pdev->device))
3790                 adap->params.bypass = 1;
3791
3792         /*
3793          * Grab some of our basic fundamental operating parameters.
3794          */
3795 #define FW_PARAM_DEV(param) \
3796         (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
3797         FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
3798
3799 #define FW_PARAM_PFVF(param) \
3800         FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
3801         FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)|  \
3802         FW_PARAMS_PARAM_Y_V(0) | \
3803         FW_PARAMS_PARAM_Z_V(0)
3804
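        /* For example, FW_PARAM_PFVF(L2T_START) below encodes a PFVF-scoped
         * query for the start of the L2T region; t4_query_params() carries up
         * to seven such encoded parameters in a single mailbox command and
         * returns the corresponding values.
         */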
3805         params[0] = FW_PARAM_PFVF(EQ_START);
3806         params[1] = FW_PARAM_PFVF(L2T_START);
3807         params[2] = FW_PARAM_PFVF(L2T_END);
3808         params[3] = FW_PARAM_PFVF(FILTER_START);
3809         params[4] = FW_PARAM_PFVF(FILTER_END);
3810         params[5] = FW_PARAM_PFVF(IQFLINT_START);
3811         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
3812         if (ret < 0)
3813                 goto bye;
3814         adap->sge.egr_start = val[0];
3815         adap->l2t_start = val[1];
3816         adap->l2t_end = val[2];
3817         adap->tids.ftid_base = val[3];
3818         adap->tids.nftids = val[4] - val[3] + 1;
3819         adap->sge.ingr_start = val[5];
3820
3821         /* qids (ingress/egress) returned from firmware can be anywhere
3822          * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
3823          * Hence the driver needs to allocate memory for this range to
3824          * store the queue info. Get the highest IQFLINT/EQ index returned
3825          * in FW_EQ_*_CMD.alloc command.
3826          */
3827         params[0] = FW_PARAM_PFVF(EQ_END);
3828         params[1] = FW_PARAM_PFVF(IQFLINT_END);
3829         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3830         if (ret < 0)
3831                 goto bye;
3832         adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
3833         adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
3834
3835         adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
3836                                     sizeof(*adap->sge.egr_map), GFP_KERNEL);
3837         if (!adap->sge.egr_map) {
3838                 ret = -ENOMEM;
3839                 goto bye;
3840         }
3841
3842         adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
3843                                      sizeof(*adap->sge.ingr_map), GFP_KERNEL);
3844         if (!adap->sge.ingr_map) {
3845                 ret = -ENOMEM;
3846                 goto bye;
3847         }
3848
3849         /* Allocate the memory for the various egress queue bitmaps,
3850          * i.e. starving_fl, txq_maperr and blocked_fl.
3851          */
3852         adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3853                                         sizeof(long), GFP_KERNEL);
3854         if (!adap->sge.starving_fl) {
3855                 ret = -ENOMEM;
3856                 goto bye;
3857         }
3858
3859         adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3860                                        sizeof(long), GFP_KERNEL);
3861         if (!adap->sge.txq_maperr) {
3862                 ret = -ENOMEM;
3863                 goto bye;
3864         }
3865
3866 #ifdef CONFIG_DEBUG_FS
3867         adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3868                                        sizeof(long), GFP_KERNEL);
3869         if (!adap->sge.blocked_fl) {
3870                 ret = -ENOMEM;
3871                 goto bye;
3872         }
3873 #endif
3874
3875         params[0] = FW_PARAM_PFVF(CLIP_START);
3876         params[1] = FW_PARAM_PFVF(CLIP_END);
3877         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3878         if (ret < 0)
3879                 goto bye;
3880         adap->clipt_start = val[0];
3881         adap->clipt_end = val[1];
3882
3883         /* query params related to active filter region */
3884         params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
3885         params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
3886         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3887         /* If an Active Filter region is provisioned, we enable establishing
3888          * offload connections through firmware work requests.
3889          */
3890         if ((val[0] != val[1]) && (ret >= 0)) {
3891                 adap->flags |= FW_OFLD_CONN;
3892                 adap->tids.aftid_base = val[0];
3893                 adap->tids.aftid_end = val[1];
3894         }
3895
3896         /* If we're running on newer firmware, let it know that we're
3897          * prepared to deal with encapsulated CPL messages.  Older
3898          * firmware won't understand this and we'll just get
3899          * unencapsulated messages ...
3900          */
3901         params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3902         val[0] = 1;
3903         (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
3904
3905         /*
3906          * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
3907          * capability.  Earlier versions of the firmware didn't have the
3908          * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
3909          * permission to use ULPTX MEMWRITE DSGL.
3910          */
3911         if (is_t4(adap->params.chip)) {
3912                 adap->params.ulptx_memwrite_dsgl = false;
3913         } else {
3914                 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
3915                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
3916                                       1, params, val);
3917                 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
3918         }
3919
3920         /*
3921          * Get device capabilities so we can determine what resources we need
3922          * to manage.
3923          */
3924         memset(&caps_cmd, 0, sizeof(caps_cmd));
3925         caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3926                                      FW_CMD_REQUEST_F | FW_CMD_READ_F);
3927         caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3928         ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
3929                          &caps_cmd);
3930         if (ret < 0)
3931                 goto bye;
3932
3933         if (caps_cmd.ofldcaps) {
3934                 /* query offload-related parameters */
3935                 params[0] = FW_PARAM_DEV(NTID);
3936                 params[1] = FW_PARAM_PFVF(SERVER_START);
3937                 params[2] = FW_PARAM_PFVF(SERVER_END);
3938                 params[3] = FW_PARAM_PFVF(TDDP_START);
3939                 params[4] = FW_PARAM_PFVF(TDDP_END);
3940                 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3941                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
3942                                       params, val);
3943                 if (ret < 0)
3944                         goto bye;
3945                 adap->tids.ntids = val[0];
3946                 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3947                 adap->tids.stid_base = val[1];
3948                 adap->tids.nstids = val[2] - val[1] + 1;
3949                 /*
3950                  * Set up the server filter region. Divide the available
3951                  * filter region into two parts: regular filters get 1/3rd
3952                  * and server filters get the remaining 2/3rd. This split is
3953                  * only enabled if the workaround path is enabled.
3954                  * 1. Regular filters.
3955                  * 2. Server filters: these are special filters used to
3956                  * redirect SYN packets to the offload queue.
3957                  */
3958                 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
3959                         adap->tids.sftid_base = adap->tids.ftid_base +
3960                                         DIV_ROUND_UP(adap->tids.nftids, 3);
3961                         adap->tids.nsftids = adap->tids.nftids -
3962                                          DIV_ROUND_UP(adap->tids.nftids, 3);
3963                         adap->tids.nftids = adap->tids.sftid_base -
3964                                                 adap->tids.ftid_base;
3965                 }
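                /* Illustrative split (hypothetical count, not part of the
                 * driver): with nftids = 500, DIV_ROUND_UP(500, 3) = 167, so
                 * 167 regular filters remain and the 333 server filters
                 * occupy the region starting at sftid_base.
                 */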
3966                 adap->vres.ddp.start = val[3];
3967                 adap->vres.ddp.size = val[4] - val[3] + 1;
3968                 adap->params.ofldq_wr_cred = val[5];
3969
3970                 adap->params.offload = 1;
3971         }
3972         if (caps_cmd.rdmacaps) {
3973                 params[0] = FW_PARAM_PFVF(STAG_START);
3974                 params[1] = FW_PARAM_PFVF(STAG_END);
3975                 params[2] = FW_PARAM_PFVF(RQ_START);
3976                 params[3] = FW_PARAM_PFVF(RQ_END);
3977                 params[4] = FW_PARAM_PFVF(PBL_START);
3978                 params[5] = FW_PARAM_PFVF(PBL_END);
3979                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
3980                                       params, val);
3981                 if (ret < 0)
3982                         goto bye;
3983                 adap->vres.stag.start = val[0];
3984                 adap->vres.stag.size = val[1] - val[0] + 1;
3985                 adap->vres.rq.start = val[2];
3986                 adap->vres.rq.size = val[3] - val[2] + 1;
3987                 adap->vres.pbl.start = val[4];
3988                 adap->vres.pbl.size = val[5] - val[4] + 1;
3989
3990                 params[0] = FW_PARAM_PFVF(SQRQ_START);
3991                 params[1] = FW_PARAM_PFVF(SQRQ_END);
3992                 params[2] = FW_PARAM_PFVF(CQ_START);
3993                 params[3] = FW_PARAM_PFVF(CQ_END);
3994                 params[4] = FW_PARAM_PFVF(OCQ_START);
3995                 params[5] = FW_PARAM_PFVF(OCQ_END);
3996                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
3997                                       val);
3998                 if (ret < 0)
3999                         goto bye;
4000                 adap->vres.qp.start = val[0];
4001                 adap->vres.qp.size = val[1] - val[0] + 1;
4002                 adap->vres.cq.start = val[2];
4003                 adap->vres.cq.size = val[3] - val[2] + 1;
4004                 adap->vres.ocq.start = val[4];
4005                 adap->vres.ocq.size = val[5] - val[4] + 1;
4006
4007                 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
4008                 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4009                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
4010                                       val);
4011                 if (ret < 0) {
4012                         adap->params.max_ordird_qp = 8;
4013                         adap->params.max_ird_adapter = 32 * adap->tids.ntids;
4014                         ret = 0;
4015                 } else {
4016                         adap->params.max_ordird_qp = val[0];
4017                         adap->params.max_ird_adapter = val[1];
4018                 }
4019                 dev_info(adap->pdev_dev,
4020                          "max_ordird_qp %d max_ird_adapter %d\n",
4021                          adap->params.max_ordird_qp,
4022                          adap->params.max_ird_adapter);
4023         }
4024         if (caps_cmd.iscsicaps) {
4025                 params[0] = FW_PARAM_PFVF(ISCSI_START);
4026                 params[1] = FW_PARAM_PFVF(ISCSI_END);
4027                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4028                                       params, val);
4029                 if (ret < 0)
4030                         goto bye;
4031                 adap->vres.iscsi.start = val[0];
4032                 adap->vres.iscsi.size = val[1] - val[0] + 1;
4033         }
4034 #undef FW_PARAM_PFVF
4035 #undef FW_PARAM_DEV
4036
4037         /* The MTU/MSS Table is initialized by now, so load its values.  If
4038          * we're initializing the adapter, then we'll make any modifications
4039          * we want to the MTU/MSS Table and also initialize the congestion
4040          * parameters.
4041          */
4042         t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
4043         if (state != DEV_STATE_INIT) {
4044                 int i;
4045
4046                 /* The default MTU Table contains values 1492 and 1500.
4047                  * However, for TCP, it's better to have two values which are
4048                  * a multiple of 8 +/- 4 bytes apart near this popular MTU.
4049                  * This allows us to have a TCP Data Payload which is a
4050                  * multiple of 8 regardless of what combination of TCP Options
4051                  * are in use (always a multiple of 4 bytes) which is
4052                  * important for performance reasons.  For instance, if no
4053                  * options are in use, then we have a 20-byte IP header and a
4054                  * 20-byte TCP header.  In this case, a 1500-byte MSS would
4055                  * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
4056                  * which is not a multiple of 8.  So using an MSS of 1488 in
4057                  * this case results in a TCP Data Payload of 1448 bytes which
4058                  * is a multiple of 8.  On the other hand, if 12-byte TCP Time
4059                  * Stamps have been negotiated, then an MTU of 1500 bytes
4060                  * results in a TCP Data Payload of 1448 bytes which, as
4061                  * above, is a multiple of 8 bytes ...
4062                  */
4063                 for (i = 0; i < NMTUS; i++)
4064                         if (adap->params.mtus[i] == 1492) {
4065                                 adap->params.mtus[i] = 1488;
4066                                 break;
4067                         }
4068
4069                 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4070                              adap->params.b_wnd);
4071         }
4072         t4_init_sge_params(adap);
4073         adap->flags |= FW_OK;
4074         t4_init_tp_params(adap);
4075         return 0;
4076
4077         /*
4078          * Something bad happened.  If a command timed out or failed with
4079          * EIO, the firmware is not operating within its spec or something
4080          * catastrophic happened to the HW/FW, so stop issuing commands.
4081          */
4082 bye:
4083         kfree(adap->sge.egr_map);
4084         kfree(adap->sge.ingr_map);
4085         kfree(adap->sge.starving_fl);
4086         kfree(adap->sge.txq_maperr);
4087 #ifdef CONFIG_DEBUG_FS
4088         kfree(adap->sge.blocked_fl);
4089 #endif
4090         if (ret != -ETIMEDOUT && ret != -EIO)
4091                 t4_fw_bye(adap, adap->mbox);
4092         return ret;
4093 }
4094
4095 /* EEH callbacks */
4096
4097 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4098                                          pci_channel_state_t state)
4099 {
4100         int i;
4101         struct adapter *adap = pci_get_drvdata(pdev);
4102
4103         if (!adap)
4104                 goto out;
4105
4106         rtnl_lock();
4107         adap->flags &= ~FW_OK;
4108         notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
4109         spin_lock(&adap->stats_lock);
4110         for_each_port(adap, i) {
4111                 struct net_device *dev = adap->port[i];
4112
4113                 netif_device_detach(dev);
4114                 netif_carrier_off(dev);
4115         }
4116         spin_unlock(&adap->stats_lock);
4117         disable_interrupts(adap);
4118         if (adap->flags & FULL_INIT_DONE)
4119                 cxgb_down(adap);
4120         rtnl_unlock();
4121         if ((adap->flags & DEV_ENABLED)) {
4122                 pci_disable_device(pdev);
4123                 adap->flags &= ~DEV_ENABLED;
4124         }
4125 out:    return state == pci_channel_io_perm_failure ?
4126                 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4127 }
4128
4129 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4130 {
4131         int i, ret;
4132         struct fw_caps_config_cmd c;
4133         struct adapter *adap = pci_get_drvdata(pdev);
4134
4135         if (!adap) {
4136                 pci_restore_state(pdev);
4137                 pci_save_state(pdev);
4138                 return PCI_ERS_RESULT_RECOVERED;
4139         }
4140
4141         if (!(adap->flags & DEV_ENABLED)) {
4142                 if (pci_enable_device(pdev)) {
4143                         dev_err(&pdev->dev,
4144                                 "Cannot reenable PCI device after reset\n");
4145                         return PCI_ERS_RESULT_DISCONNECT;
4146                 }
4147                 adap->flags |= DEV_ENABLED;
4148         }
4149
4150         pci_set_master(pdev);
4151         pci_restore_state(pdev);
4152         pci_save_state(pdev);
4153         pci_cleanup_aer_uncorrect_error_status(pdev);
4154
4155         if (t4_wait_dev_ready(adap->regs) < 0)
4156                 return PCI_ERS_RESULT_DISCONNECT;
4157         if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
4158                 return PCI_ERS_RESULT_DISCONNECT;
4159         adap->flags |= FW_OK;
4160         if (adap_init1(adap, &c))
4161                 return PCI_ERS_RESULT_DISCONNECT;
4162
4163         for_each_port(adap, i) {
4164                 struct port_info *p = adap2pinfo(adap, i);
4165
4166                 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
4167                                   NULL, NULL);
4168                 if (ret < 0)
4169                         return PCI_ERS_RESULT_DISCONNECT;
4170                 p->viid = ret;
4171                 p->xact_addr_filt = -1;
4172         }
4173
4174         t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4175                      adap->params.b_wnd);
4176         setup_memwin(adap);
4177         if (cxgb_up(adap))
4178                 return PCI_ERS_RESULT_DISCONNECT;
4179         return PCI_ERS_RESULT_RECOVERED;
4180 }
4181
4182 static void eeh_resume(struct pci_dev *pdev)
4183 {
4184         int i;
4185         struct adapter *adap = pci_get_drvdata(pdev);
4186
4187         if (!adap)
4188                 return;
4189
4190         rtnl_lock();
4191         for_each_port(adap, i) {
4192                 struct net_device *dev = adap->port[i];
4193
4194                 if (netif_running(dev)) {
4195                         link_start(dev);
4196                         cxgb_set_rxmode(dev);
4197                 }
4198                 netif_device_attach(dev);
4199         }
4200         rtnl_unlock();
4201 }
4202
4203 static const struct pci_error_handlers cxgb4_eeh = {
4204         .error_detected = eeh_err_detected,
4205         .slot_reset     = eeh_slot_reset,
4206         .resume         = eeh_resume,
4207 };
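/* Editorial note on the recovery flow: on a fatal PCI channel error the
 * EEH core first calls .error_detected(), which quiesces the adapter and
 * asks for a slot reset; .slot_reset() then re-enables the device,
 * re-contacts the firmware and reallocates the ports' virtual interfaces;
 * finally .resume() restarts any interfaces that were running.
 */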
4208
4209 static inline bool is_x_10g_port(const struct link_config *lc)
4210 {
4211         return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
4212                (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
4213 }
4214
4215 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
4216                              unsigned int us, unsigned int cnt,
4217                              unsigned int size, unsigned int iqe_size)
4218 {
4219         q->adap = adap;
4220         cxgb4_set_rspq_intr_params(q, us, cnt);
4221         q->iqe_len = iqe_size;
4222         q->size = size;
4223 }
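/* Editorial example: the Ethernet Rx queues below are configured with
 * init_rspq(adap, &r->rspq, 5, 10, 1024, 64), i.e. a 5 us interrupt
 * holdoff timer, a 10-packet count threshold, and 1024 ingress queue
 * entries of 64 bytes each.
 */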
4224
4225 /*
4226  * Perform default configuration of DMA queues depending on the number and type
4227  * of ports we found and the number of available CPUs.  Most settings can be
4228  * modified by the admin prior to actual use.
4229  */
4230 static void cfg_queues(struct adapter *adap)
4231 {
4232         struct sge *s = &adap->sge;
4233         int i, n10g = 0, qidx = 0;
4234 #ifndef CONFIG_CHELSIO_T4_DCB
4235         int q10g = 0;
4236 #endif
4237         int ciq_size;
4238
4239         for_each_port(adap, i)
4240                 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
4241 #ifdef CONFIG_CHELSIO_T4_DCB
4242         /* For Data Center Bridging support we need to be able to support up
4243          * to 8 Traffic Priorities; each of which will be assigned to its
4244          * own TX Queue in order to prevent Head-Of-Line Blocking.
4245          */
4246         if (adap->params.nports * 8 > MAX_ETH_QSETS) {
4247                 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
4248                         MAX_ETH_QSETS, adap->params.nports * 8);
4249                 BUG_ON(1);
4250         }
4251
4252         for_each_port(adap, i) {
4253                 struct port_info *pi = adap2pinfo(adap, i);
4254
4255                 pi->first_qset = qidx;
4256                 pi->nqsets = 8;
4257                 qidx += pi->nqsets;
4258         }
4259 #else /* !CONFIG_CHELSIO_T4_DCB */
4260         /*
4261          * We default to 1 queue per non-10G port and up to as many queues
4262          * as there are CPU cores per 10G port.
4263          */
4264         if (n10g)
4265                 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
4266         if (q10g > netif_get_num_default_rss_queues())
4267                 q10g = netif_get_num_default_rss_queues();
4268
4269         for_each_port(adap, i) {
4270                 struct port_info *pi = adap2pinfo(adap, i);
4271
4272                 pi->first_qset = qidx;
4273                 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
4274                 qidx += pi->nqsets;
4275         }
4276 #endif /* !CONFIG_CHELSIO_T4_DCB */
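        /* Worked example (editorial sketch, assuming MAX_ETH_QSETS == 32 as
         * in cxgb4.h): a 2-port card where both ports are 10G gives
         * q10g == (32 - 0) / 2 == 16, which is then clamped to
         * netif_get_num_default_rss_queues() (at most 8), so each 10G port
         * ends up with 8 queue sets.
         */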
4277
4278         s->ethqsets = qidx;
4279         s->max_ethqsets = qidx;   /* MSI-X may lower it later */
4280
4281         if (is_offload(adap)) {
4282                 /*
4283                  * For offload we use 1 queue/channel if all ports are up to 1G,
4284                  * otherwise we divide all available queues amongst the channels
4285                  * capped by the number of available cores.
4286                  */
4287                 if (n10g) {
4288                         i = min_t(int, ARRAY_SIZE(s->ofldrxq),
4289                                   num_online_cpus());
4290                         s->ofldqsets = roundup(i, adap->params.nports);
4291                 } else
4292                         s->ofldqsets = adap->params.nports;
4293                 /* For RDMA one Rx queue per channel suffices */
4294                 s->rdmaqs = adap->params.nports;
4295                 /* Try to allow at least 1 CIQ per cpu, rounding down to a
4296                  * multiple of the number of ports, with a minimum of 1 per port.
4297                  * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
4298                  * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
4299                  * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
4300                  */
4301                 s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
4302                 s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
4303                                 adap->params.nports;
4304                 s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
4305         }
4306
4307         for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4308                 struct sge_eth_rxq *r = &s->ethrxq[i];
4309
4310                 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
4311                 r->fl.size = 72;
4312         }
4313
4314         for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4315                 s->ethtxq[i].q.size = 1024;
4316
4317         for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4318                 s->ctrlq[i].q.size = 512;
4319
4320         for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
4321                 s->ofldtxq[i].q.size = 1024;
4322
4323         for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
4324                 struct sge_ofld_rxq *r = &s->ofldrxq[i];
4325
4326                 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
4327                 r->rspq.uld = CXGB4_ULD_ISCSI;
4328                 r->fl.size = 72;
4329         }
4330
4331         for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
4332                 struct sge_ofld_rxq *r = &s->rdmarxq[i];
4333
4334                 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
4335                 r->rspq.uld = CXGB4_ULD_RDMA;
4336                 r->fl.size = 72;
4337         }
4338
4339         ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
4340         if (ciq_size > SGE_MAX_IQ_SIZE) {
4341                 CH_WARN(adap, "CIQ size too small for available IQs\n");
4342                 ciq_size = SGE_MAX_IQ_SIZE;
4343         }
4344
4345         for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
4346                 struct sge_ofld_rxq *r = &s->rdmaciq[i];
4347
4348                 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
4349                 r->rspq.uld = CXGB4_ULD_RDMA;
4350         }
4351
4352         init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
4353         init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
4354 }
4355
4356 /*
4357  * Reduce the number of Ethernet queues across all ports to at most n.
4358  * The caller guarantees that n allows at least one queue per port.
4359  */
4360 static void reduce_ethqs(struct adapter *adap, int n)
4361 {
4362         int i;
4363         struct port_info *pi;
4364
4365         while (n < adap->sge.ethqsets)
4366                 for_each_port(adap, i) {
4367                         pi = adap2pinfo(adap, i);
4368                         if (pi->nqsets > 1) {
4369                                 pi->nqsets--;
4370                                 adap->sge.ethqsets--;
4371                                 if (adap->sge.ethqsets <= n)
4372                                         break;
4373                         }
4374                 }
4375
4376         n = 0;
4377         for_each_port(adap, i) {
4378                 pi = adap2pinfo(adap, i);
4379                 pi->first_qset = n;
4380                 n += pi->nqsets;
4381         }
4382 }
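/* Worked example (editorial sketch): a 4-port adapter with 4 queue sets per
 * port (16 total) reduced to n == 10 loses one queue set per port per pass
 * until the total drops to 10, leaving nqsets == 2, 2, 3, 3 and
 * first_qset == 0, 2, 4, 7.
 */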
4383
4384 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
4385 #define EXTRA_VECS 2
4386
4387 static int enable_msix(struct adapter *adap)
4388 {
4389         int ofld_need = 0;
4390         int i, want, need, allocated;
4391         struct sge *s = &adap->sge;
4392         unsigned int nchan = adap->params.nports;
4393         struct msix_entry *entries;
4394
4395         entries = kmalloc_array(MAX_INGQ + 1, sizeof(*entries),
4396                                 GFP_KERNEL);
4397         if (!entries)
4398                 return -ENOMEM;
4399
4400         for (i = 0; i < MAX_INGQ + 1; ++i)
4401                 entries[i].entry = i;
4402
4403         want = s->max_ethqsets + EXTRA_VECS;
4404         if (is_offload(adap)) {
4405                 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
4406                 /* need nchan for each possible ULD */
4407                 ofld_need = 3 * nchan;
4408         }
4409 #ifdef CONFIG_CHELSIO_T4_DCB
4410         /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
4411          * each port.
4412          */
4413         need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
4414 #else
4415         need = adap->params.nports + EXTRA_VECS + ofld_need;
4416 #endif
4417         allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
4418         if (allocated < 0) {
4419                 dev_info(adap->pdev_dev,
4420                          "not enough MSI-X vectors left, not using MSI-X\n");
4421                 kfree(entries);
4422                 return allocated;
4423         }
4424
4425         /* Distribute available vectors to the various queue groups.
4426          * Every group gets its minimum requirement and NIC gets top
4427          * priority for leftovers.
4428          */
4429         i = allocated - EXTRA_VECS - ofld_need;
4430         if (i < s->max_ethqsets) {
4431                 s->max_ethqsets = i;
4432                 if (i < s->ethqsets)
4433                         reduce_ethqs(adap, i);
4434         }
4435         if (is_offload(adap)) {
4436                 if (allocated < want) {
4437                         s->rdmaqs = nchan;
4438                         s->rdmaciqs = nchan;
4439                 }
4440
4441                 /* leftovers go to OFLD */
4442                 i = allocated - EXTRA_VECS - s->max_ethqsets -
4443                     s->rdmaqs - s->rdmaciqs;
4444                 s->ofldqsets = (i / nchan) * nchan;  /* round down */
4445         }
4446         for (i = 0; i < allocated; ++i)
4447                 adap->msix_info[i].vec = entries[i].vector;
4448
4449         kfree(entries);
4450         return 0;
4451 }
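/* Worked example (editorial sketch, assuming none of the MAX_* limits
 * clamp): a 2-port 10G offload adapter on an 8-CPU system wants
 * 16 (eth) + 2 (EXTRA_VECS) + 2 (rdmaqs) + 8 (rdmaciqs) + 8 (ofldqsets)
 * == 36 vectors and needs 2 (ports) + 2 (EXTRA_VECS) + 6 (3 * nchan)
 * == 10; any shortfall between the two is absorbed by the offload queue
 * groups, with the NIC queues keeping top priority for leftovers.
 */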
4452
4453 #undef EXTRA_VECS
4454
4455 static int init_rss(struct adapter *adap)
4456 {
4457         unsigned int i;
4458         int err;
4459
4460         err = t4_init_rss_mode(adap, adap->mbox);
4461         if (err)
4462                 return err;
4463
4464         for_each_port(adap, i) {
4465                 struct port_info *pi = adap2pinfo(adap, i);
4466
4467                 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
4468                 if (!pi->rss)
4469                         return -ENOMEM;
4470         }
4471         return 0;
4472 }
4473
4474 static void print_port_info(const struct net_device *dev)
4475 {
4476         char buf[80];
4477         char *bufp = buf;
4478         const char *spd = "";
4479         const struct port_info *pi = netdev_priv(dev);
4480         const struct adapter *adap = pi->adapter;
4481
4482         if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
4483                 spd = " 2.5 GT/s";
4484         else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
4485                 spd = " 5 GT/s";
4486         else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
4487                 spd = " 8 GT/s";
4488
4489         if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
4490                 bufp += sprintf(bufp, "100/");
4491         if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
4492                 bufp += sprintf(bufp, "1000/");
4493         if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
4494                 bufp += sprintf(bufp, "10G/");
4495         if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
4496                 bufp += sprintf(bufp, "40G/");
4497         if (bufp != buf)
4498                 --bufp;
4499         sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
4500
4501         netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
4502                     adap->params.vpd.id,
4503                     CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
4504                     is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
4505                     (adap->flags & USING_MSIX) ? " MSI-X" :
4506                     (adap->flags & USING_MSI) ? " MSI" : "");
4507         netdev_info(dev, "S/N: %s, P/N: %s\n",
4508                     adap->params.vpd.sn, adap->params.vpd.pn);
4509 }
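/* Example of the resulting log lines (illustrative values only):
 *
 *   eth0: Chelsio T580-CR rev 0 10G/40GBASE-QSFP RNIC PCIe x8 8 GT/s MSI-X
 *   eth0: S/N: PT01234567890, P/N: 110-1121-50
 */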
4510
4511 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
4512 {
4513         pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
4514 }
4515
4516 /*
4517  * Free the following resources:
4518  * - memory used for tables
4519  * - MSI/MSI-X
4520  * - net devices
4521  * - resources FW is holding for us
4522  */
4523 static void free_some_resources(struct adapter *adapter)
4524 {
4525         unsigned int i;
4526
4527         t4_free_mem(adapter->l2t);
4528         t4_free_mem(adapter->tids.tid_tab);
4529         kfree(adapter->sge.egr_map);
4530         kfree(adapter->sge.ingr_map);
4531         kfree(adapter->sge.starving_fl);
4532         kfree(adapter->sge.txq_maperr);
4533 #ifdef CONFIG_DEBUG_FS
4534         kfree(adapter->sge.blocked_fl);
4535 #endif
4536         disable_msi(adapter);
4537
4538         for_each_port(adapter, i)
4539                 if (adapter->port[i]) {
4540                         struct port_info *pi = adap2pinfo(adapter, i);
4541
4542                         if (pi->viid != 0)
4543                                 t4_free_vi(adapter, adapter->mbox, adapter->pf,
4544                                            0, pi->viid);
4545                         kfree(pi->rss);
4546                         free_netdev(adapter->port[i]);
4547                 }
4548         if (adapter->flags & FW_OK)
4549                 t4_fw_bye(adapter, adapter->pf);
4550 }
4551
4552 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
4553 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
4554                    NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
4555 #define SEGMENT_SIZE 128
4556
4557 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4558 {
4559         int func, i, err, s_qpp, qpp, num_seg;
4560         struct port_info *pi;
4561         bool highdma = false;
4562         struct adapter *adapter = NULL;
4563         void __iomem *regs;
4564
4565         printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
4566
4567         err = pci_request_regions(pdev, KBUILD_MODNAME);
4568         if (err) {
4569                 /* Just info, some other driver may have claimed the device. */
4570                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
4571                 return err;
4572         }
4573
4574         err = pci_enable_device(pdev);
4575         if (err) {
4576                 dev_err(&pdev->dev, "cannot enable PCI device\n");
4577                 goto out_release_regions;
4578         }
4579
4580         regs = pci_ioremap_bar(pdev, 0);
4581         if (!regs) {
4582                 dev_err(&pdev->dev, "cannot map device registers\n");
4583                 err = -ENOMEM;
4584                 goto out_disable_device;
4585         }
4586
4587         err = t4_wait_dev_ready(regs);
4588         if (err < 0)
4589                 goto out_unmap_bar0;
4590
4591         /* We control everything through one PF */
4592         func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
4593         if (func != ent->driver_data) {
4594                 iounmap(regs);
4595                 pci_disable_device(pdev);
4596                 pci_save_state(pdev);        /* to restore SR-IOV later */
4597                 goto sriov;
4598         }
4599
4600         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4601                 highdma = true;
4602                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4603                 if (err) {
4604                         dev_err(&pdev->dev,
4605                                 "unable to obtain 64-bit DMA for coherent allocations\n");
4606                         goto out_unmap_bar0;
4607                 }
4608         } else {
4609                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4610                 if (err) {
4611                         dev_err(&pdev->dev, "no usable DMA configuration\n");
4612                         goto out_unmap_bar0;
4613                 }
4614         }
4615
4616         pci_enable_pcie_error_reporting(pdev);
4617         enable_pcie_relaxed_ordering(pdev);
4618         pci_set_master(pdev);
4619         pci_save_state(pdev);
4620
4621         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
4622         if (!adapter) {
4623                 err = -ENOMEM;
4624                 goto out_unmap_bar0;
4625         }
4626
4627         adapter->workq = create_singlethread_workqueue("cxgb4");
4628         if (!adapter->workq) {
4629                 err = -ENOMEM;
4630                 goto out_free_adapter;
4631         }
4632
4633         /* PCI device has been enabled */
4634         adapter->flags |= DEV_ENABLED;
4635
4636         adapter->regs = regs;
4637         adapter->pdev = pdev;
4638         adapter->pdev_dev = &pdev->dev;
4639         adapter->mbox = func;
4640         adapter->pf = func;
4641         adapter->msg_enable = dflt_msg_enable;
4642         memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
4643
4644         spin_lock_init(&adapter->stats_lock);
4645         spin_lock_init(&adapter->tid_release_lock);
4646         spin_lock_init(&adapter->win0_lock);
4647
4648         INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
4649         INIT_WORK(&adapter->db_full_task, process_db_full);
4650         INIT_WORK(&adapter->db_drop_task, process_db_drop);
4651
4652         err = t4_prep_adapter(adapter);
4653         if (err)
4654                 goto out_free_adapter;
4655
4657         if (!is_t4(adapter->params.chip)) {
4658                 s_qpp = (QUEUESPERPAGEPF0_S +
4659                         (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
4660                         adapter->pf);
4661                 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
4662                       SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
4663                 num_seg = PAGE_SIZE / SEGMENT_SIZE;
4664
4665                 /* Each segment is 128 bytes.  Write coalescing is enabled only
4666                  * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the
4667                  * queue is no greater than the number of segments that fit in
4668                  * a page.
4669                  */
4670                 if (qpp > num_seg) {
4671                         dev_err(&pdev->dev,
4672                                 "Incorrect number of egress queues per page\n");
4673                         err = -EINVAL;
4674                         goto out_free_adapter;
4675                 }
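                /* Editorial note: with a 4KB PAGE_SIZE, num_seg ==
                 * 4096 / 128 == 32, so BAR2 write combining is only set up
                 * when at most 32 egress queues share a page.
                 */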
4676                 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
4677                                            pci_resource_len(pdev, 2));
4678                 if (!adapter->bar2) {
4679                         dev_err(&pdev->dev, "cannot map device bar2 region\n");
4680                         err = -ENOMEM;
4681                         goto out_free_adapter;
4682                 }
4683                 t4_write_reg(adapter, SGE_STAT_CFG_A,
4684                              STATSOURCE_T5_V(7) | STATMODE_V(0));
4685         }
4686
4687         setup_memwin(adapter);
4688         err = adap_init0(adapter);
4689 #ifdef CONFIG_DEBUG_FS
4690         bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
4691 #endif
4692         setup_memwin_rdma(adapter);
4693         if (err)
4694                 goto out_unmap_bar;
4695
4696         for_each_port(adapter, i) {
4697                 struct net_device *netdev;
4698
4699                 netdev = alloc_etherdev_mq(sizeof(struct port_info),
4700                                            MAX_ETH_QSETS);
4701                 if (!netdev) {
4702                         err = -ENOMEM;
4703                         goto out_free_dev;
4704                 }
4705
4706                 SET_NETDEV_DEV(netdev, &pdev->dev);
4707
4708                 adapter->port[i] = netdev;
4709                 pi = netdev_priv(netdev);
4710                 pi->adapter = adapter;
4711                 pi->xact_addr_filt = -1;
4712                 pi->port_id = i;
4713                 netdev->irq = pdev->irq;
4714
4715                 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
4716                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4717                         NETIF_F_RXCSUM | NETIF_F_RXHASH |
4718                         NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
4719                 if (highdma)
4720                         netdev->hw_features |= NETIF_F_HIGHDMA;
4721                 netdev->features |= netdev->hw_features;
4722                 netdev->vlan_features = netdev->features & VLAN_FEAT;
4723
4724                 netdev->priv_flags |= IFF_UNICAST_FLT;
4725
4726                 netdev->netdev_ops = &cxgb4_netdev_ops;
4727 #ifdef CONFIG_CHELSIO_T4_DCB
4728                 netdev->dcbnl_ops = &cxgb4_dcb_ops;
4729                 cxgb4_dcb_state_init(netdev);
4730 #endif
4731                 cxgb4_set_ethtool_ops(netdev);
4732         }
4733
4734         pci_set_drvdata(pdev, adapter);
4735
4736         if (adapter->flags & FW_OK) {
4737                 err = t4_port_init(adapter, func, func, 0);
4738                 if (err)
4739                         goto out_free_dev;
4740         } else if (adapter->params.nports == 1) {
4741                 /* If we don't have a connection to the firmware -- possibly
4742                  * because of an error -- grab the raw VPD parameters so we
4743                  * can set the proper MAC Address on the debug network
4744                  * interface that we've created.
4745                  */
4746                 u8 hw_addr[ETH_ALEN];
4747                 u8 *na = adapter->params.vpd.na;
4748
4749                 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
4750                 if (!err) {
4751                         for (i = 0; i < ETH_ALEN; i++)
4752                                 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
4753                                               hex2val(na[2 * i + 1]));
4754                         t4_set_hw_addr(adapter, 0, hw_addr);
4755                 }
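                /* Editorial example: a VPD network address holding the
                 * ASCII hex string "0007430ABCDE" yields the station
                 * address 00:07:43:0a:bc:de (00:07:43 is Chelsio's OUI).
                 */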
4756         }
4757
4758         /* Configure queues and allocate tables now, they can be needed as
4759          * soon as the first register_netdev completes.
4760          */
4761         cfg_queues(adapter);
4762
4763         adapter->l2t = t4_init_l2t();
4764         if (!adapter->l2t) {
4765                 /* We tolerate a lack of L2T, giving up some functionality */
4766                 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
4767                 adapter->params.offload = 0;
4768         }
4769
4770 #if IS_ENABLED(CONFIG_IPV6)
4771         adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
4772                                           adapter->clipt_end);
4773         if (!adapter->clipt) {
4774                 /* We tolerate a lack of clip_table, giving up
4775                  * some functionality
4776                  */
4777                 dev_warn(&pdev->dev,
4778                          "could not allocate Clip table, continuing\n");
4779                 adapter->params.offload = 0;
4780         }
4781 #endif
4782         if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
4783                 dev_warn(&pdev->dev,
4784                          "could not allocate TID table, continuing\n");
4785                 adapter->params.offload = 0;
4786         }
4787
4788         /* See what interrupts we'll be using */
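        /* Editorial note: "msi" is a module parameter (defined earlier in
         * this file); the default of 2 tries MSI-X first, 1 falls back to
         * MSI, and 0 forces legacy INTx interrupts.
         */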
4789         if (msi > 1 && enable_msix(adapter) == 0)
4790                 adapter->flags |= USING_MSIX;
4791         else if (msi > 0 && pci_enable_msi(pdev) == 0)
4792                 adapter->flags |= USING_MSI;
4793
4794         err = init_rss(adapter);
4795         if (err)
4796                 goto out_free_dev;
4797
4798         /*
4799          * The card is now ready to go.  If any errors occur during device
4800          * registration we do not fail the whole card but rather proceed only
4801          * with the ports we manage to register successfully.  However we must
4802          * register at least one net device.
4803          */
4804         for_each_port(adapter, i) {
4805                 pi = adap2pinfo(adapter, i);
4806                 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
4807                 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
4808
4809                 err = register_netdev(adapter->port[i]);
4810                 if (err)
4811                         break;
4812                 adapter->chan_map[pi->tx_chan] = i;
4813                 print_port_info(adapter->port[i]);
4814         }
4815         if (i == 0) {
4816                 dev_err(&pdev->dev, "could not register any net devices\n");
4817                 goto out_free_dev;
4818         }
4819         if (err) {
4820                 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
4821                 err = 0;
4822         }
4823
4824         if (cxgb4_debugfs_root) {
4825                 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
4826                                                            cxgb4_debugfs_root);
4827                 setup_debugfs(adapter);
4828         }
4829
4830         /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4831         pdev->needs_freset = 1;
4832
4833         if (is_offload(adapter))
4834                 attach_ulds(adapter);
4835
4836 sriov:
4837 #ifdef CONFIG_PCI_IOV
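        /* Editorial note: num_vf[] is a per-Physical-Function module
         * parameter array (defined earlier in this file) giving the number
         * of SR-IOV Virtual Functions to enable on each PF.
         */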
4838         if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
4839                 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
4840                         dev_info(&pdev->dev,
4841                                  "instantiated %u virtual functions\n",
4842                                  num_vf[func]);
4843 #endif
4844         return 0;
4845
4846  out_free_dev:
4847         free_some_resources(adapter);
4848  out_unmap_bar:
4849         if (!is_t4(adapter->params.chip))
4850                 iounmap(adapter->bar2);
4851  out_free_adapter:
4852         if (adapter->workq)
4853                 destroy_workqueue(adapter->workq);
4854
4855         kfree(adapter);
4856  out_unmap_bar0:
4857         iounmap(regs);
4858  out_disable_device:
4859         pci_disable_pcie_error_reporting(pdev);
4860         pci_disable_device(pdev);
4861  out_release_regions:
4862         pci_release_regions(pdev);
4863         return err;
4864 }
4865
4866 static void remove_one(struct pci_dev *pdev)
4867 {
4868         struct adapter *adapter = pci_get_drvdata(pdev);
4869
4870 #ifdef CONFIG_PCI_IOV
4871         pci_disable_sriov(pdev);
4872
4873 #endif
4874
4875         if (adapter) {
4876                 int i;
4877
4878                 /* Tear down per-adapter Work Queue first since it can contain
4879                  * references to our adapter data structure.
4880                  */
4881                 destroy_workqueue(adapter->workq);
4882
4883                 if (is_offload(adapter))
4884                         detach_ulds(adapter);
4885
4886                 disable_interrupts(adapter);
4887
4888                 for_each_port(adapter, i)
4889                         if (adapter->port[i]->reg_state == NETREG_REGISTERED)
4890                                 unregister_netdev(adapter->port[i]);
4891
4892                 debugfs_remove_recursive(adapter->debugfs_root);
4893
4894                 /* If we allocated filters, free up state associated with any
4895                  * valid filters ...
4896                  */
4897                 if (adapter->tids.ftid_tab) {
4898                         struct filter_entry *f = &adapter->tids.ftid_tab[0];
4899                         for (i = 0; i < (adapter->tids.nftids +
4900                                         adapter->tids.nsftids); i++, f++)
4901                                 if (f->valid)
4902                                         clear_filter(adapter, f);
4903                 }
4904
4905                 if (adapter->flags & FULL_INIT_DONE)
4906                         cxgb_down(adapter);
4907
4908                 free_some_resources(adapter);
4909 #if IS_ENABLED(CONFIG_IPV6)
4910                 t4_cleanup_clip_tbl(adapter);
4911 #endif
4912                 iounmap(adapter->regs);
4913                 if (!is_t4(adapter->params.chip))
4914                         iounmap(adapter->bar2);
4915                 pci_disable_pcie_error_reporting(pdev);
4916                 if ((adapter->flags & DEV_ENABLED)) {
4917                         pci_disable_device(pdev);
4918                         adapter->flags &= ~DEV_ENABLED;
4919                 }
4920                 pci_release_regions(pdev);
4921                 synchronize_rcu();
4922                 kfree(adapter);
4923         } else
4924                 pci_release_regions(pdev);
4925 }
4926
4927 static struct pci_driver cxgb4_driver = {
4928         .name     = KBUILD_MODNAME,
4929         .id_table = cxgb4_pci_tbl,
4930         .probe    = init_one,
4931         .remove   = remove_one,
4932         .shutdown = remove_one,
4933         .err_handler = &cxgb4_eeh,
4934 };
4935
4936 static int __init cxgb4_init_module(void)
4937 {
4938         int ret;
4939
4940         /* Debugfs support is optional, just warn if this fails */
4941         cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
4942         if (!cxgb4_debugfs_root)
4943                 pr_warn("could not create debugfs entry, continuing\n");
4944
4945         ret = pci_register_driver(&cxgb4_driver);
4946         if (ret < 0)
4947                 debugfs_remove(cxgb4_debugfs_root);
4948
4949 #if IS_ENABLED(CONFIG_IPV6)
4950         if (!inet6addr_registered) {
4951                 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
4952                 inet6addr_registered = true;
4953         }
4954 #endif
4955
4956         return ret;
4957 }
4958
4959 static void __exit cxgb4_cleanup_module(void)
4960 {
4961 #if IS_ENABLED(CONFIG_IPV6)
4962         if (inet6addr_registered) {
4963                 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
4964                 inet6addr_registered = false;
4965         }
4966 #endif
4967         pci_unregister_driver(&cxgb4_driver);
4968         debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
4969 }
4970
4971 module_init(cxgb4_init_module);
4972 module_exit(cxgb4_cleanup_module);