net: hns3: fix RMW issue for VLAN filter switch
[tomoyo/tomoyo-test1.git] drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
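/* Illustrative example (not part of the driver): the two macros above are
 * used together to read one named counter out of struct hclge_mac_stats,
 * e.g.
 *
 *	u64 val = HCLGE_STATS_READ(&hdev->mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * g_mac_stats_string below records these offsets so the ethtool code can
 * walk the structure generically.
 */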
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38 #define HCLGE_RESET_SYNC_TIME           100
39 #define HCLGE_PF_RESET_SYNC_TIME        20
40 #define HCLGE_PF_RESET_SYNC_CNT         1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55
56 #define HCLGE_LINK_STATUS_MS    10
57
58 #define HCLGE_VF_VPORT_START_NUM        1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66                                u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70                                                    unsigned long *addr);
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
72
73 static struct hnae3_ae_algo ae_algo;
74
75 static struct workqueue_struct *hclge_wq;
76
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85         /* required last entry */
86         {0, }
87 };
88
89 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
90
91 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92                                          HCLGE_CMDQ_TX_ADDR_H_REG,
93                                          HCLGE_CMDQ_TX_DEPTH_REG,
94                                          HCLGE_CMDQ_TX_TAIL_REG,
95                                          HCLGE_CMDQ_TX_HEAD_REG,
96                                          HCLGE_CMDQ_RX_ADDR_L_REG,
97                                          HCLGE_CMDQ_RX_ADDR_H_REG,
98                                          HCLGE_CMDQ_RX_DEPTH_REG,
99                                          HCLGE_CMDQ_RX_TAIL_REG,
100                                          HCLGE_CMDQ_RX_HEAD_REG,
101                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
102                                          HCLGE_CMDQ_INTR_STS_REG,
103                                          HCLGE_CMDQ_INTR_EN_REG,
104                                          HCLGE_CMDQ_INTR_GEN_REG};
105
106 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107                                            HCLGE_VECTOR0_OTER_EN_REG,
108                                            HCLGE_MISC_RESET_STS_REG,
109                                            HCLGE_MISC_VECTOR_INT_STS,
110                                            HCLGE_GLOBAL_RESET_REG,
111                                            HCLGE_FUN_RST_ING,
112                                            HCLGE_GRO_EN_REG};
113
114 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115                                          HCLGE_RING_RX_ADDR_H_REG,
116                                          HCLGE_RING_RX_BD_NUM_REG,
117                                          HCLGE_RING_RX_BD_LENGTH_REG,
118                                          HCLGE_RING_RX_MERGE_EN_REG,
119                                          HCLGE_RING_RX_TAIL_REG,
120                                          HCLGE_RING_RX_HEAD_REG,
121                                          HCLGE_RING_RX_FBD_NUM_REG,
122                                          HCLGE_RING_RX_OFFSET_REG,
123                                          HCLGE_RING_RX_FBD_OFFSET_REG,
124                                          HCLGE_RING_RX_STASH_REG,
125                                          HCLGE_RING_RX_BD_ERR_REG,
126                                          HCLGE_RING_TX_ADDR_L_REG,
127                                          HCLGE_RING_TX_ADDR_H_REG,
128                                          HCLGE_RING_TX_BD_NUM_REG,
129                                          HCLGE_RING_TX_PRIORITY_REG,
130                                          HCLGE_RING_TX_TC_REG,
131                                          HCLGE_RING_TX_MERGE_EN_REG,
132                                          HCLGE_RING_TX_TAIL_REG,
133                                          HCLGE_RING_TX_HEAD_REG,
134                                          HCLGE_RING_TX_FBD_NUM_REG,
135                                          HCLGE_RING_TX_OFFSET_REG,
136                                          HCLGE_RING_TX_EBD_NUM_REG,
137                                          HCLGE_RING_TX_EBD_OFFSET_REG,
138                                          HCLGE_RING_TX_BD_ERR_REG,
139                                          HCLGE_RING_EN_REG};
140
141 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142                                              HCLGE_TQP_INTR_GL0_REG,
143                                              HCLGE_TQP_INTR_GL1_REG,
144                                              HCLGE_TQP_INTR_GL2_REG,
145                                              HCLGE_TQP_INTR_RL_REG};
146
147 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
148         "App    Loopback test",
149         "Serdes serial Loopback test",
150         "Serdes parallel Loopback test",
151         "Phy    Loopback test"
152 };
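/* Names reported for the ethtool self-test string set (ETH_SS_TEST);
 * indexed by the HNAE3_LOOP_* values in hclge_get_strings() below.
 */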
153
154 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155         {"mac_tx_mac_pause_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157         {"mac_rx_mac_pause_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159         {"mac_tx_control_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161         {"mac_rx_control_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163         {"mac_tx_pfc_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165         {"mac_tx_pfc_pri0_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167         {"mac_tx_pfc_pri1_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169         {"mac_tx_pfc_pri2_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171         {"mac_tx_pfc_pri3_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173         {"mac_tx_pfc_pri4_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175         {"mac_tx_pfc_pri5_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177         {"mac_tx_pfc_pri6_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179         {"mac_tx_pfc_pri7_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181         {"mac_rx_pfc_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183         {"mac_rx_pfc_pri0_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185         {"mac_rx_pfc_pri1_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187         {"mac_rx_pfc_pri2_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189         {"mac_rx_pfc_pri3_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191         {"mac_rx_pfc_pri4_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193         {"mac_rx_pfc_pri5_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195         {"mac_rx_pfc_pri6_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197         {"mac_rx_pfc_pri7_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199         {"mac_tx_total_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201         {"mac_tx_total_oct_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203         {"mac_tx_good_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205         {"mac_tx_bad_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207         {"mac_tx_good_oct_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209         {"mac_tx_bad_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211         {"mac_tx_uni_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213         {"mac_tx_multi_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215         {"mac_tx_broad_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217         {"mac_tx_undersize_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219         {"mac_tx_oversize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221         {"mac_tx_64_oct_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223         {"mac_tx_65_127_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225         {"mac_tx_128_255_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227         {"mac_tx_256_511_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229         {"mac_tx_512_1023_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231         {"mac_tx_1024_1518_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233         {"mac_tx_1519_2047_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235         {"mac_tx_2048_4095_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237         {"mac_tx_4096_8191_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239         {"mac_tx_8192_9216_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241         {"mac_tx_9217_12287_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243         {"mac_tx_12288_16383_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245         {"mac_tx_1519_max_good_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247         {"mac_tx_1519_max_bad_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249         {"mac_rx_total_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251         {"mac_rx_total_oct_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253         {"mac_rx_good_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255         {"mac_rx_bad_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257         {"mac_rx_good_oct_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259         {"mac_rx_bad_oct_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261         {"mac_rx_uni_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263         {"mac_rx_multi_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265         {"mac_rx_broad_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267         {"mac_rx_undersize_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269         {"mac_rx_oversize_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271         {"mac_rx_64_oct_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273         {"mac_rx_65_127_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275         {"mac_rx_128_255_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277         {"mac_rx_256_511_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279         {"mac_rx_512_1023_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281         {"mac_rx_1024_1518_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283         {"mac_rx_1519_2047_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285         {"mac_rx_2048_4095_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287         {"mac_rx_4096_8191_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289         {"mac_rx_8192_9216_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291         {"mac_rx_9217_12287_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293         {"mac_rx_12288_16383_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295         {"mac_rx_1519_max_good_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297         {"mac_rx_1519_max_bad_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
299
300         {"mac_tx_fragment_pkt_num",
301                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302         {"mac_tx_undermin_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304         {"mac_tx_jabber_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306         {"mac_tx_err_all_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308         {"mac_tx_from_app_good_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310         {"mac_tx_from_app_bad_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312         {"mac_rx_fragment_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314         {"mac_rx_undermin_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316         {"mac_rx_jabber_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318         {"mac_rx_fcs_err_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320         {"mac_rx_send_app_good_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322         {"mac_rx_send_app_bad_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
324 };
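/* Mapping of ethtool statistic names to their offsets within
 * struct hclge_mac_stats; consumed generically by hclge_comm_get_strings()
 * and hclge_comm_get_stats().
 */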
325
326 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
327         {
328                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
330                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331                 .i_port_bitmap = 0x1,
332         },
333 };
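/* Static MAC manager table entry: 01:80:c2:00:00:0e with ethertype
 * ETH_P_LLDP is the LLDP nearest-bridge multicast address, presumably so
 * that LLDP frames can be handled specially rather than treated as normal
 * traffic.
 */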
334
335 static const u8 hclge_hash_key[] = {
336         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
341 };
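/* Default RSS hash key; this appears to be the standard 40-byte Toeplitz
 * key recommended in Microsoft's RSS specification.
 */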
342
343 static const u32 hclge_dfx_bd_offset_list[] = {
344         HCLGE_DFX_BIOS_BD_OFFSET,
345         HCLGE_DFX_SSU_0_BD_OFFSET,
346         HCLGE_DFX_SSU_1_BD_OFFSET,
347         HCLGE_DFX_IGU_BD_OFFSET,
348         HCLGE_DFX_RPU_0_BD_OFFSET,
349         HCLGE_DFX_RPU_1_BD_OFFSET,
350         HCLGE_DFX_NCSI_BD_OFFSET,
351         HCLGE_DFX_RTC_BD_OFFSET,
352         HCLGE_DFX_PPP_BD_OFFSET,
353         HCLGE_DFX_RCB_BD_OFFSET,
354         HCLGE_DFX_TQP_BD_OFFSET,
355         HCLGE_DFX_SSU_2_BD_OFFSET
356 };
357
358 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359         HCLGE_OPC_DFX_BIOS_COMMON_REG,
360         HCLGE_OPC_DFX_SSU_REG_0,
361         HCLGE_OPC_DFX_SSU_REG_1,
362         HCLGE_OPC_DFX_IGU_EGU_REG,
363         HCLGE_OPC_DFX_RPU_REG_0,
364         HCLGE_OPC_DFX_RPU_REG_1,
365         HCLGE_OPC_DFX_NCSI_REG,
366         HCLGE_OPC_DFX_RTC_REG,
367         HCLGE_OPC_DFX_PPP_REG,
368         HCLGE_OPC_DFX_RCB_REG,
369         HCLGE_OPC_DFX_TQP_REG,
370         HCLGE_OPC_DFX_SSU_REG_2
371 };
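/* hclge_dfx_bd_offset_list and hclge_dfx_reg_opcode_list are parallel
 * arrays: entry i gives the BD-number offset and the query opcode for the
 * same DFX (debug/diagnostic) register block.
 */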
372
373 static const struct key_info meta_data_key_info[] = {
374         { PACKET_TYPE_ID, 6},
375         { IP_FRAGEMENT, 1},
376         { ROCE_TYPE, 1},
377         { NEXT_KEY, 5},
378         { VLAN_NUMBER, 2},
379         { SRC_VPORT, 12},
380         { DST_VPORT, 12},
381         { TUNNEL_PACKET, 1},
382 };
383
384 static const struct key_info tuple_key_info[] = {
385         { OUTER_DST_MAC, 48},
386         { OUTER_SRC_MAC, 48},
387         { OUTER_VLAN_TAG_FST, 16},
388         { OUTER_VLAN_TAG_SEC, 16},
389         { OUTER_ETH_TYPE, 16},
390         { OUTER_L2_RSV, 16},
391         { OUTER_IP_TOS, 8},
392         { OUTER_IP_PROTO, 8},
393         { OUTER_SRC_IP, 32},
394         { OUTER_DST_IP, 32},
395         { OUTER_L3_RSV, 16},
396         { OUTER_SRC_PORT, 16},
397         { OUTER_DST_PORT, 16},
398         { OUTER_L4_RSV, 32},
399         { OUTER_TUN_VNI, 24},
400         { OUTER_TUN_FLOW_ID, 8},
401         { INNER_DST_MAC, 48},
402         { INNER_SRC_MAC, 48},
403         { INNER_VLAN_TAG_FST, 16},
404         { INNER_VLAN_TAG_SEC, 16},
405         { INNER_ETH_TYPE, 16},
406         { INNER_L2_RSV, 16},
407         { INNER_IP_TOS, 8},
408         { INNER_IP_PROTO, 8},
409         { INNER_SRC_IP, 32},
410         { INNER_DST_IP, 32},
411         { INNER_L3_RSV, 16},
412         { INNER_SRC_PORT, 16},
413         { INNER_DST_PORT, 16},
414         { INNER_L4_RSV, 32},
415 };
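/* For both key tables above, the second member is the width of the key
 * field in bits (e.g. 48 for a MAC address, 32 for an IPv4 address).
 */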
416
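/* Legacy MAC statistics path: a fixed 21-descriptor HCLGE_OPC_STATS_MAC
 * query, kept for firmware that cannot report the register count
 * (see hclge_mac_update_stats() below).
 */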
417 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
418 {
419 #define HCLGE_MAC_CMD_NUM 21
420
421         u64 *data = (u64 *)(&hdev->mac_stats);
422         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
423         __le64 *desc_data;
424         int i, k, n;
425         int ret;
426
427         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
429         if (ret) {
430                 dev_err(&hdev->pdev->dev,
431                         "Get MAC pkt stats fail, status = %d.\n", ret);
432
433                 return ret;
434         }
435
436         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437                 /* for special opcode 0032, only the first desc has the head */
438                 if (unlikely(i == 0)) {
439                         desc_data = (__le64 *)(&desc[i].data[0]);
440                         n = HCLGE_RD_FIRST_STATS_NUM;
441                 } else {
442                         desc_data = (__le64 *)(&desc[i]);
443                         n = HCLGE_RD_OTHER_STATS_NUM;
444                 }
445
446                 for (k = 0; k < n; k++) {
447                         *data += le64_to_cpu(*desc_data);
448                         data++;
449                         desc_data++;
450                 }
451         }
452
453         return 0;
454 }
455
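/* Preferred MAC statistics path: the number of descriptors is derived from
 * the register count reported by HCLGE_OPC_QUERY_MAC_REG_NUM (see
 * hclge_mac_query_reg_num() below).
 */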
456 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
457 {
458         u64 *data = (u64 *)(&hdev->mac_stats);
459         struct hclge_desc *desc;
460         __le64 *desc_data;
461         u16 i, k, n;
462         int ret;
463
464         /* This may be called inside atomic sections,
465          * so GFP_ATOMIC is more suitable here
466          */
467         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
468         if (!desc)
469                 return -ENOMEM;
470
471         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
473         if (ret) {
474                 kfree(desc);
475                 return ret;
476         }
477
478         for (i = 0; i < desc_num; i++) {
479                 /* for special opcode 0034, only the first desc has the head */
480                 if (i == 0) {
481                         desc_data = (__le64 *)(&desc[i].data[0]);
482                         n = HCLGE_RD_FIRST_STATS_NUM;
483                 } else {
484                         desc_data = (__le64 *)(&desc[i]);
485                         n = HCLGE_RD_OTHER_STATS_NUM;
486                 }
487
488                 for (k = 0; k < n; k++) {
489                         *data += le64_to_cpu(*desc_data);
490                         data++;
491                         desc_data++;
492                 }
493         }
494
495         kfree(desc);
496
497         return 0;
498 }
499
500 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
501 {
502         struct hclge_desc desc;
503         __le32 *desc_data;
504         u32 reg_num;
505         int ret;
506
507         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
509         if (ret)
510                 return ret;
511
512         desc_data = (__le32 *)(&desc.data[0]);
513         reg_num = le32_to_cpu(*desc_data);
514
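        /* one descriptor for the first three registers, plus one more for
         * every further group of four, i.e.
         * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4)
         */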
515         *desc_num = 1 + ((reg_num - 3) >> 2) +
516                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
517
518         return 0;
519 }
520
521 static int hclge_mac_update_stats(struct hclge_dev *hdev)
522 {
523         u32 desc_num;
524         int ret;
525
526         ret = hclge_mac_query_reg_num(hdev, &desc_num);
527
528         /* The firmware supports the new statistics acquisition method */
529         if (!ret)
530                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
531         else if (ret == -EOPNOTSUPP)
532                 ret = hclge_mac_update_stats_defective(hdev);
533         else
534                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
535
536         return ret;
537 }
538
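/* Accumulate the per-queue RX and TX received-packet counters: each TQP is
 * queried individually with HCLGE_OPC_QUERY_RX_STATUS / _TX_STATUS and the
 * result is added into its struct hclge_tqp stats.
 */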
539 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
540 {
541         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542         struct hclge_vport *vport = hclge_get_vport(handle);
543         struct hclge_dev *hdev = vport->back;
544         struct hnae3_queue *queue;
545         struct hclge_desc desc[1];
546         struct hclge_tqp *tqp;
547         int ret, i;
548
549         for (i = 0; i < kinfo->num_tqps; i++) {
550                 queue = handle->kinfo.tqp[i];
551                 tqp = container_of(queue, struct hclge_tqp, q);
552                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
553                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554                                            true);
555
556                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
558                 if (ret) {
559                         dev_err(&hdev->pdev->dev,
560                                 "Query tqp stat fail, status = %d,queue = %d\n",
561                                 ret, i);
562                         return ret;
563                 }
564                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565                         le32_to_cpu(desc[0].data[1]);
566         }
567
568         for (i = 0; i < kinfo->num_tqps; i++) {
569                 queue = handle->kinfo.tqp[i];
570                 tqp = container_of(queue, struct hclge_tqp, q);
571                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
572                 hclge_cmd_setup_basic_desc(&desc[0],
573                                            HCLGE_OPC_QUERY_TX_STATUS,
574                                            true);
575
576                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
578                 if (ret) {
579                         dev_err(&hdev->pdev->dev,
580                                 "Query tqp stat fail, status = %d,queue = %d\n",
581                                 ret, i);
582                         return ret;
583                 }
584                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585                         le32_to_cpu(desc[0].data[1]);
586         }
587
588         return 0;
589 }
590
591 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
592 {
593         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594         struct hclge_tqp *tqp;
595         u64 *buff = data;
596         int i;
597
598         for (i = 0; i < kinfo->num_tqps; i++) {
599                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601         }
602
603         for (i = 0; i < kinfo->num_tqps; i++) {
604                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
606         }
607
608         return buff;
609 }
610
611 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
614
615         /* each tqp has two queues: TX and RX */
616         return kinfo->num_tqps * (2);
617 }
618
619 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
620 {
621         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622         u8 *buff = data;
623         int i = 0;
624
625         for (i = 0; i < kinfo->num_tqps; i++) {
626                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627                         struct hclge_tqp, q);
628                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
629                          tqp->index);
630                 buff = buff + ETH_GSTRING_LEN;
631         }
632
633         for (i = 0; i < kinfo->num_tqps; i++) {
634                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635                         struct hclge_tqp, q);
636                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
637                          tqp->index);
638                 buff = buff + ETH_GSTRING_LEN;
639         }
640
641         return buff;
642 }
643
644 static u64 *hclge_comm_get_stats(const void *comm_stats,
645                                  const struct hclge_comm_stats_str strs[],
646                                  int size, u64 *data)
647 {
648         u64 *buf = data;
649         u32 i;
650
651         for (i = 0; i < size; i++)
652                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
653
654         return buf + size;
655 }
656
657 static u8 *hclge_comm_get_strings(u32 stringset,
658                                   const struct hclge_comm_stats_str strs[],
659                                   int size, u8 *data)
660 {
661         char *buff = (char *)data;
662         u32 i;
663
664         if (stringset != ETH_SS_STATS)
665                 return buff;
666
667         for (i = 0; i < size; i++) {
668                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669                 buff = buff + ETH_GSTRING_LEN;
670         }
671
672         return (u8 *)buff;
673 }
674
675 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
676 {
677         struct hnae3_handle *handle;
678         int status;
679
680         handle = &hdev->vport[0].nic;
681         if (handle->client) {
682                 status = hclge_tqps_update_stats(handle);
683                 if (status) {
684                         dev_err(&hdev->pdev->dev,
685                                 "Update TQPS stats fail, status = %d.\n",
686                                 status);
687                 }
688         }
689
690         status = hclge_mac_update_stats(hdev);
691         if (status)
692                 dev_err(&hdev->pdev->dev,
693                         "Update MAC stats fail, status = %d.\n", status);
694 }
695
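/* Statistics refresh entry point for the handle; the
 * HCLGE_STATE_STATISTICS_UPDATING bit keeps concurrent callers from racing
 * on the shared counters.
 */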
696 static void hclge_update_stats(struct hnae3_handle *handle,
697                                struct net_device_stats *net_stats)
698 {
699         struct hclge_vport *vport = hclge_get_vport(handle);
700         struct hclge_dev *hdev = vport->back;
701         int status;
702
703         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704                 return;
705
706         status = hclge_mac_update_stats(hdev);
707         if (status)
708                 dev_err(&hdev->pdev->dev,
709                         "Update MAC stats fail, status = %d.\n",
710                         status);
711
712         status = hclge_tqps_update_stats(handle);
713         if (status)
714                 dev_err(&hdev->pdev->dev,
715                         "Update TQPS stats fail, status = %d.\n",
716                         status);
717
718         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
719 }
720
721 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722 {
723 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724                 HNAE3_SUPPORT_PHY_LOOPBACK |\
725                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
727
728         struct hclge_vport *vport = hclge_get_vport(handle);
729         struct hclge_dev *hdev = vport->back;
730         int count = 0;
731
732         /* Loopback test support rules:
733          * mac (app): only supported in GE mode
734          * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
735          * phy: only supported when a PHY device is present on the board
736          */
737         if (stringset == ETH_SS_TEST) {
738                 /* clear loopback bit flags at first */
739                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
740                 if (hdev->pdev->revision >= 0x21 ||
741                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
744                         count += 1;
745                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
746                 }
747
748                 count += 2;
749                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
751
752                 if (hdev->hw.mac.phydev) {
753                         count += 1;
754                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755                 }
756
757         } else if (stringset == ETH_SS_STATS) {
758                 count = ARRAY_SIZE(g_mac_stats_string) +
759                         hclge_tqps_get_sset_count(handle, stringset);
760         }
761
762         return count;
763 }
764
765 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766                               u8 *data)
767 {
768         u8 *p = (char *)data;
769         int size;
770
771         if (stringset == ETH_SS_STATS) {
772                 size = ARRAY_SIZE(g_mac_stats_string);
773                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
774                                            size, p);
775                 p = hclge_tqps_get_strings(handle, p);
776         } else if (stringset == ETH_SS_TEST) {
777                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
779                                ETH_GSTRING_LEN);
780                         p += ETH_GSTRING_LEN;
781                 }
782                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
784                                ETH_GSTRING_LEN);
785                         p += ETH_GSTRING_LEN;
786                 }
787                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
788                         memcpy(p,
789                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
790                                ETH_GSTRING_LEN);
791                         p += ETH_GSTRING_LEN;
792                 }
793                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
795                                ETH_GSTRING_LEN);
796                         p += ETH_GSTRING_LEN;
797                 }
798         }
799 }
800
801 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802 {
803         struct hclge_vport *vport = hclge_get_vport(handle);
804         struct hclge_dev *hdev = vport->back;
805         u64 *p;
806
807         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808                                  ARRAY_SIZE(g_mac_stats_string), data);
809         p = hclge_tqps_get_stats(handle, p);
810 }
811
812 static void hclge_get_mac_stat(struct hnae3_handle *handle,
813                                struct hns3_mac_stats *mac_stats)
814 {
815         struct hclge_vport *vport = hclge_get_vport(handle);
816         struct hclge_dev *hdev = vport->back;
817
818         hclge_update_stats(handle, NULL);
819
820         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
822 }
823
824 static int hclge_parse_func_status(struct hclge_dev *hdev,
825                                    struct hclge_func_status_cmd *status)
826 {
827         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
828                 return -EINVAL;
829
830         /* record whether this PF is the main PF */
831         if (status->pf_state & HCLGE_PF_STATE_MAIN)
832                 hdev->flag |= HCLGE_FLAG_MAIN;
833         else
834                 hdev->flag &= ~HCLGE_FLAG_MAIN;
835
836         return 0;
837 }
838
839 static int hclge_query_function_status(struct hclge_dev *hdev)
840 {
841 #define HCLGE_QUERY_MAX_CNT     5
842
843         struct hclge_func_status_cmd *req;
844         struct hclge_desc desc;
845         int timeout = 0;
846         int ret;
847
848         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
849         req = (struct hclge_func_status_cmd *)desc.data;
850
851         do {
852                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
853                 if (ret) {
854                         dev_err(&hdev->pdev->dev,
855                                 "query function status failed %d.\n", ret);
856                         return ret;
857                 }
858
859                 /* Check pf reset is done */
860                 if (req->pf_state)
861                         break;
862                 usleep_range(1000, 2000);
863         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
864
865         return hclge_parse_func_status(hdev, req);
866 }
867
868 static int hclge_query_pf_resource(struct hclge_dev *hdev)
869 {
870         struct hclge_pf_res_cmd *req;
871         struct hclge_desc desc;
872         int ret;
873
874         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
875         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
876         if (ret) {
877                 dev_err(&hdev->pdev->dev,
878                         "query pf resource failed %d.\n", ret);
879                 return ret;
880         }
881
882         req = (struct hclge_pf_res_cmd *)desc.data;
883         hdev->num_tqps = le16_to_cpu(req->tqp_num);
884         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
885
886         if (req->tx_buf_size)
887                 hdev->tx_buf_size =
888                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
889         else
890                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
891
892         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
893
894         if (req->dv_buf_size)
895                 hdev->dv_buf_size =
896                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
897         else
898                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
899
900         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
901
902         if (hnae3_dev_roce_supported(hdev)) {
903                 hdev->roce_base_msix_offset =
904                 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
905                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
906                 hdev->num_roce_msi =
907                 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
908                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
909
910                 /* the NIC's MSI-X vector count always equals RoCE's */
911                 hdev->num_nic_msi = hdev->num_roce_msi;
912
913                 /* the PF has both NIC and RoCE vectors; the NIC
914                  * vectors are laid out before the RoCE vectors.
915                  */
916                 hdev->num_msi = hdev->num_roce_msi +
917                                 hdev->roce_base_msix_offset;
918         } else {
919                 hdev->num_msi =
920                 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
921                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
922
923                 hdev->num_nic_msi = hdev->num_msi;
924         }
925
926         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
927                 dev_err(&hdev->pdev->dev,
928                         "Just %u msi resources, not enough for pf(min:2).\n",
929                         hdev->num_nic_msi);
930                 return -EINVAL;
931         }
932
933         return 0;
934 }
935
936 static int hclge_parse_speed(int speed_cmd, int *speed)
937 {
938         switch (speed_cmd) {
939         case 6:
940                 *speed = HCLGE_MAC_SPEED_10M;
941                 break;
942         case 7:
943                 *speed = HCLGE_MAC_SPEED_100M;
944                 break;
945         case 0:
946                 *speed = HCLGE_MAC_SPEED_1G;
947                 break;
948         case 1:
949                 *speed = HCLGE_MAC_SPEED_10G;
950                 break;
951         case 2:
952                 *speed = HCLGE_MAC_SPEED_25G;
953                 break;
954         case 3:
955                 *speed = HCLGE_MAC_SPEED_40G;
956                 break;
957         case 4:
958                 *speed = HCLGE_MAC_SPEED_50G;
959                 break;
960         case 5:
961                 *speed = HCLGE_MAC_SPEED_100G;
962                 break;
963         default:
964                 return -EINVAL;
965         }
966
967         return 0;
968 }
969
970 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
971 {
972         struct hclge_vport *vport = hclge_get_vport(handle);
973         struct hclge_dev *hdev = vport->back;
974         u32 speed_ability = hdev->hw.mac.speed_ability;
975         u32 speed_bit = 0;
976
977         switch (speed) {
978         case HCLGE_MAC_SPEED_10M:
979                 speed_bit = HCLGE_SUPPORT_10M_BIT;
980                 break;
981         case HCLGE_MAC_SPEED_100M:
982                 speed_bit = HCLGE_SUPPORT_100M_BIT;
983                 break;
984         case HCLGE_MAC_SPEED_1G:
985                 speed_bit = HCLGE_SUPPORT_1G_BIT;
986                 break;
987         case HCLGE_MAC_SPEED_10G:
988                 speed_bit = HCLGE_SUPPORT_10G_BIT;
989                 break;
990         case HCLGE_MAC_SPEED_25G:
991                 speed_bit = HCLGE_SUPPORT_25G_BIT;
992                 break;
993         case HCLGE_MAC_SPEED_40G:
994                 speed_bit = HCLGE_SUPPORT_40G_BIT;
995                 break;
996         case HCLGE_MAC_SPEED_50G:
997                 speed_bit = HCLGE_SUPPORT_50G_BIT;
998                 break;
999         case HCLGE_MAC_SPEED_100G:
1000                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1001                 break;
1002         default:
1003                 return -EINVAL;
1004         }
1005
1006         if (speed_bit & speed_ability)
1007                 return 0;
1008
1009         return -EINVAL;
1010 }
1011
1012 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1013 {
1014         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1015                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1016                                  mac->supported);
1017         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1018                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1019                                  mac->supported);
1020         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1021                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1022                                  mac->supported);
1023         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1024                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1025                                  mac->supported);
1026         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1027                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1028                                  mac->supported);
1029 }
1030
1031 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1032 {
1033         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1034                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1035                                  mac->supported);
1036         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1037                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1038                                  mac->supported);
1039         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1040                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1041                                  mac->supported);
1042         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1043                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1044                                  mac->supported);
1045         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1046                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1047                                  mac->supported);
1048 }
1049
1050 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1051 {
1052         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1053                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1054                                  mac->supported);
1055         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1056                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1057                                  mac->supported);
1058         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1060                                  mac->supported);
1061         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1062                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1063                                  mac->supported);
1064         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1065                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1066                                  mac->supported);
1067 }
1068
1069 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1070 {
1071         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1072                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1073                                  mac->supported);
1074         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1075                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1076                                  mac->supported);
1077         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1078                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1079                                  mac->supported);
1080         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1081                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1082                                  mac->supported);
1083         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1084                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1085                                  mac->supported);
1086         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1087                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1088                                  mac->supported);
1089 }
1090
1091 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1092 {
1093         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1094         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1095
1096         switch (mac->speed) {
1097         case HCLGE_MAC_SPEED_10G:
1098         case HCLGE_MAC_SPEED_40G:
1099                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1100                                  mac->supported);
1101                 mac->fec_ability =
1102                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1103                 break;
1104         case HCLGE_MAC_SPEED_25G:
1105         case HCLGE_MAC_SPEED_50G:
1106                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1107                                  mac->supported);
1108                 mac->fec_ability =
1109                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1110                         BIT(HNAE3_FEC_AUTO);
1111                 break;
1112         case HCLGE_MAC_SPEED_100G:
1113                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1114                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1115                 break;
1116         default:
1117                 mac->fec_ability = 0;
1118                 break;
1119         }
1120 }
1121
1122 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1123                                         u8 speed_ability)
1124 {
1125         struct hclge_mac *mac = &hdev->hw.mac;
1126
1127         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1128                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1129                                  mac->supported);
1130
1131         hclge_convert_setting_sr(mac, speed_ability);
1132         hclge_convert_setting_lr(mac, speed_ability);
1133         hclge_convert_setting_cr(mac, speed_ability);
1134         if (hdev->pdev->revision >= 0x21)
1135                 hclge_convert_setting_fec(mac);
1136
1137         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1138         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1139         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1140 }
1141
1142 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1143                                             u8 speed_ability)
1144 {
1145         struct hclge_mac *mac = &hdev->hw.mac;
1146
1147         hclge_convert_setting_kr(mac, speed_ability);
1148         if (hdev->pdev->revision >= 0x21)
1149                 hclge_convert_setting_fec(mac);
1150         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1151         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1152         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1153 }
1154
1155 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1156                                          u8 speed_ability)
1157 {
1158         unsigned long *supported = hdev->hw.mac.supported;
1159
1160         /* a GE port defaults to supporting all speeds */
1161         if (!speed_ability)
1162                 speed_ability = HCLGE_SUPPORT_GE;
1163
1164         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1165                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1166                                  supported);
1167
1168         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1169                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1170                                  supported);
1171                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1172                                  supported);
1173         }
1174
1175         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1176                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1177                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1178         }
1179
1180         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1181         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1183         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1184 }
1185
1186 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1187 {
1188         u8 media_type = hdev->hw.mac.media_type;
1189
1190         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1191                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1192         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1193                 hclge_parse_copper_link_mode(hdev, speed_ability);
1194         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1195                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1196 }
1197
1198 static u32 hclge_get_max_speed(u8 speed_ability)
1199 {
1200         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1201                 return HCLGE_MAC_SPEED_100G;
1202
1203         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1204                 return HCLGE_MAC_SPEED_50G;
1205
1206         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1207                 return HCLGE_MAC_SPEED_40G;
1208
1209         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1210                 return HCLGE_MAC_SPEED_25G;
1211
1212         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1213                 return HCLGE_MAC_SPEED_10G;
1214
1215         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1216                 return HCLGE_MAC_SPEED_1G;
1217
1218         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1219                 return HCLGE_MAC_SPEED_100M;
1220
1221         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1222                 return HCLGE_MAC_SPEED_10M;
1223
1224         return HCLGE_MAC_SPEED_1G;
1225 }
1226
1227 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1228 {
1229         struct hclge_cfg_param_cmd *req;
1230         u64 mac_addr_tmp_high;
1231         u64 mac_addr_tmp;
1232         unsigned int i;
1233
1234         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1235
1236         /* get the configuration */
1237         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1238                                               HCLGE_CFG_VMDQ_M,
1239                                               HCLGE_CFG_VMDQ_S);
1240         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1241                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1242         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243                                             HCLGE_CFG_TQP_DESC_N_M,
1244                                             HCLGE_CFG_TQP_DESC_N_S);
1245
1246         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1247                                         HCLGE_CFG_PHY_ADDR_M,
1248                                         HCLGE_CFG_PHY_ADDR_S);
1249         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250                                           HCLGE_CFG_MEDIA_TP_M,
1251                                           HCLGE_CFG_MEDIA_TP_S);
1252         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1253                                           HCLGE_CFG_RX_BUF_LEN_M,
1254                                           HCLGE_CFG_RX_BUF_LEN_S);
1255         /* get mac_address */
1256         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1257         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1258                                             HCLGE_CFG_MAC_ADDR_H_M,
1259                                             HCLGE_CFG_MAC_ADDR_H_S);
1260
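        /* param[2] holds the low 32 bits of the MAC address and param[3]
         * its remaining high bits; the two shifts below move the high part
         * up by a total of 32 bits to assemble the 48-bit address.
         */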
1261         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1262
1263         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1264                                              HCLGE_CFG_DEFAULT_SPEED_M,
1265                                              HCLGE_CFG_DEFAULT_SPEED_S);
1266         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1267                                             HCLGE_CFG_RSS_SIZE_M,
1268                                             HCLGE_CFG_RSS_SIZE_S);
1269
1270         for (i = 0; i < ETH_ALEN; i++)
1271                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1272
1273         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1274         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1275
1276         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1277                                              HCLGE_CFG_SPEED_ABILITY_M,
1278                                              HCLGE_CFG_SPEED_ABILITY_S);
1279         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1281                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1282         if (!cfg->umv_space)
1283                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1284 }
1285
1286 /* hclge_get_cfg: query the static parameters from flash
1287  * @hdev: pointer to struct hclge_dev
1288  * @hcfg: the config structure to be filled in
1289  */
1290 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1291 {
1292         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1293         struct hclge_cfg_param_cmd *req;
1294         unsigned int i;
1295         int ret;
1296
1297         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1298                 u32 offset = 0;
1299
1300                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1301                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1302                                            true);
1303                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1304                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1305         /* Len should be in units of 4 bytes when sent to hardware */
1306                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1307                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1308                 req->offset = cpu_to_le32(offset);
1309         }
1310
1311         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1312         if (ret) {
1313                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1314                 return ret;
1315         }
1316
1317         hclge_parse_cfg(hcfg, desc);
1318
1319         return 0;
1320 }
1321
1322 static int hclge_get_cap(struct hclge_dev *hdev)
1323 {
1324         int ret;
1325
1326         ret = hclge_query_function_status(hdev);
1327         if (ret) {
1328                 dev_err(&hdev->pdev->dev,
1329                         "query function status error %d.\n", ret);
1330                 return ret;
1331         }
1332
1333         /* get pf resource */
1334         return hclge_query_pf_resource(hdev);
1335 }
1336
1337 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1338 {
1339 #define HCLGE_MIN_TX_DESC       64
1340 #define HCLGE_MIN_RX_DESC       64
1341
1342         if (!is_kdump_kernel())
1343                 return;
1344
1345         dev_info(&hdev->pdev->dev,
1346                  "Running kdump kernel. Using minimal resources\n");
1347
1348         /* minimal queue pairs equal to the number of vports */
1349         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1350         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1351         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1352 }
1353
1354 static int hclge_configure(struct hclge_dev *hdev)
1355 {
1356         struct hclge_cfg cfg;
1357         unsigned int i;
1358         int ret;
1359
1360         ret = hclge_get_cfg(hdev, &cfg);
1361         if (ret) {
1362                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1363                 return ret;
1364         }
1365
1366         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1367         hdev->base_tqp_pid = 0;
1368         hdev->rss_size_max = cfg.rss_size_max;
1369         hdev->rx_buf_len = cfg.rx_buf_len;
1370         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1371         hdev->hw.mac.media_type = cfg.media_type;
1372         hdev->hw.mac.phy_addr = cfg.phy_addr;
1373         hdev->num_tx_desc = cfg.tqp_desc_num;
1374         hdev->num_rx_desc = cfg.tqp_desc_num;
1375         hdev->tm_info.num_pg = 1;
1376         hdev->tc_max = cfg.tc_num;
1377         hdev->tm_info.hw_pfc_map = 0;
1378         hdev->wanted_umv_size = cfg.umv_space;
1379
1380         if (hnae3_dev_fd_supported(hdev)) {
1381                 hdev->fd_en = true;
1382                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1383         }
1384
1385         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1386         if (ret) {
1387                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1388                 return ret;
1389         }
1390
1391         hclge_parse_link_mode(hdev, cfg.speed_ability);
1392
1393         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1394
1395         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1396             (hdev->tc_max < 1)) {
1397                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1398                          hdev->tc_max);
1399                 hdev->tc_max = 1;
1400         }
1401
1402         /* Dev does not support DCB */
1403         if (!hnae3_dev_dcb_supported(hdev)) {
1404                 hdev->tc_max = 1;
1405                 hdev->pfc_max = 0;
1406         } else {
1407                 hdev->pfc_max = hdev->tc_max;
1408         }
1409
1410         hdev->tm_info.num_tc = 1;
1411
1412         /* Non-contiguous TCs are not supported currently */
1413         for (i = 0; i < hdev->tm_info.num_tc; i++)
1414                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1415
1416         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1417
1418         hclge_init_kdump_kernel_config(hdev);
1419
1420         /* Set the init affinity based on pci func number */
1421         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1422         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1423         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1424                         &hdev->affinity_mask);
1425
1426         return ret;
1427 }
1428
1429 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1430                             unsigned int tso_mss_max)
1431 {
1432         struct hclge_cfg_tso_status_cmd *req;
1433         struct hclge_desc desc;
1434         u16 tso_mss;
1435
1436         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1437
1438         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1439
1440         tso_mss = 0;
1441         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1442                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1443         req->tso_mss_min = cpu_to_le16(tso_mss);
1444
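        /* tso_mss_max is written into its own 16-bit field, using the same
         * bit layout as tso_mss_min
         */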
1445         tso_mss = 0;
1446         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1447                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1448         req->tso_mss_max = cpu_to_le16(tso_mss);
1449
1450         return hclge_cmd_send(&hdev->hw, &desc, 1);
1451 }
1452
1453 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1454 {
1455         struct hclge_cfg_gro_status_cmd *req;
1456         struct hclge_desc desc;
1457         int ret;
1458
1459         if (!hnae3_dev_gro_supported(hdev))
1460                 return 0;
1461
1462         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1463         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1464
1465         req->gro_en = cpu_to_le16(en ? 1 : 0);
1466
1467         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1468         if (ret)
1469                 dev_err(&hdev->pdev->dev,
1470                         "GRO hardware config cmd failed, ret = %d\n", ret);
1471
1472         return ret;
1473 }
1474
1475 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1476 {
1477         struct hclge_tqp *tqp;
1478         int i;
1479
1480         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1481                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1482         if (!hdev->htqp)
1483                 return -ENOMEM;
1484
1485         tqp = hdev->htqp;
1486
1487         for (i = 0; i < hdev->num_tqps; i++) {
1488                 tqp->dev = &hdev->pdev->dev;
1489                 tqp->index = i;
1490
1491                 tqp->q.ae_algo = &ae_algo;
1492                 tqp->q.buf_size = hdev->rx_buf_len;
1493                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1494                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1495                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1496                         i * HCLGE_TQP_REG_SIZE;
1497
1498                 tqp++;
1499         }
1500
1501         return 0;
1502 }
1503
1504 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1505                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1506 {
1507         struct hclge_tqp_map_cmd *req;
1508         struct hclge_desc desc;
1509         int ret;
1510
1511         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1512
1513         req = (struct hclge_tqp_map_cmd *)desc.data;
1514         req->tqp_id = cpu_to_le16(tqp_pid);
1515         req->tqp_vf = func_id;
1516         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1517         if (!is_pf)
1518                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1519         req->tqp_vid = cpu_to_le16(tqp_vid);
1520
1521         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1522         if (ret)
1523                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1524
1525         return ret;
1526 }
1527
1528 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1529 {
1530         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1531         struct hclge_dev *hdev = vport->back;
1532         int i, alloced;
1533
1534         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1535              alloced < num_tqps; i++) {
1536                 if (!hdev->htqp[i].alloced) {
1537                         hdev->htqp[i].q.handle = &vport->nic;
1538                         hdev->htqp[i].q.tqp_index = alloced;
1539                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1540                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1541                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1542                         hdev->htqp[i].alloced = true;
1543                         alloced++;
1544                 }
1545         }
1546         vport->alloc_tqps = alloced;
1547         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1548                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1549
1550         /* ensure a one-to-one mapping between irq and queue by default */
1551         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1552                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1553
1554         return 0;
1555 }
1556
1557 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1558                             u16 num_tx_desc, u16 num_rx_desc)
1559
1560 {
1561         struct hnae3_handle *nic = &vport->nic;
1562         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1563         struct hclge_dev *hdev = vport->back;
1564         int ret;
1565
1566         kinfo->num_tx_desc = num_tx_desc;
1567         kinfo->num_rx_desc = num_rx_desc;
1568
1569         kinfo->rx_buf_len = hdev->rx_buf_len;
1570
1571         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1572                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1573         if (!kinfo->tqp)
1574                 return -ENOMEM;
1575
1576         ret = hclge_assign_tqp(vport, num_tqps);
1577         if (ret)
1578                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1579
1580         return ret;
1581 }
1582
1583 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1584                                   struct hclge_vport *vport)
1585 {
1586         struct hnae3_handle *nic = &vport->nic;
1587         struct hnae3_knic_private_info *kinfo;
1588         u16 i;
1589
1590         kinfo = &nic->kinfo;
1591         for (i = 0; i < vport->alloc_tqps; i++) {
1592                 struct hclge_tqp *q =
1593                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1594                 bool is_pf;
1595                 int ret;
1596
1597                 is_pf = !(vport->vport_id);
1598                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1599                                              i, is_pf);
1600                 if (ret)
1601                         return ret;
1602         }
1603
1604         return 0;
1605 }
1606
1607 static int hclge_map_tqp(struct hclge_dev *hdev)
1608 {
1609         struct hclge_vport *vport = hdev->vport;
1610         u16 i, num_vport;
1611
1612         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1613         for (i = 0; i < num_vport; i++) {
1614                 int ret;
1615
1616                 ret = hclge_map_tqp_to_vport(hdev, vport);
1617                 if (ret)
1618                         return ret;
1619
1620                 vport++;
1621         }
1622
1623         return 0;
1624 }
1625
1626 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1627 {
1628         struct hnae3_handle *nic = &vport->nic;
1629         struct hclge_dev *hdev = vport->back;
1630         int ret;
1631
1632         nic->pdev = hdev->pdev;
1633         nic->ae_algo = &ae_algo;
1634         nic->numa_node_mask = hdev->numa_node_mask;
1635
1636         ret = hclge_knic_setup(vport, num_tqps,
1637                                hdev->num_tx_desc, hdev->num_rx_desc);
1638         if (ret)
1639                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1640
1641         return ret;
1642 }
1643
1644 static int hclge_alloc_vport(struct hclge_dev *hdev)
1645 {
1646         struct pci_dev *pdev = hdev->pdev;
1647         struct hclge_vport *vport;
1648         u32 tqp_main_vport;
1649         u32 tqp_per_vport;
1650         int num_vport, i;
1651         int ret;
1652
1653         /* We need to alloc a vport for the main NIC of the PF */
1654         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1655
1656         if (hdev->num_tqps < num_vport) {
1657                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1658                         hdev->num_tqps, num_vport);
1659                 return -EINVAL;
1660         }
1661
1662         /* Alloc the same number of TQPs for every vport */
1663         tqp_per_vport = hdev->num_tqps / num_vport;
1664         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1665
1666         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1667                              GFP_KERNEL);
1668         if (!vport)
1669                 return -ENOMEM;
1670
1671         hdev->vport = vport;
1672         hdev->num_alloc_vport = num_vport;
1673
1674         if (IS_ENABLED(CONFIG_PCI_IOV))
1675                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1676
1677         for (i = 0; i < num_vport; i++) {
1678                 vport->back = hdev;
1679                 vport->vport_id = i;
1680                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1681                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1682                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1683                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1684                 INIT_LIST_HEAD(&vport->vlan_list);
1685                 INIT_LIST_HEAD(&vport->uc_mac_list);
1686                 INIT_LIST_HEAD(&vport->mc_mac_list);
1687
1688                 if (i == 0)
1689                         ret = hclge_vport_setup(vport, tqp_main_vport);
1690                 else
1691                         ret = hclge_vport_setup(vport, tqp_per_vport);
1692                 if (ret) {
1693                         dev_err(&pdev->dev,
1694                                 "vport setup failed for vport %d, %d\n",
1695                                 i, ret);
1696                         return ret;
1697                 }
1698
1699                 vport++;
1700         }
1701
1702         return 0;
1703 }
1704
1705 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1706                                     struct hclge_pkt_buf_alloc *buf_alloc)
1707 {
1708 /* TX buffer size is in units of 128 bytes */
1709 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1710 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1711         struct hclge_tx_buff_alloc_cmd *req;
1712         struct hclge_desc desc;
1713         int ret;
1714         u8 i;
1715
1716         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1717
1718         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1719         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1720                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1721
1722                 req->tx_pkt_buff[i] =
1723                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1724                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1725         }
1726
1727         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1728         if (ret)
1729                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1730                         ret);
1731
1732         return ret;
1733 }
1734
1735 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1736                                  struct hclge_pkt_buf_alloc *buf_alloc)
1737 {
1738         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1739
1740         if (ret)
1741                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1742
1743         return ret;
1744 }
1745
1746 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1747 {
1748         unsigned int i;
1749         u32 cnt = 0;
1750
1751         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1752                 if (hdev->hw_tc_map & BIT(i))
1753                         cnt++;
1754         return cnt;
1755 }
1756
1757 /* Get the number of pfc enabled TCs, which have private buffer */
1758 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1759                                   struct hclge_pkt_buf_alloc *buf_alloc)
1760 {
1761         struct hclge_priv_buf *priv;
1762         unsigned int i;
1763         int cnt = 0;
1764
1765         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1766                 priv = &buf_alloc->priv_buf[i];
1767                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1768                     priv->enable)
1769                         cnt++;
1770         }
1771
1772         return cnt;
1773 }
1774
1775 /* Get the number of pfc disabled TCs, which have private buffer */
1776 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1777                                      struct hclge_pkt_buf_alloc *buf_alloc)
1778 {
1779         struct hclge_priv_buf *priv;
1780         unsigned int i;
1781         int cnt = 0;
1782
1783         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1784                 priv = &buf_alloc->priv_buf[i];
1785                 if (hdev->hw_tc_map & BIT(i) &&
1786                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1787                     priv->enable)
1788                         cnt++;
1789         }
1790
1791         return cnt;
1792 }
1793
1794 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1795 {
1796         struct hclge_priv_buf *priv;
1797         u32 rx_priv = 0;
1798         int i;
1799
1800         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1801                 priv = &buf_alloc->priv_buf[i];
1802                 if (priv->enable)
1803                         rx_priv += priv->buf_size;
1804         }
1805         return rx_priv;
1806 }
1807
1808 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1809 {
1810         u32 i, total_tx_size = 0;
1811
1812         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1813                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1814
1815         return total_tx_size;
1816 }
1817
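/* Check whether the total rx buffer (rx_all) can hold the already allocated
 * private buffers plus a shared buffer; if so, set up the shared buffer size
 * and the per-TC high/low thresholds.
 */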
1818 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1819                                 struct hclge_pkt_buf_alloc *buf_alloc,
1820                                 u32 rx_all)
1821 {
1822         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1823         u32 tc_num = hclge_get_tc_num(hdev);
1824         u32 shared_buf, aligned_mps;
1825         u32 rx_priv;
1826         int i;
1827
1828         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1829
1830         if (hnae3_dev_dcb_supported(hdev))
1831                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1832                                         hdev->dv_buf_size;
1833         else
1834                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1835                                         + hdev->dv_buf_size;
1836
1837         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1838         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1839                              HCLGE_BUF_SIZE_UNIT);
1840
1841         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1842         if (rx_all < rx_priv + shared_std)
1843                 return false;
1844
1845         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1846         buf_alloc->s_buf.buf_size = shared_buf;
1847         if (hnae3_dev_dcb_supported(hdev)) {
1848                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1849                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1850                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1851                                   HCLGE_BUF_SIZE_UNIT);
1852         } else {
1853                 buf_alloc->s_buf.self.high = aligned_mps +
1854                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1855                 buf_alloc->s_buf.self.low = aligned_mps;
1856         }
1857
1858         if (hnae3_dev_dcb_supported(hdev)) {
1859                 hi_thrd = shared_buf - hdev->dv_buf_size;
1860
1861                 if (tc_num <= NEED_RESERVE_TC_NUM)
1862                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1863                                         / BUF_MAX_PERCENT;
1864
1865                 if (tc_num)
1866                         hi_thrd = hi_thrd / tc_num;
1867
1868                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1869                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1870                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1871         } else {
1872                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1873                 lo_thrd = aligned_mps;
1874         }
1875
1876         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1877                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1878                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1879         }
1880
1881         return true;
1882 }
1883
1884 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1885                                 struct hclge_pkt_buf_alloc *buf_alloc)
1886 {
1887         u32 i, total_size;
1888
1889         total_size = hdev->pkt_buf_size;
1890
1891         /* alloc tx buffer for all enabled tc */
1892         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1893                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1894
1895                 if (hdev->hw_tc_map & BIT(i)) {
1896                         if (total_size < hdev->tx_buf_size)
1897                                 return -ENOMEM;
1898
1899                         priv->tx_buf_size = hdev->tx_buf_size;
1900                 } else {
1901                         priv->tx_buf_size = 0;
1902                 }
1903
1904                 total_size -= priv->tx_buf_size;
1905         }
1906
1907         return 0;
1908 }
1909
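/* Assign a private rx buffer and waterlines to every enabled TC, using larger
 * waterlines when @max is true, then check whether the result still fits in
 * the total rx packet buffer.
 */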
1910 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1911                                   struct hclge_pkt_buf_alloc *buf_alloc)
1912 {
1913         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1914         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1915         unsigned int i;
1916
1917         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1918                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1919
1920                 priv->enable = 0;
1921                 priv->wl.low = 0;
1922                 priv->wl.high = 0;
1923                 priv->buf_size = 0;
1924
1925                 if (!(hdev->hw_tc_map & BIT(i)))
1926                         continue;
1927
1928                 priv->enable = 1;
1929
1930                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1931                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1932                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1933                                                 HCLGE_BUF_SIZE_UNIT);
1934                 } else {
1935                         priv->wl.low = 0;
1936                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1937                                         aligned_mps;
1938                 }
1939
1940                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1941         }
1942
1943         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1944 }
1945
1946 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1947                                           struct hclge_pkt_buf_alloc *buf_alloc)
1948 {
1949         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1950         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1951         int i;
1952
1953         /* let the last one be cleared first */
1954         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1955                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1956                 unsigned int mask = BIT((unsigned int)i);
1957
1958                 if (hdev->hw_tc_map & mask &&
1959                     !(hdev->tm_info.hw_pfc_map & mask)) {
1960                         /* Clear the private buffer of the TC without pfc */
1961                         priv->wl.low = 0;
1962                         priv->wl.high = 0;
1963                         priv->buf_size = 0;
1964                         priv->enable = 0;
1965                         no_pfc_priv_num--;
1966                 }
1967
1968                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1969                     no_pfc_priv_num == 0)
1970                         break;
1971         }
1972
1973         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1974 }
1975
1976 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1977                                         struct hclge_pkt_buf_alloc *buf_alloc)
1978 {
1979         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1980         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1981         int i;
1982
1983         /* let the last one be cleared first */
1984         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1985                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1986                 unsigned int mask = BIT((unsigned int)i);
1987
1988                 if (hdev->hw_tc_map & mask &&
1989                     hdev->tm_info.hw_pfc_map & mask) {
1990                         /* Reduce the number of pfc TCs with private buffer */
1991                         priv->wl.low = 0;
1992                         priv->enable = 0;
1993                         priv->wl.high = 0;
1994                         priv->buf_size = 0;
1995                         pfc_priv_num--;
1996                 }
1997
1998                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1999                     pfc_priv_num == 0)
2000                         break;
2001         }
2002
2003         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2004 }
2005
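/* Try to split the whole rx packet buffer into per-TC private buffers with no
 * shared buffer; return false if the per-TC share would fall below the
 * required minimum.
 */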
2006 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2007                                        struct hclge_pkt_buf_alloc *buf_alloc)
2008 {
2009 #define COMPENSATE_BUFFER       0x3C00
2010 #define COMPENSATE_HALF_MPS_NUM 5
2011 #define PRIV_WL_GAP             0x1800
2012
2013         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2014         u32 tc_num = hclge_get_tc_num(hdev);
2015         u32 half_mps = hdev->mps >> 1;
2016         u32 min_rx_priv;
2017         unsigned int i;
2018
2019         if (tc_num)
2020                 rx_priv = rx_priv / tc_num;
2021
2022         if (tc_num <= NEED_RESERVE_TC_NUM)
2023                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2024
2025         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2026                         COMPENSATE_HALF_MPS_NUM * half_mps;
2027         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2028         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2029
2030         if (rx_priv < min_rx_priv)
2031                 return false;
2032
2033         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2034                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2035
2036                 priv->enable = 0;
2037                 priv->wl.low = 0;
2038                 priv->wl.high = 0;
2039                 priv->buf_size = 0;
2040
2041                 if (!(hdev->hw_tc_map & BIT(i)))
2042                         continue;
2043
2044                 priv->enable = 1;
2045                 priv->buf_size = rx_priv;
2046                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2047                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2048         }
2049
2050         buf_alloc->s_buf.buf_size = 0;
2051
2052         return true;
2053 }
2054
2055 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2056  * @hdev: pointer to struct hclge_dev
2057  * @buf_alloc: pointer to buffer calculation data
2058  * @return: 0: calculation successful, negative: fail
2059  */
2060 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2061                                 struct hclge_pkt_buf_alloc *buf_alloc)
2062 {
2063         /* When DCB is not supported, rx private buffer is not allocated. */
2064         if (!hnae3_dev_dcb_supported(hdev)) {
2065                 u32 rx_all = hdev->pkt_buf_size;
2066
2067                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2068                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2069                         return -ENOMEM;
2070
2071                 return 0;
2072         }
2073
2074         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2075                 return 0;
2076
2077         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2078                 return 0;
2079
2080         /* try to decrease the buffer size */
2081         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2082                 return 0;
2083
2084         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2085                 return 0;
2086
2087         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2088                 return 0;
2089
2090         return -ENOMEM;
2091 }
2092
2093 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2094                                    struct hclge_pkt_buf_alloc *buf_alloc)
2095 {
2096         struct hclge_rx_priv_buff_cmd *req;
2097         struct hclge_desc desc;
2098         int ret;
2099         int i;
2100
2101         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2102         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2103
2104         /* Alloc private buffer TCs */
2105         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2106                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2107
2108                 req->buf_num[i] =
2109                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2110                 req->buf_num[i] |=
2111                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2112         }
2113
2114         req->shared_buf =
2115                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2116                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2117
2118         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2119         if (ret)
2120                 dev_err(&hdev->pdev->dev,
2121                         "rx private buffer alloc cmd failed %d\n", ret);
2122
2123         return ret;
2124 }
2125
2126 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2127                                    struct hclge_pkt_buf_alloc *buf_alloc)
2128 {
2129         struct hclge_rx_priv_wl_buf *req;
2130         struct hclge_priv_buf *priv;
2131         struct hclge_desc desc[2];
2132         int i, j;
2133         int ret;
2134
2135         for (i = 0; i < 2; i++) {
2136                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2137                                            false);
2138                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2139
2140                 /* The first descriptor sets the NEXT bit to 1 */
2141                 if (i == 0)
2142                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2143                 else
2144                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2145
2146                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2147                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2148
2149                         priv = &buf_alloc->priv_buf[idx];
2150                         req->tc_wl[j].high =
2151                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2152                         req->tc_wl[j].high |=
2153                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2154                         req->tc_wl[j].low =
2155                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2156                         req->tc_wl[j].low |=
2157                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2158                 }
2159         }
2160
2161         /* Send 2 descriptors at one time */
2162         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2163         if (ret)
2164                 dev_err(&hdev->pdev->dev,
2165                         "rx private waterline config cmd failed %d\n",
2166                         ret);
2167         return ret;
2168 }
2169
2170 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2171                                     struct hclge_pkt_buf_alloc *buf_alloc)
2172 {
2173         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2174         struct hclge_rx_com_thrd *req;
2175         struct hclge_desc desc[2];
2176         struct hclge_tc_thrd *tc;
2177         int i, j;
2178         int ret;
2179
2180         for (i = 0; i < 2; i++) {
2181                 hclge_cmd_setup_basic_desc(&desc[i],
2182                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2183                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2184
2185                 /* The first descriptor sets the NEXT bit to 1 */
2186                 if (i == 0)
2187                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2188                 else
2189                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2190
2191                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2192                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2193
2194                         req->com_thrd[j].high =
2195                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2196                         req->com_thrd[j].high |=
2197                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2198                         req->com_thrd[j].low =
2199                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2200                         req->com_thrd[j].low |=
2201                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2202                 }
2203         }
2204
2205         /* Send 2 descriptors at one time */
2206         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2207         if (ret)
2208                 dev_err(&hdev->pdev->dev,
2209                         "common threshold config cmd failed %d\n", ret);
2210         return ret;
2211 }
2212
2213 static int hclge_common_wl_config(struct hclge_dev *hdev,
2214                                   struct hclge_pkt_buf_alloc *buf_alloc)
2215 {
2216         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2217         struct hclge_rx_com_wl *req;
2218         struct hclge_desc desc;
2219         int ret;
2220
2221         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2222
2223         req = (struct hclge_rx_com_wl *)desc.data;
2224         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2225         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2226
2227         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2228         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2229
2230         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2231         if (ret)
2232                 dev_err(&hdev->pdev->dev,
2233                         "common waterline config cmd failed %d\n", ret);
2234
2235         return ret;
2236 }
2237
2238 int hclge_buffer_alloc(struct hclge_dev *hdev)
2239 {
2240         struct hclge_pkt_buf_alloc *pkt_buf;
2241         int ret;
2242
2243         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2244         if (!pkt_buf)
2245                 return -ENOMEM;
2246
2247         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2248         if (ret) {
2249                 dev_err(&hdev->pdev->dev,
2250                         "could not calc tx buffer size for all TCs %d\n", ret);
2251                 goto out;
2252         }
2253
2254         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2255         if (ret) {
2256                 dev_err(&hdev->pdev->dev,
2257                         "could not alloc tx buffers %d\n", ret);
2258                 goto out;
2259         }
2260
2261         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2262         if (ret) {
2263                 dev_err(&hdev->pdev->dev,
2264                         "could not calc rx priv buffer size for all TCs %d\n",
2265                         ret);
2266                 goto out;
2267         }
2268
2269         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2270         if (ret) {
2271                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2272                         ret);
2273                 goto out;
2274         }
2275
2276         if (hnae3_dev_dcb_supported(hdev)) {
2277                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2278                 if (ret) {
2279                         dev_err(&hdev->pdev->dev,
2280                                 "could not configure rx private waterline %d\n",
2281                                 ret);
2282                         goto out;
2283                 }
2284
2285                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2286                 if (ret) {
2287                         dev_err(&hdev->pdev->dev,
2288                                 "could not configure common threshold %d\n",
2289                                 ret);
2290                         goto out;
2291                 }
2292         }
2293
2294         ret = hclge_common_wl_config(hdev, pkt_buf);
2295         if (ret)
2296                 dev_err(&hdev->pdev->dev,
2297                         "could not configure common waterline %d\n", ret);
2298
2299 out:
2300         kfree(pkt_buf);
2301         return ret;
2302 }
2303
2304 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2305 {
2306         struct hnae3_handle *roce = &vport->roce;
2307         struct hnae3_handle *nic = &vport->nic;
2308
2309         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2310
2311         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2312             vport->back->num_msi_left == 0)
2313                 return -EINVAL;
2314
2315         roce->rinfo.base_vector = vport->back->roce_base_vector;
2316
2317         roce->rinfo.netdev = nic->kinfo.netdev;
2318         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2319
2320         roce->pdev = nic->pdev;
2321         roce->ae_algo = nic->ae_algo;
2322         roce->numa_node_mask = nic->numa_node_mask;
2323
2324         return 0;
2325 }
2326
2327 static int hclge_init_msi(struct hclge_dev *hdev)
2328 {
2329         struct pci_dev *pdev = hdev->pdev;
2330         int vectors;
2331         int i;
2332
2333         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2334                                         hdev->num_msi,
2335                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2336         if (vectors < 0) {
2337                 dev_err(&pdev->dev,
2338                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2339                         vectors);
2340                 return vectors;
2341         }
2342         if (vectors < hdev->num_msi)
2343                 dev_warn(&hdev->pdev->dev,
2344                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2345                          hdev->num_msi, vectors);
2346
2347         hdev->num_msi = vectors;
2348         hdev->num_msi_left = vectors;
2349
2350         hdev->base_msi_vector = pdev->irq;
2351         hdev->roce_base_vector = hdev->base_msi_vector +
2352                                 hdev->roce_base_msix_offset;
2353
2354         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2355                                            sizeof(u16), GFP_KERNEL);
2356         if (!hdev->vector_status) {
2357                 pci_free_irq_vectors(pdev);
2358                 return -ENOMEM;
2359         }
2360
2361         for (i = 0; i < hdev->num_msi; i++)
2362                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2363
2364         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2365                                         sizeof(int), GFP_KERNEL);
2366         if (!hdev->vector_irq) {
2367                 pci_free_irq_vectors(pdev);
2368                 return -ENOMEM;
2369         }
2370
2371         return 0;
2372 }
2373
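/* half duplex is only valid for 10M and 100M, force full duplex otherwise */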
2374 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2375 {
2376         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2377                 duplex = HCLGE_MAC_FULL;
2378
2379         return duplex;
2380 }
2381
2382 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2383                                       u8 duplex)
2384 {
2385         struct hclge_config_mac_speed_dup_cmd *req;
2386         struct hclge_desc desc;
2387         int ret;
2388
2389         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2390
2391         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2392
2393         if (duplex)
2394                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2395
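        /* map the speed to the encoding used by the speed/duplex command */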
2396         switch (speed) {
2397         case HCLGE_MAC_SPEED_10M:
2398                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2399                                 HCLGE_CFG_SPEED_S, 6);
2400                 break;
2401         case HCLGE_MAC_SPEED_100M:
2402                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403                                 HCLGE_CFG_SPEED_S, 7);
2404                 break;
2405         case HCLGE_MAC_SPEED_1G:
2406                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407                                 HCLGE_CFG_SPEED_S, 0);
2408                 break;
2409         case HCLGE_MAC_SPEED_10G:
2410                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411                                 HCLGE_CFG_SPEED_S, 1);
2412                 break;
2413         case HCLGE_MAC_SPEED_25G:
2414                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415                                 HCLGE_CFG_SPEED_S, 2);
2416                 break;
2417         case HCLGE_MAC_SPEED_40G:
2418                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419                                 HCLGE_CFG_SPEED_S, 3);
2420                 break;
2421         case HCLGE_MAC_SPEED_50G:
2422                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423                                 HCLGE_CFG_SPEED_S, 4);
2424                 break;
2425         case HCLGE_MAC_SPEED_100G:
2426                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427                                 HCLGE_CFG_SPEED_S, 5);
2428                 break;
2429         default:
2430                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2431                 return -EINVAL;
2432         }
2433
2434         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2435                       1);
2436
2437         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2438         if (ret) {
2439                 dev_err(&hdev->pdev->dev,
2440                         "mac speed/duplex config cmd failed %d.\n", ret);
2441                 return ret;
2442         }
2443
2444         return 0;
2445 }
2446
2447 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2448 {
2449         struct hclge_mac *mac = &hdev->hw.mac;
2450         int ret;
2451
2452         duplex = hclge_check_speed_dup(duplex, speed);
2453         if (!mac->support_autoneg && mac->speed == speed &&
2454             mac->duplex == duplex)
2455                 return 0;
2456
2457         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2458         if (ret)
2459                 return ret;
2460
2461         hdev->hw.mac.speed = speed;
2462         hdev->hw.mac.duplex = duplex;
2463
2464         return 0;
2465 }
2466
2467 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2468                                      u8 duplex)
2469 {
2470         struct hclge_vport *vport = hclge_get_vport(handle);
2471         struct hclge_dev *hdev = vport->back;
2472
2473         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2474 }
2475
2476 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2477 {
2478         struct hclge_config_auto_neg_cmd *req;
2479         struct hclge_desc desc;
2480         u32 flag = 0;
2481         int ret;
2482
2483         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2484
2485         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2486         if (enable)
2487                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2488         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2489
2490         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2491         if (ret)
2492                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2493                         ret);
2494
2495         return ret;
2496 }
2497
2498 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2499 {
2500         struct hclge_vport *vport = hclge_get_vport(handle);
2501         struct hclge_dev *hdev = vport->back;
2502
2503         if (!hdev->hw.mac.support_autoneg) {
2504                 if (enable) {
2505                         dev_err(&hdev->pdev->dev,
2506                                 "autoneg is not supported by current port\n");
2507                         return -EOPNOTSUPP;
2508                 } else {
2509                         return 0;
2510                 }
2511         }
2512
2513         return hclge_set_autoneg_en(hdev, enable);
2514 }
2515
2516 static int hclge_get_autoneg(struct hnae3_handle *handle)
2517 {
2518         struct hclge_vport *vport = hclge_get_vport(handle);
2519         struct hclge_dev *hdev = vport->back;
2520         struct phy_device *phydev = hdev->hw.mac.phydev;
2521
2522         if (phydev)
2523                 return phydev->autoneg;
2524
2525         return hdev->hw.mac.autoneg;
2526 }
2527
2528 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2529 {
2530         struct hclge_vport *vport = hclge_get_vport(handle);
2531         struct hclge_dev *hdev = vport->back;
2532         int ret;
2533
2534         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2535
2536         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2537         if (ret)
2538                 return ret;
2539         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2540 }
2541
2542 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2543 {
2544         struct hclge_vport *vport = hclge_get_vport(handle);
2545         struct hclge_dev *hdev = vport->back;
2546
2547         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2548                 return hclge_set_autoneg_en(hdev, !halt);
2549
2550         return 0;
2551 }
2552
2553 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2554 {
2555         struct hclge_config_fec_cmd *req;
2556         struct hclge_desc desc;
2557         int ret;
2558
2559         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2560
2561         req = (struct hclge_config_fec_cmd *)desc.data;
2562         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2563                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2564         if (fec_mode & BIT(HNAE3_FEC_RS))
2565                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2566                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2567         if (fec_mode & BIT(HNAE3_FEC_BASER))
2568                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2569                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2570
2571         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572         if (ret)
2573                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2574
2575         return ret;
2576 }
2577
2578 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2579 {
2580         struct hclge_vport *vport = hclge_get_vport(handle);
2581         struct hclge_dev *hdev = vport->back;
2582         struct hclge_mac *mac = &hdev->hw.mac;
2583         int ret;
2584
2585         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2586                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2587                 return -EINVAL;
2588         }
2589
2590         ret = hclge_set_fec_hw(hdev, fec_mode);
2591         if (ret)
2592                 return ret;
2593
2594         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2595         return 0;
2596 }
2597
2598 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2599                           u8 *fec_mode)
2600 {
2601         struct hclge_vport *vport = hclge_get_vport(handle);
2602         struct hclge_dev *hdev = vport->back;
2603         struct hclge_mac *mac = &hdev->hw.mac;
2604
2605         if (fec_ability)
2606                 *fec_ability = mac->fec_ability;
2607         if (fec_mode)
2608                 *fec_mode = mac->fec_mode;
2609 }
2610
2611 static int hclge_mac_init(struct hclge_dev *hdev)
2612 {
2613         struct hclge_mac *mac = &hdev->hw.mac;
2614         int ret;
2615
2616         hdev->support_sfp_query = true;
2617         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2618         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2619                                          hdev->hw.mac.duplex);
2620         if (ret)
2621                 return ret;
2622
2623         if (hdev->hw.mac.support_autoneg) {
2624                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2625                 if (ret)
2626                         return ret;
2627         }
2628
2629         mac->link = 0;
2630
2631         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2632                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2633                 if (ret)
2634                         return ret;
2635         }
2636
2637         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2638         if (ret) {
2639                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2640                 return ret;
2641         }
2642
2643         ret = hclge_set_default_loopback(hdev);
2644         if (ret)
2645                 return ret;
2646
2647         ret = hclge_buffer_alloc(hdev);
2648         if (ret)
2649                 dev_err(&hdev->pdev->dev,
2650                         "allocate buffer fail, ret=%d\n", ret);
2651
2652         return ret;
2653 }
2654
2655 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2656 {
2657         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2658             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2659                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2660                                     hclge_wq, &hdev->service_task, 0);
2661 }
2662
2663 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2664 {
2665         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2666             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2667                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2668                                     hclge_wq, &hdev->service_task, 0);
2669 }
2670
2671 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2672 {
2673         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2674             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2675                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2676                                     hclge_wq, &hdev->service_task,
2677                                     delay_time);
2678 }
2679
2680 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2681 {
2682         struct hclge_link_status_cmd *req;
2683         struct hclge_desc desc;
2684         int link_status;
2685         int ret;
2686
2687         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2688         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2689         if (ret) {
2690                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2691                         ret);
2692                 return ret;
2693         }
2694
2695         req = (struct hclge_link_status_cmd *)desc.data;
2696         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2697
2698         return !!link_status;
2699 }
2700
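/* Report link up only when the MAC reports up and, if a PHY is attached, the
 * PHY is running and reports link up as well.
 */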
2701 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2702 {
2703         unsigned int mac_state;
2704         int link_stat;
2705
2706         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2707                 return 0;
2708
2709         mac_state = hclge_get_mac_link_status(hdev);
2710
2711         if (hdev->hw.mac.phydev) {
2712                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2713                         link_stat = mac_state &
2714                                 hdev->hw.mac.phydev->link;
2715                 else
2716                         link_stat = 0;
2717
2718         } else {
2719                 link_stat = mac_state;
2720         }
2721
2722         return !!link_stat;
2723 }
2724
2725 static void hclge_update_link_status(struct hclge_dev *hdev)
2726 {
2727         struct hnae3_client *rclient = hdev->roce_client;
2728         struct hnae3_client *client = hdev->nic_client;
2729         struct hnae3_handle *rhandle;
2730         struct hnae3_handle *handle;
2731         int state;
2732         int i;
2733
2734         if (!client)
2735                 return;
2736
2737         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2738                 return;
2739
2740         state = hclge_get_mac_phy_link(hdev);
2741         if (state != hdev->hw.mac.link) {
2742                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2743                         handle = &hdev->vport[i].nic;
2744                         client->ops->link_status_change(handle, state);
2745                         hclge_config_mac_tnl_int(hdev, state);
2746                         rhandle = &hdev->vport[i].roce;
2747                         if (rclient && rclient->ops->link_status_change)
2748                                 rclient->ops->link_status_change(rhandle,
2749                                                                  state);
2750                 }
2751                 hdev->hw.mac.link = state;
2752         }
2753
2754         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2755 }
2756
2757 static void hclge_update_port_capability(struct hclge_mac *mac)
2758 {
2759         /* update fec ability by speed */
2760         hclge_convert_setting_fec(mac);
2761
2762         /* firmware cannot identify the backplane type; the media type
2763          * read from the configuration can help deal with it
2764          */
2765         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2766             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2767                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2768         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2769                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2770
2771         if (mac->support_autoneg) {
2772                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2773                 linkmode_copy(mac->advertising, mac->supported);
2774         } else {
2775                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2776                                    mac->supported);
2777                 linkmode_zero(mac->advertising);
2778         }
2779 }
2780
2781 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2782 {
2783         struct hclge_sfp_info_cmd *resp;
2784         struct hclge_desc desc;
2785         int ret;
2786
2787         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2788         resp = (struct hclge_sfp_info_cmd *)desc.data;
2789         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2790         if (ret == -EOPNOTSUPP) {
2791                 dev_warn(&hdev->pdev->dev,
2792                          "IMP does not support get SFP speed %d\n", ret);
2793                 return ret;
2794         } else if (ret) {
2795                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2796                 return ret;
2797         }
2798
2799         *speed = le32_to_cpu(resp->speed);
2800
2801         return 0;
2802 }
2803
2804 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2805 {
2806         struct hclge_sfp_info_cmd *resp;
2807         struct hclge_desc desc;
2808         int ret;
2809
2810         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2811         resp = (struct hclge_sfp_info_cmd *)desc.data;
2812
2813         resp->query_type = QUERY_ACTIVE_SPEED;
2814
2815         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2816         if (ret == -EOPNOTSUPP) {
2817                 dev_warn(&hdev->pdev->dev,
2818                          "IMP does not support get SFP info %d\n", ret);
2819                 return ret;
2820         } else if (ret) {
2821                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2822                 return ret;
2823         }
2824
2825         /* In some cases, the mac speed got from IMP may be 0, it should
2826          * not be set to mac->speed.
2827          */
2828         if (!le32_to_cpu(resp->speed))
2829                 return 0;
2830
2831         mac->speed = le32_to_cpu(resp->speed);
2832         /* if resp->speed_ability is 0, it means it's an old version of
2833          * firmware, so do not update these params
2834          */
2835         if (resp->speed_ability) {
2836                 mac->module_type = le32_to_cpu(resp->module_type);
2837                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2838                 mac->autoneg = resp->autoneg;
2839                 mac->support_autoneg = resp->autoneg_ability;
2840                 mac->speed_type = QUERY_ACTIVE_SPEED;
2841                 if (!resp->active_fec)
2842                         mac->fec_mode = 0;
2843                 else
2844                         mac->fec_mode = BIT(resp->active_fec);
2845         } else {
2846                 mac->speed_type = QUERY_SFP_SPEED;
2847         }
2848
2849         return 0;
2850 }
2851
2852 static int hclge_update_port_info(struct hclge_dev *hdev)
2853 {
2854         struct hclge_mac *mac = &hdev->hw.mac;
2855         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2856         int ret;
2857
2858         /* get the port info from SFP cmd if not copper port */
2859         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2860                 return 0;
2861
2862         /* if IMP does not support getting SFP/qSFP info, return directly */
2863         if (!hdev->support_sfp_query)
2864                 return 0;
2865
2866         if (hdev->pdev->revision >= 0x21)
2867                 ret = hclge_get_sfp_info(hdev, mac);
2868         else
2869                 ret = hclge_get_sfp_speed(hdev, &speed);
2870
2871         if (ret == -EOPNOTSUPP) {
2872                 hdev->support_sfp_query = false;
2873                 return ret;
2874         } else if (ret) {
2875                 return ret;
2876         }
2877
2878         if (hdev->pdev->revision >= 0x21) {
2879                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2880                         hclge_update_port_capability(mac);
2881                         return 0;
2882                 }
2883                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2884                                                HCLGE_MAC_FULL);
2885         } else {
2886                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2887                         return 0; /* do nothing if no SFP */
2888
2889                 /* must config full duplex for SFP */
2890                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2891         }
2892 }
2893
2894 static int hclge_get_status(struct hnae3_handle *handle)
2895 {
2896         struct hclge_vport *vport = hclge_get_vport(handle);
2897         struct hclge_dev *hdev = vport->back;
2898
2899         hclge_update_link_status(hdev);
2900
2901         return hdev->hw.mac.link;
2902 }
2903
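/* hclge_get_vf_vport - map a 0-based VF index from the stack to its vport.
 * vport 0 belongs to the PF, so VF n lives at hdev->vport[n + 1]. Returns
 * NULL when SR-IOV is disabled or the index is out of range.
 */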
2904 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2905 {
2906         if (!pci_num_vf(hdev->pdev)) {
2907                 dev_err(&hdev->pdev->dev,
2908                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2909                 return NULL;
2910         }
2911
2912         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2913                 dev_err(&hdev->pdev->dev,
2914                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
2915                         vf, pci_num_vf(hdev->pdev));
2916                 return NULL;
2917         }
2918
2919         /* VFs start from 1 in the vport array */
2920         vf += HCLGE_VF_VPORT_START_NUM;
2921         return &hdev->vport[vf];
2922 }
2923
2924 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2925                                struct ifla_vf_info *ivf)
2926 {
2927         struct hclge_vport *vport = hclge_get_vport(handle);
2928         struct hclge_dev *hdev = vport->back;
2929
2930         vport = hclge_get_vf_vport(hdev, vf);
2931         if (!vport)
2932                 return -EINVAL;
2933
2934         ivf->vf = vf;
2935         ivf->linkstate = vport->vf_info.link_state;
2936         ivf->spoofchk = vport->vf_info.spoofchk;
2937         ivf->trusted = vport->vf_info.trusted;
2938         ivf->min_tx_rate = 0;
2939         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2940         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2941         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2942         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2943         ether_addr_copy(ivf->mac, vport->vf_info.mac);
2944
2945         return 0;
2946 }
2947
2948 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2949                                    int link_state)
2950 {
2951         struct hclge_vport *vport = hclge_get_vport(handle);
2952         struct hclge_dev *hdev = vport->back;
2953
2954         vport = hclge_get_vf_vport(hdev, vf);
2955         if (!vport)
2956                 return -EINVAL;
2957
2958         vport->vf_info.link_state = link_state;
2959
2960         return 0;
2961 }
2962
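/* hclge_check_event_cause - read the vector0 interrupt source registers and
 * classify the event in priority order: IMP/global reset, then MSI-X error,
 * then mailbox (CMDQ RX), then other. *clearval is filled with the source
 * bits the caller should clear for the returned event type.
 */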
2963 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2964 {
2965         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2966
2967         /* fetch the events from their corresponding regs */
2968         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2969         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2970         msix_src_reg = hclge_read_dev(&hdev->hw,
2971                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2972
2973         /* Assumption: If by any chance reset and mailbox events are reported
2974          * together then we will only process the reset event in this go and
2975          * will defer the processing of the mailbox events. Since we would not
2976          * have cleared the RX CMDQ event this time, we would receive another
2977          * interrupt from H/W just for the mailbox.
2978          *
2979          * check for vector0 reset event sources
2980          */
2981         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2982                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2983                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2984                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2985                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2986                 hdev->rst_stats.imp_rst_cnt++;
2987                 return HCLGE_VECTOR0_EVENT_RST;
2988         }
2989
2990         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2991                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2992                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2993                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2994                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2995                 hdev->rst_stats.global_rst_cnt++;
2996                 return HCLGE_VECTOR0_EVENT_RST;
2997         }
2998
2999         /* check for vector0 msix event source */
3000         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3001                 *clearval = msix_src_reg;
3002                 return HCLGE_VECTOR0_EVENT_ERR;
3003         }
3004
3005         /* check for vector0 mailbox(=CMDQ RX) event source */
3006         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3007                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3008                 *clearval = cmdq_src_reg;
3009                 return HCLGE_VECTOR0_EVENT_MBX;
3010         }
3011
3012         /* print other vector0 event source */
3013         dev_info(&hdev->pdev->dev,
3014                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3015                  cmdq_src_reg, msix_src_reg);
3016         *clearval = msix_src_reg;
3017
3018         return HCLGE_VECTOR0_EVENT_OTHER;
3019 }
3020
3021 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3022                                     u32 regclr)
3023 {
3024         switch (event_type) {
3025         case HCLGE_VECTOR0_EVENT_RST:
3026                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3027                 break;
3028         case HCLGE_VECTOR0_EVENT_MBX:
3029                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3030                 break;
3031         default:
3032                 break;
3033         }
3034 }
3035
3036 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3037 {
3038         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3039                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3040                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3041                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3042         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3043 }
3044
3045 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3046 {
3047         writel(enable ? 1 : 0, vector->addr);
3048 }
3049
3050 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3051 {
3052         struct hclge_dev *hdev = data;
3053         u32 clearval = 0;
3054         u32 event_cause;
3055
3056         hclge_enable_vector(&hdev->misc_vector, false);
3057         event_cause = hclge_check_event_cause(hdev, &clearval);
3058
3059         /* vector 0 interrupt is shared with reset and mailbox source events. */
3060         switch (event_cause) {
3061         case HCLGE_VECTOR0_EVENT_ERR:
3062                 /* we do not know what type of reset is required now. This could
3063                  * only be decided after we fetch the type of errors which
3064                  * caused this event. Therefore, we will do the below for now:
3065                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3066                  *    have deferred the type of reset to be used.
3067                  * 2. Schedule the reset service task.
3068                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3069                  *    will fetch the correct type of reset. This would be done
3070                  *    by first decoding the types of errors.
3071                  */
3072                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3073                 /* fall through */
3074         case HCLGE_VECTOR0_EVENT_RST:
3075                 hclge_reset_task_schedule(hdev);
3076                 break;
3077         case HCLGE_VECTOR0_EVENT_MBX:
3078                 /* If we are here then,
3079                  * 1. Either we are not handling any mbx task and we are not
3080                  *    scheduled as well
3081                  *                        OR
3082                  * 2. We could be handling an mbx task but nothing more is
3083                  *    scheduled.
3084                  * In both cases, we should schedule the mbx task as there are
3085                  * more mbx messages reported by this interrupt.
3086                  */
3087                 hclge_mbx_task_schedule(hdev);
3088                 break;
3089         default:
3090                 dev_warn(&hdev->pdev->dev,
3091                          "received unknown or unhandled event of vector0\n");
3092                 break;
3093         }
3094
3095         hclge_clear_event_cause(hdev, event_cause, clearval);
3096
3097         /* Enable the interrupt if it is not caused by reset. And when
3098          * clearval is equal to 0, it means the interrupt status may have been
3099          * cleared by hardware before the driver reads the status register.
3100          * For this case, the vector0 interrupt should also be enabled.
3101          */
3102         if (!clearval ||
3103             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3104                 hclge_enable_vector(&hdev->misc_vector, true);
3105         }
3106
3107         return IRQ_HANDLED;
3108 }
3109
3110 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3111 {
3112         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3113                 dev_warn(&hdev->pdev->dev,
3114                          "vector(vector_id %d) has been freed.\n", vector_id);
3115                 return;
3116         }
3117
3118         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3119         hdev->num_msi_left += 1;
3120         hdev->num_msi_used -= 1;
3121 }
3122
3123 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3124 {
3125         struct hclge_misc_vector *vector = &hdev->misc_vector;
3126
3127         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3128
3129         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3130         hdev->vector_status[0] = 0;
3131
3132         hdev->num_msi_left -= 1;
3133         hdev->num_msi_used += 1;
3134 }
3135
3136 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3137                                       const cpumask_t *mask)
3138 {
3139         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3140                                               affinity_notify);
3141
3142         cpumask_copy(&hdev->affinity_mask, mask);
3143 }
3144
3145 static void hclge_irq_affinity_release(struct kref *ref)
3146 {
3147 }
3148
3149 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3150 {
3151         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3152                               &hdev->affinity_mask);
3153
3154         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3155         hdev->affinity_notify.release = hclge_irq_affinity_release;
3156         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3157                                   &hdev->affinity_notify);
3158 }
3159
3160 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3161 {
3162         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3163         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3164 }
3165
3166 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3167 {
3168         int ret;
3169
3170         hclge_get_misc_vector(hdev);
3171
3172         /* this would be explicitly freed in the end */
3173         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3174                  HCLGE_NAME, pci_name(hdev->pdev));
3175         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3176                           0, hdev->misc_vector.name, hdev);
3177         if (ret) {
3178                 hclge_free_vector(hdev, 0);
3179                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3180                         hdev->misc_vector.vector_irq);
3181         }
3182
3183         return ret;
3184 }
3185
3186 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3187 {
3188         free_irq(hdev->misc_vector.vector_irq, hdev);
3189         hclge_free_vector(hdev, 0);
3190 }
3191
3192 int hclge_notify_client(struct hclge_dev *hdev,
3193                         enum hnae3_reset_notify_type type)
3194 {
3195         struct hnae3_client *client = hdev->nic_client;
3196         u16 i;
3197
3198         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3199                 return 0;
3200
3201         if (!client->ops->reset_notify)
3202                 return -EOPNOTSUPP;
3203
3204         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3205                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3206                 int ret;
3207
3208                 ret = client->ops->reset_notify(handle, type);
3209                 if (ret) {
3210                         dev_err(&hdev->pdev->dev,
3211                                 "notify nic client failed %d(%d)\n", type, ret);
3212                         return ret;
3213                 }
3214         }
3215
3216         return 0;
3217 }
3218
3219 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3220                                     enum hnae3_reset_notify_type type)
3221 {
3222         struct hnae3_client *client = hdev->roce_client;
3223         int ret = 0;
3224         u16 i;
3225
3226         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3227                 return 0;
3228
3229         if (!client->ops->reset_notify)
3230                 return -EOPNOTSUPP;
3231
3232         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3233                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3234
3235                 ret = client->ops->reset_notify(handle, type);
3236                 if (ret) {
3237                         dev_err(&hdev->pdev->dev,
3238                                 "notify roce client failed %d(%d)",
3239                                 type, ret);
3240                         return ret;
3241                 }
3242         }
3243
3244         return ret;
3245 }
3246
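/* hclge_reset_wait - poll the reset status register of the current reset type
 * until hardware clears the reset bit, sleeping 100 ms between reads for at
 * most 350 iterations (about 35 seconds); returns -EBUSY on timeout.
 */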
3247 static int hclge_reset_wait(struct hclge_dev *hdev)
3248 {
3249 #define HCLGE_RESET_WAIT_MS     100
3250 #define HCLGE_RESET_WAIT_CNT    350
3251
3252         u32 val, reg, reg_bit;
3253         u32 cnt = 0;
3254
3255         switch (hdev->reset_type) {
3256         case HNAE3_IMP_RESET:
3257                 reg = HCLGE_GLOBAL_RESET_REG;
3258                 reg_bit = HCLGE_IMP_RESET_BIT;
3259                 break;
3260         case HNAE3_GLOBAL_RESET:
3261                 reg = HCLGE_GLOBAL_RESET_REG;
3262                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3263                 break;
3264         case HNAE3_FUNC_RESET:
3265                 reg = HCLGE_FUN_RST_ING;
3266                 reg_bit = HCLGE_FUN_RST_ING_B;
3267                 break;
3268         default:
3269                 dev_err(&hdev->pdev->dev,
3270                         "Wait for unsupported reset type: %d\n",
3271                         hdev->reset_type);
3272                 return -EINVAL;
3273         }
3274
3275         val = hclge_read_dev(&hdev->hw, reg);
3276         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3277                 msleep(HCLGE_RESET_WAIT_MS);
3278                 val = hclge_read_dev(&hdev->hw, reg);
3279                 cnt++;
3280         }
3281
3282         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3283                 dev_warn(&hdev->pdev->dev,
3284                          "Wait for reset timeout: %d\n", hdev->reset_type);
3285                 return -EBUSY;
3286         }
3287
3288         return 0;
3289 }
3290
3291 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3292 {
3293         struct hclge_vf_rst_cmd *req;
3294         struct hclge_desc desc;
3295
3296         req = (struct hclge_vf_rst_cmd *)desc.data;
3297         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3298         req->dest_vfid = func_id;
3299
3300         if (reset)
3301                 req->vf_rst = 0x1;
3302
3303         return hclge_cmd_send(&hdev->hw, &desc, 1);
3304 }
3305
3306 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3307 {
3308         int i;
3309
3310         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3311                 struct hclge_vport *vport = &hdev->vport[i];
3312                 int ret;
3313
3314                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3315                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3316                 if (ret) {
3317                         dev_err(&hdev->pdev->dev,
3318                                 "set vf(%u) rst failed %d!\n",
3319                                 vport->vport_id, ret);
3320                         return ret;
3321                 }
3322
3323                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3324                         continue;
3325
3326                 /* Inform VF to process the reset.
3327                  * hclge_inform_reset_assert_to_vf may fail if VF
3328                  * driver is not loaded.
3329                  */
3330                 ret = hclge_inform_reset_assert_to_vf(vport);
3331                 if (ret)
3332                         dev_warn(&hdev->pdev->dev,
3333                                  "inform reset to vf(%u) failed %d!\n",
3334                                  vport->vport_id, ret);
3335         }
3336
3337         return 0;
3338 }
3339
3340 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3341 {
3342         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3343             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3344             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3345                 return;
3346
3347         hclge_mbx_handler(hdev);
3348
3349         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3350 }
3351
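/* hclge_func_reset_sync_vf - before a PF or FLR reset, poll firmware until all
 * VFs report ready, servicing the mailbox in each iteration so VFs can bring
 * their netdevs down; gives up with a warning after HCLGE_PF_RESET_SYNC_CNT
 * polls, or falls back to a fixed delay on old firmware.
 */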
3352 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3353 {
3354         struct hclge_pf_rst_sync_cmd *req;
3355         struct hclge_desc desc;
3356         int cnt = 0;
3357         int ret;
3358
3359         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3360         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3361
3362         do {
3363                 /* vf needs to down the netdev by mbx during PF or FLR reset */
3364                 hclge_mailbox_service_task(hdev);
3365
3366                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3367                 /* for compatibility with old firmware, wait
3368                  * 100 ms for the VF to stop IO
3369                  */
3370                 if (ret == -EOPNOTSUPP) {
3371                         msleep(HCLGE_RESET_SYNC_TIME);
3372                         return;
3373                 } else if (ret) {
3374                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3375                                  ret);
3376                         return;
3377                 } else if (req->all_vf_ready) {
3378                         return;
3379                 }
3380                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3381                 hclge_cmd_reuse_desc(&desc, true);
3382         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3383
3384         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3385 }
3386
3387 void hclge_report_hw_error(struct hclge_dev *hdev,
3388                            enum hnae3_hw_error_type type)
3389 {
3390         struct hnae3_client *client = hdev->nic_client;
3391         u16 i;
3392
3393         if (!client || !client->ops->process_hw_error ||
3394             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3395                 return;
3396
3397         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3398                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3399 }
3400
3401 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3402 {
3403         u32 reg_val;
3404
3405         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3406         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3407                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3408                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3409                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3410         }
3411
3412         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3413                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3414                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3415                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3416         }
3417 }
3418
3419 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3420 {
3421         struct hclge_desc desc;
3422         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3423         int ret;
3424
3425         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3426         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3427         req->fun_reset_vfid = func_id;
3428
3429         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3430         if (ret)
3431                 dev_err(&hdev->pdev->dev,
3432                         "send function reset cmd fail, status =%d\n", ret);
3433
3434         return ret;
3435 }
3436
3437 static void hclge_do_reset(struct hclge_dev *hdev)
3438 {
3439         struct hnae3_handle *handle = &hdev->vport[0].nic;
3440         struct pci_dev *pdev = hdev->pdev;
3441         u32 val;
3442
3443         if (hclge_get_hw_reset_stat(handle)) {
3444                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3445                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3446                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3447                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3448                 return;
3449         }
3450
3451         switch (hdev->reset_type) {
3452         case HNAE3_GLOBAL_RESET:
3453                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3454                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3455                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3456                 dev_info(&pdev->dev, "Global Reset requested\n");
3457                 break;
3458         case HNAE3_FUNC_RESET:
3459                 dev_info(&pdev->dev, "PF Reset requested\n");
3460                 /* schedule again to check later */
3461                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3462                 hclge_reset_task_schedule(hdev);
3463                 break;
3464         default:
3465                 dev_warn(&pdev->dev,
3466                          "Unsupported reset type: %d\n", hdev->reset_type);
3467                 break;
3468         }
3469 }
3470
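/* hclge_get_reset_level - resolve any HNAE3_UNKNOWN_RESET request into a
 * concrete type, then return the highest priority reset level pending in
 * @addr (IMP > global > function > FLR) and clear it together with the lower
 * levels it covers. A level lower than the reset already in progress is
 * reported as HNAE3_NONE_RESET.
 */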
3471 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3472                                                    unsigned long *addr)
3473 {
3474         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3475         struct hclge_dev *hdev = ae_dev->priv;
3476
3477         /* first, resolve any unknown reset type to the known type(s) */
3478         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3479                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3480                                         HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3481                 /* we will intentionally ignore any errors from this function
3482                  * as we will end up in *some* reset request in any case
3483                  */
3484                 if (hclge_handle_hw_msix_error(hdev, addr))
3485                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3486                                  msix_sts_reg);
3487
3488                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3489                 /* We deferred the clearing of the error event which caused
3490                  * the interrupt, since it was not possible to do that in
3491                  * interrupt context (and this is the reason we introduced the
3492                  * new UNKNOWN reset type). Now that the errors have been
3493                  * handled and cleared in hardware, we can safely enable
3494                  * interrupts. This is an exception to the norm.
3495                  */
3496                 hclge_enable_vector(&hdev->misc_vector, true);
3497         }
3498
3499         /* return the highest priority reset level amongst all */
3500         if (test_bit(HNAE3_IMP_RESET, addr)) {
3501                 rst_level = HNAE3_IMP_RESET;
3502                 clear_bit(HNAE3_IMP_RESET, addr);
3503                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3504                 clear_bit(HNAE3_FUNC_RESET, addr);
3505         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3506                 rst_level = HNAE3_GLOBAL_RESET;
3507                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3508                 clear_bit(HNAE3_FUNC_RESET, addr);
3509         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3510                 rst_level = HNAE3_FUNC_RESET;
3511                 clear_bit(HNAE3_FUNC_RESET, addr);
3512         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3513                 rst_level = HNAE3_FLR_RESET;
3514                 clear_bit(HNAE3_FLR_RESET, addr);
3515         }
3516
3517         if (hdev->reset_type != HNAE3_NONE_RESET &&
3518             rst_level < hdev->reset_type)
3519                 return HNAE3_NONE_RESET;
3520
3521         return rst_level;
3522 }
3523
3524 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3525 {
3526         u32 clearval = 0;
3527
3528         switch (hdev->reset_type) {
3529         case HNAE3_IMP_RESET:
3530                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3531                 break;
3532         case HNAE3_GLOBAL_RESET:
3533                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3534                 break;
3535         default:
3536                 break;
3537         }
3538
3539         if (!clearval)
3540                 return;
3541
3542         /* For revision 0x20, the reset interrupt source
3543          * can only be cleared after the hardware reset is done
3544          */
3545         if (hdev->pdev->revision == 0x20)
3546                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3547                                 clearval);
3548
3549         hclge_enable_vector(&hdev->misc_vector, true);
3550 }
3551
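/* hclge_reset_handshake - set or clear the software reset-ready bit in the
 * NIC CSQ depth register; this handshake tells hardware whether the driver
 * has finished its reset preparation.
 */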
3552 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3553 {
3554         u32 reg_val;
3555
3556         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3557         if (enable)
3558                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3559         else
3560                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3561
3562         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3563 }
3564
3565 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3566 {
3567         int ret;
3568
3569         ret = hclge_set_all_vf_rst(hdev, true);
3570         if (ret)
3571                 return ret;
3572
3573         hclge_func_reset_sync_vf(hdev);
3574
3575         return 0;
3576 }
3577
3578 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3579 {
3580         u32 reg_val;
3581         int ret = 0;
3582
3583         switch (hdev->reset_type) {
3584         case HNAE3_FUNC_RESET:
3585                 ret = hclge_func_reset_notify_vf(hdev);
3586                 if (ret)
3587                         return ret;
3588
3589                 ret = hclge_func_reset_cmd(hdev, 0);
3590                 if (ret) {
3591                         dev_err(&hdev->pdev->dev,
3592                                 "asserting function reset fail %d!\n", ret);
3593                         return ret;
3594                 }
3595
3596                 /* After performing the pf reset, it is not necessary to do the
3597                  * mailbox handling or send any command to firmware, because
3598                  * any mailbox handling or command to firmware is only valid
3599                  * after hclge_cmd_init is called.
3600                  */
3601                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3602                 hdev->rst_stats.pf_rst_cnt++;
3603                 break;
3604         case HNAE3_FLR_RESET:
3605                 ret = hclge_func_reset_notify_vf(hdev);
3606                 if (ret)
3607                         return ret;
3608                 break;
3609         case HNAE3_IMP_RESET:
3610                 hclge_handle_imp_error(hdev);
3611                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3612                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3613                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3614                 break;
3615         default:
3616                 break;
3617         }
3618
3619         /* inform hardware that preparatory work is done */
3620         msleep(HCLGE_RESET_SYNC_TIME);
3621         hclge_reset_handshake(hdev, true);
3622         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3623
3624         return ret;
3625 }
3626
3627 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3628 {
3629 #define MAX_RESET_FAIL_CNT 5
3630
3631         if (hdev->reset_pending) {
3632                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3633                          hdev->reset_pending);
3634                 return true;
3635         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3636                    HCLGE_RESET_INT_M) {
3637                 dev_info(&hdev->pdev->dev,
3638                          "reset failed because new reset interrupt\n");
3639                 hclge_clear_reset_cause(hdev);
3640                 return false;
3641         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3642                 hdev->rst_stats.reset_fail_cnt++;
3643                 set_bit(hdev->reset_type, &hdev->reset_pending);
3644                 dev_info(&hdev->pdev->dev,
3645                          "re-schedule reset task(%u)\n",
3646                          hdev->rst_stats.reset_fail_cnt);
3647                 return true;
3648         }
3649
3650         hclge_clear_reset_cause(hdev);
3651
3652         /* recover the handshake status when reset fails */
3653         hclge_reset_handshake(hdev, true);
3654
3655         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3656
3657         hclge_dbg_dump_rst_info(hdev);
3658
3659         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3660
3661         return false;
3662 }
3663
3664 static int hclge_set_rst_done(struct hclge_dev *hdev)
3665 {
3666         struct hclge_pf_rst_done_cmd *req;
3667         struct hclge_desc desc;
3668         int ret;
3669
3670         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3671         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3672         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3673
3674         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3675         /* To be compatible with the old firmware, which does not support
3676          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3677          * return success
3678          */
3679         if (ret == -EOPNOTSUPP) {
3680                 dev_warn(&hdev->pdev->dev,
3681                          "current firmware does not support command(0x%x)!\n",
3682                          HCLGE_OPC_PF_RST_DONE);
3683                 return 0;
3684         } else if (ret) {
3685                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3686                         ret);
3687         }
3688
3689         return ret;
3690 }
3691
3692 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3693 {
3694         int ret = 0;
3695
3696         switch (hdev->reset_type) {
3697         case HNAE3_FUNC_RESET:
3698                 /* fall through */
3699         case HNAE3_FLR_RESET:
3700                 ret = hclge_set_all_vf_rst(hdev, false);
3701                 break;
3702         case HNAE3_GLOBAL_RESET:
3703                 /* fall through */
3704         case HNAE3_IMP_RESET:
3705                 ret = hclge_set_rst_done(hdev);
3706                 break;
3707         default:
3708                 break;
3709         }
3710
3711         /* clear up the handshake status after re-initialization is done */
3712         hclge_reset_handshake(hdev, false);
3713
3714         return ret;
3715 }
3716
3717 static int hclge_reset_stack(struct hclge_dev *hdev)
3718 {
3719         int ret;
3720
3721         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3722         if (ret)
3723                 return ret;
3724
3725         ret = hclge_reset_ae_dev(hdev->ae_dev);
3726         if (ret)
3727                 return ret;
3728
3729         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3730         if (ret)
3731                 return ret;
3732
3733         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3734 }
3735
3736 static int hclge_reset_prepare(struct hclge_dev *hdev)
3737 {
3738         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3739         int ret;
3740
3741         /* Initialize ae_dev reset status as well, in case the enet layer
3742          * wants to know if the device is undergoing reset
3743          */
3744         ae_dev->reset_type = hdev->reset_type;
3745         hdev->rst_stats.reset_cnt++;
3746         /* perform reset of the stack & ae device for a client */
3747         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3748         if (ret)
3749                 return ret;
3750
3751         rtnl_lock();
3752         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3753         rtnl_unlock();
3754         if (ret)
3755                 return ret;
3756
3757         return hclge_reset_prepare_wait(hdev);
3758 }
3759
3760 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3761 {
3762         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3763         enum hnae3_reset_type reset_level;
3764         int ret;
3765
3766         hdev->rst_stats.hw_reset_done_cnt++;
3767
3768         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3769         if (ret)
3770                 return ret;
3771
3772         rtnl_lock();
3773         ret = hclge_reset_stack(hdev);
3774         rtnl_unlock();
3775         if (ret)
3776                 return ret;
3777
3778         hclge_clear_reset_cause(hdev);
3779
3780         ret = hclge_reset_prepare_up(hdev);
3781         if (ret)
3782                 return ret;
3783
3784
3785         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3786         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3787          * times
3788          */
3789         if (ret &&
3790             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3791                 return ret;
3792
3793         rtnl_lock();
3794         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3795         rtnl_unlock();
3796         if (ret)
3797                 return ret;
3798
3799         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3800         if (ret)
3801                 return ret;
3802
3803         hdev->last_reset_time = jiffies;
3804         hdev->rst_stats.reset_fail_cnt = 0;
3805         hdev->rst_stats.reset_done_cnt++;
3806         ae_dev->reset_type = HNAE3_NONE_RESET;
3807         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3808
3809         /* if default_reset_request has a higher level reset request,
3810          * it should be handled as soon as possible, since some errors
3811          * need this kind of reset to be fixed.
3812          */
3813         reset_level = hclge_get_reset_level(ae_dev,
3814                                             &hdev->default_reset_request);
3815         if (reset_level != HNAE3_NONE_RESET)
3816                 set_bit(reset_level, &hdev->reset_request);
3817
3818         return 0;
3819 }
3820
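/* hclge_reset - run the full reset sequence: prepare (notify clients, assert
 * the reset and sync with VFs), wait for hardware to finish, then rebuild the
 * stack; on failure, hclge_reset_err_handle() decides whether to reschedule.
 */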
3821 static void hclge_reset(struct hclge_dev *hdev)
3822 {
3823         if (hclge_reset_prepare(hdev))
3824                 goto err_reset;
3825
3826         if (hclge_reset_wait(hdev))
3827                 goto err_reset;
3828
3829         if (hclge_reset_rebuild(hdev))
3830                 goto err_reset;
3831
3832         return;
3833
3834 err_reset:
3835         if (hclge_reset_err_handle(hdev))
3836                 hclge_reset_task_schedule(hdev);
3837 }
3838
3839 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3840 {
3841         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3842         struct hclge_dev *hdev = ae_dev->priv;
3843
3844         /* We might end up getting called broadly because of the 2 cases below:
3845          * 1. A recoverable error was conveyed through APEI and the only way to
3846          *    bring back normalcy is to reset.
3847          * 2. A new reset request from the stack due to timeout
3848          *
3849          * For the first case, the error event might not have an ae handle
3850          * available. Check if this is a new reset request and we are not here
3851          * just because the last reset attempt did not succeed and the watchdog
3852          * hit us again. We will know this if the last reset request did not
3853          * occur very recently (watchdog timer = 5*HZ, so let us check after a
3854          * sufficiently long time, say 4*5*HZ). In case of a new request we
3855          * reset the "reset level" to PF reset. And if it is a repeat reset
3856          * request of the most recent one then we want to make sure we throttle
3857          * the reset request. Therefore, we will not allow it again before 3*HZ.
3858          */
3859         if (!handle)
3860                 handle = &hdev->vport[0].nic;
3861
3862         if (time_before(jiffies, (hdev->last_reset_time +
3863                                   HCLGE_RESET_INTERVAL))) {
3864                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3865                 return;
3866         } else if (hdev->default_reset_request) {
3867                 hdev->reset_level =
3868                         hclge_get_reset_level(ae_dev,
3869                                               &hdev->default_reset_request);
3870         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3871                 hdev->reset_level = HNAE3_FUNC_RESET;
3872         }
3873
3874         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3875                  hdev->reset_level);
3876
3877         /* request reset & schedule reset task */
3878         set_bit(hdev->reset_level, &hdev->reset_request);
3879         hclge_reset_task_schedule(hdev);
3880
3881         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3882                 hdev->reset_level++;
3883 }
3884
3885 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3886                                         enum hnae3_reset_type rst_type)
3887 {
3888         struct hclge_dev *hdev = ae_dev->priv;
3889
3890         set_bit(rst_type, &hdev->default_reset_request);
3891 }
3892
3893 static void hclge_reset_timer(struct timer_list *t)
3894 {
3895         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3896
3897         /* if default_reset_request has no value, it means that this reset
3898          * request has already been handled, so just return here
3899          */
3900         if (!hdev->default_reset_request)
3901                 return;
3902
3903         dev_info(&hdev->pdev->dev,
3904                  "triggering reset in reset timer\n");
3905         hclge_reset_event(hdev->pdev, NULL);
3906 }
3907
3908 static void hclge_reset_subtask(struct hclge_dev *hdev)
3909 {
3910         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3911
3912         /* check if there is any ongoing reset in the hardware. This status can
3913          * be checked from reset_pending. If there is, then we need to wait for
3914          * hardware to complete the reset.
3915          *    a. If we are able to figure out in a reasonable time that hardware
3916          *       has fully reset, then we can proceed with the driver and client
3917          *       reset.
3918          *    b. else, we can come back later to check this status so re-schedule
3919          *       now.
3920          */
3921         hdev->last_reset_time = jiffies;
3922         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3923         if (hdev->reset_type != HNAE3_NONE_RESET)
3924                 hclge_reset(hdev);
3925
3926         /* check if we got any *new* reset requests to be honored */
3927         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3928         if (hdev->reset_type != HNAE3_NONE_RESET)
3929                 hclge_do_reset(hdev);
3930
3931         hdev->reset_type = HNAE3_NONE_RESET;
3932 }
3933
3934 static void hclge_reset_service_task(struct hclge_dev *hdev)
3935 {
3936         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3937                 return;
3938
3939         down(&hdev->reset_sem);
3940         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3941
3942         hclge_reset_subtask(hdev);
3943
3944         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3945         up(&hdev->reset_sem);
3946 }
3947
3948 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3949 {
3950         int i;
3951
3952         /* start from vport 1, since the PF is always alive */
3953         for (i = 1; i < hdev->num_alloc_vport; i++) {
3954                 struct hclge_vport *vport = &hdev->vport[i];
3955
3956                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3957                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3958
3959                 /* If vf is not alive, set to default value */
3960                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3961                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3962         }
3963 }
3964
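/* hclge_periodic_service_task - run the periodic work (vport alive check,
 * stats, port info, VLAN and aRFS sync) at most about once per second; the
 * link status is refreshed on every invocation, and an early run reschedules
 * itself for the remainder of the interval.
 */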
3965 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3966 {
3967         unsigned long delta = round_jiffies_relative(HZ);
3968
3969         /* Always handle the link updating to make sure link state is
3970          * updated when it is triggered by mbx.
3971          */
3972         hclge_update_link_status(hdev);
3973
3974         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3975                 delta = jiffies - hdev->last_serv_processed;
3976
3977                 if (delta < round_jiffies_relative(HZ)) {
3978                         delta = round_jiffies_relative(HZ) - delta;
3979                         goto out;
3980                 }
3981         }
3982
3983         hdev->serv_processed_cnt++;
3984         hclge_update_vport_alive(hdev);
3985
3986         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3987                 hdev->last_serv_processed = jiffies;
3988                 goto out;
3989         }
3990
3991         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3992                 hclge_update_stats_for_all(hdev);
3993
3994         hclge_update_port_info(hdev);
3995         hclge_sync_vlan_filter(hdev);
3996
3997         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3998                 hclge_rfs_filter_expire(hdev);
3999
4000         hdev->last_serv_processed = jiffies;
4001
4002 out:
4003         hclge_task_schedule(hdev, delta);
4004 }
4005
4006 static void hclge_service_task(struct work_struct *work)
4007 {
4008         struct hclge_dev *hdev =
4009                 container_of(work, struct hclge_dev, service_task.work);
4010
4011         hclge_reset_service_task(hdev);
4012         hclge_mailbox_service_task(hdev);
4013         hclge_periodic_service_task(hdev);
4014
4015         /* Handle reset and mbx again in case periodical task delays the
4016          * handling by calling hclge_task_schedule() in
4017          * hclge_periodic_service_task().
4018          */
4019         hclge_reset_service_task(hdev);
4020         hclge_mailbox_service_task(hdev);
4021 }
4022
4023 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4024 {
4025         /* VF handle has no client */
4026         if (!handle->client)
4027                 return container_of(handle, struct hclge_vport, nic);
4028         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4029                 return container_of(handle, struct hclge_vport, roce);
4030         else
4031                 return container_of(handle, struct hclge_vport, nic);
4032 }
4033
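/* hclge_get_vector - allocate up to @vector_num unused MSI vectors for this
 * vport (vector 0 stays reserved for the misc interrupt), fill @vector_info
 * with the irq number and io address of each, and return how many were
 * actually allocated.
 */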
4034 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4035                             struct hnae3_vector_info *vector_info)
4036 {
4037         struct hclge_vport *vport = hclge_get_vport(handle);
4038         struct hnae3_vector_info *vector = vector_info;
4039         struct hclge_dev *hdev = vport->back;
4040         int alloc = 0;
4041         int i, j;
4042
4043         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4044         vector_num = min(hdev->num_msi_left, vector_num);
4045
4046         for (j = 0; j < vector_num; j++) {
4047                 for (i = 1; i < hdev->num_msi; i++) {
4048                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4049                                 vector->vector = pci_irq_vector(hdev->pdev, i);
4050                                 vector->io_addr = hdev->hw.io_base +
4051                                         HCLGE_VECTOR_REG_BASE +
4052                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4053                                         vport->vport_id *
4054                                         HCLGE_VECTOR_VF_OFFSET;
4055                                 hdev->vector_status[i] = vport->vport_id;
4056                                 hdev->vector_irq[i] = vector->vector;
4057
4058                                 vector++;
4059                                 alloc++;
4060
4061                                 break;
4062                         }
4063                 }
4064         }
4065         hdev->num_msi_left -= alloc;
4066         hdev->num_msi_used += alloc;
4067
4068         return alloc;
4069 }
4070
4071 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4072 {
4073         int i;
4074
4075         for (i = 0; i < hdev->num_msi; i++)
4076                 if (vector == hdev->vector_irq[i])
4077                         return i;
4078
4079         return -EINVAL;
4080 }
4081
4082 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4083 {
4084         struct hclge_vport *vport = hclge_get_vport(handle);
4085         struct hclge_dev *hdev = vport->back;
4086         int vector_id;
4087
4088         vector_id = hclge_get_vector_index(hdev, vector);
4089         if (vector_id < 0) {
4090                 dev_err(&hdev->pdev->dev,
4091                         "Get vector index fail. vector = %d\n", vector);
4092                 return vector_id;
4093         }
4094
4095         hclge_free_vector(hdev, vector_id);
4096
4097         return 0;
4098 }
4099
4100 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4101 {
4102         return HCLGE_RSS_KEY_SIZE;
4103 }
4104
4105 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4106 {
4107         return HCLGE_RSS_IND_TBL_SIZE;
4108 }
4109
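/* hclge_set_rss_algo_key - program the RSS hash algorithm and hash key into
 * hardware; the key is written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one
 * command descriptor per chunk, with the chunk offset carried in hash_config.
 */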
4110 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4111                                   const u8 hfunc, const u8 *key)
4112 {
4113         struct hclge_rss_config_cmd *req;
4114         unsigned int key_offset = 0;
4115         struct hclge_desc desc;
4116         int key_counts;
4117         int key_size;
4118         int ret;
4119
4120         key_counts = HCLGE_RSS_KEY_SIZE;
4121         req = (struct hclge_rss_config_cmd *)desc.data;
4122
4123         while (key_counts) {
4124                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4125                                            false);
4126
4127                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4128                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4129
4130                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4131                 memcpy(req->hash_key,
4132                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4133
4134                 key_counts -= key_size;
4135                 key_offset++;
4136                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4137                 if (ret) {
4138                         dev_err(&hdev->pdev->dev,
4139                                 "Configure RSS config fail, status = %d\n",
4140                                 ret);
4141                         return ret;
4142                 }
4143         }
4144         return 0;
4145 }
4146
4147 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4148 {
4149         struct hclge_rss_indirection_table_cmd *req;
4150         struct hclge_desc desc;
4151         int i, j;
4152         int ret;
4153
4154         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4155
4156         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4157                 hclge_cmd_setup_basic_desc
4158                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4159
4160                 req->start_table_index =
4161                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4162                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4163
4164                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4165                         req->rss_result[j] =
4166                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4167
4168                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4169                 if (ret) {
4170                         dev_err(&hdev->pdev->dev,
4171                                 "Configure rss indir table fail,status = %d\n",
4172                                 ret);
4173                         return ret;
4174                 }
4175         }
4176         return 0;
4177 }
4178
4179 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4180                                  u16 *tc_size, u16 *tc_offset)
4181 {
4182         struct hclge_rss_tc_mode_cmd *req;
4183         struct hclge_desc desc;
4184         int ret;
4185         int i;
4186
4187         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4188         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4189
4190         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4191                 u16 mode = 0;
4192
4193                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4194                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4195                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4196                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4197                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4198
4199                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4200         }
4201
4202         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4203         if (ret)
4204                 dev_err(&hdev->pdev->dev,
4205                         "Configure rss tc mode fail, status = %d\n", ret);
4206
4207         return ret;
4208 }
4209
4210 static void hclge_get_rss_type(struct hclge_vport *vport)
4211 {
4212         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4213             vport->rss_tuple_sets.ipv4_udp_en ||
4214             vport->rss_tuple_sets.ipv4_sctp_en ||
4215             vport->rss_tuple_sets.ipv6_tcp_en ||
4216             vport->rss_tuple_sets.ipv6_udp_en ||
4217             vport->rss_tuple_sets.ipv6_sctp_en)
4218                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4219         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4220                  vport->rss_tuple_sets.ipv6_fragment_en)
4221                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4222         else
4223                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4224 }
4225
4226 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4227 {
4228         struct hclge_rss_input_tuple_cmd *req;
4229         struct hclge_desc desc;
4230         int ret;
4231
4232         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4233
4234         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4235
4236         /* Get the tuple cfg from pf */
4237         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4238         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4239         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4240         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4241         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4242         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4243         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4244         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4245         hclge_get_rss_type(&hdev->vport[0]);
4246         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4247         if (ret)
4248                 dev_err(&hdev->pdev->dev,
4249                         "Configure rss input fail, status = %d\n", ret);
4250         return ret;
4251 }
4252
4253 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4254                          u8 *key, u8 *hfunc)
4255 {
4256         struct hclge_vport *vport = hclge_get_vport(handle);
4257         int i;
4258
4259         /* Get hash algorithm */
4260         if (hfunc) {
4261                 switch (vport->rss_algo) {
4262                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4263                         *hfunc = ETH_RSS_HASH_TOP;
4264                         break;
4265                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4266                         *hfunc = ETH_RSS_HASH_XOR;
4267                         break;
4268                 default:
4269                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4270                         break;
4271                 }
4272         }
4273
4274         /* Get the RSS Key required by the user */
4275         if (key)
4276                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4277
4278         /* Get indirect table */
4279         if (indir)
4280                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4281                         indir[i] = vport->rss_indirection_tbl[i];
4282
4283         return 0;
4284 }
4285
4286 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4287                          const u8 *key, const u8 hfunc)
4288 {
4289         struct hclge_vport *vport = hclge_get_vport(handle);
4290         struct hclge_dev *hdev = vport->back;
4291         u8 hash_algo;
4292         int ret, i;
4293
4294         /* Set the RSS Hash Key if specified by the user */
4295         if (key) {
4296                 switch (hfunc) {
4297                 case ETH_RSS_HASH_TOP:
4298                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4299                         break;
4300                 case ETH_RSS_HASH_XOR:
4301                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4302                         break;
4303                 case ETH_RSS_HASH_NO_CHANGE:
4304                         hash_algo = vport->rss_algo;
4305                         break;
4306                 default:
4307                         return -EINVAL;
4308                 }
4309
4310                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4311                 if (ret)
4312                         return ret;
4313
4314                 /* Update the shadow RSS key with the user specified key */
4315                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4316                 vport->rss_algo = hash_algo;
4317         }
4318
4319         /* Update the shadow RSS table with user specified qids */
4320         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4321                 vport->rss_indirection_tbl[i] = indir[i];
4322
4323         /* Update the hardware */
4324         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4325 }
4326
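/* Translate the ethtool RXH_* hash fields in @nfc into the HCLGE tuple bits:
 * source/destination IP, source/destination L4 port, and the vlan tag bit
 * for SCTP flows.
 */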
4327 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4328 {
4329         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4330
4331         if (nfc->data & RXH_L4_B_2_3)
4332                 hash_sets |= HCLGE_D_PORT_BIT;
4333         else
4334                 hash_sets &= ~HCLGE_D_PORT_BIT;
4335
4336         if (nfc->data & RXH_IP_SRC)
4337                 hash_sets |= HCLGE_S_IP_BIT;
4338         else
4339                 hash_sets &= ~HCLGE_S_IP_BIT;
4340
4341         if (nfc->data & RXH_IP_DST)
4342                 hash_sets |= HCLGE_D_IP_BIT;
4343         else
4344                 hash_sets &= ~HCLGE_D_IP_BIT;
4345
4346         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4347                 hash_sets |= HCLGE_V_TAG_BIT;
4348
4349         return hash_sets;
4350 }
4351
4352 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4353                                struct ethtool_rxnfc *nfc)
4354 {
4355         struct hclge_vport *vport = hclge_get_vport(handle);
4356         struct hclge_dev *hdev = vport->back;
4357         struct hclge_rss_input_tuple_cmd *req;
4358         struct hclge_desc desc;
4359         u8 tuple_sets;
4360         int ret;
4361
4362         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4363                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4364                 return -EINVAL;
4365
4366         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4367         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4368
4369         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4370         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4371         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4372         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4373         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4374         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4375         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4376         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4377
4378         tuple_sets = hclge_get_rss_hash_bits(nfc);
4379         switch (nfc->flow_type) {
4380         case TCP_V4_FLOW:
4381                 req->ipv4_tcp_en = tuple_sets;
4382                 break;
4383         case TCP_V6_FLOW:
4384                 req->ipv6_tcp_en = tuple_sets;
4385                 break;
4386         case UDP_V4_FLOW:
4387                 req->ipv4_udp_en = tuple_sets;
4388                 break;
4389         case UDP_V6_FLOW:
4390                 req->ipv6_udp_en = tuple_sets;
4391                 break;
4392         case SCTP_V4_FLOW:
4393                 req->ipv4_sctp_en = tuple_sets;
4394                 break;
4395         case SCTP_V6_FLOW:
4396                 if ((nfc->data & RXH_L4_B_0_1) ||
4397                     (nfc->data & RXH_L4_B_2_3))
4398                         return -EINVAL;
4399
4400                 req->ipv6_sctp_en = tuple_sets;
4401                 break;
4402         case IPV4_FLOW:
4403                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4404                 break;
4405         case IPV6_FLOW:
4406                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4407                 break;
4408         default:
4409                 return -EINVAL;
4410         }
4411
4412         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4413         if (ret) {
4414                 dev_err(&hdev->pdev->dev,
4415                         "Set rss tuple fail, status = %d\n", ret);
4416                 return ret;
4417         }
4418
4419         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4420         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4421         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4422         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4423         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4424         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4425         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4426         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4427         hclge_get_rss_type(vport);
4428         return 0;
4429 }
4430
4431 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4432                                struct ethtool_rxnfc *nfc)
4433 {
4434         struct hclge_vport *vport = hclge_get_vport(handle);
4435         u8 tuple_sets;
4436
4437         nfc->data = 0;
4438
4439         switch (nfc->flow_type) {
4440         case TCP_V4_FLOW:
4441                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4442                 break;
4443         case UDP_V4_FLOW:
4444                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4445                 break;
4446         case TCP_V6_FLOW:
4447                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4448                 break;
4449         case UDP_V6_FLOW:
4450                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4451                 break;
4452         case SCTP_V4_FLOW:
4453                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4454                 break;
4455         case SCTP_V6_FLOW:
4456                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4457                 break;
4458         case IPV4_FLOW:
4459         case IPV6_FLOW:
4460                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4461                 break;
4462         default:
4463                 return -EINVAL;
4464         }
4465
4466         if (!tuple_sets)
4467                 return 0;
4468
4469         if (tuple_sets & HCLGE_D_PORT_BIT)
4470                 nfc->data |= RXH_L4_B_2_3;
4471         if (tuple_sets & HCLGE_S_PORT_BIT)
4472                 nfc->data |= RXH_L4_B_0_1;
4473         if (tuple_sets & HCLGE_D_IP_BIT)
4474                 nfc->data |= RXH_IP_DST;
4475         if (tuple_sets & HCLGE_S_IP_BIT)
4476                 nfc->data |= RXH_IP_SRC;
4477
4478         return 0;
4479 }
4480
4481 static int hclge_get_tc_size(struct hnae3_handle *handle)
4482 {
4483         struct hclge_vport *vport = hclge_get_vport(handle);
4484         struct hclge_dev *hdev = vport->back;
4485
4486         return hdev->rss_size_max;
4487 }
4488
4489 int hclge_rss_init_hw(struct hclge_dev *hdev)
4490 {
4491         struct hclge_vport *vport = hdev->vport;
4492         u8 *rss_indir = vport[0].rss_indirection_tbl;
4493         u16 rss_size = vport[0].alloc_rss_size;
4494         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4495         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4496         u8 *key = vport[0].rss_hash_key;
4497         u8 hfunc = vport[0].rss_algo;
4498         u16 tc_valid[HCLGE_MAX_TC_NUM];
4499         u16 roundup_size;
4500         unsigned int i;
4501         int ret;
4502
4503         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4504         if (ret)
4505                 return ret;
4506
4507         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4508         if (ret)
4509                 return ret;
4510
4511         ret = hclge_set_rss_input_tuple(hdev);
4512         if (ret)
4513                 return ret;
4514
4515         /* Each TC has the same queue size, and the tc_size set to hardware is
4516          * the log2 of the roundup power of two of rss_size; the actual queue
4517          * size is limited by the indirection table.
4518          */
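        /* e.g. rss_size = 24: roundup_pow_of_two(24) = 32 and ilog2(32) = 5,
         * so the tc_size written to hardware is 5, while the indirection
         * table still spreads traffic over only the 24 real queues.
         */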
4519         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4520                 dev_err(&hdev->pdev->dev,
4521                         "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4522                         rss_size);
4523                 return -EINVAL;
4524         }
4525
4526         roundup_size = roundup_pow_of_two(rss_size);
4527         roundup_size = ilog2(roundup_size);
4528
4529         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4530                 tc_valid[i] = 0;
4531
4532                 if (!(hdev->hw_tc_map & BIT(i)))
4533                         continue;
4534
4535                 tc_valid[i] = 1;
4536                 tc_size[i] = roundup_size;
4537                 tc_offset[i] = rss_size * i;
4538         }
4539
4540         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4541 }
4542
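/* Initialize each vport's RSS indirection table to a round-robin mapping:
 * entry i points to queue (i % alloc_rss_size).
 */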
4543 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4544 {
4545         struct hclge_vport *vport = hdev->vport;
4546         int i, j;
4547
4548         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4549                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4550                         vport[j].rss_indirection_tbl[i] =
4551                                 i % vport[j].alloc_rss_size;
4552         }
4553 }
4554
4555 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4556 {
4557         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4558         struct hclge_vport *vport = hdev->vport;
4559
4560         if (hdev->pdev->revision >= 0x21)
4561                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4562
4563         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4564                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4565                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4566                 vport[i].rss_tuple_sets.ipv4_udp_en =
4567                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4568                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4569                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4570                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4571                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4572                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4573                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4574                 vport[i].rss_tuple_sets.ipv6_udp_en =
4575                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4576                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4577                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4578                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4579                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4580
4581                 vport[i].rss_algo = rss_algo;
4582
4583                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4584                        HCLGE_RSS_KEY_SIZE);
4585         }
4586
4587         hclge_rss_indir_init_cfg(hdev);
4588 }
4589
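/* Map (en == true) or unmap (en == false) every ring in @ring_chain to/from
 * @vector_id. The rings are pushed to firmware in batches of
 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries per command descriptor.
 */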
4590 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4591                                 int vector_id, bool en,
4592                                 struct hnae3_ring_chain_node *ring_chain)
4593 {
4594         struct hclge_dev *hdev = vport->back;
4595         struct hnae3_ring_chain_node *node;
4596         struct hclge_desc desc;
4597         struct hclge_ctrl_vector_chain_cmd *req =
4598                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4599         enum hclge_cmd_status status;
4600         enum hclge_opcode_type op;
4601         u16 tqp_type_and_id;
4602         int i;
4603
4604         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4605         hclge_cmd_setup_basic_desc(&desc, op, false);
4606         req->int_vector_id = vector_id;
4607
4608         i = 0;
4609         for (node = ring_chain; node; node = node->next) {
4610                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4611                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4612                                 HCLGE_INT_TYPE_S,
4613                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4614                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4615                                 HCLGE_TQP_ID_S, node->tqp_index);
4616                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4617                                 HCLGE_INT_GL_IDX_S,
4618                                 hnae3_get_field(node->int_gl_idx,
4619                                                 HNAE3_RING_GL_IDX_M,
4620                                                 HNAE3_RING_GL_IDX_S));
4621                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4622                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4623                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4624                         req->vfid = vport->vport_id;
4625
4626                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4627                         if (status) {
4628                                 dev_err(&hdev->pdev->dev,
4629                                         "Map TQP fail, status is %d.\n",
4630                                         status);
4631                                 return -EIO;
4632                         }
4633                         i = 0;
4634
4635                         hclge_cmd_setup_basic_desc(&desc,
4636                                                    op,
4637                                                    false);
4638                         req->int_vector_id = vector_id;
4639                 }
4640         }
4641
4642         if (i > 0) {
4643                 req->int_cause_num = i;
4644                 req->vfid = vport->vport_id;
4645                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4646                 if (status) {
4647                         dev_err(&hdev->pdev->dev,
4648                                 "Map TQP fail, status is %d.\n", status);
4649                         return -EIO;
4650                 }
4651         }
4652
4653         return 0;
4654 }
4655
4656 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4657                                     struct hnae3_ring_chain_node *ring_chain)
4658 {
4659         struct hclge_vport *vport = hclge_get_vport(handle);
4660         struct hclge_dev *hdev = vport->back;
4661         int vector_id;
4662
4663         vector_id = hclge_get_vector_index(hdev, vector);
4664         if (vector_id < 0) {
4665                 dev_err(&hdev->pdev->dev,
4666                         "failed to get vector index. vector=%d\n", vector);
4667                 return vector_id;
4668         }
4669
4670         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4671 }
4672
4673 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4674                                        struct hnae3_ring_chain_node *ring_chain)
4675 {
4676         struct hclge_vport *vport = hclge_get_vport(handle);
4677         struct hclge_dev *hdev = vport->back;
4678         int vector_id, ret;
4679
4680         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4681                 return 0;
4682
4683         vector_id = hclge_get_vector_index(hdev, vector);
4684         if (vector_id < 0) {
4685                 dev_err(&handle->pdev->dev,
4686                         "Get vector index fail. ret = %d\n", vector_id);
4687                 return vector_id;
4688         }
4689
4690         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4691         if (ret)
4692                 dev_err(&handle->pdev->dev,
4693                         "Unmap ring from vector fail. vectorid=%d, ret = %d\n",
4694                         vector_id, ret);
4695
4696         return ret;
4697 }
4698
4699 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4700                                       struct hclge_promisc_param *param)
4701 {
4702         struct hclge_promisc_cfg_cmd *req;
4703         struct hclge_desc desc;
4704         int ret;
4705
4706         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4707
4708         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4709         req->vf_id = param->vf_id;
4710
4711         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
4712          * on pdev revision 0x20; newer revisions support them. Setting these
4713          * two fields does not cause an error when the driver sends the
4714          * command to the firmware on revision 0x20.
4715          */
4716         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4717                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4718
4719         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4720         if (ret)
4721                 dev_err(&hdev->pdev->dev,
4722                         "Set promisc mode fail, status is %d.\n", ret);
4723
4724         return ret;
4725 }
4726
4727 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4728                                      bool en_uc, bool en_mc, bool en_bc,
4729                                      int vport_id)
4730 {
4731         if (!param)
4732                 return;
4733
4734         memset(param, 0, sizeof(struct hclge_promisc_param));
4735         if (en_uc)
4736                 param->enable = HCLGE_PROMISC_EN_UC;
4737         if (en_mc)
4738                 param->enable |= HCLGE_PROMISC_EN_MC;
4739         if (en_bc)
4740                 param->enable |= HCLGE_PROMISC_EN_BC;
4741         param->vf_id = vport_id;
4742 }
4743
4744 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4745                                  bool en_mc_pmc, bool en_bc_pmc)
4746 {
4747         struct hclge_dev *hdev = vport->back;
4748         struct hclge_promisc_param param;
4749
4750         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4751                                  vport->vport_id);
4752         return hclge_cmd_set_promisc_mode(hdev, &param);
4753 }
4754
4755 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4756                                   bool en_mc_pmc)
4757 {
4758         struct hclge_vport *vport = hclge_get_vport(handle);
4759         bool en_bc_pmc = true;
4760
4761         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4762          * is always bypassed. So broadcast promisc should be disabled until
4763          * the user enables promisc mode.
4764          */
4765         if (handle->pdev->revision == 0x20)
4766                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4767
4768         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4769                                             en_bc_pmc);
4770 }
4771
4772 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4773 {
4774         struct hclge_get_fd_mode_cmd *req;
4775         struct hclge_desc desc;
4776         int ret;
4777
4778         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4779
4780         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4781
4782         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4783         if (ret) {
4784                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4785                 return ret;
4786         }
4787
4788         *fd_mode = req->mode;
4789
4790         return ret;
4791 }
4792
4793 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4794                                    u32 *stage1_entry_num,
4795                                    u32 *stage2_entry_num,
4796                                    u16 *stage1_counter_num,
4797                                    u16 *stage2_counter_num)
4798 {
4799         struct hclge_get_fd_allocation_cmd *req;
4800         struct hclge_desc desc;
4801         int ret;
4802
4803         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4804
4805         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4806
4807         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4808         if (ret) {
4809                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4810                         ret);
4811                 return ret;
4812         }
4813
4814         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4815         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4816         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4817         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4818
4819         return ret;
4820 }
4821
4822 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4823 {
4824         struct hclge_set_fd_key_config_cmd *req;
4825         struct hclge_fd_key_cfg *stage;
4826         struct hclge_desc desc;
4827         int ret;
4828
4829         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4830
4831         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4832         stage = &hdev->fd_cfg.key_cfg[stage_num];
4833         req->stage = stage_num;
4834         req->key_select = stage->key_sel;
4835         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4836         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4837         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4838         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4839         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4840         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4841
4842         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4843         if (ret)
4844                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4845
4846         return ret;
4847 }
4848
4849 static int hclge_init_fd_config(struct hclge_dev *hdev)
4850 {
4851 #define LOW_2_WORDS             0x03
4852         struct hclge_fd_key_cfg *key_cfg;
4853         int ret;
4854
4855         if (!hnae3_dev_fd_supported(hdev))
4856                 return 0;
4857
4858         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4859         if (ret)
4860                 return ret;
4861
4862         switch (hdev->fd_cfg.fd_mode) {
4863         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4864                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4865                 break;
4866         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4867                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4868                 break;
4869         default:
4870                 dev_err(&hdev->pdev->dev,
4871                         "Unsupported flow director mode %u\n",
4872                         hdev->fd_cfg.fd_mode);
4873                 return -EOPNOTSUPP;
4874         }
4875
4876         hdev->fd_cfg.proto_support =
4877                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4878                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4879         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4880         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4881         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4882         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4883         key_cfg->outer_sipv6_word_en = 0;
4884         key_cfg->outer_dipv6_word_en = 0;
4885
4886         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4887                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4888                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4889                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4890
4891         /* If the max 400-bit key is used, tuples for the ether type flow can
4892          * also be supported.
4893          */
4892         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4893                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4894                 key_cfg->tuple_active |=
4895                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4896         }
4897
4898         /* roce_type is used to filter roce frames
4899          * dst_vport is used to specify the rule
4900          */
4901         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4902
4903         ret = hclge_get_fd_allocation(hdev,
4904                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4905                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4906                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4907                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4908         if (ret)
4909                 return ret;
4910
4911         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4912 }
4913
4914 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4915                                 int loc, u8 *key, bool is_add)
4916 {
4917         struct hclge_fd_tcam_config_1_cmd *req1;
4918         struct hclge_fd_tcam_config_2_cmd *req2;
4919         struct hclge_fd_tcam_config_3_cmd *req3;
4920         struct hclge_desc desc[3];
4921         int ret;
4922
4923         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4924         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4925         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4926         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4927         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4928
4929         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4930         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4931         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4932
4933         req1->stage = stage;
4934         req1->xy_sel = sel_x ? 1 : 0;
4935         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4936         req1->index = cpu_to_le32(loc);
4937         req1->entry_vld = sel_x ? is_add : 0;
4938
4939         if (key) {
4940                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4941                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4942                        sizeof(req2->tcam_data));
4943                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4944                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4945         }
4946
4947         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4948         if (ret)
4949                 dev_err(&hdev->pdev->dev,
4950                         "config tcam key fail, ret=%d\n",
4951                         ret);
4952
4953         return ret;
4954 }
4955
4956 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4957                               struct hclge_fd_ad_data *action)
4958 {
4959         struct hclge_fd_ad_config_cmd *req;
4960         struct hclge_desc desc;
4961         u64 ad_data = 0;
4962         int ret;
4963
4964         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4965
4966         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4967         req->index = cpu_to_le32(loc);
4968         req->stage = stage;
4969
4970         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4971                       action->write_rule_id_to_bd);
4972         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4973                         action->rule_id);
4974         ad_data <<= 32;
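        /* the rule id fields set above now occupy the upper 32 bits of
         * ad_data; the action fields below fill the lower 32 bits
         */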
4975         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4976         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4977                       action->forward_to_direct_queue);
4978         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4979                         action->queue_id);
4980         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4981         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4982                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4983         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4984         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4985                         action->counter_id);
4986
4987         req->ad_data = cpu_to_le64(ad_data);
4988         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4989         if (ret)
4990                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4991
4992         return ret;
4993 }
4994
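/* Convert one tuple of @rule into the x/y key format used by the FD TCAM.
 * Returns true when the tuple occupies space in the key (key_x/key_y are
 * left as zero for tuples marked unused), false when the tuple bit is not
 * recognized.
 */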
4995 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4996                                    struct hclge_fd_rule *rule)
4997 {
4998         u16 tmp_x_s, tmp_y_s;
4999         u32 tmp_x_l, tmp_y_l;
5000         int i;
5001
5002         if (rule->unused_tuple & tuple_bit)
5003                 return true;
5004
5005         switch (tuple_bit) {
5006         case 0:
5007                 return false;
5008         case BIT(INNER_DST_MAC):
5009                 for (i = 0; i < ETH_ALEN; i++) {
5010                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5011                                rule->tuples_mask.dst_mac[i]);
5012                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5013                                rule->tuples_mask.dst_mac[i]);
5014                 }
5015
5016                 return true;
5017         case BIT(INNER_SRC_MAC):
5018                 for (i = 0; i < ETH_ALEN; i++) {
5019                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5020                                rule->tuples_mask.src_mac[i]);
5021                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5022                                rule->tuples_mask.src_mac[i]);
5023                 }
5024
5025                 return true;
5026         case BIT(INNER_VLAN_TAG_FST):
5027                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5028                        rule->tuples_mask.vlan_tag1);
5029                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5030                        rule->tuples_mask.vlan_tag1);
5031                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5032                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5033
5034                 return true;
5035         case BIT(INNER_ETH_TYPE):
5036                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5037                        rule->tuples_mask.ether_proto);
5038                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5039                        rule->tuples_mask.ether_proto);
5040                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5041                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5042
5043                 return true;
5044         case BIT(INNER_IP_TOS):
5045                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5046                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5047
5048                 return true;
5049         case BIT(INNER_IP_PROTO):
5050                 calc_x(*key_x, rule->tuples.ip_proto,
5051                        rule->tuples_mask.ip_proto);
5052                 calc_y(*key_y, rule->tuples.ip_proto,
5053                        rule->tuples_mask.ip_proto);
5054
5055                 return true;
5056         case BIT(INNER_SRC_IP):
5057                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5058                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5059                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5060                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5061                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5062                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5063
5064                 return true;
5065         case BIT(INNER_DST_IP):
5066                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5067                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5068                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5069                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5070                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5071                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5072
5073                 return true;
5074         case BIT(INNER_SRC_PORT):
5075                 calc_x(tmp_x_s, rule->tuples.src_port,
5076                        rule->tuples_mask.src_port);
5077                 calc_y(tmp_y_s, rule->tuples.src_port,
5078                        rule->tuples_mask.src_port);
5079                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5080                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5081
5082                 return true;
5083         case BIT(INNER_DST_PORT):
5084                 calc_x(tmp_x_s, rule->tuples.dst_port,
5085                        rule->tuples_mask.dst_port);
5086                 calc_y(tmp_y_s, rule->tuples.dst_port,
5087                        rule->tuples_mask.dst_port);
5088                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5089                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5090
5091                 return true;
5092         default:
5093                 return false;
5094         }
5095 }
5096
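/* Build the port number field used in the FD meta data: for HOST_PORT it
 * encodes the pf_id and vf_id, for NETWORK_PORT the physical network port id.
 */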
5097 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5098                                  u8 vf_id, u8 network_port_id)
5099 {
5100         u32 port_number = 0;
5101
5102         if (port_type == HOST_PORT) {
5103                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5104                                 pf_id);
5105                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5106                                 vf_id);
5107                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5108         } else {
5109                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5110                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5111                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5112         }
5113
5114         return port_number;
5115 }
5116
5117 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5118                                        __le32 *key_x, __le32 *key_y,
5119                                        struct hclge_fd_rule *rule)
5120 {
5121         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5122         u8 cur_pos = 0, tuple_size, shift_bits;
5123         unsigned int i;
5124
5125         for (i = 0; i < MAX_META_DATA; i++) {
5126                 tuple_size = meta_data_key_info[i].key_length;
5127                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5128
5129                 switch (tuple_bit) {
5130                 case BIT(ROCE_TYPE):
5131                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5132                         cur_pos += tuple_size;
5133                         break;
5134                 case BIT(DST_VPORT):
5135                         port_number = hclge_get_port_number(HOST_PORT, 0,
5136                                                             rule->vf_id, 0);
5137                         hnae3_set_field(meta_data,
5138                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
5139                                         cur_pos, port_number);
5140                         cur_pos += tuple_size;
5141                         break;
5142                 default:
5143                         break;
5144                 }
5145         }
5146
5147         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5148         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5149         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5150
5151         *key_x = cpu_to_le32(tmp_x << shift_bits);
5152         *key_y = cpu_to_le32(tmp_y << shift_bits);
5153 }
5154
5155 /* A complete key consists of a meta data key and a tuple key.
5156  * The meta data key is stored in the MSB region, the tuple key is stored in
5157  * the LSB region, and unused bits are filled with 0.
5158  */
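/* The meta data therefore occupies the last MAX_META_DATA_LENGTH / 8 bytes of
 * key_x/key_y, starting at byte offset
 * max_key_length / 8 - MAX_META_DATA_LENGTH / 8 (see meta_data_region below).
 */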
5159 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5160                             struct hclge_fd_rule *rule)
5161 {
5162         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5163         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5164         u8 *cur_key_x, *cur_key_y;
5165         unsigned int i;
5166         int ret, tuple_size;
5167         u8 meta_data_region;
5168
5169         memset(key_x, 0, sizeof(key_x));
5170         memset(key_y, 0, sizeof(key_y));
5171         cur_key_x = key_x;
5172         cur_key_y = key_y;
5173
5174         for (i = 0; i < MAX_TUPLE; i++) {
5175                 bool tuple_valid;
5176                 u32 check_tuple;
5177
5178                 tuple_size = tuple_key_info[i].key_length / 8;
5179                 check_tuple = key_cfg->tuple_active & BIT(i);
5180
5181                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5182                                                      cur_key_y, rule);
5183                 if (tuple_valid) {
5184                         cur_key_x += tuple_size;
5185                         cur_key_y += tuple_size;
5186                 }
5187         }
5188
5189         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5190                         MAX_META_DATA_LENGTH / 8;
5191
5192         hclge_fd_convert_meta_data(key_cfg,
5193                                    (__le32 *)(key_x + meta_data_region),
5194                                    (__le32 *)(key_y + meta_data_region),
5195                                    rule);
5196
5197         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5198                                    true);
5199         if (ret) {
5200                 dev_err(&hdev->pdev->dev,
5201                         "fd key_y config fail, loc=%u, ret=%d\n",
5202                         rule->location, ret);
5203                 return ret;
5204         }
5205
5206         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5207                                    true);
5208         if (ret)
5209                 dev_err(&hdev->pdev->dev,
5210                         "fd key_x config fail, loc=%u, ret=%d\n",
5211                         rule->location, ret);
5212         return ret;
5213 }
5214
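/* Translate the rule action into an FD action data (AD) entry: either drop
 * matched packets or forward them to the configured queue, and request that
 * the rule id be written to the buffer descriptor of matched packets.
 */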
5215 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5216                                struct hclge_fd_rule *rule)
5217 {
5218         struct hclge_fd_ad_data ad_data;
5219
5220         ad_data.ad_id = rule->location;
5221
5222         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5223                 ad_data.drop_packet = true;
5224                 ad_data.forward_to_direct_queue = false;
5225                 ad_data.queue_id = 0;
5226         } else {
5227                 ad_data.drop_packet = false;
5228                 ad_data.forward_to_direct_queue = true;
5229                 ad_data.queue_id = rule->queue_id;
5230         }
5231
5232         ad_data.use_counter = false;
5233         ad_data.counter_id = 0;
5234
5235         ad_data.use_next_stage = false;
5236         ad_data.next_input_key = 0;
5237
5238         ad_data.write_rule_id_to_bd = true;
5239         ad_data.rule_id = rule->location;
5240
5241         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5242 }
5243
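/* Validate a rule given by ethtool and collect in @unused the tuple bits the
 * user did not specify, so they are treated as don't-care when the key is
 * built.
 */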
5244 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5245                                struct ethtool_rx_flow_spec *fs, u32 *unused)
5246 {
5247         struct ethtool_tcpip4_spec *tcp_ip4_spec;
5248         struct ethtool_usrip4_spec *usr_ip4_spec;
5249         struct ethtool_tcpip6_spec *tcp_ip6_spec;
5250         struct ethtool_usrip6_spec *usr_ip6_spec;
5251         struct ethhdr *ether_spec;
5252
5253         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5254                 return -EINVAL;
5255
5256         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5257                 return -EOPNOTSUPP;
5258
5259         if ((fs->flow_type & FLOW_EXT) &&
5260             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5261                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5262                 return -EOPNOTSUPP;
5263         }
5264
5265         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5266         case SCTP_V4_FLOW:
5267         case TCP_V4_FLOW:
5268         case UDP_V4_FLOW:
5269                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5270                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5271
5272                 if (!tcp_ip4_spec->ip4src)
5273                         *unused |= BIT(INNER_SRC_IP);
5274
5275                 if (!tcp_ip4_spec->ip4dst)
5276                         *unused |= BIT(INNER_DST_IP);
5277
5278                 if (!tcp_ip4_spec->psrc)
5279                         *unused |= BIT(INNER_SRC_PORT);
5280
5281                 if (!tcp_ip4_spec->pdst)
5282                         *unused |= BIT(INNER_DST_PORT);
5283
5284                 if (!tcp_ip4_spec->tos)
5285                         *unused |= BIT(INNER_IP_TOS);
5286
5287                 break;
5288         case IP_USER_FLOW:
5289                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5290                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5291                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5292
5293                 if (!usr_ip4_spec->ip4src)
5294                         *unused |= BIT(INNER_SRC_IP);
5295
5296                 if (!usr_ip4_spec->ip4dst)
5297                         *unused |= BIT(INNER_DST_IP);
5298
5299                 if (!usr_ip4_spec->tos)
5300                         *unused |= BIT(INNER_IP_TOS);
5301
5302                 if (!usr_ip4_spec->proto)
5303                         *unused |= BIT(INNER_IP_PROTO);
5304
5305                 if (usr_ip4_spec->l4_4_bytes)
5306                         return -EOPNOTSUPP;
5307
5308                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5309                         return -EOPNOTSUPP;
5310
5311                 break;
5312         case SCTP_V6_FLOW:
5313         case TCP_V6_FLOW:
5314         case UDP_V6_FLOW:
5315                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5316                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5317                         BIT(INNER_IP_TOS);
5318
5319                 /* check whether the src/dst ip addresses are used */
5320                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5321                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5322                         *unused |= BIT(INNER_SRC_IP);
5323
5324                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5325                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5326                         *unused |= BIT(INNER_DST_IP);
5327
5328                 if (!tcp_ip6_spec->psrc)
5329                         *unused |= BIT(INNER_SRC_PORT);
5330
5331                 if (!tcp_ip6_spec->pdst)
5332                         *unused |= BIT(INNER_DST_PORT);
5333
5334                 if (tcp_ip6_spec->tclass)
5335                         return -EOPNOTSUPP;
5336
5337                 break;
5338         case IPV6_USER_FLOW:
5339                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5340                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5341                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5342                         BIT(INNER_DST_PORT);
5343
5344                 /* check whether the src/dst ip addresses are used */
5345                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5346                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5347                         *unused |= BIT(INNER_SRC_IP);
5348
5349                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5350                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5351                         *unused |= BIT(INNER_DST_IP);
5352
5353                 if (!usr_ip6_spec->l4_proto)
5354                         *unused |= BIT(INNER_IP_PROTO);
5355
5356                 if (usr_ip6_spec->tclass)
5357                         return -EOPNOTSUPP;
5358
5359                 if (usr_ip6_spec->l4_4_bytes)
5360                         return -EOPNOTSUPP;
5361
5362                 break;
5363         case ETHER_FLOW:
5364                 ether_spec = &fs->h_u.ether_spec;
5365                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5366                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5367                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5368
5369                 if (is_zero_ether_addr(ether_spec->h_source))
5370                         *unused |= BIT(INNER_SRC_MAC);
5371
5372                 if (is_zero_ether_addr(ether_spec->h_dest))
5373                         *unused |= BIT(INNER_DST_MAC);
5374
5375                 if (!ether_spec->h_proto)
5376                         *unused |= BIT(INNER_ETH_TYPE);
5377
5378                 break;
5379         default:
5380                 return -EOPNOTSUPP;
5381         }
5382
5383         if ((fs->flow_type & FLOW_EXT)) {
5384                 if (fs->h_ext.vlan_etype)
5385                         return -EOPNOTSUPP;
5386                 if (!fs->h_ext.vlan_tci)
5387                         *unused |= BIT(INNER_VLAN_TAG_FST);
5388
5389                 if (fs->m_ext.vlan_tci) {
5390                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5391                                 return -EINVAL;
5392                 }
5393         } else {
5394                 *unused |= BIT(INNER_VLAN_TAG_FST);
5395         }
5396
5397         if (fs->flow_type & FLOW_MAC_EXT) {
5398                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5399                         return -EOPNOTSUPP;
5400
5401                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5402                         *unused |= BIT(INNER_DST_MAC);
5403                 else
5404                         *unused &= ~(BIT(INNER_DST_MAC));
5405         }
5406
5407         return 0;
5408 }
5409
5410 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5411 {
5412         struct hclge_fd_rule *rule = NULL;
5413         struct hlist_node *node2;
5414
5415         spin_lock_bh(&hdev->fd_rule_lock);
5416         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5417                 if (rule->location >= location)
5418                         break;
5419         }
5420
5421         spin_unlock_bh(&hdev->fd_rule_lock);
5422
5423         return rule && rule->location == location;
5424 }
5425
5426 /* make sure this is called with fd_rule_lock held */
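/* The rule list is kept sorted by location. Any existing rule at @location is
 * first removed; when @is_add is true, @new_rule is then linked in its place.
 */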
5427 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5428                                      struct hclge_fd_rule *new_rule,
5429                                      u16 location,
5430                                      bool is_add)
5431 {
5432         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5433         struct hlist_node *node2;
5434
5435         if (is_add && !new_rule)
5436                 return -EINVAL;
5437
5438         hlist_for_each_entry_safe(rule, node2,
5439                                   &hdev->fd_rule_list, rule_node) {
5440                 if (rule->location >= location)
5441                         break;
5442                 parent = rule;
5443         }
5444
5445         if (rule && rule->location == location) {
5446                 hlist_del(&rule->rule_node);
5447                 kfree(rule);
5448                 hdev->hclge_fd_rule_num--;
5449
5450                 if (!is_add) {
5451                         if (!hdev->hclge_fd_rule_num)
5452                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5453                         clear_bit(location, hdev->fd_bmap);
5454
5455                         return 0;
5456                 }
5457         } else if (!is_add) {
5458                 dev_err(&hdev->pdev->dev,
5459                         "delete fail, rule %u does not exist\n",
5460                         location);
5461                 return -EINVAL;
5462         }
5463
5464         INIT_HLIST_NODE(&new_rule->rule_node);
5465
5466         if (parent)
5467                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5468         else
5469                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5470
5471         set_bit(location, hdev->fd_bmap);
5472         hdev->hclge_fd_rule_num++;
5473         hdev->fd_active_type = new_rule->rule_type;
5474
5475         return 0;
5476 }
5477
5478 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5479                               struct ethtool_rx_flow_spec *fs,
5480                               struct hclge_fd_rule *rule)
5481 {
5482         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5483
5484         switch (flow_type) {
5485         case SCTP_V4_FLOW:
5486         case TCP_V4_FLOW:
5487         case UDP_V4_FLOW:
5488                 rule->tuples.src_ip[IPV4_INDEX] =
5489                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5490                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5491                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5492
5493                 rule->tuples.dst_ip[IPV4_INDEX] =
5494                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5495                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5496                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5497
5498                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5499                 rule->tuples_mask.src_port =
5500                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5501
5502                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5503                 rule->tuples_mask.dst_port =
5504                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5505
5506                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5507                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5508
5509                 rule->tuples.ether_proto = ETH_P_IP;
5510                 rule->tuples_mask.ether_proto = 0xFFFF;
5511
5512                 break;
5513         case IP_USER_FLOW:
5514                 rule->tuples.src_ip[IPV4_INDEX] =
5515                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5516                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5517                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5518
5519                 rule->tuples.dst_ip[IPV4_INDEX] =
5520                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5521                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5522                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5523
5524                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5525                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5526
5527                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5528                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5529
5530                 rule->tuples.ether_proto = ETH_P_IP;
5531                 rule->tuples_mask.ether_proto = 0xFFFF;
5532
5533                 break;
5534         case SCTP_V6_FLOW:
5535         case TCP_V6_FLOW:
5536         case UDP_V6_FLOW:
5537                 be32_to_cpu_array(rule->tuples.src_ip,
5538                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5539                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5540                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5541
5542                 be32_to_cpu_array(rule->tuples.dst_ip,
5543                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5544                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5545                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5546
5547                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5548                 rule->tuples_mask.src_port =
5549                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5550
5551                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5552                 rule->tuples_mask.dst_port =
5553                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5554
5555                 rule->tuples.ether_proto = ETH_P_IPV6;
5556                 rule->tuples_mask.ether_proto = 0xFFFF;
5557
5558                 break;
5559         case IPV6_USER_FLOW:
5560                 be32_to_cpu_array(rule->tuples.src_ip,
5561                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5562                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5563                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5564
5565                 be32_to_cpu_array(rule->tuples.dst_ip,
5566                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5567                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5568                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5569
5570                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5571                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5572
5573                 rule->tuples.ether_proto = ETH_P_IPV6;
5574                 rule->tuples_mask.ether_proto = 0xFFFF;
5575
5576                 break;
5577         case ETHER_FLOW:
5578                 ether_addr_copy(rule->tuples.src_mac,
5579                                 fs->h_u.ether_spec.h_source);
5580                 ether_addr_copy(rule->tuples_mask.src_mac,
5581                                 fs->m_u.ether_spec.h_source);
5582
5583                 ether_addr_copy(rule->tuples.dst_mac,
5584                                 fs->h_u.ether_spec.h_dest);
5585                 ether_addr_copy(rule->tuples_mask.dst_mac,
5586                                 fs->m_u.ether_spec.h_dest);
5587
5588                 rule->tuples.ether_proto =
5589                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5590                 rule->tuples_mask.ether_proto =
5591                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5592
5593                 break;
5594         default:
5595                 return -EOPNOTSUPP;
5596         }
5597
5598         switch (flow_type) {
5599         case SCTP_V4_FLOW:
5600         case SCTP_V6_FLOW:
5601                 rule->tuples.ip_proto = IPPROTO_SCTP;
5602                 rule->tuples_mask.ip_proto = 0xFF;
5603                 break;
5604         case TCP_V4_FLOW:
5605         case TCP_V6_FLOW:
5606                 rule->tuples.ip_proto = IPPROTO_TCP;
5607                 rule->tuples_mask.ip_proto = 0xFF;
5608                 break;
5609         case UDP_V4_FLOW:
5610         case UDP_V6_FLOW:
5611                 rule->tuples.ip_proto = IPPROTO_UDP;
5612                 rule->tuples_mask.ip_proto = 0xFF;
5613                 break;
5614         default:
5615                 break;
5616         }
5617
5618         if (fs->flow_type & FLOW_EXT) {
5619                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5620                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5621         }
5622
5623         if (fs->flow_type & FLOW_MAC_EXT) {
5624                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5625                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5626         }
5627
5628         return 0;
5629 }
5630
5631 /* make sure this is called with fd_rule_lock held */
5632 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5633                                 struct hclge_fd_rule *rule)
5634 {
5635         int ret;
5636
5637         if (!rule) {
5638                 dev_err(&hdev->pdev->dev,
5639                         "The flow director rule is NULL\n");
5640                 return -EINVAL;
5641         }
5642
5643         /* it never fails here, so there is no need to check the return value */
5644         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5645
5646         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5647         if (ret)
5648                 goto clear_rule;
5649
5650         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5651         if (ret)
5652                 goto clear_rule;
5653
5654         return 0;
5655
5656 clear_rule:
5657         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5658         return ret;
5659 }
5660
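/* hclge_add_fd_entry() is the ethtool flow director add path: it validates
 * the user supplied ethtool_rx_flow_spec, converts it into an hclge_fd_rule
 * with hclge_fd_get_tuple() and programs the rule under fd_rule_lock. Such a
 * rule is typically installed from user space with something like
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 3 loc 1
 * where "action 3" steers matching packets to queue 3 and "loc 1" picks the
 * rule location (device name and values are illustrative only).
 */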
5661 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5662                               struct ethtool_rxnfc *cmd)
5663 {
5664         struct hclge_vport *vport = hclge_get_vport(handle);
5665         struct hclge_dev *hdev = vport->back;
5666         u16 dst_vport_id = 0, q_index = 0;
5667         struct ethtool_rx_flow_spec *fs;
5668         struct hclge_fd_rule *rule;
5669         u32 unused = 0;
5670         u8 action;
5671         int ret;
5672
5673         if (!hnae3_dev_fd_supported(hdev))
5674                 return -EOPNOTSUPP;
5675
5676         if (!hdev->fd_en) {
5677                 dev_warn(&hdev->pdev->dev,
5678                          "Please enable flow director first\n");
5679                 return -EOPNOTSUPP;
5680         }
5681
5682         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5683
5684         ret = hclge_fd_check_spec(hdev, fs, &unused);
5685         if (ret) {
5686                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5687                 return ret;
5688         }
5689
5690         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5691                 action = HCLGE_FD_ACTION_DROP_PACKET;
5692         } else {
5693                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5694                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5695                 u16 tqps;
5696
5697                 if (vf > hdev->num_req_vfs) {
5698                         dev_err(&hdev->pdev->dev,
5699                                 "Error: vf id (%u) > max vf num (%u)\n",
5700                                 vf, hdev->num_req_vfs);
5701                         return -EINVAL;
5702                 }
5703
5704                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5705                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5706
5707                 if (ring >= tqps) {
5708                         dev_err(&hdev->pdev->dev,
5709                                 "Error: queue id (%u) > max tqp num (%u)\n",
5710                                 ring, tqps - 1);
5711                         return -EINVAL;
5712                 }
5713
5714                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5715                 q_index = ring;
5716         }
5717
5718         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5719         if (!rule)
5720                 return -ENOMEM;
5721
5722         ret = hclge_fd_get_tuple(hdev, fs, rule);
5723         if (ret) {
5724                 kfree(rule);
5725                 return ret;
5726         }
5727
5728         rule->flow_type = fs->flow_type;
5729
5730         rule->location = fs->location;
5731         rule->unused_tuple = unused;
5732         rule->vf_id = dst_vport_id;
5733         rule->queue_id = q_index;
5734         rule->action = action;
5735         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5736
5737         /* to avoid rule conflict, when the user configures rules by
5738          * ethtool, we need to clear all arfs rules
5739          */
5740         hclge_clear_arfs_rules(handle);
5741
5742         spin_lock_bh(&hdev->fd_rule_lock);
5743         ret = hclge_fd_config_rule(hdev, rule);
5744
5745         spin_unlock_bh(&hdev->fd_rule_lock);
5746
5747         return ret;
5748 }
5749
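/* hclge_del_fd_entry() removes one user configured flow director rule: it
 * disables the corresponding stage-1 TCAM entry and then drops the rule from
 * the software rule list under fd_rule_lock.
 */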
5750 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5751                               struct ethtool_rxnfc *cmd)
5752 {
5753         struct hclge_vport *vport = hclge_get_vport(handle);
5754         struct hclge_dev *hdev = vport->back;
5755         struct ethtool_rx_flow_spec *fs;
5756         int ret;
5757
5758         if (!hnae3_dev_fd_supported(hdev))
5759                 return -EOPNOTSUPP;
5760
5761         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5762
5763         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5764                 return -EINVAL;
5765
5766         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5767                 dev_err(&hdev->pdev->dev,
5768                         "Delete fail, rule %u does not exist\n", fs->location);
5769                 return -ENOENT;
5770         }
5771
5772         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5773                                    NULL, false);
5774         if (ret)
5775                 return ret;
5776
5777         spin_lock_bh(&hdev->fd_rule_lock);
5778         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5779
5780         spin_unlock_bh(&hdev->fd_rule_lock);
5781
5782         return ret;
5783 }
5784
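/* hclge_del_all_fd_entries() disables every stage-1 TCAM entry tracked in
 * fd_bmap. When clear_list is true the software rule list, rule counter and
 * bitmap are cleared as well; otherwise the list is kept so the rules can be
 * restored later, e.g. after a reset.
 */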
5785 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5786                                      bool clear_list)
5787 {
5788         struct hclge_vport *vport = hclge_get_vport(handle);
5789         struct hclge_dev *hdev = vport->back;
5790         struct hclge_fd_rule *rule;
5791         struct hlist_node *node;
5792         u16 location;
5793
5794         if (!hnae3_dev_fd_supported(hdev))
5795                 return;
5796
5797         spin_lock_bh(&hdev->fd_rule_lock);
5798         for_each_set_bit(location, hdev->fd_bmap,
5799                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5800                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5801                                      NULL, false);
5802
5803         if (clear_list) {
5804                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5805                                           rule_node) {
5806                         hlist_del(&rule->rule_node);
5807                         kfree(rule);
5808                 }
5809                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5810                 hdev->hclge_fd_rule_num = 0;
5811                 bitmap_zero(hdev->fd_bmap,
5812                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5813         }
5814
5815         spin_unlock_bh(&hdev->fd_rule_lock);
5816 }
5817
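/* hclge_restore_fd_entries() re-programs the rules kept in the software list
 * back into hardware, typically after a reset. A rule that fails to restore
 * is dropped from the list instead of failing the whole restore.
 */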
5818 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5819 {
5820         struct hclge_vport *vport = hclge_get_vport(handle);
5821         struct hclge_dev *hdev = vport->back;
5822         struct hclge_fd_rule *rule;
5823         struct hlist_node *node;
5824         int ret;
5825
5826         /* Return ok here, because reset error handling will check this
5827          * return value. If an error is returned here, the reset process will
5828          * fail.
5829          */
5830         if (!hnae3_dev_fd_supported(hdev))
5831                 return 0;
5832
5833         /* if fd is disabled, the rules should not be restored during reset */
5834         if (!hdev->fd_en)
5835                 return 0;
5836
5837         spin_lock_bh(&hdev->fd_rule_lock);
5838         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5839                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5840                 if (!ret)
5841                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5842
5843                 if (ret) {
5844                         dev_warn(&hdev->pdev->dev,
5845                                  "Restore rule %u failed, remove it\n",
5846                                  rule->location);
5847                         clear_bit(rule->location, hdev->fd_bmap);
5848                         hlist_del(&rule->rule_node);
5849                         kfree(rule);
5850                         hdev->hclge_fd_rule_num--;
5851                 }
5852         }
5853
5854         if (hdev->hclge_fd_rule_num)
5855                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5856
5857         spin_unlock_bh(&hdev->fd_rule_lock);
5858
5859         return 0;
5860 }
5861
5862 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5863                                  struct ethtool_rxnfc *cmd)
5864 {
5865         struct hclge_vport *vport = hclge_get_vport(handle);
5866         struct hclge_dev *hdev = vport->back;
5867
5868         if (!hnae3_dev_fd_supported(hdev))
5869                 return -EOPNOTSUPP;
5870
5871         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5872         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5873
5874         return 0;
5875 }
5876
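/* hclge_get_fd_rule_info() handles ETHTOOL_GRXCLSRULE: it looks up the rule
 * at fs->location and converts the stored tuples and masks back into the
 * ethtool_rx_flow_spec layout, reporting a zero mask for every tuple the
 * hardware key does not use (see rule->unused_tuple).
 */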
5877 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5878                                   struct ethtool_rxnfc *cmd)
5879 {
5880         struct hclge_vport *vport = hclge_get_vport(handle);
5881         struct hclge_fd_rule *rule = NULL;
5882         struct hclge_dev *hdev = vport->back;
5883         struct ethtool_rx_flow_spec *fs;
5884         struct hlist_node *node2;
5885
5886         if (!hnae3_dev_fd_supported(hdev))
5887                 return -EOPNOTSUPP;
5888
5889         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5890
5891         spin_lock_bh(&hdev->fd_rule_lock);
5892
5893         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5894                 if (rule->location >= fs->location)
5895                         break;
5896         }
5897
5898         if (!rule || fs->location != rule->location) {
5899                 spin_unlock_bh(&hdev->fd_rule_lock);
5900
5901                 return -ENOENT;
5902         }
5903
5904         fs->flow_type = rule->flow_type;
5905         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5906         case SCTP_V4_FLOW:
5907         case TCP_V4_FLOW:
5908         case UDP_V4_FLOW:
5909                 fs->h_u.tcp_ip4_spec.ip4src =
5910                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5911                 fs->m_u.tcp_ip4_spec.ip4src =
5912                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5913                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5914
5915                 fs->h_u.tcp_ip4_spec.ip4dst =
5916                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5917                 fs->m_u.tcp_ip4_spec.ip4dst =
5918                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5919                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5920
5921                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5922                 fs->m_u.tcp_ip4_spec.psrc =
5923                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5924                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5925
5926                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5927                 fs->m_u.tcp_ip4_spec.pdst =
5928                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5929                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5930
5931                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5932                 fs->m_u.tcp_ip4_spec.tos =
5933                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5934                                 0 : rule->tuples_mask.ip_tos;
5935
5936                 break;
5937         case IP_USER_FLOW:
5938                 fs->h_u.usr_ip4_spec.ip4src =
5939                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5940                 fs->m_u.usr_ip4_spec.ip4src =
5941                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5942                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5943
5944                 fs->h_u.usr_ip4_spec.ip4dst =
5945                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5946                 fs->m_u.usr_ip4_spec.ip4dst =
5947                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5948                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5949
5950                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5951                 fs->m_u.usr_ip4_spec.tos =
5952                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5953                                 0 : rule->tuples_mask.ip_tos;
5954
5955                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5956                 fs->m_u.usr_ip4_spec.proto =
5957                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5958                                 0 : rule->tuples_mask.ip_proto;
5959
5960                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5961
5962                 break;
5963         case SCTP_V6_FLOW:
5964         case TCP_V6_FLOW:
5965         case UDP_V6_FLOW:
5966                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5967                                   rule->tuples.src_ip, IPV6_SIZE);
5968                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5969                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5970                                sizeof(int) * IPV6_SIZE);
5971                 else
5972                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5973                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5974
5975                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5976                                   rule->tuples.dst_ip, IPV6_SIZE);
5977                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5978                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5979                                sizeof(int) * IPV6_SIZE);
5980                 else
5981                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5982                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5983
5984                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5985                 fs->m_u.tcp_ip6_spec.psrc =
5986                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5987                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5988
5989                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5990                 fs->m_u.tcp_ip6_spec.pdst =
5991                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5992                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5993
5994                 break;
5995         case IPV6_USER_FLOW:
5996                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5997                                   rule->tuples.src_ip, IPV6_SIZE);
5998                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5999                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6000                                sizeof(int) * IPV6_SIZE);
6001                 else
6002                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6003                                           rule->tuples_mask.src_ip, IPV6_SIZE);
6004
6005                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6006                                   rule->tuples.dst_ip, IPV6_SIZE);
6007                 if (rule->unused_tuple & BIT(INNER_DST_IP))
6008                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6009                                sizeof(int) * IPV6_SIZE);
6010                 else
6011                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6012                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
6013
6014                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6015                 fs->m_u.usr_ip6_spec.l4_proto =
6016                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6017                                 0 : rule->tuples_mask.ip_proto;
6018
6019                 break;
6020         case ETHER_FLOW:
6021                 ether_addr_copy(fs->h_u.ether_spec.h_source,
6022                                 rule->tuples.src_mac);
6023                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6024                         eth_zero_addr(fs->m_u.ether_spec.h_source);
6025                 else
6026                         ether_addr_copy(fs->m_u.ether_spec.h_source,
6027                                         rule->tuples_mask.src_mac);
6028
6029                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6030                                 rule->tuples.dst_mac);
6031                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6032                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6033                 else
6034                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6035                                         rule->tuples_mask.dst_mac);
6036
6037                 fs->h_u.ether_spec.h_proto =
6038                                 cpu_to_be16(rule->tuples.ether_proto);
6039                 fs->m_u.ether_spec.h_proto =
6040                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6041                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6042
6043                 break;
6044         default:
6045                 spin_unlock_bh(&hdev->fd_rule_lock);
6046                 return -EOPNOTSUPP;
6047         }
6048
6049         if (fs->flow_type & FLOW_EXT) {
6050                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6051                 fs->m_ext.vlan_tci =
6052                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6053                                 cpu_to_be16(VLAN_VID_MASK) :
6054                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6055         }
6056
6057         if (fs->flow_type & FLOW_MAC_EXT) {
6058                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6059                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6060                         eth_zero_addr(fs->m_ext.h_dest);
6061                 else
6062                         ether_addr_copy(fs->m_ext.h_dest,
6063                                         rule->tuples_mask.dst_mac);
6064         }
6065
6066         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6067                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6068         } else {
6069                 u64 vf_id;
6070
6071                 fs->ring_cookie = rule->queue_id;
6072                 vf_id = rule->vf_id;
6073                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6074                 fs->ring_cookie |= vf_id;
6075         }
6076
6077         spin_unlock_bh(&hdev->fd_rule_lock);
6078
6079         return 0;
6080 }
6081
6082 static int hclge_get_all_rules(struct hnae3_handle *handle,
6083                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6084 {
6085         struct hclge_vport *vport = hclge_get_vport(handle);
6086         struct hclge_dev *hdev = vport->back;
6087         struct hclge_fd_rule *rule;
6088         struct hlist_node *node2;
6089         int cnt = 0;
6090
6091         if (!hnae3_dev_fd_supported(hdev))
6092                 return -EOPNOTSUPP;
6093
6094         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6095
6096         spin_lock_bh(&hdev->fd_rule_lock);
6097         hlist_for_each_entry_safe(rule, node2,
6098                                   &hdev->fd_rule_list, rule_node) {
6099                 if (cnt == cmd->rule_cnt) {
6100                         spin_unlock_bh(&hdev->fd_rule_lock);
6101                         return -EMSGSIZE;
6102                 }
6103
6104                 rule_locs[cnt] = rule->location;
6105                 cnt++;
6106         }
6107
6108         spin_unlock_bh(&hdev->fd_rule_lock);
6109
6110         cmd->rule_cnt = cnt;
6111
6112         return 0;
6113 }
6114
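/* hclge_fd_get_flow_tuples() extracts the fields needed for an aRFS rule
 * (ether type, L4 protocol, destination port and IP addresses) from the
 * flow_keys dissected by the stack, converting them to host byte order.
 */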
6115 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6116                                      struct hclge_fd_rule_tuples *tuples)
6117 {
6118 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6119 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6120
6121         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6122         tuples->ip_proto = fkeys->basic.ip_proto;
6123         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6124
6125         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6126                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6127                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6128         } else {
6129                 int i;
6130
6131                 for (i = 0; i < IPV6_SIZE; i++) {
6132                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6133                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6134                 }
6135         }
6136 }
6137
6138 /* traverse all rules, check whether an existing rule has the same tuples */
6139 static struct hclge_fd_rule *
6140 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6141                           const struct hclge_fd_rule_tuples *tuples)
6142 {
6143         struct hclge_fd_rule *rule = NULL;
6144         struct hlist_node *node;
6145
6146         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6147                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6148                         return rule;
6149         }
6150
6151         return NULL;
6152 }
6153
6154 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6155                                      struct hclge_fd_rule *rule)
6156 {
6157         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6158                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6159                              BIT(INNER_SRC_PORT);
6160         rule->action = 0;
6161         rule->vf_id = 0;
6162         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6163         if (tuples->ether_proto == ETH_P_IP) {
6164                 if (tuples->ip_proto == IPPROTO_TCP)
6165                         rule->flow_type = TCP_V4_FLOW;
6166                 else
6167                         rule->flow_type = UDP_V4_FLOW;
6168         } else {
6169                 if (tuples->ip_proto == IPPROTO_TCP)
6170                         rule->flow_type = TCP_V6_FLOW;
6171                 else
6172                         rule->flow_type = UDP_V6_FLOW;
6173         }
6174         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6175         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6176 }
6177
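/* hclge_add_fd_entry_by_arfs() backs ndo_rx_flow_steer: for a flow reported
 * by aRFS it either creates a new flow director rule in a free TCAM location
 * or updates the queue of an existing rule with the same tuples. It is
 * mutually exclusive with rules configured through ethtool.
 */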
6178 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6179                                       u16 flow_id, struct flow_keys *fkeys)
6180 {
6181         struct hclge_vport *vport = hclge_get_vport(handle);
6182         struct hclge_fd_rule_tuples new_tuples;
6183         struct hclge_dev *hdev = vport->back;
6184         struct hclge_fd_rule *rule;
6185         u16 tmp_queue_id;
6186         u16 bit_id;
6187         int ret;
6188
6189         if (!hnae3_dev_fd_supported(hdev))
6190                 return -EOPNOTSUPP;
6191
6192         memset(&new_tuples, 0, sizeof(new_tuples));
6193         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6194
6195         spin_lock_bh(&hdev->fd_rule_lock);
6196
6197         /* when there is already an fd rule added by the user,
6198          * arfs should not work
6199          */
6200         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6201                 spin_unlock_bh(&hdev->fd_rule_lock);
6202
6203                 return -EOPNOTSUPP;
6204         }
6205
6206         /* check whether a flow director filter already exists for this
6207          * flow; if not, create a new filter for it;
6208          * if a filter exists with a different queue id, modify the filter;
6209          * if a filter exists with the same queue id, do nothing
6210          */
6211         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6212         if (!rule) {
6213                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6214                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6215                         spin_unlock_bh(&hdev->fd_rule_lock);
6216
6217                         return -ENOSPC;
6218                 }
6219
6220                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6221                 if (!rule) {
6222                         spin_unlock_bh(&hdev->fd_rule_lock);
6223
6224                         return -ENOMEM;
6225                 }
6226
6227                 set_bit(bit_id, hdev->fd_bmap);
6228                 rule->location = bit_id;
6229                 rule->flow_id = flow_id;
6230                 rule->queue_id = queue_id;
6231                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6232                 ret = hclge_fd_config_rule(hdev, rule);
6233
6234                 spin_unlock_bh(&hdev->fd_rule_lock);
6235
6236                 if (ret)
6237                         return ret;
6238
6239                 return rule->location;
6240         }
6241
6242         spin_unlock_bh(&hdev->fd_rule_lock);
6243
6244         if (rule->queue_id == queue_id)
6245                 return rule->location;
6246
6247         tmp_queue_id = rule->queue_id;
6248         rule->queue_id = queue_id;
6249         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6250         if (ret) {
6251                 rule->queue_id = tmp_queue_id;
6252                 return ret;
6253         }
6254
6255         return rule->location;
6256 }
6257
6258 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6259 {
6260 #ifdef CONFIG_RFS_ACCEL
6261         struct hnae3_handle *handle = &hdev->vport[0].nic;
6262         struct hclge_fd_rule *rule;
6263         struct hlist_node *node;
6264         HLIST_HEAD(del_list);
6265
6266         spin_lock_bh(&hdev->fd_rule_lock);
6267         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6268                 spin_unlock_bh(&hdev->fd_rule_lock);
6269                 return;
6270         }
6271         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6272                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6273                                         rule->flow_id, rule->location)) {
6274                         hlist_del_init(&rule->rule_node);
6275                         hlist_add_head(&rule->rule_node, &del_list);
6276                         hdev->hclge_fd_rule_num--;
6277                         clear_bit(rule->location, hdev->fd_bmap);
6278                 }
6279         }
6280         spin_unlock_bh(&hdev->fd_rule_lock);
6281
6282         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6283                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6284                                      rule->location, NULL, false);
6285                 kfree(rule);
6286         }
6287 #endif
6288 }
6289
6290 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6291 {
6292 #ifdef CONFIG_RFS_ACCEL
6293         struct hclge_vport *vport = hclge_get_vport(handle);
6294         struct hclge_dev *hdev = vport->back;
6295
6296         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6297                 hclge_del_all_fd_entries(handle, true);
6298 #endif
6299 }
6300
6301 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6302 {
6303         struct hclge_vport *vport = hclge_get_vport(handle);
6304         struct hclge_dev *hdev = vport->back;
6305
6306         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6307                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6308 }
6309
6310 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6311 {
6312         struct hclge_vport *vport = hclge_get_vport(handle);
6313         struct hclge_dev *hdev = vport->back;
6314
6315         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6316 }
6317
6318 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6319 {
6320         struct hclge_vport *vport = hclge_get_vport(handle);
6321         struct hclge_dev *hdev = vport->back;
6322
6323         return hdev->rst_stats.hw_reset_done_cnt;
6324 }
6325
6326 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6327 {
6328         struct hclge_vport *vport = hclge_get_vport(handle);
6329         struct hclge_dev *hdev = vport->back;
6330         bool clear;
6331
6332         hdev->fd_en = enable;
6333         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6334         if (!enable)
6335                 hclge_del_all_fd_entries(handle, clear);
6336         else
6337                 hclge_restore_fd_entries(handle);
6338 }
6339
6340 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6341 {
6342         struct hclge_desc desc;
6343         struct hclge_config_mac_mode_cmd *req =
6344                 (struct hclge_config_mac_mode_cmd *)desc.data;
6345         u32 loop_en = 0;
6346         int ret;
6347
6348         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6349
6350         if (enable) {
6351                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6352                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6353                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6354                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6355                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6356                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6357                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6358                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6359                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6360                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6361         }
6362
6363         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6364
6365         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6366         if (ret)
6367                 dev_err(&hdev->pdev->dev,
6368                         "mac enable fail, ret =%d.\n", ret);
6369 }
6370
6371 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6372                                      u8 switch_param, u8 param_mask)
6373 {
6374         struct hclge_mac_vlan_switch_cmd *req;
6375         struct hclge_desc desc;
6376         u32 func_id;
6377         int ret;
6378
6379         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6380         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6381
6382         /* read current config parameter */
6383         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6384                                    true);
6385         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6386         req->func_id = cpu_to_le32(func_id);
6387
6388         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6389         if (ret) {
6390                 dev_err(&hdev->pdev->dev,
6391                         "read mac vlan switch parameter fail, ret = %d\n", ret);
6392                 return ret;
6393         }
6394
6395         /* modify and write new config parameter */
6396         hclge_cmd_reuse_desc(&desc, false);
6397         req->switch_param = (req->switch_param & param_mask) | switch_param;
6398         req->param_mask = param_mask;
6399
6400         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6401         if (ret)
6402                 dev_err(&hdev->pdev->dev,
6403                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6404         return ret;
6405 }
6406
6407 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6408                                        int link_ret)
6409 {
6410 #define HCLGE_PHY_LINK_STATUS_NUM  200
6411
6412         struct phy_device *phydev = hdev->hw.mac.phydev;
6413         int i = 0;
6414         int ret;
6415
6416         do {
6417                 ret = phy_read_status(phydev);
6418                 if (ret) {
6419                         dev_err(&hdev->pdev->dev,
6420                                 "phy update link status fail, ret = %d\n", ret);
6421                         return;
6422                 }
6423
6424                 if (phydev->link == link_ret)
6425                         break;
6426
6427                 msleep(HCLGE_LINK_STATUS_MS);
6428         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6429 }
6430
6431 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6432 {
6433 #define HCLGE_MAC_LINK_STATUS_NUM  100
6434
6435         int i = 0;
6436         int ret;
6437
6438         do {
6439                 ret = hclge_get_mac_link_status(hdev);
6440                 if (ret < 0)
6441                         return ret;
6442                 else if (ret == link_ret)
6443                         return 0;
6444
6445                 msleep(HCLGE_LINK_STATUS_MS);
6446         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6447         return -EBUSY;
6448 }
6449
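/* hclge_mac_phy_link_status_wait() waits for the PHY (when applicable) and
 * then the MAC to report the expected link state after a loopback change,
 * polling every HCLGE_LINK_STATUS_MS milliseconds with a bounded retry count.
 */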
6450 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6451                                           bool is_phy)
6452 {
6453 #define HCLGE_LINK_STATUS_DOWN 0
6454 #define HCLGE_LINK_STATUS_UP   1
6455
6456         int link_ret;
6457
6458         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6459
6460         if (is_phy)
6461                 hclge_phy_link_status_wait(hdev, link_ret);
6462
6463         return hclge_mac_link_status_wait(hdev, link_ret);
6464 }
6465
6466 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6467 {
6468         struct hclge_config_mac_mode_cmd *req;
6469         struct hclge_desc desc;
6470         u32 loop_en;
6471         int ret;
6472
6473         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6474         /* 1 Read out the MAC mode config first */
6475         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6476         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6477         if (ret) {
6478                 dev_err(&hdev->pdev->dev,
6479                         "mac loopback get fail, ret =%d.\n", ret);
6480                 return ret;
6481         }
6482
6483         /* 2 Then set up the loopback flag */
6484         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6485         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6486         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6487         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6488
6489         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6490
6491         /* 3 Config mac work mode with loopback flag
6492          * and its original configuration parameters
6493          */
6494         hclge_cmd_reuse_desc(&desc, false);
6495         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6496         if (ret)
6497                 dev_err(&hdev->pdev->dev,
6498                         "mac loopback set fail, ret =%d.\n", ret);
6499         return ret;
6500 }
6501
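/* hclge_cfg_serdes_loopback() asks the firmware to enable or disable the
 * selected serdes loopback mode, then polls the command result (up to
 * HCLGE_SERDES_RETRY_NUM times, HCLGE_SERDES_RETRY_MS apart) until the
 * firmware reports the operation as done and successful.
 */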
6502 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6503                                      enum hnae3_loop loop_mode)
6504 {
6505 #define HCLGE_SERDES_RETRY_MS   10
6506 #define HCLGE_SERDES_RETRY_NUM  100
6507
6508         struct hclge_serdes_lb_cmd *req;
6509         struct hclge_desc desc;
6510         int ret, i = 0;
6511         u8 loop_mode_b;
6512
6513         req = (struct hclge_serdes_lb_cmd *)desc.data;
6514         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6515
6516         switch (loop_mode) {
6517         case HNAE3_LOOP_SERIAL_SERDES:
6518                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6519                 break;
6520         case HNAE3_LOOP_PARALLEL_SERDES:
6521                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6522                 break;
6523         default:
6524                 dev_err(&hdev->pdev->dev,
6525                         "unsupported serdes loopback mode %d\n", loop_mode);
6526                 return -ENOTSUPP;
6527         }
6528
6529         if (en) {
6530                 req->enable = loop_mode_b;
6531                 req->mask = loop_mode_b;
6532         } else {
6533                 req->mask = loop_mode_b;
6534         }
6535
6536         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6537         if (ret) {
6538                 dev_err(&hdev->pdev->dev,
6539                         "serdes loopback set fail, ret = %d\n", ret);
6540                 return ret;
6541         }
6542
6543         do {
6544                 msleep(HCLGE_SERDES_RETRY_MS);
6545                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6546                                            true);
6547                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6548                 if (ret) {
6549                         dev_err(&hdev->pdev->dev,
6550                                 "serdes loopback get fail, ret = %d\n", ret);
6551                         return ret;
6552                 }
6553         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6554                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6555
6556         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6557                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6558                 return -EBUSY;
6559         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6560                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6561                 return -EIO;
6562         }
6563         return ret;
6564 }
6565
6566 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6567                                      enum hnae3_loop loop_mode)
6568 {
6569         int ret;
6570
6571         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6572         if (ret)
6573                 return ret;
6574
6575         hclge_cfg_mac_mode(hdev, en);
6576
6577         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6578         if (ret)
6579                 dev_err(&hdev->pdev->dev,
6580                         "serdes loopback config mac mode timeout\n");
6581
6582         return ret;
6583 }
6584
6585 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6586                                      struct phy_device *phydev)
6587 {
6588         int ret;
6589
6590         if (!phydev->suspended) {
6591                 ret = phy_suspend(phydev);
6592                 if (ret)
6593                         return ret;
6594         }
6595
6596         ret = phy_resume(phydev);
6597         if (ret)
6598                 return ret;
6599
6600         return phy_loopback(phydev, true);
6601 }
6602
6603 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6604                                       struct phy_device *phydev)
6605 {
6606         int ret;
6607
6608         ret = phy_loopback(phydev, false);
6609         if (ret)
6610                 return ret;
6611
6612         return phy_suspend(phydev);
6613 }
6614
6615 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6616 {
6617         struct phy_device *phydev = hdev->hw.mac.phydev;
6618         int ret;
6619
6620         if (!phydev)
6621                 return -ENOTSUPP;
6622
6623         if (en)
6624                 ret = hclge_enable_phy_loopback(hdev, phydev);
6625         else
6626                 ret = hclge_disable_phy_loopback(hdev, phydev);
6627         if (ret) {
6628                 dev_err(&hdev->pdev->dev,
6629                         "set phy loopback fail, ret = %d\n", ret);
6630                 return ret;
6631         }
6632
6633         hclge_cfg_mac_mode(hdev, en);
6634
6635         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6636         if (ret)
6637                 dev_err(&hdev->pdev->dev,
6638                         "phy loopback config mac mode timeout\n");
6639
6640         return ret;
6641 }
6642
6643 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6644                             int stream_id, bool enable)
6645 {
6646         struct hclge_desc desc;
6647         struct hclge_cfg_com_tqp_queue_cmd *req =
6648                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6649         int ret;
6650
6651         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6652         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6653         req->stream_id = cpu_to_le16(stream_id);
6654         if (enable)
6655                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6656
6657         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6658         if (ret)
6659                 dev_err(&hdev->pdev->dev,
6660                         "Tqp enable fail, status =%d.\n", ret);
6661         return ret;
6662 }
6663
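/* hclge_set_loopback() is the common entry for the self-test loopbacks: it
 * opens the SSU switch on revision 0x21+ devices so looped packets can reach
 * the MAC, selects the requested loopback (APP, serdes or PHY), and finally
 * enables or disables all TQPs of the vport for the test.
 */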
6664 static int hclge_set_loopback(struct hnae3_handle *handle,
6665                               enum hnae3_loop loop_mode, bool en)
6666 {
6667         struct hclge_vport *vport = hclge_get_vport(handle);
6668         struct hnae3_knic_private_info *kinfo;
6669         struct hclge_dev *hdev = vport->back;
6670         int i, ret;
6671
6672         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6673          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6674          * the same, the packets are looped back in the SSU. If SSU loopback
6675          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6676          */
6677         if (hdev->pdev->revision >= 0x21) {
6678                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6679
6680                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6681                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6682                 if (ret)
6683                         return ret;
6684         }
6685
6686         switch (loop_mode) {
6687         case HNAE3_LOOP_APP:
6688                 ret = hclge_set_app_loopback(hdev, en);
6689                 break;
6690         case HNAE3_LOOP_SERIAL_SERDES:
6691         case HNAE3_LOOP_PARALLEL_SERDES:
6692                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6693                 break;
6694         case HNAE3_LOOP_PHY:
6695                 ret = hclge_set_phy_loopback(hdev, en);
6696                 break;
6697         default:
6698                 ret = -ENOTSUPP;
6699                 dev_err(&hdev->pdev->dev,
6700                         "loop_mode %d is not supported\n", loop_mode);
6701                 break;
6702         }
6703
6704         if (ret)
6705                 return ret;
6706
6707         kinfo = &vport->nic.kinfo;
6708         for (i = 0; i < kinfo->num_tqps; i++) {
6709                 ret = hclge_tqp_enable(hdev, i, 0, en);
6710                 if (ret)
6711                         return ret;
6712         }
6713
6714         return 0;
6715 }
6716
6717 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6718 {
6719         int ret;
6720
6721         ret = hclge_set_app_loopback(hdev, false);
6722         if (ret)
6723                 return ret;
6724
6725         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6726         if (ret)
6727                 return ret;
6728
6729         return hclge_cfg_serdes_loopback(hdev, false,
6730                                          HNAE3_LOOP_PARALLEL_SERDES);
6731 }
6732
6733 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6734 {
6735         struct hclge_vport *vport = hclge_get_vport(handle);
6736         struct hnae3_knic_private_info *kinfo;
6737         struct hnae3_queue *queue;
6738         struct hclge_tqp *tqp;
6739         int i;
6740
6741         kinfo = &vport->nic.kinfo;
6742         for (i = 0; i < kinfo->num_tqps; i++) {
6743                 queue = handle->kinfo.tqp[i];
6744                 tqp = container_of(queue, struct hclge_tqp, q);
6745                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6746         }
6747 }
6748
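/* hclge_flush_link_update() busy-waits, bounded by HCLGE_FLUSH_LINK_TIMEOUT
 * iterations, until the service task has finished any in-flight link status
 * update, so that disabling the timer task cannot race with link reporting.
 */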
6749 static void hclge_flush_link_update(struct hclge_dev *hdev)
6750 {
6751 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
6752
6753         unsigned long last = hdev->serv_processed_cnt;
6754         int i = 0;
6755
6756         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6757                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6758                last == hdev->serv_processed_cnt)
6759                 usleep_range(1, 1);
6760 }
6761
6762 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6763 {
6764         struct hclge_vport *vport = hclge_get_vport(handle);
6765         struct hclge_dev *hdev = vport->back;
6766
6767         if (enable) {
6768                 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6769         } else {
6770                 /* Set the DOWN flag here to disable link updating */
6771                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6772
6773                 /* flush memory to make sure DOWN is seen by service task */
6774                 smp_mb__before_atomic();
6775                 hclge_flush_link_update(hdev);
6776         }
6777 }
6778
6779 static int hclge_ae_start(struct hnae3_handle *handle)
6780 {
6781         struct hclge_vport *vport = hclge_get_vport(handle);
6782         struct hclge_dev *hdev = vport->back;
6783
6784         /* mac enable */
6785         hclge_cfg_mac_mode(hdev, true);
6786         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6787         hdev->hw.mac.link = 0;
6788
6789         /* reset tqp stats */
6790         hclge_reset_tqp_stats(handle);
6791
6792         hclge_mac_start_phy(hdev);
6793
6794         return 0;
6795 }
6796
6797 static void hclge_ae_stop(struct hnae3_handle *handle)
6798 {
6799         struct hclge_vport *vport = hclge_get_vport(handle);
6800         struct hclge_dev *hdev = vport->back;
6801         int i;
6802
6803         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6804
6805         hclge_clear_arfs_rules(handle);
6806
6807         /* If it is not a PF reset, the firmware will disable the MAC,
6808          * so we only need to stop the phy here.
6809          */
6810         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6811             hdev->reset_type != HNAE3_FUNC_RESET) {
6812                 hclge_mac_stop_phy(hdev);
6813                 hclge_update_link_status(hdev);
6814                 return;
6815         }
6816
6817         for (i = 0; i < handle->kinfo.num_tqps; i++)
6818                 hclge_reset_tqp(handle, i);
6819
6820         hclge_config_mac_tnl_int(hdev, false);
6821
6822         /* Mac disable */
6823         hclge_cfg_mac_mode(hdev, false);
6824
6825         hclge_mac_stop_phy(hdev);
6826
6827         /* reset tqp stats */
6828         hclge_reset_tqp_stats(handle);
6829         hclge_update_link_status(hdev);
6830 }
6831
6832 int hclge_vport_start(struct hclge_vport *vport)
6833 {
6834         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6835         vport->last_active_jiffies = jiffies;
6836         return 0;
6837 }
6838
6839 void hclge_vport_stop(struct hclge_vport *vport)
6840 {
6841         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6842 }
6843
6844 static int hclge_client_start(struct hnae3_handle *handle)
6845 {
6846         struct hclge_vport *vport = hclge_get_vport(handle);
6847
6848         return hclge_vport_start(vport);
6849 }
6850
6851 static void hclge_client_stop(struct hnae3_handle *handle)
6852 {
6853         struct hclge_vport *vport = hclge_get_vport(handle);
6854
6855         hclge_vport_stop(vport);
6856 }
6857
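/* hclge_get_mac_vlan_cmd_status() translates the firmware completion code of
 * a MAC/VLAN table command into an errno: unicast or multicast table overflow
 * becomes -ENOSPC, a remove/lookup miss becomes -ENOENT, and any undefined
 * code is reported as -EIO.
 */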
6858 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6859                                          u16 cmdq_resp, u8  resp_code,
6860                                          enum hclge_mac_vlan_tbl_opcode op)
6861 {
6862         struct hclge_dev *hdev = vport->back;
6863
6864         if (cmdq_resp) {
6865                 dev_err(&hdev->pdev->dev,
6866                         "cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
6867                         cmdq_resp);
6868                 return -EIO;
6869         }
6870
6871         if (op == HCLGE_MAC_VLAN_ADD) {
6872                 if (!resp_code || resp_code == 1) {
6873                         return 0;
6874                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6875                         dev_err(&hdev->pdev->dev,
6876                                 "add mac addr failed for uc_overflow.\n");
6877                         return -ENOSPC;
6878                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6879                         dev_err(&hdev->pdev->dev,
6880                                 "add mac addr failed for mc_overflow.\n");
6881                         return -ENOSPC;
6882                 }
6883
6884                 dev_err(&hdev->pdev->dev,
6885                         "add mac addr failed for undefined, code=%u.\n",
6886                         resp_code);
6887                 return -EIO;
6888         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6889                 if (!resp_code) {
6890                         return 0;
6891                 } else if (resp_code == 1) {
6892                         dev_dbg(&hdev->pdev->dev,
6893                                 "remove mac addr failed for miss.\n");
6894                         return -ENOENT;
6895                 }
6896
6897                 dev_err(&hdev->pdev->dev,
6898                         "remove mac addr failed for undefined, code=%u.\n",
6899                         resp_code);
6900                 return -EIO;
6901         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6902                 if (!resp_code) {
6903                         return 0;
6904                 } else if (resp_code == 1) {
6905                         dev_dbg(&hdev->pdev->dev,
6906                                 "lookup mac addr failed for miss.\n");
6907                         return -ENOENT;
6908                 }
6909
6910                 dev_err(&hdev->pdev->dev,
6911                         "lookup mac addr failed for undefined, code=%u.\n",
6912                         resp_code);
6913                 return -EIO;
6914         }
6915
6916         dev_err(&hdev->pdev->dev,
6917                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6918
6919         return -EINVAL;
6920 }
6921
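/* hclge_update_desc_vfid() sets or clears one function's bit in the VF
 * bitmap spread across the 2nd and 3rd command descriptors: vfid 0..191
 * lives in desc[1] and vfid 192..255 in desc[2], 32 ids per data word.
 * For example, vfid 35 maps to desc[1].data[1] bit 3, and vfid 200 maps to
 * desc[2].data[0] bit 8.
 */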
6922 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6923 {
6924 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6925
6926         unsigned int word_num;
6927         unsigned int bit_num;
6928
6929         if (vfid > 255 || vfid < 0)
6930                 return -EIO;
6931
6932         if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6933                 word_num = vfid / 32;
6934                 bit_num  = vfid % 32;
6935                 if (clr)
6936                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6937                 else
6938                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6939         } else {
6940                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6941                 bit_num  = vfid % 32;
6942                 if (clr)
6943                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6944                 else
6945                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6946         }
6947
6948         return 0;
6949 }
6950
6951 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6952 {
6953 #define HCLGE_DESC_NUMBER 3
6954 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6955         int i, j;
6956
6957         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6958                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6959                         if (desc[i].data[j])
6960                                 return false;
6961
6962         return true;
6963 }
6964
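/* hclge_prepare_mac_addr() packs a 6-byte MAC address into the 32-bit high
 * and 16-bit low fields of a MAC/VLAN table entry, lowest address byte
 * first: for 00:11:22:33:44:55 this yields mac_addr_hi32 = 0x33221100 and
 * mac_addr_lo16 = 0x5544. Multicast entries additionally get the multicast
 * bits set in entry_type and mc_mac_en.
 */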
6965 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6966                                    const u8 *addr, bool is_mc)
6967 {
6968         const unsigned char *mac_addr = addr;
6969         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6970                        (mac_addr[0]) | (mac_addr[1] << 8);
6971         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6972
6973         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6974         if (is_mc) {
6975                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6976                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6977         }
6978
6979         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6980         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6981 }
6982
6983 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6984                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6985 {
6986         struct hclge_dev *hdev = vport->back;
6987         struct hclge_desc desc;
6988         u8 resp_code;
6989         u16 retval;
6990         int ret;
6991
6992         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6993
6994         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6995
6996         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6997         if (ret) {
6998                 dev_err(&hdev->pdev->dev,
6999                         "del mac addr failed for cmd_send, ret =%d.\n",
7000                         ret);
7001                 return ret;
7002         }
7003         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7004         retval = le16_to_cpu(desc.retval);
7005
7006         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7007                                              HCLGE_MAC_VLAN_REMOVE);
7008 }
7009
7010 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7011                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7012                                      struct hclge_desc *desc,
7013                                      bool is_mc)
7014 {
7015         struct hclge_dev *hdev = vport->back;
7016         u8 resp_code;
7017         u16 retval;
7018         int ret;
7019
7020         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7021         if (is_mc) {
7022                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7023                 memcpy(desc[0].data,
7024                        req,
7025                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7026                 hclge_cmd_setup_basic_desc(&desc[1],
7027                                            HCLGE_OPC_MAC_VLAN_ADD,
7028                                            true);
7029                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7030                 hclge_cmd_setup_basic_desc(&desc[2],
7031                                            HCLGE_OPC_MAC_VLAN_ADD,
7032                                            true);
7033                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7034         } else {
7035                 memcpy(desc[0].data,
7036                        req,
7037                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7038                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7039         }
7040         if (ret) {
7041                 dev_err(&hdev->pdev->dev,
7042                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7043                         ret);
7044                 return ret;
7045         }
7046         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7047         retval = le16_to_cpu(desc[0].retval);
7048
7049         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7050                                              HCLGE_MAC_VLAN_LKUP);
7051 }
7052
7053 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7054                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7055                                   struct hclge_desc *mc_desc)
7056 {
7057         struct hclge_dev *hdev = vport->back;
7058         int cfg_status;
7059         u8 resp_code;
7060         u16 retval;
7061         int ret;
7062
7063         if (!mc_desc) {
7064                 struct hclge_desc desc;
7065
7066                 hclge_cmd_setup_basic_desc(&desc,
7067                                            HCLGE_OPC_MAC_VLAN_ADD,
7068                                            false);
7069                 memcpy(desc.data, req,
7070                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7071                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7072                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7073                 retval = le16_to_cpu(desc.retval);
7074
7075                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7076                                                            resp_code,
7077                                                            HCLGE_MAC_VLAN_ADD);
7078         } else {
7079                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7080                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7081                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7082                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7083                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7084                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7085                 memcpy(mc_desc[0].data, req,
7086                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7087                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7088                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7089                 retval = le16_to_cpu(mc_desc[0].retval);
7090
7091                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7092                                                            resp_code,
7093                                                            HCLGE_MAC_VLAN_ADD);
7094         }
7095
7096         if (ret) {
7097                 dev_err(&hdev->pdev->dev,
7098                         "add mac addr failed for cmd_send, ret =%d.\n",
7099                         ret);
7100                 return ret;
7101         }
7102
7103         return cfg_status;
7104 }
7105
7106 static int hclge_init_umv_space(struct hclge_dev *hdev)
7107 {
7108         u16 allocated_size = 0;
7109         int ret;
7110
7111         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7112                                   true);
7113         if (ret)
7114                 return ret;
7115
7116         if (allocated_size < hdev->wanted_umv_size)
7117                 dev_warn(&hdev->pdev->dev,
7118                          "Alloc umv space failed, want %u, get %u\n",
7119                          hdev->wanted_umv_size, allocated_size);
7120
7121         mutex_init(&hdev->umv_mutex);
7122         hdev->max_umv_size = allocated_size;
7123         /* divide max_umv_size by (hdev->num_req_vfs + 2) in order to
7124          * reserve some unicast mac vlan table entries to be shared by
7125          * the pf and its vfs.
7126          */
7127         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7128         hdev->share_umv_size = hdev->priv_umv_size +
7129                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
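        /* For example, with an allocated max_umv_size of 300 and num_req_vfs
         * of 6, the divisor is 8, so priv_umv_size = 300 / 8 = 37 entries per
         * function and share_umv_size = 37 + (300 % 8) = 41 entries shared by
         * the pf and all of its vfs.
         */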
7130
7131         return 0;
7132 }
7133
7134 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7135 {
7136         int ret;
7137
7138         if (hdev->max_umv_size > 0) {
7139                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7140                                           false);
7141                 if (ret)
7142                         return ret;
7143                 hdev->max_umv_size = 0;
7144         }
7145         mutex_destroy(&hdev->umv_mutex);
7146
7147         return 0;
7148 }
7149
7150 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7151                                u16 *allocated_size, bool is_alloc)
7152 {
7153         struct hclge_umv_spc_alc_cmd *req;
7154         struct hclge_desc desc;
7155         int ret;
7156
7157         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7158         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7159         if (!is_alloc)
7160                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7161
7162         req->space_size = cpu_to_le32(space_size);
7163
7164         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7165         if (ret) {
7166                 dev_err(&hdev->pdev->dev,
7167                         "%s umv space failed for cmd_send, ret =%d\n",
7168                         is_alloc ? "allocate" : "free", ret);
7169                 return ret;
7170         }
7171
7172         if (is_alloc && allocated_size)
7173                 *allocated_size = le32_to_cpu(desc.data[1]);
7174
7175         return 0;
7176 }
7177
7178 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7179 {
7180         struct hclge_vport *vport;
7181         int i;
7182
7183         for (i = 0; i < hdev->num_alloc_vport; i++) {
7184                 vport = &hdev->vport[i];
7185                 vport->used_umv_num = 0;
7186         }
7187
7188         mutex_lock(&hdev->umv_mutex);
7189         hdev->share_umv_size = hdev->priv_umv_size +
7190                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7191         mutex_unlock(&hdev->umv_mutex);
7192 }
7193
7194 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7195 {
7196         struct hclge_dev *hdev = vport->back;
7197         bool is_full;
7198
7199         mutex_lock(&hdev->umv_mutex);
7200         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7201                    hdev->share_umv_size == 0);
7202         mutex_unlock(&hdev->umv_mutex);
7203
7204         return is_full;
7205 }
7206
7207 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7208 {
7209         struct hclge_dev *hdev = vport->back;
7210
7211         mutex_lock(&hdev->umv_mutex);
7212         if (is_free) {
7213                 if (vport->used_umv_num > hdev->priv_umv_size)
7214                         hdev->share_umv_size++;
7215
7216                 if (vport->used_umv_num > 0)
7217                         vport->used_umv_num--;
7218         } else {
7219                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7220                     hdev->share_umv_size > 0)
7221                         hdev->share_umv_size--;
7222                 vport->used_umv_num++;
7223         }
7224         mutex_unlock(&hdev->umv_mutex);
7225 }
7226
7227 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7228                              const unsigned char *addr)
7229 {
7230         struct hclge_vport *vport = hclge_get_vport(handle);
7231
7232         return hclge_add_uc_addr_common(vport, addr);
7233 }
7234
7235 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7236                              const unsigned char *addr)
7237 {
7238         struct hclge_dev *hdev = vport->back;
7239         struct hclge_mac_vlan_tbl_entry_cmd req;
7240         struct hclge_desc desc;
7241         u16 egress_port = 0;
7242         int ret;
7243
7244         /* mac addr check */
7245         if (is_zero_ether_addr(addr) ||
7246             is_broadcast_ether_addr(addr) ||
7247             is_multicast_ether_addr(addr)) {
7248                 dev_err(&hdev->pdev->dev,
7249                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7250                          addr, is_zero_ether_addr(addr),
7251                          is_broadcast_ether_addr(addr),
7252                          is_multicast_ether_addr(addr));
7253                 return -EINVAL;
7254         }
7255
7256         memset(&req, 0, sizeof(req));
7257
7258         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7259                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7260
7261         req.egress_port = cpu_to_le16(egress_port);
7262
7263         hclge_prepare_mac_addr(&req, addr, false);
7264
7265         /* Look up the mac address in the mac_vlan table, and add
7266          * it if the entry does not exist. Duplicate unicast entries
7267          * are not allowed in the mac vlan table.
7268          */
7269         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7270         if (ret == -ENOENT) {
7271                 if (!hclge_is_umv_space_full(vport)) {
7272                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7273                         if (!ret)
7274                                 hclge_update_umv_space(vport, false);
7275                         return ret;
7276                 }
7277
7278                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7279                         hdev->priv_umv_size);
7280
7281                 return -ENOSPC;
7282         }
7283
7284         /* check if we just hit the duplicate */
7285         if (!ret) {
7286                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7287                          vport->vport_id, addr);
7288                 return 0;
7289         }
7290
7291         dev_err(&hdev->pdev->dev,
7292                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7293                 addr);
7294
7295         return ret;
7296 }
7297
7298 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7299                             const unsigned char *addr)
7300 {
7301         struct hclge_vport *vport = hclge_get_vport(handle);
7302
7303         return hclge_rm_uc_addr_common(vport, addr);
7304 }
7305
7306 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7307                             const unsigned char *addr)
7308 {
7309         struct hclge_dev *hdev = vport->back;
7310         struct hclge_mac_vlan_tbl_entry_cmd req;
7311         int ret;
7312
7313         /* mac addr check */
7314         if (is_zero_ether_addr(addr) ||
7315             is_broadcast_ether_addr(addr) ||
7316             is_multicast_ether_addr(addr)) {
7317                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7318                         addr);
7319                 return -EINVAL;
7320         }
7321
7322         memset(&req, 0, sizeof(req));
7323         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7324         hclge_prepare_mac_addr(&req, addr, false);
7325         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7326         if (!ret)
7327                 hclge_update_umv_space(vport, true);
7328
7329         return ret;
7330 }
7331
7332 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7333                              const unsigned char *addr)
7334 {
7335         struct hclge_vport *vport = hclge_get_vport(handle);
7336
7337         return hclge_add_mc_addr_common(vport, addr);
7338 }
7339
7340 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7341                              const unsigned char *addr)
7342 {
7343         struct hclge_dev *hdev = vport->back;
7344         struct hclge_mac_vlan_tbl_entry_cmd req;
7345         struct hclge_desc desc[3];
7346         int status;
7347
7348         /* mac addr check */
7349         if (!is_multicast_ether_addr(addr)) {
7350                 dev_err(&hdev->pdev->dev,
7351                         "Add mc mac err! invalid mac:%pM.\n",
7352                          addr);
7353                 return -EINVAL;
7354         }
7355         memset(&req, 0, sizeof(req));
7356         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7357         hclge_prepare_mac_addr(&req, addr, true);
7358         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7359         if (status) {
7360                 /* This mac addr does not exist, add a new entry for it */
7361                 memset(desc[0].data, 0, sizeof(desc[0].data));
7362                 memset(desc[1].data, 0, sizeof(desc[0].data));
7363                 memset(desc[2].data, 0, sizeof(desc[0].data));
7364         }
7365         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7366         if (status)
7367                 return status;
7368         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7369
7370         if (status == -ENOSPC)
7371                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7372
7373         return status;
7374 }
7375
7376 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7377                             const unsigned char *addr)
7378 {
7379         struct hclge_vport *vport = hclge_get_vport(handle);
7380
7381         return hclge_rm_mc_addr_common(vport, addr);
7382 }
7383
7384 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7385                             const unsigned char *addr)
7386 {
7387         struct hclge_dev *hdev = vport->back;
7388         struct hclge_mac_vlan_tbl_entry_cmd req;
7389         enum hclge_cmd_status status;
7390         struct hclge_desc desc[3];
7391
7392         /* mac addr check */
7393         if (!is_multicast_ether_addr(addr)) {
7394                 dev_dbg(&hdev->pdev->dev,
7395                         "Remove mc mac err! invalid mac:%pM.\n",
7396                          addr);
7397                 return -EINVAL;
7398         }
7399
7400         memset(&req, 0, sizeof(req));
7401         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7402         hclge_prepare_mac_addr(&req, addr, true);
7403         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7404         if (!status) {
7405                 /* This mac addr exists, remove this handle's VFID from it */
7406                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7407                 if (status)
7408                         return status;
7409
7410                 if (hclge_is_all_function_id_zero(desc))
7411                         /* All the vfids are zero, so delete this entry */
7412                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7413                 else
7414                         /* Not all the vfids are zero, so just update the vfids */
7415                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7416
7417         } else {
7418                 /* This mac address may be in the mta table, but it cannot be
7419                  * deleted here because an mta entry represents an address
7420                  * range rather than a specific address. The delete action for
7421                  * all entries takes effect in update_mta_status, called by
7422                  * hns3_nic_set_rx_mode.
7423                  */
7424                 status = 0;
7425         }
7426
7427         return status;
7428 }
7429
7430 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7431                                enum HCLGE_MAC_ADDR_TYPE mac_type)
7432 {
7433         struct hclge_vport_mac_addr_cfg *mac_cfg;
7434         struct list_head *list;
7435
7436         if (!vport->vport_id)
7437                 return;
7438
7439         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7440         if (!mac_cfg)
7441                 return;
7442
7443         mac_cfg->hd_tbl_status = true;
7444         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7445
7446         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7447                &vport->uc_mac_list : &vport->mc_mac_list;
7448
7449         list_add_tail(&mac_cfg->node, list);
7450 }
7451
7452 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7453                               bool is_write_tbl,
7454                               enum HCLGE_MAC_ADDR_TYPE mac_type)
7455 {
7456         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7457         struct list_head *list;
7458         bool uc_flag, mc_flag;
7459
7460         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7461                &vport->uc_mac_list : &vport->mc_mac_list;
7462
7463         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7464         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7465
7466         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7467                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7468                         if (uc_flag && mac_cfg->hd_tbl_status)
7469                                 hclge_rm_uc_addr_common(vport, mac_addr);
7470
7471                         if (mc_flag && mac_cfg->hd_tbl_status)
7472                                 hclge_rm_mc_addr_common(vport, mac_addr);
7473
7474                         list_del(&mac_cfg->node);
7475                         kfree(mac_cfg);
7476                         break;
7477                 }
7478         }
7479 }
7480
7481 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7482                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7483 {
7484         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7485         struct list_head *list;
7486
7487         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7488                &vport->uc_mac_list : &vport->mc_mac_list;
7489
7490         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7491                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7492                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7493
7494                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7495                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7496
7497                 mac_cfg->hd_tbl_status = false;
7498                 if (is_del_list) {
7499                         list_del(&mac_cfg->node);
7500                         kfree(mac_cfg);
7501                 }
7502         }
7503 }
7504
7505 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7506 {
7507         struct hclge_vport_mac_addr_cfg *mac, *tmp;
7508         struct hclge_vport *vport;
7509         int i;
7510
7511         for (i = 0; i < hdev->num_alloc_vport; i++) {
7512                 vport = &hdev->vport[i];
7513                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7514                         list_del(&mac->node);
7515                         kfree(mac);
7516                 }
7517
7518                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7519                         list_del(&mac->node);
7520                         kfree(mac);
7521                 }
7522         }
7523 }
7524
7525 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7526                                               u16 cmdq_resp, u8 resp_code)
7527 {
7528 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7529 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7530 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7531 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7532
7533         int return_status;
7534
7535         if (cmdq_resp) {
7536                 dev_err(&hdev->pdev->dev,
7537                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7538                         cmdq_resp);
7539                 return -EIO;
7540         }
7541
7542         switch (resp_code) {
7543         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7544         case HCLGE_ETHERTYPE_ALREADY_ADD:
7545                 return_status = 0;
7546                 break;
7547         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7548                 dev_err(&hdev->pdev->dev,
7549                         "add mac ethertype failed for manager table overflow.\n");
7550                 return_status = -EIO;
7551                 break;
7552         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7553                 dev_err(&hdev->pdev->dev,
7554                         "add mac ethertype failed for key conflict.\n");
7555                 return_status = -EIO;
7556                 break;
7557         default:
7558                 dev_err(&hdev->pdev->dev,
7559                         "add mac ethertype failed for undefined, code=%u.\n",
7560                         resp_code);
7561                 return_status = -EIO;
7562         }
7563
7564         return return_status;
7565 }
7566
7567 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7568                                      u8 *mac_addr)
7569 {
7570         struct hclge_mac_vlan_tbl_entry_cmd req;
7571         struct hclge_dev *hdev = vport->back;
7572         struct hclge_desc desc;
7573         u16 egress_port = 0;
7574         int i;
7575
7576         if (is_zero_ether_addr(mac_addr))
7577                 return false;
7578
7579         memset(&req, 0, sizeof(req));
7580         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7581                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7582         req.egress_port = cpu_to_le16(egress_port);
7583         hclge_prepare_mac_addr(&req, mac_addr, false);
7584
7585         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7586                 return true;
7587
7588         vf_idx += HCLGE_VF_VPORT_START_NUM;
7589         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7590                 if (i != vf_idx &&
7591                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7592                         return true;
7593
7594         return false;
7595 }
7596
7597 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7598                             u8 *mac_addr)
7599 {
7600         struct hclge_vport *vport = hclge_get_vport(handle);
7601         struct hclge_dev *hdev = vport->back;
7602
7603         vport = hclge_get_vf_vport(hdev, vf);
7604         if (!vport)
7605                 return -EINVAL;
7606
7607         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7608                 dev_info(&hdev->pdev->dev,
7609                          "Specified MAC(=%pM) is same as before, no change committed!\n",
7610                          mac_addr);
7611                 return 0;
7612         }
7613
7614         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7615                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7616                         mac_addr);
7617                 return -EEXIST;
7618         }
7619
7620         ether_addr_copy(vport->vf_info.mac, mac_addr);
7621         dev_info(&hdev->pdev->dev,
7622                  "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7623                  vf, mac_addr);
7624
7625         return hclge_inform_reset_assert_to_vf(vport);
7626 }
7627
7628 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7629                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
7630 {
7631         struct hclge_desc desc;
7632         u8 resp_code;
7633         u16 retval;
7634         int ret;
7635
7636         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7637         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7638
7639         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7640         if (ret) {
7641                 dev_err(&hdev->pdev->dev,
7642                         "add mac ethertype failed for cmd_send, ret =%d.\n",
7643                         ret);
7644                 return ret;
7645         }
7646
7647         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7648         retval = le16_to_cpu(desc.retval);
7649
7650         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7651 }
7652
7653 static int init_mgr_tbl(struct hclge_dev *hdev)
7654 {
7655         int ret;
7656         int i;
7657
7658         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7659                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7660                 if (ret) {
7661                         dev_err(&hdev->pdev->dev,
7662                                 "add mac ethertype failed, ret =%d.\n",
7663                                 ret);
7664                         return ret;
7665                 }
7666         }
7667
7668         return 0;
7669 }
7670
7671 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7672 {
7673         struct hclge_vport *vport = hclge_get_vport(handle);
7674         struct hclge_dev *hdev = vport->back;
7675
7676         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7677 }
7678
7679 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7680                               bool is_first)
7681 {
7682         const unsigned char *new_addr = (const unsigned char *)p;
7683         struct hclge_vport *vport = hclge_get_vport(handle);
7684         struct hclge_dev *hdev = vport->back;
7685         int ret;
7686
7687         /* mac addr check */
7688         if (is_zero_ether_addr(new_addr) ||
7689             is_broadcast_ether_addr(new_addr) ||
7690             is_multicast_ether_addr(new_addr)) {
7691                 dev_err(&hdev->pdev->dev,
7692                         "Change uc mac err! invalid mac:%pM.\n",
7693                          new_addr);
7694                 return -EINVAL;
7695         }
7696
7697         if ((!is_first || is_kdump_kernel()) &&
7698             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7699                 dev_warn(&hdev->pdev->dev,
7700                          "remove old uc mac address fail.\n");
7701
7702         ret = hclge_add_uc_addr(handle, new_addr);
7703         if (ret) {
7704                 dev_err(&hdev->pdev->dev,
7705                         "add uc mac address fail, ret =%d.\n",
7706                         ret);
7707
7708                 if (!is_first &&
7709                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7710                         dev_err(&hdev->pdev->dev,
7711                                 "restore uc mac address fail.\n");
7712
7713                 return -EIO;
7714         }
7715
7716         ret = hclge_pause_addr_cfg(hdev, new_addr);
7717         if (ret) {
7718                 dev_err(&hdev->pdev->dev,
7719                         "configure mac pause address fail, ret =%d.\n",
7720                         ret);
7721                 return -EIO;
7722         }
7723
7724         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7725
7726         return 0;
7727 }
7728
7729 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7730                           int cmd)
7731 {
7732         struct hclge_vport *vport = hclge_get_vport(handle);
7733         struct hclge_dev *hdev = vport->back;
7734
7735         if (!hdev->hw.mac.phydev)
7736                 return -EOPNOTSUPP;
7737
7738         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7739 }
7740
7741 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7742                                       u8 fe_type, bool filter_en, u8 vf_id)
7743 {
7744         struct hclge_vlan_filter_ctrl_cmd *req;
7745         struct hclge_desc desc;
7746         int ret;
7747
7748         /* read current vlan filter parameter */
7749         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
7750         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7751         req->vlan_type = vlan_type;
7752         req->vf_id = vf_id;
7753
7754         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7755         if (ret) {
7756                 dev_err(&hdev->pdev->dev,
7757                         "failed to get vlan filter config, ret = %d.\n", ret);
7758                 return ret;
7759         }
7760
7761         /* modify and write new config parameter */
7762         hclge_cmd_reuse_desc(&desc, false);
7763         req->vlan_fe = filter_en ?
7764                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
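        /* Only the requested fe_type bits are toggled here; the other
         * filter-enable bits read back above (e.g. the NIC vs RoCE
         * ingress/egress bits) keep their current values, which is the
         * point of this read-modify-write sequence.
         */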
7765
7766         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7767         if (ret)
7768                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
7769                         ret);
7770
7771         return ret;
7772 }
7773
7774 #define HCLGE_FILTER_TYPE_VF            0
7775 #define HCLGE_FILTER_TYPE_PORT          1
7776 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7777 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7778 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7779 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7780 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7781 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7782                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7783 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7784                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7785
7786 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7787 {
7788         struct hclge_vport *vport = hclge_get_vport(handle);
7789         struct hclge_dev *hdev = vport->back;
7790
7791         if (hdev->pdev->revision >= 0x21) {
7792                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7793                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7794                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7795                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7796         } else {
7797                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7798                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7799                                            0);
7800         }
7801         if (enable)
7802                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7803         else
7804                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7805 }
7806
7807 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7808                                     bool is_kill, u16 vlan,
7809                                     __be16 proto)
7810 {
7811         struct hclge_vport *vport = &hdev->vport[vfid];
7812         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7813         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7814         struct hclge_desc desc[2];
7815         u8 vf_byte_val;
7816         u8 vf_byte_off;
7817         int ret;
7818
7819         /* If the vf vlan table is full, firmware disables the vf vlan filter,
7820          * so adding a new vlan id to it is neither possible nor necessary.
7821          * If spoof check is enabled and the vf vlan table is full, the new
7822          * vlan must not be added, because tx packets with it would be dropped.
7823          */
7824         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7825                 if (vport->vf_info.spoofchk && vlan) {
7826                         dev_err(&hdev->pdev->dev,
7827                                 "Can't add vlan because spoof check is on and the vf vlan table is full\n");
7828                         return -EPERM;
7829                 }
7830                 return 0;
7831         }
7832
7833         hclge_cmd_setup_basic_desc(&desc[0],
7834                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7835         hclge_cmd_setup_basic_desc(&desc[1],
7836                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7837
7838         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7839
7840         vf_byte_off = vfid / 8;
7841         vf_byte_val = 1 << (vfid % 8);
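        /* For example, vfid 10 gives vf_byte_off 1 and vf_byte_val 0x04;
         * assuming HCLGE_MAX_VF_BYTES is 16, bytes 0..15 of the bitmap live
         * in the first descriptor (req0) and the remainder spill into req1.
         */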
7842
7843         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7844         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7845
7846         req0->vlan_id  = cpu_to_le16(vlan);
7847         req0->vlan_cfg = is_kill;
7848
7849         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7850                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7851         else
7852                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7853
7854         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7855         if (ret) {
7856                 dev_err(&hdev->pdev->dev,
7857                         "Send vf vlan command fail, ret =%d.\n",
7858                         ret);
7859                 return ret;
7860         }
7861
7862         if (!is_kill) {
7863 #define HCLGE_VF_VLAN_NO_ENTRY  2
7864                 if (!req0->resp_code || req0->resp_code == 1)
7865                         return 0;
7866
7867                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7868                         set_bit(vfid, hdev->vf_vlan_full);
7869                         dev_warn(&hdev->pdev->dev,
7870                                  "vf vlan table is full, vf vlan filter is disabled\n");
7871                         return 0;
7872                 }
7873
7874                 dev_err(&hdev->pdev->dev,
7875                         "Add vf vlan filter fail, ret =%u.\n",
7876                         req0->resp_code);
7877         } else {
7878 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7879                 if (!req0->resp_code)
7880                         return 0;
7881
7882                 /* When the vf vlan table is full, the vf vlan filter is
7883                  * disabled and new vlan ids are not added to the table.
7884                  * Just return 0 without a warning to avoid massive verbose
7885                  * logs on unload.
7886                  */
7887                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7888                         return 0;
7889
7890                 dev_err(&hdev->pdev->dev,
7891                         "Kill vf vlan filter fail, ret =%u.\n",
7892                         req0->resp_code);
7893         }
7894
7895         return -EIO;
7896 }
7897
7898 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7899                                       u16 vlan_id, bool is_kill)
7900 {
7901         struct hclge_vlan_filter_pf_cfg_cmd *req;
7902         struct hclge_desc desc;
7903         u8 vlan_offset_byte_val;
7904         u8 vlan_offset_byte;
7905         u8 vlan_offset_160;
7906         int ret;
7907
7908         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7909
7910         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7911         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7912                            HCLGE_VLAN_BYTE_SIZE;
7913         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
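        /* Worked example, assuming HCLGE_VLAN_ID_OFFSET_STEP is 160 and
         * HCLGE_VLAN_BYTE_SIZE is 8: vlan_id 300 gives vlan_offset_160 = 1,
         * vlan_offset_byte = (300 % 160) / 8 = 17 and vlan_offset_byte_val =
         * 1 << (300 % 8) = 0x10.
         */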
7914
7915         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7916         req->vlan_offset = vlan_offset_160;
7917         req->vlan_cfg = is_kill;
7918         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7919
7920         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7921         if (ret)
7922                 dev_err(&hdev->pdev->dev,
7923                         "port vlan command, send fail, ret =%d.\n", ret);
7924         return ret;
7925 }
7926
7927 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7928                                     u16 vport_id, u16 vlan_id,
7929                                     bool is_kill)
7930 {
7931         u16 vport_idx, vport_num = 0;
7932         int ret;
7933
7934         if (is_kill && !vlan_id)
7935                 return 0;
7936
7937         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7938                                        proto);
7939         if (ret) {
7940                 dev_err(&hdev->pdev->dev,
7941                         "Set %u vport vlan filter config fail, ret =%d.\n",
7942                         vport_id, ret);
7943                 return ret;
7944         }
7945
7946         /* vlan 0 may be added twice when the 8021q module is enabled */
7947         if (!is_kill && !vlan_id &&
7948             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7949                 return 0;
7950
7951         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7952                 dev_err(&hdev->pdev->dev,
7953                         "Add port vlan failed, vport %u is already in vlan %u\n",
7954                         vport_id, vlan_id);
7955                 return -EINVAL;
7956         }
7957
7958         if (is_kill &&
7959             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7960                 dev_err(&hdev->pdev->dev,
7961                         "Delete port vlan failed, vport %u is not in vlan %u\n",
7962                         vport_id, vlan_id);
7963                 return -EINVAL;
7964         }
7965
7966         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7967                 vport_num++;
7968
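        /* The port-level filter entry is shared by all vports: program it only
         * when the first vport joins the vlan (vport_num == 1 on add) or the
         * last one leaves it (vport_num == 0 on kill).
         */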
7969         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7970                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7971                                                  is_kill);
7972
7973         return ret;
7974 }
7975
7976 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7977 {
7978         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7979         struct hclge_vport_vtag_tx_cfg_cmd *req;
7980         struct hclge_dev *hdev = vport->back;
7981         struct hclge_desc desc;
7982         u16 bmap_index;
7983         int status;
7984
7985         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7986
7987         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7988         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7989         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7990         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7991                       vcfg->accept_tag1 ? 1 : 0);
7992         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7993                       vcfg->accept_untag1 ? 1 : 0);
7994         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7995                       vcfg->accept_tag2 ? 1 : 0);
7996         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7997                       vcfg->accept_untag2 ? 1 : 0);
7998         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7999                       vcfg->insert_tag1_en ? 1 : 0);
8000         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8001                       vcfg->insert_tag2_en ? 1 : 0);
8002         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8003
8004         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8005         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8006                         HCLGE_VF_NUM_PER_BYTE;
8007         req->vf_bitmap[bmap_index] =
8008                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
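        /* For example, assuming HCLGE_VF_NUM_PER_CMD is 64 and
         * HCLGE_VF_NUM_PER_BYTE is 8, vport_id 70 selects vf_offset 1,
         * bmap_index (70 % 64) / 8 = 0 and bitmap value 1 << (70 % 8) = 0x40.
         */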
8009
8010         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8011         if (status)
8012                 dev_err(&hdev->pdev->dev,
8013                         "Send port txvlan cfg command fail, ret =%d\n",
8014                         status);
8015
8016         return status;
8017 }
8018
8019 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8020 {
8021         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8022         struct hclge_vport_vtag_rx_cfg_cmd *req;
8023         struct hclge_dev *hdev = vport->back;
8024         struct hclge_desc desc;
8025         u16 bmap_index;
8026         int status;
8027
8028         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8029
8030         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8031         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8032                       vcfg->strip_tag1_en ? 1 : 0);
8033         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8034                       vcfg->strip_tag2_en ? 1 : 0);
8035         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8036                       vcfg->vlan1_vlan_prionly ? 1 : 0);
8037         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8038                       vcfg->vlan2_vlan_prionly ? 1 : 0);
8039
8040         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8041         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8042                         HCLGE_VF_NUM_PER_BYTE;
8043         req->vf_bitmap[bmap_index] =
8044                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8045
8046         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8047         if (status)
8048                 dev_err(&hdev->pdev->dev,
8049                         "Send port rxvlan cfg command fail, ret =%d\n",
8050                         status);
8051
8052         return status;
8053 }
8054
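/* A summary of the configuration selected below: with port-based VLAN
 * disabled, tag1 frames are accepted as-is and rx stripping (when enabled)
 * acts on tag2; with port-based VLAN active, the hardware inserts vlan_tag
 * as tag1 on transmit, tag2 is always stripped on receive, and tag1
 * stripping follows rx_vlan_offload_en.
 */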
8055 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8056                                   u16 port_base_vlan_state,
8057                                   u16 vlan_tag)
8058 {
8059         int ret;
8060
8061         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8062                 vport->txvlan_cfg.accept_tag1 = true;
8063                 vport->txvlan_cfg.insert_tag1_en = false;
8064                 vport->txvlan_cfg.default_tag1 = 0;
8065         } else {
8066                 vport->txvlan_cfg.accept_tag1 = false;
8067                 vport->txvlan_cfg.insert_tag1_en = true;
8068                 vport->txvlan_cfg.default_tag1 = vlan_tag;
8069         }
8070
8071         vport->txvlan_cfg.accept_untag1 = true;
8072
8073         /* accept_tag2 and accept_untag2 are not supported on
8074          * pdev revision 0x20; newer revisions support them, but
8075          * these two fields cannot be configured by the user.
8076          */
8077         vport->txvlan_cfg.accept_tag2 = true;
8078         vport->txvlan_cfg.accept_untag2 = true;
8079         vport->txvlan_cfg.insert_tag2_en = false;
8080         vport->txvlan_cfg.default_tag2 = 0;
8081
8082         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8083                 vport->rxvlan_cfg.strip_tag1_en = false;
8084                 vport->rxvlan_cfg.strip_tag2_en =
8085                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8086         } else {
8087                 vport->rxvlan_cfg.strip_tag1_en =
8088                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8089                 vport->rxvlan_cfg.strip_tag2_en = true;
8090         }
8091         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8092         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8093
8094         ret = hclge_set_vlan_tx_offload_cfg(vport);
8095         if (ret)
8096                 return ret;
8097
8098         return hclge_set_vlan_rx_offload_cfg(vport);
8099 }
8100
8101 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8102 {
8103         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8104         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8105         struct hclge_desc desc;
8106         int status;
8107
8108         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8109         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8110         rx_req->ot_fst_vlan_type =
8111                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8112         rx_req->ot_sec_vlan_type =
8113                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8114         rx_req->in_fst_vlan_type =
8115                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8116         rx_req->in_sec_vlan_type =
8117                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8118
8119         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8120         if (status) {
8121                 dev_err(&hdev->pdev->dev,
8122                         "Send rxvlan protocol type command fail, ret =%d\n",
8123                         status);
8124                 return status;
8125         }
8126
8127         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8128
8129         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8130         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8131         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8132
8133         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8134         if (status)
8135                 dev_err(&hdev->pdev->dev,
8136                         "Send txvlan protocol type command fail, ret =%d\n",
8137                         status);
8138
8139         return status;
8140 }
8141
8142 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8143 {
8144 #define HCLGE_DEF_VLAN_TYPE             0x8100
8145
8146         struct hnae3_handle *handle = &hdev->vport[0].nic;
8147         struct hclge_vport *vport;
8148         int ret;
8149         int i;
8150
8151         if (hdev->pdev->revision >= 0x21) {
8152                 /* for revision 0x21, vf vlan filter is per function */
8153                 for (i = 0; i < hdev->num_alloc_vport; i++) {
8154                         vport = &hdev->vport[i];
8155                         ret = hclge_set_vlan_filter_ctrl(hdev,
8156                                                          HCLGE_FILTER_TYPE_VF,
8157                                                          HCLGE_FILTER_FE_EGRESS,
8158                                                          true,
8159                                                          vport->vport_id);
8160                         if (ret)
8161                                 return ret;
8162                 }
8163
8164                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8165                                                  HCLGE_FILTER_FE_INGRESS, true,
8166                                                  0);
8167                 if (ret)
8168                         return ret;
8169         } else {
8170                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8171                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
8172                                                  true, 0);
8173                 if (ret)
8174                         return ret;
8175         }
8176
8177         handle->netdev_flags |= HNAE3_VLAN_FLTR;
8178
8179         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8180         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8181         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8182         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8183         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8184         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8185
8186         ret = hclge_set_vlan_protocol_type(hdev);
8187         if (ret)
8188                 return ret;
8189
8190         for (i = 0; i < hdev->num_alloc_vport; i++) {
8191                 u16 vlan_tag;
8192
8193                 vport = &hdev->vport[i];
8194                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8195
8196                 ret = hclge_vlan_offload_cfg(vport,
8197                                              vport->port_base_vlan_cfg.state,
8198                                              vlan_tag);
8199                 if (ret)
8200                         return ret;
8201         }
8202
8203         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8204 }
8205
8206 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8207                                        bool writen_to_tbl)
8208 {
8209         struct hclge_vport_vlan_cfg *vlan;
8210
8211         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8212         if (!vlan)
8213                 return;
8214
8215         vlan->hd_tbl_status = writen_to_tbl;
8216         vlan->vlan_id = vlan_id;
8217
8218         list_add_tail(&vlan->node, &vport->vlan_list);
8219 }
8220
8221 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8222 {
8223         struct hclge_vport_vlan_cfg *vlan, *tmp;
8224         struct hclge_dev *hdev = vport->back;
8225         int ret;
8226
8227         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8228                 if (!vlan->hd_tbl_status) {
8229                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8230                                                        vport->vport_id,
8231                                                        vlan->vlan_id, false);
8232                         if (ret) {
8233                                 dev_err(&hdev->pdev->dev,
8234                                         "restore vport vlan list failed, ret=%d\n",
8235                                         ret);
8236                                 return ret;
8237                         }
8238                 }
8239                 vlan->hd_tbl_status = true;
8240         }
8241
8242         return 0;
8243 }
8244
8245 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8246                                       bool is_write_tbl)
8247 {
8248         struct hclge_vport_vlan_cfg *vlan, *tmp;
8249         struct hclge_dev *hdev = vport->back;
8250
8251         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8252                 if (vlan->vlan_id == vlan_id) {
8253                         if (is_write_tbl && vlan->hd_tbl_status)
8254                                 hclge_set_vlan_filter_hw(hdev,
8255                                                          htons(ETH_P_8021Q),
8256                                                          vport->vport_id,
8257                                                          vlan_id,
8258                                                          true);
8259
8260                         list_del(&vlan->node);
8261                         kfree(vlan);
8262                         break;
8263                 }
8264         }
8265 }
8266
8267 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8268 {
8269         struct hclge_vport_vlan_cfg *vlan, *tmp;
8270         struct hclge_dev *hdev = vport->back;
8271
8272         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8273                 if (vlan->hd_tbl_status)
8274                         hclge_set_vlan_filter_hw(hdev,
8275                                                  htons(ETH_P_8021Q),
8276                                                  vport->vport_id,
8277                                                  vlan->vlan_id,
8278                                                  true);
8279
8280                 vlan->hd_tbl_status = false;
8281                 if (is_del_list) {
8282                         list_del(&vlan->node);
8283                         kfree(vlan);
8284                 }
8285         }
8286         clear_bit(vport->vport_id, hdev->vf_vlan_full);
8287 }
8288
8289 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8290 {
8291         struct hclge_vport_vlan_cfg *vlan, *tmp;
8292         struct hclge_vport *vport;
8293         int i;
8294
8295         for (i = 0; i < hdev->num_alloc_vport; i++) {
8296                 vport = &hdev->vport[i];
8297                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8298                         list_del(&vlan->node);
8299                         kfree(vlan);
8300                 }
8301         }
8302 }
8303
8304 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8305 {
8306         struct hclge_vport *vport = hclge_get_vport(handle);
8307         struct hclge_vport_vlan_cfg *vlan, *tmp;
8308         struct hclge_dev *hdev = vport->back;
8309         u16 vlan_proto;
8310         u16 state, vlan_id;
8311         int i;
8312
8313         for (i = 0; i < hdev->num_alloc_vport; i++) {
8314                 vport = &hdev->vport[i];
8315                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8316                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8317                 state = vport->port_base_vlan_cfg.state;
8318
8319                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8320                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8321                                                  vport->vport_id, vlan_id,
8322                                                  false);
8323                         continue;
8324                 }
8325
8326                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8327                         int ret;
8328
8329                         if (!vlan->hd_tbl_status)
8330                                 continue;
8331                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8332                                                        vport->vport_id,
8333                                                        vlan->vlan_id, false);
8334                         if (ret)
8335                                 break;
8336                 }
8337         }
8338 }
8339
8340 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8341 {
8342         struct hclge_vport *vport = hclge_get_vport(handle);
8343
8344         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8345                 vport->rxvlan_cfg.strip_tag1_en = false;
8346                 vport->rxvlan_cfg.strip_tag2_en = enable;
8347         } else {
8348                 vport->rxvlan_cfg.strip_tag1_en = enable;
8349                 vport->rxvlan_cfg.strip_tag2_en = true;
8350         }
8351         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8352         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8353         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8354
8355         return hclge_set_vlan_rx_offload_cfg(vport);
8356 }
8357
8358 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8359                                             u16 port_base_vlan_state,
8360                                             struct hclge_vlan_info *new_info,
8361                                             struct hclge_vlan_info *old_info)
8362 {
8363         struct hclge_dev *hdev = vport->back;
8364         int ret;
8365
8366         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8367                 hclge_rm_vport_all_vlan_table(vport, false);
8368                 return hclge_set_vlan_filter_hw(hdev,
8369                                                  htons(new_info->vlan_proto),
8370                                                  vport->vport_id,
8371                                                  new_info->vlan_tag,
8372                                                  false);
8373         }
8374
8375         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8376                                        vport->vport_id, old_info->vlan_tag,
8377                                        true);
8378         if (ret)
8379                 return ret;
8380
8381         return hclge_add_vport_all_vlan_table(vport);
8382 }
8383
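     /* Apply a port based VLAN change (enable, disable or modify) for a
      * vport: update the TX/RX offload configuration and the hardware
      * filter entries, then record the new state and VLAN information.
      */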
8384 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8385                                     struct hclge_vlan_info *vlan_info)
8386 {
8387         struct hnae3_handle *nic = &vport->nic;
8388         struct hclge_vlan_info *old_vlan_info;
8389         struct hclge_dev *hdev = vport->back;
8390         int ret;
8391
8392         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8393
8394         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8395         if (ret)
8396                 return ret;
8397
8398         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8399                 /* add new VLAN tag */
8400                 ret = hclge_set_vlan_filter_hw(hdev,
8401                                                htons(vlan_info->vlan_proto),
8402                                                vport->vport_id,
8403                                                vlan_info->vlan_tag,
8404                                                false);
8405                 if (ret)
8406                         return ret;
8407
8408                 /* remove old VLAN tag */
8409                 ret = hclge_set_vlan_filter_hw(hdev,
8410                                                htons(old_vlan_info->vlan_proto),
8411                                                vport->vport_id,
8412                                                old_vlan_info->vlan_tag,
8413                                                true);
8414                 if (ret)
8415                         return ret;
8416
8417                 goto update;
8418         }
8419
8420         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8421                                                old_vlan_info);
8422         if (ret)
8423                 return ret;
8424
8425         /* update state only when disable/enable port based VLAN */
8426         vport->port_base_vlan_cfg.state = state;
8427         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8428                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8429         else
8430                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8431
8432 update:
8433         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8434         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8435         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8436
8437         return 0;
8438 }
8439
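     /* Translate the current port based VLAN state plus the requested VLAN
      * id into the action to take: enable, disable, modify or no change.
      */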
8440 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8441                                           enum hnae3_port_base_vlan_state state,
8442                                           u16 vlan)
8443 {
8444         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8445                 if (!vlan)
8446                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8447                 else
8448                         return HNAE3_PORT_BASE_VLAN_ENABLE;
8449         } else {
8450                 if (!vlan)
8451                         return HNAE3_PORT_BASE_VLAN_DISABLE;
8452                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8453                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8454                 else
8455                         return HNAE3_PORT_BASE_VLAN_MODIFY;
8456         }
8457 }
8458
8459 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8460                                     u16 vlan, u8 qos, __be16 proto)
8461 {
8462         struct hclge_vport *vport = hclge_get_vport(handle);
8463         struct hclge_dev *hdev = vport->back;
8464         struct hclge_vlan_info vlan_info;
8465         u16 state;
8466         int ret;
8467
8468         if (hdev->pdev->revision == 0x20)
8469                 return -EOPNOTSUPP;
8470
8471         vport = hclge_get_vf_vport(hdev, vfid);
8472         if (!vport)
8473                 return -EINVAL;
8474
8475         /* qos is a 3-bit value, so it cannot be bigger than 7 */
8476         if (vlan > VLAN_N_VID - 1 || qos > 7)
8477                 return -EINVAL;
8478         if (proto != htons(ETH_P_8021Q))
8479                 return -EPROTONOSUPPORT;
8480
8481         state = hclge_get_port_base_vlan_state(vport,
8482                                                vport->port_base_vlan_cfg.state,
8483                                                vlan);
8484         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8485                 return 0;
8486
8487         vlan_info.vlan_tag = vlan;
8488         vlan_info.qos = qos;
8489         vlan_info.vlan_proto = ntohs(proto);
8490
8491         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8492                 return hclge_update_port_base_vlan_cfg(vport, state,
8493                                                        &vlan_info);
8494         } else {
8495                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8496                                                         vport->vport_id, state,
8497                                                         vlan, qos,
8498                                                         ntohs(proto));
8499                 return ret;
8500         }
8501 }
8502
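     /* Add or remove (is_kill) a VLAN id for this handle. The hardware
      * filter is only touched when port based VLAN is disabled; otherwise
      * the VLAN is just recorded in the vport VLAN list. Failed deletions
      * are saved in vlan_del_fail_bmap and retried later.
      */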
8503 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8504                           u16 vlan_id, bool is_kill)
8505 {
8506         struct hclge_vport *vport = hclge_get_vport(handle);
8507         struct hclge_dev *hdev = vport->back;
8508         bool written_to_tbl = false;
8509         int ret = 0;
8510
8511         /* When the device is resetting, the firmware is unable to handle
8512          * the mailbox. Just record the vlan id, and remove it after the
8513          * reset has finished.
8514          */
8515         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8516                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8517                 return -EBUSY;
8518         }
8519
8520         /* when port based VLAN is enabled, we use it as the VLAN filter
8521          * entry. In this case, we don't update the VLAN filter table when
8522          * the user adds or removes a VLAN, we just update the vport VLAN
8523          * list. The VLAN ids in the VLAN list are written to the VLAN
8524          * filter table once port based VLAN is disabled.
8525          */
8526         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8527                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8528                                                vlan_id, is_kill);
8529                 written_to_tbl = true;
8530         }
8531
8532         if (!ret) {
8533                 if (is_kill)
8534                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8535                 else
8536                         hclge_add_vport_vlan_table(vport, vlan_id,
8537                                                    written_to_tbl);
8538         } else if (is_kill) {
8539                 /* when removing the hw VLAN filter failed, record the VLAN
8540                  * id, and try to remove it from hw later, to be consistent
8541                  * with the stack
8542                  */
8543                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8544         }
8545         return ret;
8546 }
8547
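     /* Periodic task: retry the VLAN deletions recorded in each vport's
      * vlan_del_fail_bmap, bounded to HCLGE_MAX_SYNC_COUNT entries per run.
      */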
8548 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8549 {
8550 #define HCLGE_MAX_SYNC_COUNT    60
8551
8552         int i, ret, sync_cnt = 0;
8553         u16 vlan_id;
8554
8555         /* sync the VLAN deletions that failed earlier for each vport */
8556         for (i = 0; i < hdev->num_alloc_vport; i++) {
8557                 struct hclge_vport *vport = &hdev->vport[i];
8558
8559                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8560                                          VLAN_N_VID);
8561                 while (vlan_id != VLAN_N_VID) {
8562                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8563                                                        vport->vport_id, vlan_id,
8564                                                        true);
8565                         if (ret && ret != -EINVAL)
8566                                 return;
8567
8568                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8569                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8570
8571                         sync_cnt++;
8572                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8573                                 return;
8574
8575                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8576                                                  VLAN_N_VID);
8577                 }
8578         }
8579 }
8580
8581 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8582 {
8583         struct hclge_config_max_frm_size_cmd *req;
8584         struct hclge_desc desc;
8585
8586         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8587
8588         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8589         req->max_frm_size = cpu_to_le16(new_mps);
8590         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8591
8592         return hclge_cmd_send(&hdev->hw, &desc, 1);
8593 }
8594
8595 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8596 {
8597         struct hclge_vport *vport = hclge_get_vport(handle);
8598
8599         return hclge_set_vport_mtu(vport, new_mtu);
8600 }
8601
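     /* Set the MTU of a vport. A VF's mps is only recorded (it must fit
      * within the PF's mps); for the PF the MAC max frame size is
      * reprogrammed and the packet buffers are re-allocated while the
      * client is brought down.
      */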
8602 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8603 {
8604         struct hclge_dev *hdev = vport->back;
8605         int i, max_frm_size, ret;
8606
8607         /* HW supports 2 layers of VLAN tags */
8608         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8609         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8610             max_frm_size > HCLGE_MAC_MAX_FRAME)
8611                 return -EINVAL;
8612
8613         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8614         mutex_lock(&hdev->vport_lock);
8615         /* VF's mps must fit within hdev->mps */
8616         if (vport->vport_id && max_frm_size > hdev->mps) {
8617                 mutex_unlock(&hdev->vport_lock);
8618                 return -EINVAL;
8619         } else if (vport->vport_id) {
8620                 vport->mps = max_frm_size;
8621                 mutex_unlock(&hdev->vport_lock);
8622                 return 0;
8623         }
8624
8625         /* PF's mps must not be less than any VF's mps */
8626         for (i = 1; i < hdev->num_alloc_vport; i++)
8627                 if (max_frm_size < hdev->vport[i].mps) {
8628                         mutex_unlock(&hdev->vport_lock);
8629                         return -EINVAL;
8630                 }
8631
8632         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8633
8634         ret = hclge_set_mac_mtu(hdev, max_frm_size);
8635         if (ret) {
8636                 dev_err(&hdev->pdev->dev,
8637                         "Change mtu fail, ret =%d\n", ret);
8638                 goto out;
8639         }
8640
8641         hdev->mps = max_frm_size;
8642         vport->mps = max_frm_size;
8643
8644         ret = hclge_buffer_alloc(hdev);
8645         if (ret)
8646                 dev_err(&hdev->pdev->dev,
8647                         "Allocate buffer fail, ret =%d\n", ret);
8648
8649 out:
8650         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8651         mutex_unlock(&hdev->vport_lock);
8652         return ret;
8653 }
8654
8655 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8656                                     bool enable)
8657 {
8658         struct hclge_reset_tqp_queue_cmd *req;
8659         struct hclge_desc desc;
8660         int ret;
8661
8662         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8663
8664         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8665         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8666         if (enable)
8667                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8668
8669         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8670         if (ret) {
8671                 dev_err(&hdev->pdev->dev,
8672                         "Send tqp reset cmd error, status =%d\n", ret);
8673                 return ret;
8674         }
8675
8676         return 0;
8677 }
8678
8679 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8680 {
8681         struct hclge_reset_tqp_queue_cmd *req;
8682         struct hclge_desc desc;
8683         int ret;
8684
8685         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8686
8687         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8688         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8689
8690         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8691         if (ret) {
8692                 dev_err(&hdev->pdev->dev,
8693                         "Get reset status error, status =%d\n", ret);
8694                 return ret;
8695         }
8696
8697         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8698 }
8699
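     /* Convert a queue id local to the handle into the global TQP index. */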
8700 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8701 {
8702         struct hnae3_queue *queue;
8703         struct hclge_tqp *tqp;
8704
8705         queue = handle->kinfo.tqp[queue_id];
8706         tqp = container_of(queue, struct hclge_tqp, q);
8707
8708         return tqp->index;
8709 }
8710
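     /* Reset a single TQP: disable it, assert the queue reset through the
      * firmware, poll until the hardware reports it is ready to reset, then
      * de-assert the reset.
      */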
8711 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8712 {
8713         struct hclge_vport *vport = hclge_get_vport(handle);
8714         struct hclge_dev *hdev = vport->back;
8715         int reset_try_times = 0;
8716         int reset_status;
8717         u16 queue_gid;
8718         int ret;
8719
8720         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8721
8722         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8723         if (ret) {
8724                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8725                 return ret;
8726         }
8727
8728         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8729         if (ret) {
8730                 dev_err(&hdev->pdev->dev,
8731                         "Send reset tqp cmd fail, ret = %d\n", ret);
8732                 return ret;
8733         }
8734
8735         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8736                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8737                 if (reset_status)
8738                         break;
8739
8740                 /* Wait for tqp hw reset */
8741                 usleep_range(1000, 1200);
8742         }
8743
8744         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8745                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8746                 return -ETIME;
8747         }
8748
8749         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8750         if (ret)
8751                 dev_err(&hdev->pdev->dev,
8752                         "Deassert the soft reset fail, ret = %d\n", ret);
8753
8754         return ret;
8755 }
8756
8757 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8758 {
8759         struct hclge_dev *hdev = vport->back;
8760         int reset_try_times = 0;
8761         int reset_status;
8762         u16 queue_gid;
8763         int ret;
8764
8765         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8766
8767         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8768         if (ret) {
8769                 dev_warn(&hdev->pdev->dev,
8770                          "Send reset tqp cmd fail, ret = %d\n", ret);
8771                 return;
8772         }
8773
8774         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8775                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8776                 if (reset_status)
8777                         break;
8778
8779                 /* Wait for tqp hw reset */
8780                 usleep_range(1000, 1200);
8781         }
8782
8783         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8784                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8785                 return;
8786         }
8787
8788         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8789         if (ret)
8790                 dev_warn(&hdev->pdev->dev,
8791                          "Deassert the soft reset fail, ret = %d\n", ret);
8792 }
8793
8794 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8795 {
8796         struct hclge_vport *vport = hclge_get_vport(handle);
8797         struct hclge_dev *hdev = vport->back;
8798
8799         return hdev->fw_version;
8800 }
8801
8802 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8803 {
8804         struct phy_device *phydev = hdev->hw.mac.phydev;
8805
8806         if (!phydev)
8807                 return;
8808
8809         phy_set_asym_pause(phydev, rx_en, tx_en);
8810 }
8811
8812 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8813 {
8814         int ret;
8815
8816         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8817                 return 0;
8818
8819         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8820         if (ret)
8821                 dev_err(&hdev->pdev->dev,
8822                         "configure pauseparam error, ret = %d.\n", ret);
8823
8824         return ret;
8825 }
8826
8827 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8828 {
8829         struct phy_device *phydev = hdev->hw.mac.phydev;
8830         u16 remote_advertising = 0;
8831         u16 local_advertising;
8832         u32 rx_pause, tx_pause;
8833         u8 flowctl;
8834
8835         if (!phydev->link || !phydev->autoneg)
8836                 return 0;
8837
8838         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8839
8840         if (phydev->pause)
8841                 remote_advertising = LPA_PAUSE_CAP;
8842
8843         if (phydev->asym_pause)
8844                 remote_advertising |= LPA_PAUSE_ASYM;
8845
8846         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8847                                            remote_advertising);
8848         tx_pause = flowctl & FLOW_CTRL_TX;
8849         rx_pause = flowctl & FLOW_CTRL_RX;
8850
8851         if (phydev->duplex == HCLGE_MAC_HALF) {
8852                 tx_pause = 0;
8853                 rx_pause = 0;
8854         }
8855
8856         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8857 }
8858
8859 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8860                                  u32 *rx_en, u32 *tx_en)
8861 {
8862         struct hclge_vport *vport = hclge_get_vport(handle);
8863         struct hclge_dev *hdev = vport->back;
8864         struct phy_device *phydev = hdev->hw.mac.phydev;
8865
8866         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8867
8868         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8869                 *rx_en = 0;
8870                 *tx_en = 0;
8871                 return;
8872         }
8873
8874         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8875                 *rx_en = 1;
8876                 *tx_en = 0;
8877         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8878                 *tx_en = 1;
8879                 *rx_en = 0;
8880         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8881                 *rx_en = 1;
8882                 *tx_en = 1;
8883         } else {
8884                 *rx_en = 0;
8885                 *tx_en = 0;
8886         }
8887 }
8888
8889 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8890                                          u32 rx_en, u32 tx_en)
8891 {
8892         if (rx_en && tx_en)
8893                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8894         else if (rx_en && !tx_en)
8895                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8896         else if (!rx_en && tx_en)
8897                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8898         else
8899                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8900
8901         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8902 }
8903
8904 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8905                                 u32 rx_en, u32 tx_en)
8906 {
8907         struct hclge_vport *vport = hclge_get_vport(handle);
8908         struct hclge_dev *hdev = vport->back;
8909         struct phy_device *phydev = hdev->hw.mac.phydev;
8910         u32 fc_autoneg;
8911
8912         if (phydev) {
8913                 fc_autoneg = hclge_get_autoneg(handle);
8914                 if (auto_neg != fc_autoneg) {
8915                         dev_info(&hdev->pdev->dev,
8916                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8917                         return -EOPNOTSUPP;
8918                 }
8919         }
8920
8921         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8922                 dev_info(&hdev->pdev->dev,
8923                          "Priority flow control enabled. Cannot set link flow control.\n");
8924                 return -EOPNOTSUPP;
8925         }
8926
8927         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8928
8929         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8930
8931         if (!auto_neg)
8932                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8933
8934         if (phydev)
8935                 return phy_start_aneg(phydev);
8936
8937         return -EOPNOTSUPP;
8938 }
8939
8940 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8941                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8942 {
8943         struct hclge_vport *vport = hclge_get_vport(handle);
8944         struct hclge_dev *hdev = vport->back;
8945
8946         if (speed)
8947                 *speed = hdev->hw.mac.speed;
8948         if (duplex)
8949                 *duplex = hdev->hw.mac.duplex;
8950         if (auto_neg)
8951                 *auto_neg = hdev->hw.mac.autoneg;
8952 }
8953
8954 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8955                                  u8 *module_type)
8956 {
8957         struct hclge_vport *vport = hclge_get_vport(handle);
8958         struct hclge_dev *hdev = vport->back;
8959
8960         if (media_type)
8961                 *media_type = hdev->hw.mac.media_type;
8962
8963         if (module_type)
8964                 *module_type = hdev->hw.mac.module_type;
8965 }
8966
8967 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8968                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8969 {
8970         struct hclge_vport *vport = hclge_get_vport(handle);
8971         struct hclge_dev *hdev = vport->back;
8972         struct phy_device *phydev = hdev->hw.mac.phydev;
8973         int mdix_ctrl, mdix, is_resolved;
8974         unsigned int retval;
8975
8976         if (!phydev) {
8977                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8978                 *tp_mdix = ETH_TP_MDI_INVALID;
8979                 return;
8980         }
8981
8982         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8983
8984         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8985         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8986                                     HCLGE_PHY_MDIX_CTRL_S);
8987
8988         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8989         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8990         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8991
8992         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8993
8994         switch (mdix_ctrl) {
8995         case 0x0:
8996                 *tp_mdix_ctrl = ETH_TP_MDI;
8997                 break;
8998         case 0x1:
8999                 *tp_mdix_ctrl = ETH_TP_MDI_X;
9000                 break;
9001         case 0x3:
9002                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9003                 break;
9004         default:
9005                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9006                 break;
9007         }
9008
9009         if (!is_resolved)
9010                 *tp_mdix = ETH_TP_MDI_INVALID;
9011         else if (mdix)
9012                 *tp_mdix = ETH_TP_MDI_X;
9013         else
9014                 *tp_mdix = ETH_TP_MDI;
9015 }
9016
9017 static void hclge_info_show(struct hclge_dev *hdev)
9018 {
9019         struct device *dev = &hdev->pdev->dev;
9020
9021         dev_info(dev, "PF info begin:\n");
9022
9023         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9024         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9025         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9026         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9027         dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9028         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9029         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9030         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9031         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9032         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9033         dev_info(dev, "This is %s PF\n",
9034                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9035         dev_info(dev, "DCB %s\n",
9036                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9037         dev_info(dev, "MQPRIO %s\n",
9038                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9039
9040         dev_info(dev, "PF info end.\n");
9041 }
9042
9043 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9044                                           struct hclge_vport *vport)
9045 {
9046         struct hnae3_client *client = vport->nic.client;
9047         struct hclge_dev *hdev = ae_dev->priv;
9048         int rst_cnt = hdev->rst_stats.reset_cnt;
9049         int ret;
9050
9051         ret = client->ops->init_instance(&vport->nic);
9052         if (ret)
9053                 return ret;
9054
9055         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9056         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9057             rst_cnt != hdev->rst_stats.reset_cnt) {
9058                 ret = -EBUSY;
9059                 goto init_nic_err;
9060         }
9061
9062         /* Enable nic hw error interrupts */
9063         ret = hclge_config_nic_hw_error(hdev, true);
9064         if (ret) {
9065                 dev_err(&ae_dev->pdev->dev,
9066                         "fail(%d) to enable hw error interrupts\n", ret);
9067                 goto init_nic_err;
9068         }
9069
9070         hnae3_set_client_init_flag(client, ae_dev, 1);
9071
9072         if (netif_msg_drv(&hdev->vport->nic))
9073                 hclge_info_show(hdev);
9074
9075         return ret;
9076
9077 init_nic_err:
9078         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9079         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9080                 msleep(HCLGE_WAIT_RESET_DONE);
9081
9082         client->ops->uninit_instance(&vport->nic, 0);
9083
9084         return ret;
9085 }
9086
9087 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9088                                            struct hclge_vport *vport)
9089 {
9090         struct hnae3_client *client = vport->roce.client;
9091         struct hclge_dev *hdev = ae_dev->priv;
9092         int rst_cnt;
9093         int ret;
9094
9095         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9096             !hdev->nic_client)
9097                 return 0;
9098
9099         client = hdev->roce_client;
9100         ret = hclge_init_roce_base_info(vport);
9101         if (ret)
9102                 return ret;
9103
9104         rst_cnt = hdev->rst_stats.reset_cnt;
9105         ret = client->ops->init_instance(&vport->roce);
9106         if (ret)
9107                 return ret;
9108
9109         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9110         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9111             rst_cnt != hdev->rst_stats.reset_cnt) {
9112                 ret = -EBUSY;
9113                 goto init_roce_err;
9114         }
9115
9116         /* Enable roce ras interrupts */
9117         ret = hclge_config_rocee_ras_interrupt(hdev, true);
9118         if (ret) {
9119                 dev_err(&ae_dev->pdev->dev,
9120                         "fail(%d) to enable roce ras interrupts\n", ret);
9121                 goto init_roce_err;
9122         }
9123
9124         hnae3_set_client_init_flag(client, ae_dev, 1);
9125
9126         return 0;
9127
9128 init_roce_err:
9129         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9130         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9131                 msleep(HCLGE_WAIT_RESET_DONE);
9132
9133         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9134
9135         return ret;
9136 }
9137
9138 static int hclge_init_client_instance(struct hnae3_client *client,
9139                                       struct hnae3_ae_dev *ae_dev)
9140 {
9141         struct hclge_dev *hdev = ae_dev->priv;
9142         struct hclge_vport *vport;
9143         int i, ret;
9144
9145         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
9146                 vport = &hdev->vport[i];
9147
9148                 switch (client->type) {
9149                 case HNAE3_CLIENT_KNIC:
9150                         hdev->nic_client = client;
9151                         vport->nic.client = client;
9152                         ret = hclge_init_nic_client_instance(ae_dev, vport);
9153                         if (ret)
9154                                 goto clear_nic;
9155
9156                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9157                         if (ret)
9158                                 goto clear_roce;
9159
9160                         break;
9161                 case HNAE3_CLIENT_ROCE:
9162                         if (hnae3_dev_roce_supported(hdev)) {
9163                                 hdev->roce_client = client;
9164                                 vport->roce.client = client;
9165                         }
9166
9167                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9168                         if (ret)
9169                                 goto clear_roce;
9170
9171                         break;
9172                 default:
9173                         return -EINVAL;
9174                 }
9175         }
9176
9177         return 0;
9178
9179 clear_nic:
9180         hdev->nic_client = NULL;
9181         vport->nic.client = NULL;
9182         return ret;
9183 clear_roce:
9184         hdev->roce_client = NULL;
9185         vport->roce.client = NULL;
9186         return ret;
9187 }
9188
9189 static void hclge_uninit_client_instance(struct hnae3_client *client,
9190                                          struct hnae3_ae_dev *ae_dev)
9191 {
9192         struct hclge_dev *hdev = ae_dev->priv;
9193         struct hclge_vport *vport;
9194         int i;
9195
9196         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9197                 vport = &hdev->vport[i];
9198                 if (hdev->roce_client) {
9199                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9200                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9201                                 msleep(HCLGE_WAIT_RESET_DONE);
9202
9203                         hdev->roce_client->ops->uninit_instance(&vport->roce,
9204                                                                 0);
9205                         hdev->roce_client = NULL;
9206                         vport->roce.client = NULL;
9207                 }
9208                 if (client->type == HNAE3_CLIENT_ROCE)
9209                         return;
9210                 if (hdev->nic_client && client->ops->uninit_instance) {
9211                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9212                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9213                                 msleep(HCLGE_WAIT_RESET_DONE);
9214
9215                         client->ops->uninit_instance(&vport->nic, 0);
9216                         hdev->nic_client = NULL;
9217                         vport->nic.client = NULL;
9218                 }
9219         }
9220 }
9221
9222 static int hclge_pci_init(struct hclge_dev *hdev)
9223 {
9224         struct pci_dev *pdev = hdev->pdev;
9225         struct hclge_hw *hw;
9226         int ret;
9227
9228         ret = pci_enable_device(pdev);
9229         if (ret) {
9230                 dev_err(&pdev->dev, "failed to enable PCI device\n");
9231                 return ret;
9232         }
9233
9234         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9235         if (ret) {
9236                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9237                 if (ret) {
9238                         dev_err(&pdev->dev,
9239                                 "can't set consistent PCI DMA");
9240                         goto err_disable_device;
9241                 }
9242                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9243         }
9244
9245         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9246         if (ret) {
9247                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9248                 goto err_disable_device;
9249         }
9250
9251         pci_set_master(pdev);
9252         hw = &hdev->hw;
9253         hw->io_base = pcim_iomap(pdev, 2, 0);
9254         if (!hw->io_base) {
9255                 dev_err(&pdev->dev, "Can't map configuration register space\n");
9256                 ret = -ENOMEM;
9257                 goto err_clr_master;
9258         }
9259
9260         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9261
9262         return 0;
9263 err_clr_master:
9264         pci_clear_master(pdev);
9265         pci_release_regions(pdev);
9266 err_disable_device:
9267         pci_disable_device(pdev);
9268
9269         return ret;
9270 }
9271
9272 static void hclge_pci_uninit(struct hclge_dev *hdev)
9273 {
9274         struct pci_dev *pdev = hdev->pdev;
9275
9276         pcim_iounmap(pdev, hdev->hw.io_base);
9277         pci_free_irq_vectors(pdev);
9278         pci_clear_master(pdev);
9279         pci_release_mem_regions(pdev);
9280         pci_disable_device(pdev);
9281 }
9282
9283 static void hclge_state_init(struct hclge_dev *hdev)
9284 {
9285         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9286         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9287         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9288         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9289         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9290         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9291         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9292 }
9293
9294 static void hclge_state_uninit(struct hclge_dev *hdev)
9295 {
9296         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9297         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9298
9299         if (hdev->reset_timer.function)
9300                 del_timer_sync(&hdev->reset_timer);
9301         if (hdev->service_task.work.func)
9302                 cancel_delayed_work_sync(&hdev->service_task);
9303 }
9304
9305 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9306 {
9307 #define HCLGE_FLR_RETRY_WAIT_MS 500
9308 #define HCLGE_FLR_RETRY_CNT     5
9309
9310         struct hclge_dev *hdev = ae_dev->priv;
9311         int retry_cnt = 0;
9312         int ret;
9313
9314 retry:
9315         down(&hdev->reset_sem);
9316         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9317         hdev->reset_type = HNAE3_FLR_RESET;
9318         ret = hclge_reset_prepare(hdev);
9319         if (ret) {
9320                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9321                         ret);
9322                 if (hdev->reset_pending ||
9323                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9324                         dev_err(&hdev->pdev->dev,
9325                                 "reset_pending:0x%lx, retry_cnt:%d\n",
9326                                 hdev->reset_pending, retry_cnt);
9327                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9328                         up(&hdev->reset_sem);
9329                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
9330                         goto retry;
9331                 }
9332         }
9333
9334         /* disable misc vector before FLR done */
9335         hclge_enable_vector(&hdev->misc_vector, false);
9336         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9337         hdev->rst_stats.flr_rst_cnt++;
9338 }
9339
9340 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9341 {
9342         struct hclge_dev *hdev = ae_dev->priv;
9343         int ret;
9344
9345         hclge_enable_vector(&hdev->misc_vector, true);
9346
9347         ret = hclge_reset_rebuild(hdev);
9348         if (ret)
9349                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9350
9351         hdev->reset_type = HNAE3_NONE_RESET;
9352         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9353         up(&hdev->reset_sem);
9354 }
9355
9356 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9357 {
9358         u16 i;
9359
9360         for (i = 0; i < hdev->num_alloc_vport; i++) {
9361                 struct hclge_vport *vport = &hdev->vport[i];
9362                 int ret;
9363
9364                  /* Send cmd to clear VF's FUNC_RST_ING */
9365                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9366                 if (ret)
9367                         dev_warn(&hdev->pdev->dev,
9368                                  "clear vf(%u) rst failed %d!\n",
9369                                  vport->vport_id, ret);
9370         }
9371 }
9372
9373 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9374 {
9375         struct pci_dev *pdev = ae_dev->pdev;
9376         struct hclge_dev *hdev;
9377         int ret;
9378
9379         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9380         if (!hdev) {
9381                 ret = -ENOMEM;
9382                 goto out;
9383         }
9384
9385         hdev->pdev = pdev;
9386         hdev->ae_dev = ae_dev;
9387         hdev->reset_type = HNAE3_NONE_RESET;
9388         hdev->reset_level = HNAE3_FUNC_RESET;
9389         ae_dev->priv = hdev;
9390
9391         /* HW supports 2 layers of VLAN tags */
9392         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9393
9394         mutex_init(&hdev->vport_lock);
9395         spin_lock_init(&hdev->fd_rule_lock);
9396         sema_init(&hdev->reset_sem, 1);
9397
9398         ret = hclge_pci_init(hdev);
9399         if (ret)
9400                 goto out;
9401
9402         /* Firmware command queue initialize */
9403         ret = hclge_cmd_queue_init(hdev);
9404         if (ret)
9405                 goto err_pci_uninit;
9406
9407         /* Firmware command initialize */
9408         ret = hclge_cmd_init(hdev);
9409         if (ret)
9410                 goto err_cmd_uninit;
9411
9412         ret = hclge_get_cap(hdev);
9413         if (ret)
9414                 goto err_cmd_uninit;
9415
9416         ret = hclge_configure(hdev);
9417         if (ret) {
9418                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9419                 goto err_cmd_uninit;
9420         }
9421
9422         ret = hclge_init_msi(hdev);
9423         if (ret) {
9424                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9425                 goto err_cmd_uninit;
9426         }
9427
9428         ret = hclge_misc_irq_init(hdev);
9429         if (ret)
9430                 goto err_msi_uninit;
9431
9432         ret = hclge_alloc_tqps(hdev);
9433         if (ret) {
9434                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9435                 goto err_msi_irq_uninit;
9436         }
9437
9438         ret = hclge_alloc_vport(hdev);
9439         if (ret)
9440                 goto err_msi_irq_uninit;
9441
9442         ret = hclge_map_tqp(hdev);
9443         if (ret)
9444                 goto err_msi_irq_uninit;
9445
9446         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9447                 ret = hclge_mac_mdio_config(hdev);
9448                 if (ret)
9449                         goto err_msi_irq_uninit;
9450         }
9451
9452         ret = hclge_init_umv_space(hdev);
9453         if (ret)
9454                 goto err_mdiobus_unreg;
9455
9456         ret = hclge_mac_init(hdev);
9457         if (ret) {
9458                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9459                 goto err_mdiobus_unreg;
9460         }
9461
9462         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9463         if (ret) {
9464                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9465                 goto err_mdiobus_unreg;
9466         }
9467
9468         ret = hclge_config_gro(hdev, true);
9469         if (ret)
9470                 goto err_mdiobus_unreg;
9471
9472         ret = hclge_init_vlan_config(hdev);
9473         if (ret) {
9474                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9475                 goto err_mdiobus_unreg;
9476         }
9477
9478         ret = hclge_tm_schd_init(hdev);
9479         if (ret) {
9480                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9481                 goto err_mdiobus_unreg;
9482         }
9483
9484         hclge_rss_init_cfg(hdev);
9485         ret = hclge_rss_init_hw(hdev);
9486         if (ret) {
9487                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9488                 goto err_mdiobus_unreg;
9489         }
9490
9491         ret = init_mgr_tbl(hdev);
9492         if (ret) {
9493                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9494                 goto err_mdiobus_unreg;
9495         }
9496
9497         ret = hclge_init_fd_config(hdev);
9498         if (ret) {
9499                 dev_err(&pdev->dev,
9500                         "fd table init fail, ret=%d\n", ret);
9501                 goto err_mdiobus_unreg;
9502         }
9503
9504         INIT_KFIFO(hdev->mac_tnl_log);
9505
9506         hclge_dcb_ops_set(hdev);
9507
9508         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9509         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9510
9511         /* Set up affinity after the service timer setup because
9512          * add_timer_on() is called in the affinity notify callback.
9513          */
9514         hclge_misc_affinity_setup(hdev);
9515
9516         hclge_clear_all_event_cause(hdev);
9517         hclge_clear_resetting_state(hdev);
9518
9519         /* Log and clear the hw errors that have already occurred */
9520         hclge_handle_all_hns_hw_errors(ae_dev);
9521
9522         /* request a delayed reset for the error recovery, because an immediate
9523          * global reset on a PF may affect the pending initialization of other PFs
9524          */
9525         if (ae_dev->hw_err_reset_req) {
9526                 enum hnae3_reset_type reset_level;
9527
9528                 reset_level = hclge_get_reset_level(ae_dev,
9529                                                     &ae_dev->hw_err_reset_req);
9530                 hclge_set_def_reset_request(ae_dev, reset_level);
9531                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9532         }
9533
9534         /* Enable MISC vector(vector0) */
9535         hclge_enable_vector(&hdev->misc_vector, true);
9536
9537         hclge_state_init(hdev);
9538         hdev->last_reset_time = jiffies;
9539
9540         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9541                  HCLGE_DRIVER_NAME);
9542
9543         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9544
9545         return 0;
9546
9547 err_mdiobus_unreg:
9548         if (hdev->hw.mac.phydev)
9549                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9550 err_msi_irq_uninit:
9551         hclge_misc_irq_uninit(hdev);
9552 err_msi_uninit:
9553         pci_free_irq_vectors(pdev);
9554 err_cmd_uninit:
9555         hclge_cmd_uninit(hdev);
9556 err_pci_uninit:
9557         pcim_iounmap(pdev, hdev->hw.io_base);
9558         pci_clear_master(pdev);
9559         pci_release_regions(pdev);
9560         pci_disable_device(pdev);
9561 out:
9562         return ret;
9563 }
9564
9565 static void hclge_stats_clear(struct hclge_dev *hdev)
9566 {
9567         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9568 }
9569
9570 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9571 {
9572         return hclge_config_switch_param(hdev, vf, enable,
9573                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
9574 }
9575
9576 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9577 {
9578         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9579                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
9580                                           enable, vf);
9581 }
9582
9583 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9584 {
9585         int ret;
9586
9587         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9588         if (ret) {
9589                 dev_err(&hdev->pdev->dev,
9590                         "Set vf %d mac spoof check %s failed, ret=%d\n",
9591                         vf, enable ? "on" : "off", ret);
9592                 return ret;
9593         }
9594
9595         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9596         if (ret)
9597                 dev_err(&hdev->pdev->dev,
9598                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
9599                         vf, enable ? "on" : "off", ret);
9600
9601         return ret;
9602 }
9603
9604 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9605                                  bool enable)
9606 {
9607         struct hclge_vport *vport = hclge_get_vport(handle);
9608         struct hclge_dev *hdev = vport->back;
9609         u32 new_spoofchk = enable ? 1 : 0;
9610         int ret;
9611
9612         if (hdev->pdev->revision == 0x20)
9613                 return -EOPNOTSUPP;
9614
9615         vport = hclge_get_vf_vport(hdev, vf);
9616         if (!vport)
9617                 return -EINVAL;
9618
9619         if (vport->vf_info.spoofchk == new_spoofchk)
9620                 return 0;
9621
9622         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9623                 dev_warn(&hdev->pdev->dev,
9624                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9625                          vf);
9626         else if (enable && hclge_is_umv_space_full(vport))
9627                 dev_warn(&hdev->pdev->dev,
9628                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9629                          vf);
9630
9631         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9632         if (ret)
9633                 return ret;
9634
9635         vport->vf_info.spoofchk = new_spoofchk;
9636         return 0;
9637 }
9638
9639 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9640 {
9641         struct hclge_vport *vport = hdev->vport;
9642         int ret;
9643         int i;
9644
9645         if (hdev->pdev->revision == 0x20)
9646                 return 0;
9647
9648         /* resume the vf spoof check state after reset */
9649         for (i = 0; i < hdev->num_alloc_vport; i++) {
9650                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9651                                                vport->vf_info.spoofchk);
9652                 if (ret)
9653                         return ret;
9654
9655                 vport++;
9656         }
9657
9658         return 0;
9659 }
9660
9661 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9662 {
9663         struct hclge_vport *vport = hclge_get_vport(handle);
9664         struct hclge_dev *hdev = vport->back;
9665         u32 new_trusted = enable ? 1 : 0;
9666         bool en_bc_pmc;
9667         int ret;
9668
9669         vport = hclge_get_vf_vport(hdev, vf);
9670         if (!vport)
9671                 return -EINVAL;
9672
9673         if (vport->vf_info.trusted == new_trusted)
9674                 return 0;
9675
9676         /* Disable promisc mode for VF if it is not trusted any more. */
9677         if (!enable && vport->vf_info.promisc_enable) {
9678                 en_bc_pmc = hdev->pdev->revision != 0x20;
9679                 ret = hclge_set_vport_promisc_mode(vport, false, false,
9680                                                    en_bc_pmc);
9681                 if (ret)
9682                         return ret;
9683                 vport->vf_info.promisc_enable = 0;
9684                 hclge_inform_vf_promisc_info(vport);
9685         }
9686
9687         vport->vf_info.trusted = new_trusted;
9688
9689         return 0;
9690 }
9691
9692 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9693 {
9694         int ret;
9695         int vf;
9696
9697         /* reset vf rate to default value */
9698         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9699                 struct hclge_vport *vport = &hdev->vport[vf];
9700
9701                 vport->vf_info.max_tx_rate = 0;
9702                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9703                 if (ret)
9704                         dev_err(&hdev->pdev->dev,
9705                                 "vf%d failed to reset to default, ret=%d\n",
9706                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9707         }
9708 }
9709
9710 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9711                                      int min_tx_rate, int max_tx_rate)
9712 {
9713         if (min_tx_rate != 0 ||
9714             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9715                 dev_err(&hdev->pdev->dev,
9716                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9717                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9718                 return -EINVAL;
9719         }
9720
9721         return 0;
9722 }
9723
9724 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9725                              int min_tx_rate, int max_tx_rate, bool force)
9726 {
9727         struct hclge_vport *vport = hclge_get_vport(handle);
9728         struct hclge_dev *hdev = vport->back;
9729         int ret;
9730
9731         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9732         if (ret)
9733                 return ret;
9734
9735         vport = hclge_get_vf_vport(hdev, vf);
9736         if (!vport)
9737                 return -EINVAL;
9738
9739         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9740                 return 0;
9741
9742         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9743         if (ret)
9744                 return ret;
9745
9746         vport->vf_info.max_tx_rate = max_tx_rate;
9747
9748         return 0;
9749 }
9750
9751 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9752 {
9753         struct hnae3_handle *handle = &hdev->vport->nic;
9754         struct hclge_vport *vport;
9755         int ret;
9756         int vf;
9757
9758         /* resume the vf max_tx_rate after reset */
9759         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9760                 vport = hclge_get_vf_vport(hdev, vf);
9761                 if (!vport)
9762                         return -EINVAL;
9763
9764                 /* zero means max rate; after reset, the firmware has already
9765                  * set it to max rate, so just continue.
9766                  */
9767                 if (!vport->vf_info.max_tx_rate)
9768                         continue;
9769
9770                 ret = hclge_set_vf_rate(handle, vf, 0,
9771                                         vport->vf_info.max_tx_rate, true);
9772                 if (ret) {
9773                         dev_err(&hdev->pdev->dev,
9774                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9775                                 vf, vport->vf_info.max_tx_rate, ret);
9776                         return ret;
9777                 }
9778         }
9779
9780         return 0;
9781 }
9782
9783 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9784 {
9785         struct hclge_vport *vport = hdev->vport;
9786         int i;
9787
9788         for (i = 0; i < hdev->num_alloc_vport; i++) {
9789                 hclge_vport_stop(vport);
9790                 vport++;
9791         }
9792 }
9793
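     /* Re-initialize the hclge device after a reset: bring the command
      * queue, MAC, VLAN, TM, RSS and flow director configuration back up,
      * re-enable the hw error interrupts and restore the per-VF settings.
      */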
9794 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9795 {
9796         struct hclge_dev *hdev = ae_dev->priv;
9797         struct pci_dev *pdev = ae_dev->pdev;
9798         int ret;
9799
9800         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9801
9802         hclge_stats_clear(hdev);
9803         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9804         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9805
9806         ret = hclge_cmd_init(hdev);
9807         if (ret) {
9808                 dev_err(&pdev->dev, "Cmd queue init failed\n");
9809                 return ret;
9810         }
9811
9812         ret = hclge_map_tqp(hdev);
9813         if (ret) {
9814                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9815                 return ret;
9816         }
9817
9818         hclge_reset_umv_space(hdev);
9819
9820         ret = hclge_mac_init(hdev);
9821         if (ret) {
9822                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9823                 return ret;
9824         }
9825
9826         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9827         if (ret) {
9828                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9829                 return ret;
9830         }
9831
9832         ret = hclge_config_gro(hdev, true);
9833         if (ret)
9834                 return ret;
9835
9836         ret = hclge_init_vlan_config(hdev);
9837         if (ret) {
9838                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9839                 return ret;
9840         }
9841
9842         ret = hclge_tm_init_hw(hdev, true);
9843         if (ret) {
9844                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9845                 return ret;
9846         }
9847
9848         ret = hclge_rss_init_hw(hdev);
9849         if (ret) {
9850                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9851                 return ret;
9852         }
9853
9854         ret = init_mgr_tbl(hdev);
9855         if (ret) {
9856                 dev_err(&pdev->dev,
9857                         "failed to reinit manager table, ret = %d\n", ret);
9858                 return ret;
9859         }
9860
9861         ret = hclge_init_fd_config(hdev);
9862         if (ret) {
9863                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9864                 return ret;
9865         }
9866
9867         /* Log and clear the hw errors that have already occurred */
9868         hclge_handle_all_hns_hw_errors(ae_dev);
9869
9870         /* Re-enable the hw error interrupts because
9871          * the interrupts get disabled on global reset.
9872          */
9873         ret = hclge_config_nic_hw_error(hdev, true);
9874         if (ret) {
9875                 dev_err(&pdev->dev,
9876                         "fail(%d) to re-enable NIC hw error interrupts\n",
9877                         ret);
9878                 return ret;
9879         }
9880
9881         if (hdev->roce_client) {
9882                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9883                 if (ret) {
9884                         dev_err(&pdev->dev,
9885                                 "fail(%d) to re-enable roce ras interrupts\n",
9886                                 ret);
9887                         return ret;
9888                 }
9889         }
9890
9891         hclge_reset_vport_state(hdev);
9892         ret = hclge_reset_vport_spoofchk(hdev);
9893         if (ret)
9894                 return ret;
9895
9896         ret = hclge_resume_vf_rate(hdev);
9897         if (ret)
9898                 return ret;
9899
9900         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9901                  HCLGE_DRIVER_NAME);
9902
9903         return 0;
9904 }
9905
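/* Tear down the PF: stop VF rate limiting, unregister the MDIO bus when a
 * PHY is attached, release UMV space, disable the misc vector and all
 * hardware error interrupts, then uninit the command queue, misc IRQ and
 * PCI resources.
 */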
9906 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9907 {
9908         struct hclge_dev *hdev = ae_dev->priv;
9909         struct hclge_mac *mac = &hdev->hw.mac;
9910
9911         hclge_reset_vf_rate(hdev);
9912         hclge_misc_affinity_teardown(hdev);
9913         hclge_state_uninit(hdev);
9914
9915         if (mac->phydev)
9916                 mdiobus_unregister(mac->mdio_bus);
9917
9918         hclge_uninit_umv_space(hdev);
9919
9920         /* Disable the MISC vector (vector0) */
9921         hclge_enable_vector(&hdev->misc_vector, false);
9922         synchronize_irq(hdev->misc_vector.vector_irq);
9923
9924         /* Disable all hw interrupts */
9925         hclge_config_mac_tnl_int(hdev, false);
9926         hclge_config_nic_hw_error(hdev, false);
9927         hclge_config_rocee_ras_interrupt(hdev, false);
9928
9929         hclge_cmd_uninit(hdev);
9930         hclge_misc_irq_uninit(hdev);
9931         hclge_pci_uninit(hdev);
9932         mutex_destroy(&hdev->vport_lock);
9933         hclge_uninit_vport_mac_table(hdev);
9934         hclge_uninit_vport_vlan_table(hdev);
9935         ae_dev->priv = NULL;
9936 }
9937
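/* Maximum combined channels: bounded by both the device RSS size limit and
 * the TQPs this vport can spread across each TC.
 */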
9938 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9939 {
9940         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9941         struct hclge_vport *vport = hclge_get_vport(handle);
9942         struct hclge_dev *hdev = vport->back;
9943
9944         return min_t(u32, hdev->rss_size_max,
9945                      vport->alloc_tqps / kinfo->num_tc);
9946 }
9947
9948 static void hclge_get_channels(struct hnae3_handle *handle,
9949                                struct ethtool_channels *ch)
9950 {
9951         ch->max_combined = hclge_get_max_channels(handle);
9952         ch->other_count = 1;
9953         ch->max_other = 1;
9954         ch->combined_count = handle->kinfo.rss_size;
9955 }
9956
9957 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9958                                         u16 *alloc_tqps, u16 *max_rss_size)
9959 {
9960         struct hclge_vport *vport = hclge_get_vport(handle);
9961         struct hclge_dev *hdev = vport->back;
9962
9963         *alloc_tqps = vport->alloc_tqps;
9964         *max_rss_size = hdev->rss_size_max;
9965 }
9966
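/* Handle an ethtool channel count change: record the requested queue
 * number, remap the vport TQPs, reprogram the RSS TC mode for the new
 * rss_size and, unless the user has supplied a custom RSS indirection
 * table, rebuild the table to cover the new queue range.
 */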
9967 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9968                               bool rxfh_configured)
9969 {
9970         struct hclge_vport *vport = hclge_get_vport(handle);
9971         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9972         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9973         struct hclge_dev *hdev = vport->back;
9974         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9975         u16 cur_rss_size = kinfo->rss_size;
9976         u16 cur_tqps = kinfo->num_tqps;
9977         u16 tc_valid[HCLGE_MAX_TC_NUM];
9978         u16 roundup_size;
9979         u32 *rss_indir;
9980         unsigned int i;
9981         int ret;
9982
9983         kinfo->req_rss_size = new_tqps_num;
9984
9985         ret = hclge_tm_vport_map_update(hdev);
9986         if (ret) {
9987                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
9988                 return ret;
9989         }
9990
9991         roundup_size = roundup_pow_of_two(kinfo->rss_size);
9992         roundup_size = ilog2(roundup_size);
9993         /* Set the RSS TC mode according to the new RSS size */
9994         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9995                 tc_valid[i] = 0;
9996
9997                 if (!(hdev->hw_tc_map & BIT(i)))
9998                         continue;
9999
10000                 tc_valid[i] = 1;
10001                 tc_size[i] = roundup_size;
10002                 tc_offset[i] = kinfo->rss_size * i;
10003         }
10004         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10005         if (ret)
10006                 return ret;
10007
10008         /* RSS indirection table has been configured by the user */
10009         if (rxfh_configured)
10010                 goto out;
10011
10012         /* Reinitialize the RSS indirection table according to the new RSS size */
10013         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10014         if (!rss_indir)
10015                 return -ENOMEM;
10016
10017         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10018                 rss_indir[i] = i % kinfo->rss_size;
10019
10020         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10021         if (ret)
10022                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret = %d\n",
10023                         ret);
10024
10025         kfree(rss_indir);
10026
10027 out:
10028         if (!ret)
10029                 dev_info(&hdev->pdev->dev,
10030                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10031                          cur_rss_size, kinfo->rss_size,
10032                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
10033
10034         return ret;
10035 }
10036
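/* Query how many 32-bit and 64-bit registers the firmware register dump
 * contains.
 */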
10037 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10038                               u32 *regs_num_64_bit)
10039 {
10040         struct hclge_desc desc;
10041         u32 total_num;
10042         int ret;
10043
10044         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10045         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10046         if (ret) {
10047                 dev_err(&hdev->pdev->dev,
10048                         "Query register number cmd failed, ret = %d.\n", ret);
10049                 return ret;
10050         }
10051
10052         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10053         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10054
10055         total_num = *regs_num_32_bit + *regs_num_64_bit;
10056         if (!total_num)
10057                 return -EINVAL;
10058
10059         return 0;
10060 }
10061
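/* Read regs_num 32-bit registers from firmware into the data buffer,
 * splitting the query across as many descriptors as needed.
 */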
10062 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10063                                  void *data)
10064 {
10065 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10066 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10067
10068         struct hclge_desc *desc;
10069         u32 *reg_val = data;
10070         __le32 *desc_data;
10071         int nodata_num;
10072         int cmd_num;
10073         int i, k, n;
10074         int ret;
10075
10076         if (regs_num == 0)
10077                 return 0;
10078
10079         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10080         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10081                                HCLGE_32_BIT_REG_RTN_DATANUM);
10082         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10083         if (!desc)
10084                 return -ENOMEM;
10085
10086         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10087         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10088         if (ret) {
10089                 dev_err(&hdev->pdev->dev,
10090                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
10091                 kfree(desc);
10092                 return ret;
10093         }
10094
10095         for (i = 0; i < cmd_num; i++) {
10096                 if (i == 0) {
10097                         desc_data = (__le32 *)(&desc[i].data[0]);
10098                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10099                 } else {
10100                         desc_data = (__le32 *)(&desc[i]);
10101                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
10102                 }
10103                 for (k = 0; k < n; k++) {
10104                         *reg_val++ = le32_to_cpu(*desc_data++);
10105
10106                         regs_num--;
10107                         if (!regs_num)
10108                                 break;
10109                 }
10110         }
10111
10112         kfree(desc);
10113         return 0;
10114 }
10115
10116 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10117                                  void *data)
10118 {
10119 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10120 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10121
10122         struct hclge_desc *desc;
10123         u64 *reg_val = data;
10124         __le64 *desc_data;
10125         int nodata_len;
10126         int cmd_num;
10127         int i, k, n;
10128         int ret;
10129
10130         if (regs_num == 0)
10131                 return 0;
10132
10133         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10134         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10135                                HCLGE_64_BIT_REG_RTN_DATANUM);
10136         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10137         if (!desc)
10138                 return -ENOMEM;
10139
10140         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10141         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10142         if (ret) {
10143                 dev_err(&hdev->pdev->dev,
10144                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
10145                 kfree(desc);
10146                 return ret;
10147         }
10148
10149         for (i = 0; i < cmd_num; i++) {
10150                 if (i == 0) {
10151                         desc_data = (__le64 *)(&desc[i].data[0]);
10152                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10153                 } else {
10154                         desc_data = (__le64 *)(&desc[i]);
10155                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
10156                 }
10157                 for (k = 0; k < n; k++) {
10158                         *reg_val++ = le64_to_cpu(*desc_data++);
10159
10160                         regs_num--;
10161                         if (!regs_num)
10162                                 break;
10163                 }
10164         }
10165
10166         kfree(desc);
10167         return 0;
10168 }
10169
10170 #define MAX_SEPARATE_NUM        4
10171 #define SEPARATOR_VALUE         0xFDFCFBFA
10172 #define REG_NUM_PER_LINE        4
10173 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
10174 #define REG_SEPARATOR_LINE      1
10175 #define REG_NUM_REMAIN_MASK     3
10176 #define BD_LIST_MAX_NUM         30
10177
10178 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10179 {
10180         /* prepare 4 commands to query DFX BD number */
10181         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10182         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10183         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10184         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10185         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10186         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10187         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10188
10189         return hclge_cmd_send(&hdev->hw, desc, 4);
10190 }
10191
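/* Query the number of BDs needed for each DFX register type; the result is
 * stored in bd_num_list in hclge_dfx_bd_offset_list order.
 */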
10192 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10193                                     int *bd_num_list,
10194                                     u32 type_num)
10195 {
10196         u32 entries_per_desc, desc_index, index, offset, i;
10197         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10198         int ret;
10199
10200         ret = hclge_query_bd_num_cmd_send(hdev, desc);
10201         if (ret) {
10202                 dev_err(&hdev->pdev->dev,
10203                         "Get dfx bd num fail, status is %d.\n", ret);
10204                 return ret;
10205         }
10206
10207         entries_per_desc = ARRAY_SIZE(desc[0].data);
10208         for (i = 0; i < type_num; i++) {
10209                 offset = hclge_dfx_bd_offset_list[i];
10210                 index = offset % entries_per_desc;
10211                 desc_index = offset / entries_per_desc;
10212                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10213         }
10214
10215         return ret;
10216 }
10217
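/* Issue one multi-BD query for a DFX register type: every descriptor except
 * the last carries the NEXT flag so firmware handles them as one command.
 */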
10218 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10219                                   struct hclge_desc *desc_src, int bd_num,
10220                                   enum hclge_opcode_type cmd)
10221 {
10222         struct hclge_desc *desc = desc_src;
10223         int i, ret;
10224
10225         hclge_cmd_setup_basic_desc(desc, cmd, true);
10226         for (i = 0; i < bd_num - 1; i++) {
10227                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10228                 desc++;
10229                 hclge_cmd_setup_basic_desc(desc, cmd, true);
10230         }
10231
10232         desc = desc_src;
10233         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10234         if (ret)
10235                 dev_err(&hdev->pdev->dev,
10236                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10237                         cmd, ret);
10238
10239         return ret;
10240 }
10241
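/* Copy register values out of the descriptors into the dump buffer, pad the
 * tail with separator markers and return the number of words written.
 */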
10242 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10243                                     void *data)
10244 {
10245         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10246         struct hclge_desc *desc = desc_src;
10247         u32 *reg = data;
10248
10249         entries_per_desc = ARRAY_SIZE(desc->data);
10250         reg_num = entries_per_desc * bd_num;
10251         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10252         for (i = 0; i < reg_num; i++) {
10253                 index = i % entries_per_desc;
10254                 desc_index = i / entries_per_desc;
10255                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10256         }
10257         for (i = 0; i < separator_num; i++)
10258                 *reg++ = SEPARATOR_VALUE;
10259
10260         return reg_num + separator_num;
10261 }
10262
10263 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10264 {
10265         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10266         int data_len_per_desc, data_len, bd_num, i;
10267         int bd_num_list[BD_LIST_MAX_NUM];
10268         int ret;
10269
10270         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10271         if (ret) {
10272                 dev_err(&hdev->pdev->dev,
10273                         "Get dfx reg bd num fail, status is %d.\n", ret);
10274                 return ret;
10275         }
10276
10277         data_len_per_desc = sizeof_field(struct hclge_desc, data);
10278         *len = 0;
10279         for (i = 0; i < dfx_reg_type_num; i++) {
10280                 bd_num = bd_num_list[i];
10281                 data_len = data_len_per_desc * bd_num;
10282                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10283         }
10284
10285         return ret;
10286 }
10287
10288 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10289 {
10290         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10291         int bd_num, bd_num_max, buf_len, i;
10292         int bd_num_list[BD_LIST_MAX_NUM];
10293         struct hclge_desc *desc_src;
10294         u32 *reg = data;
10295         int ret;
10296
10297         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10298         if (ret) {
10299                 dev_err(&hdev->pdev->dev,
10300                         "Get dfx reg bd num fail, status is %d.\n", ret);
10301                 return ret;
10302         }
10303
10304         bd_num_max = bd_num_list[0];
10305         for (i = 1; i < dfx_reg_type_num; i++)
10306                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10307
10308         buf_len = sizeof(*desc_src) * bd_num_max;
10309         desc_src = kzalloc(buf_len, GFP_KERNEL);
10310         if (!desc_src)
10311                 return -ENOMEM;
10312
10313         for (i = 0; i < dfx_reg_type_num; i++) {
10314                 bd_num = bd_num_list[i];
10315                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10316                                              hclge_dfx_reg_opcode_list[i]);
10317                 if (ret) {
10318                         dev_err(&hdev->pdev->dev,
10319                                 "Get dfx reg fail, status is %d.\n", ret);
10320                         break;
10321                 }
10322
10323                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10324         }
10325
10326         kfree(desc_src);
10327         return ret;
10328 }
10329
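/* Read the per-PF registers straight from PCIe register space: command
 * queue, common, per-ring and per-vector interrupt registers, each group
 * padded with separator values. Returns the number of words written.
 */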
10330 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10331                               struct hnae3_knic_private_info *kinfo)
10332 {
10333 #define HCLGE_RING_REG_OFFSET           0x200
10334 #define HCLGE_RING_INT_REG_OFFSET       0x4
10335
10336         int i, j, reg_num, separator_num;
10337         int data_num_sum;
10338         u32 *reg = data;
10339
10340         /* fetch per-PF register values from the PF PCIe register space */
10341         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10342         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10343         for (i = 0; i < reg_num; i++)
10344                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10345         for (i = 0; i < separator_num; i++)
10346                 *reg++ = SEPARATOR_VALUE;
10347         data_num_sum = reg_num + separator_num;
10348
10349         reg_num = ARRAY_SIZE(common_reg_addr_list);
10350         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10351         for (i = 0; i < reg_num; i++)
10352                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10353         for (i = 0; i < separator_num; i++)
10354                 *reg++ = SEPARATOR_VALUE;
10355         data_num_sum += reg_num + separator_num;
10356
10357         reg_num = ARRAY_SIZE(ring_reg_addr_list);
10358         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10359         for (j = 0; j < kinfo->num_tqps; j++) {
10360                 for (i = 0; i < reg_num; i++)
10361                         *reg++ = hclge_read_dev(&hdev->hw,
10362                                                 ring_reg_addr_list[i] +
10363                                                 HCLGE_RING_REG_OFFSET * j);
10364                 for (i = 0; i < separator_num; i++)
10365                         *reg++ = SEPARATOR_VALUE;
10366         }
10367         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10368
10369         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10370         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10371         for (j = 0; j < hdev->num_msi_used - 1; j++) {
10372                 for (i = 0; i < reg_num; i++)
10373                         *reg++ = hclge_read_dev(&hdev->hw,
10374                                                 tqp_intr_reg_addr_list[i] +
10375                                                 HCLGE_RING_INT_REG_OFFSET * j);
10376                 for (i = 0; i < separator_num; i++)
10377                         *reg++ = SEPARATOR_VALUE;
10378         }
10379         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10380
10381         return data_num_sum;
10382 }
10383
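/* Total length of the ethtool register dump: direct PF registers, the
 * 32-bit and 64-bit firmware dumps and the DFX registers, all rounded up to
 * whole separator-terminated lines.
 */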
10384 static int hclge_get_regs_len(struct hnae3_handle *handle)
10385 {
10386         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10387         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10388         struct hclge_vport *vport = hclge_get_vport(handle);
10389         struct hclge_dev *hdev = vport->back;
10390         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10391         int regs_lines_32_bit, regs_lines_64_bit;
10392         int ret;
10393
10394         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10395         if (ret) {
10396                 dev_err(&hdev->pdev->dev,
10397                         "Get register number failed, ret = %d.\n", ret);
10398                 return ret;
10399         }
10400
10401         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10402         if (ret) {
10403                 dev_err(&hdev->pdev->dev,
10404                         "Get dfx reg len failed, ret = %d.\n", ret);
10405                 return ret;
10406         }
10407
10408         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10409                 REG_SEPARATOR_LINE;
10410         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10411                 REG_SEPARATOR_LINE;
10412         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10413                 REG_SEPARATOR_LINE;
10414         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10415                 REG_SEPARATOR_LINE;
10416         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10417                 REG_SEPARATOR_LINE;
10418         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10419                 REG_SEPARATOR_LINE;
10420
10421         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10422                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10423                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10424 }
10425
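/* Fill the ethtool register dump in the same order that hclge_get_regs_len
 * accounts for it: PF registers, 32-bit registers, 64-bit registers, then
 * the DFX registers.
 */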
10426 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10427                            void *data)
10428 {
10429         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10430         struct hclge_vport *vport = hclge_get_vport(handle);
10431         struct hclge_dev *hdev = vport->back;
10432         u32 regs_num_32_bit, regs_num_64_bit;
10433         int i, reg_num, separator_num, ret;
10434         u32 *reg = data;
10435
10436         *version = hdev->fw_version;
10437
10438         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10439         if (ret) {
10440                 dev_err(&hdev->pdev->dev,
10441                         "Get register number failed, ret = %d.\n", ret);
10442                 return;
10443         }
10444
10445         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10446
10447         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10448         if (ret) {
10449                 dev_err(&hdev->pdev->dev,
10450                         "Get 32 bit register failed, ret = %d.\n", ret);
10451                 return;
10452         }
10453         reg_num = regs_num_32_bit;
10454         reg += reg_num;
10455         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10456         for (i = 0; i < separator_num; i++)
10457                 *reg++ = SEPARATOR_VALUE;
10458
10459         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10460         if (ret) {
10461                 dev_err(&hdev->pdev->dev,
10462                         "Get 64 bit register failed, ret = %d.\n", ret);
10463                 return;
10464         }
10465         reg_num = regs_num_64_bit * 2;
10466         reg += reg_num;
10467         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10468         for (i = 0; i < separator_num; i++)
10469                 *reg++ = SEPARATOR_VALUE;
10470
10471         ret = hclge_get_dfx_reg(hdev, reg);
10472         if (ret)
10473                 dev_err(&hdev->pdev->dev,
10474                         "Get dfx register failed, ret = %d.\n", ret);
10475 }
10476
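/* Program the locate LED state via the LED status config command */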
10477 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10478 {
10479         struct hclge_set_led_state_cmd *req;
10480         struct hclge_desc desc;
10481         int ret;
10482
10483         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10484
10485         req = (struct hclge_set_led_state_cmd *)desc.data;
10486         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10487                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10488
10489         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10490         if (ret)
10491                 dev_err(&hdev->pdev->dev,
10492                         "Send set led state cmd error, ret = %d\n", ret);
10493
10494         return ret;
10495 }
10496
10497 enum hclge_led_status {
10498         HCLGE_LED_OFF,
10499         HCLGE_LED_ON,
10500         HCLGE_LED_NO_CHANGE = 0xFF,
10501 };
10502
10503 static int hclge_set_led_id(struct hnae3_handle *handle,
10504                             enum ethtool_phys_id_state status)
10505 {
10506         struct hclge_vport *vport = hclge_get_vport(handle);
10507         struct hclge_dev *hdev = vport->back;
10508
10509         switch (status) {
10510         case ETHTOOL_ID_ACTIVE:
10511                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10512         case ETHTOOL_ID_INACTIVE:
10513                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10514         default:
10515                 return -EINVAL;
10516         }
10517 }
10518
10519 static void hclge_get_link_mode(struct hnae3_handle *handle,
10520                                 unsigned long *supported,
10521                                 unsigned long *advertising)
10522 {
10523         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10524         struct hclge_vport *vport = hclge_get_vport(handle);
10525         struct hclge_dev *hdev = vport->back;
10526         unsigned int idx = 0;
10527
10528         for (; idx < size; idx++) {
10529                 supported[idx] = hdev->hw.mac.supported[idx];
10530                 advertising[idx] = hdev->hw.mac.advertising[idx];
10531         }
10532 }
10533
10534 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10535 {
10536         struct hclge_vport *vport = hclge_get_vport(handle);
10537         struct hclge_dev *hdev = vport->back;
10538
10539         return hclge_config_gro(hdev, enable);
10540 }
10541
10542 static const struct hnae3_ae_ops hclge_ops = {
10543         .init_ae_dev = hclge_init_ae_dev,
10544         .uninit_ae_dev = hclge_uninit_ae_dev,
10545         .flr_prepare = hclge_flr_prepare,
10546         .flr_done = hclge_flr_done,
10547         .init_client_instance = hclge_init_client_instance,
10548         .uninit_client_instance = hclge_uninit_client_instance,
10549         .map_ring_to_vector = hclge_map_ring_to_vector,
10550         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10551         .get_vector = hclge_get_vector,
10552         .put_vector = hclge_put_vector,
10553         .set_promisc_mode = hclge_set_promisc_mode,
10554         .set_loopback = hclge_set_loopback,
10555         .start = hclge_ae_start,
10556         .stop = hclge_ae_stop,
10557         .client_start = hclge_client_start,
10558         .client_stop = hclge_client_stop,
10559         .get_status = hclge_get_status,
10560         .get_ksettings_an_result = hclge_get_ksettings_an_result,
10561         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10562         .get_media_type = hclge_get_media_type,
10563         .check_port_speed = hclge_check_port_speed,
10564         .get_fec = hclge_get_fec,
10565         .set_fec = hclge_set_fec,
10566         .get_rss_key_size = hclge_get_rss_key_size,
10567         .get_rss_indir_size = hclge_get_rss_indir_size,
10568         .get_rss = hclge_get_rss,
10569         .set_rss = hclge_set_rss,
10570         .set_rss_tuple = hclge_set_rss_tuple,
10571         .get_rss_tuple = hclge_get_rss_tuple,
10572         .get_tc_size = hclge_get_tc_size,
10573         .get_mac_addr = hclge_get_mac_addr,
10574         .set_mac_addr = hclge_set_mac_addr,
10575         .do_ioctl = hclge_do_ioctl,
10576         .add_uc_addr = hclge_add_uc_addr,
10577         .rm_uc_addr = hclge_rm_uc_addr,
10578         .add_mc_addr = hclge_add_mc_addr,
10579         .rm_mc_addr = hclge_rm_mc_addr,
10580         .set_autoneg = hclge_set_autoneg,
10581         .get_autoneg = hclge_get_autoneg,
10582         .restart_autoneg = hclge_restart_autoneg,
10583         .halt_autoneg = hclge_halt_autoneg,
10584         .get_pauseparam = hclge_get_pauseparam,
10585         .set_pauseparam = hclge_set_pauseparam,
10586         .set_mtu = hclge_set_mtu,
10587         .reset_queue = hclge_reset_tqp,
10588         .get_stats = hclge_get_stats,
10589         .get_mac_stats = hclge_get_mac_stat,
10590         .update_stats = hclge_update_stats,
10591         .get_strings = hclge_get_strings,
10592         .get_sset_count = hclge_get_sset_count,
10593         .get_fw_version = hclge_get_fw_version,
10594         .get_mdix_mode = hclge_get_mdix_mode,
10595         .enable_vlan_filter = hclge_enable_vlan_filter,
10596         .set_vlan_filter = hclge_set_vlan_filter,
10597         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10598         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10599         .reset_event = hclge_reset_event,
10600         .get_reset_level = hclge_get_reset_level,
10601         .set_default_reset_request = hclge_set_def_reset_request,
10602         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10603         .set_channels = hclge_set_channels,
10604         .get_channels = hclge_get_channels,
10605         .get_regs_len = hclge_get_regs_len,
10606         .get_regs = hclge_get_regs,
10607         .set_led_id = hclge_set_led_id,
10608         .get_link_mode = hclge_get_link_mode,
10609         .add_fd_entry = hclge_add_fd_entry,
10610         .del_fd_entry = hclge_del_fd_entry,
10611         .del_all_fd_entries = hclge_del_all_fd_entries,
10612         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10613         .get_fd_rule_info = hclge_get_fd_rule_info,
10614         .get_fd_all_rules = hclge_get_all_rules,
10615         .restore_fd_rules = hclge_restore_fd_entries,
10616         .enable_fd = hclge_enable_fd,
10617         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10618         .dbg_run_cmd = hclge_dbg_run_cmd,
10619         .handle_hw_ras_error = hclge_handle_hw_ras_error,
10620         .get_hw_reset_stat = hclge_get_hw_reset_stat,
10621         .ae_dev_resetting = hclge_ae_dev_resetting,
10622         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10623         .set_gro_en = hclge_gro_en,
10624         .get_global_queue_id = hclge_covert_handle_qid_global,
10625         .set_timer_task = hclge_set_timer_task,
10626         .mac_connect_phy = hclge_mac_connect_phy,
10627         .mac_disconnect_phy = hclge_mac_disconnect_phy,
10628         .restore_vlan_table = hclge_restore_vlan_table,
10629         .get_vf_config = hclge_get_vf_config,
10630         .set_vf_link_state = hclge_set_vf_link_state,
10631         .set_vf_spoofchk = hclge_set_vf_spoofchk,
10632         .set_vf_trust = hclge_set_vf_trust,
10633         .set_vf_rate = hclge_set_vf_rate,
10634         .set_vf_mac = hclge_set_vf_mac,
10635 };
10636
10637 static struct hnae3_ae_algo ae_algo = {
10638         .ops = &hclge_ops,
10639         .pdev_id_table = ae_algo_pci_tbl,
10640 };
10641
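/* Module entry: create the driver workqueue and register this algorithm
 * with the HNAE3 framework.
 */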
10642 static int hclge_init(void)
10643 {
10644         pr_info("%s is initializing\n", HCLGE_NAME);
10645
10646         hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10647         if (!hclge_wq) {
10648                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10649                 return -ENOMEM;
10650         }
10651
10652         hnae3_register_ae_algo(&ae_algo);
10653
10654         return 0;
10655 }
10656
10657 static void hclge_exit(void)
10658 {
10659         hnae3_unregister_ae_algo(&ae_algo);
10660         destroy_workqueue(hclge_wq);
10661 }
10662 module_init(hclge_init);
10663 module_exit(hclge_exit);
10664
10665 MODULE_LICENSE("GPL");
10666 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10667 MODULE_DESCRIPTION("HCLGE Driver");
10668 MODULE_VERSION(HCLGE_MOD_VERSION);