drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME                      "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
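
/* A note on the two macros above: HCLGE_MAC_STATS_FIELD_OFF() yields the
 * byte offset of a counter inside struct hclge_mac_stats, and
 * HCLGE_STATS_READ() dereferences a u64 at that offset. For example
 * (illustrative only):
 *
 *   u64 pause = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *                        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * This pairing is what lets g_mac_stats_string[] below describe each
 * counter as a (name, offset) tuple.
 */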

#define HCLGE_BUF_SIZE_UNIT     256U
#define HCLGE_BUF_MUL_BY        2
#define HCLGE_BUF_DIV_BY        2
#define NEED_RESERVE_TC_NUM     2
#define BUF_MAX_PERCENT         100
#define BUF_RESERVE_PERCENT     90

#define HCLGE_RESET_MAX_FAIL_CNT        5
#define HCLGE_RESET_SYNC_TIME           100
#define HCLGE_PF_RESET_SYNC_TIME        20
#define HCLGE_PF_RESET_SYNC_CNT         1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET        1
#define HCLGE_DFX_SSU_0_BD_OFFSET       2
#define HCLGE_DFX_SSU_1_BD_OFFSET       3
#define HCLGE_DFX_IGU_BD_OFFSET         4
#define HCLGE_DFX_RPU_0_BD_OFFSET       5
#define HCLGE_DFX_RPU_1_BD_OFFSET       6
#define HCLGE_DFX_NCSI_BD_OFFSET        7
#define HCLGE_DFX_RTC_BD_OFFSET         8
#define HCLGE_DFX_PPP_BD_OFFSET         9
#define HCLGE_DFX_RCB_BD_OFFSET         10
#define HCLGE_DFX_TQP_BD_OFFSET         11
#define HCLGE_DFX_SSU_2_BD_OFFSET       12

#define HCLGE_LINK_STATUS_MS    10

#define HCLGE_VF_VPORT_START_NUM        1

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
                               u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
                                                   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
                                         HCLGE_CMDQ_TX_ADDR_H_REG,
                                         HCLGE_CMDQ_TX_DEPTH_REG,
                                         HCLGE_CMDQ_TX_TAIL_REG,
                                         HCLGE_CMDQ_TX_HEAD_REG,
                                         HCLGE_CMDQ_RX_ADDR_L_REG,
                                         HCLGE_CMDQ_RX_ADDR_H_REG,
                                         HCLGE_CMDQ_RX_DEPTH_REG,
                                         HCLGE_CMDQ_RX_TAIL_REG,
                                         HCLGE_CMDQ_RX_HEAD_REG,
                                         HCLGE_VECTOR0_CMDQ_SRC_REG,
                                         HCLGE_CMDQ_INTR_STS_REG,
                                         HCLGE_CMDQ_INTR_EN_REG,
                                         HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
                                           HCLGE_VECTOR0_OTER_EN_REG,
                                           HCLGE_MISC_RESET_STS_REG,
                                           HCLGE_MISC_VECTOR_INT_STS,
                                           HCLGE_GLOBAL_RESET_REG,
                                           HCLGE_FUN_RST_ING,
                                           HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
                                         HCLGE_RING_RX_ADDR_H_REG,
                                         HCLGE_RING_RX_BD_NUM_REG,
                                         HCLGE_RING_RX_BD_LENGTH_REG,
                                         HCLGE_RING_RX_MERGE_EN_REG,
                                         HCLGE_RING_RX_TAIL_REG,
                                         HCLGE_RING_RX_HEAD_REG,
                                         HCLGE_RING_RX_FBD_NUM_REG,
                                         HCLGE_RING_RX_OFFSET_REG,
                                         HCLGE_RING_RX_FBD_OFFSET_REG,
                                         HCLGE_RING_RX_STASH_REG,
                                         HCLGE_RING_RX_BD_ERR_REG,
                                         HCLGE_RING_TX_ADDR_L_REG,
                                         HCLGE_RING_TX_ADDR_H_REG,
                                         HCLGE_RING_TX_BD_NUM_REG,
                                         HCLGE_RING_TX_PRIORITY_REG,
                                         HCLGE_RING_TX_TC_REG,
                                         HCLGE_RING_TX_MERGE_EN_REG,
                                         HCLGE_RING_TX_TAIL_REG,
                                         HCLGE_RING_TX_HEAD_REG,
                                         HCLGE_RING_TX_FBD_NUM_REG,
                                         HCLGE_RING_TX_OFFSET_REG,
                                         HCLGE_RING_TX_EBD_NUM_REG,
                                         HCLGE_RING_TX_EBD_OFFSET_REG,
                                         HCLGE_RING_TX_BD_ERR_REG,
                                         HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
                                             HCLGE_TQP_INTR_GL0_REG,
                                             HCLGE_TQP_INTR_GL1_REG,
                                             HCLGE_TQP_INTR_GL2_REG,
                                             HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
        "App    Loopback test",
        "Serdes serial Loopback test",
        "Serdes parallel Loopback test",
        "Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
        {"mac_tx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
        {"mac_rx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
        {"mac_tx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
        {"mac_rx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
        {"mac_tx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
        {"mac_tx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
        {"mac_tx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
        {"mac_tx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
        {"mac_tx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
        {"mac_tx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
        {"mac_tx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
        {"mac_tx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
        {"mac_tx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
        {"mac_rx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
        {"mac_rx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
        {"mac_rx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
        {"mac_rx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
        {"mac_rx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
        {"mac_rx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
        {"mac_rx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
        {"mac_rx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
        {"mac_rx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
        {"mac_tx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
        {"mac_tx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
        {"mac_tx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
        {"mac_tx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
        {"mac_tx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
        {"mac_tx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
        {"mac_tx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
        {"mac_tx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
        {"mac_tx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
        {"mac_tx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
        {"mac_tx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
        {"mac_tx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
        {"mac_tx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
        {"mac_tx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
        {"mac_tx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
        {"mac_tx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
        {"mac_tx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
        {"mac_tx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
        {"mac_tx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
        {"mac_tx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
        {"mac_tx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
        {"mac_tx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
        {"mac_tx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
        {"mac_tx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
        {"mac_tx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
        {"mac_rx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
        {"mac_rx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
        {"mac_rx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
        {"mac_rx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
        {"mac_rx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
        {"mac_rx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
        {"mac_rx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
        {"mac_rx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
        {"mac_rx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
        {"mac_rx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
        {"mac_rx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
        {"mac_rx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
        {"mac_rx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
        {"mac_rx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
        {"mac_rx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
        {"mac_rx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
        {"mac_rx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
        {"mac_rx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
        {"mac_rx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
        {"mac_rx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
        {"mac_rx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
        {"mac_rx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
        {"mac_rx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
        {"mac_rx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
        {"mac_rx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

        {"mac_tx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
        {"mac_tx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
        {"mac_tx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
        {"mac_tx_err_all_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
        {"mac_tx_from_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
        {"mac_tx_from_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
        {"mac_rx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
        {"mac_rx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
        {"mac_rx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
        {"mac_rx_fcs_err_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
        {"mac_rx_send_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
        {"mac_rx_send_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
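
/* Each entry above pairs an ethtool string with the offset of the matching
 * counter in struct hclge_mac_stats; hclge_comm_get_strings() and
 * hclge_comm_get_stats() below walk this same array, so the order of names
 * and values stays in sync by construction.
 */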

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
        {
                .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
                .ethter_type = cpu_to_le16(ETH_P_LLDP),
                .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
                .i_port_bitmap = 0x1,
        },
};

static const u8 hclge_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
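
/* hclge_hash_key is the 40-byte key programmed as the default RSS hash key;
 * the byte pattern appears to be the same well-known default used by
 * several other NIC drivers (an observation, not stated in this file).
 */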

static const u32 hclge_dfx_bd_offset_list[] = {
        HCLGE_DFX_BIOS_BD_OFFSET,
        HCLGE_DFX_SSU_0_BD_OFFSET,
        HCLGE_DFX_SSU_1_BD_OFFSET,
        HCLGE_DFX_IGU_BD_OFFSET,
        HCLGE_DFX_RPU_0_BD_OFFSET,
        HCLGE_DFX_RPU_1_BD_OFFSET,
        HCLGE_DFX_NCSI_BD_OFFSET,
        HCLGE_DFX_RTC_BD_OFFSET,
        HCLGE_DFX_PPP_BD_OFFSET,
        HCLGE_DFX_RCB_BD_OFFSET,
        HCLGE_DFX_TQP_BD_OFFSET,
        HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
        HCLGE_OPC_DFX_BIOS_COMMON_REG,
        HCLGE_OPC_DFX_SSU_REG_0,
        HCLGE_OPC_DFX_SSU_REG_1,
        HCLGE_OPC_DFX_IGU_EGU_REG,
        HCLGE_OPC_DFX_RPU_REG_0,
        HCLGE_OPC_DFX_RPU_REG_1,
        HCLGE_OPC_DFX_NCSI_REG,
        HCLGE_OPC_DFX_RTC_REG,
        HCLGE_OPC_DFX_PPP_REG,
        HCLGE_OPC_DFX_RCB_REG,
        HCLGE_OPC_DFX_TQP_REG,
        HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
        { PACKET_TYPE_ID, 6},
        { IP_FRAGEMENT, 1},
        { ROCE_TYPE, 1},
        { NEXT_KEY, 5},
        { VLAN_NUMBER, 2},
        { SRC_VPORT, 12},
        { DST_VPORT, 12},
        { TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
        { OUTER_DST_MAC, 48},
        { OUTER_SRC_MAC, 48},
        { OUTER_VLAN_TAG_FST, 16},
        { OUTER_VLAN_TAG_SEC, 16},
        { OUTER_ETH_TYPE, 16},
        { OUTER_L2_RSV, 16},
        { OUTER_IP_TOS, 8},
        { OUTER_IP_PROTO, 8},
        { OUTER_SRC_IP, 32},
        { OUTER_DST_IP, 32},
        { OUTER_L3_RSV, 16},
        { OUTER_SRC_PORT, 16},
        { OUTER_DST_PORT, 16},
        { OUTER_L4_RSV, 32},
        { OUTER_TUN_VNI, 24},
        { OUTER_TUN_FLOW_ID, 8},
        { INNER_DST_MAC, 48},
        { INNER_SRC_MAC, 48},
        { INNER_VLAN_TAG_FST, 16},
        { INNER_VLAN_TAG_SEC, 16},
        { INNER_ETH_TYPE, 16},
        { INNER_L2_RSV, 16},
        { INNER_IP_TOS, 8},
        { INNER_IP_PROTO, 8},
        { INNER_SRC_IP, 32},
        { INNER_DST_IP, 32},
        { INNER_L3_RSV, 16},
        { INNER_SRC_PORT, 16},
        { INNER_DST_PORT, 16},
        { INNER_L4_RSV, 32},
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

        u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
        struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
        __le64 *desc_data;
        int i, k, n;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get MAC pkt stats fail, status = %d.\n", ret);

                return ret;
        }

        for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
                /* for special opcode 0032, only the first desc has the head */
                if (unlikely(i == 0)) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        return 0;
}
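
/* The walk above relies on struct hclge_mac_stats being laid out as a
 * contiguous array of u64 counters: "data" starts at the first counter and
 * advances once per value pulled out of the descriptors. desc[0] also
 * carries the command header, which is why it yields fewer stats
 * (HCLGE_RD_FIRST_STATS_NUM) than the following descriptors
 * (HCLGE_RD_OTHER_STATS_NUM).
 */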

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
        u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
        struct hclge_desc *desc;
        __le64 *desc_data;
        u16 i, k, n;
        int ret;

        /* This may be called inside atomic sections,
         * so GFP_ATOMIC is more suitable here
         */
        desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
        if (!desc)
                return -ENOMEM;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
        ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
        if (ret) {
                kfree(desc);
                return ret;
        }

        for (i = 0; i < desc_num; i++) {
                /* for special opcode 0034, only the first desc has the head */
                if (i == 0) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        kfree(desc);

        return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
        struct hclge_desc desc;
        __le32 *desc_data;
        u32 reg_num;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        desc_data = (__le32 *)(&desc.data[0]);
        reg_num = le32_to_cpu(*desc_data);

        *desc_num = 1 + ((reg_num - 3) >> 2) +
                    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

        return 0;
}
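
/* Worked example for the desc_num formula above: the first descriptor
 * covers the first 3 stats registers (it also holds the command header),
 * and every later descriptor holds 4 registers, rounded up. So for
 * reg_num = 11:
 *   desc_num = 1 + ((11 - 3) >> 2) + (((11 - 3) & 0x3) ? 1 : 0)
 *            = 1 + 2 + 0 = 3 descriptors.
 */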

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
        u32 desc_num;
        int ret;

        ret = hclge_mac_query_reg_num(hdev, &desc_num);

        /* The firmware supports the new statistics acquisition method */
        if (!ret)
                ret = hclge_mac_update_stats_complete(hdev, desc_num);
        else if (ret == -EOPNOTSUPP)
                ret = hclge_mac_update_stats_defective(hdev);
        else
                dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

        return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hnae3_queue *queue;
        struct hclge_desc desc[1];
        struct hclge_tqp *tqp;
        int ret, i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_IGU_STAT */
                hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
                                           true);

                desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_IGU_STAT */
                hclge_cmd_setup_basic_desc(&desc[0],
                                           HCLGE_OPC_QUERY_TX_STATUS,
                                           true);

                desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        return 0;
}
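
/* Note that the loops above issue one firmware command per queue and per
 * direction (RX status first, then TX status), and accumulate the returned
 * packet counts into the per-TQP software totals rather than overwriting
 * them.
 */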

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        /* each TQP has one TX queue and one RX queue */
        return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        u8 *buff = data;
        int i = 0;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
                                 const struct hclge_comm_stats_str strs[],
                                 int size, u64 *data)
{
        u64 *buf = data;
        u32 i;

        for (i = 0; i < size; i++)
                buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

        return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
                                  const struct hclge_comm_stats_str strs[],
                                  int size, u8 *data)
{
        char *buff = (char *)data;
        u32 i;

        if (stringset != ETH_SS_STATS)
                return buff;

        for (i = 0; i < size; i++) {
                snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
                buff = buff + ETH_GSTRING_LEN;
        }

        return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
        struct hnae3_handle *handle;
        int status;

        handle = &hdev->vport[0].nic;
        if (handle->client) {
                status = hclge_tqps_update_stats(handle);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Update TQPS stats fail, status = %d.\n",
                                status);
                }
        }

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
                               struct net_device_stats *net_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int status;

        if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
                return;

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n",
                        status);

        status = hclge_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update TQPS stats fail, status = %d.\n",
                        status);

        clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
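
/* test_and_set_bit() on HCLGE_STATE_STATISTICS_UPDATING above makes the
 * update section single-entry: a second caller arriving while an update is
 * in flight simply returns instead of issuing overlapping firmware
 * commands, and the bit is cleared once the update completes.
 */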

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
                HNAE3_SUPPORT_PHY_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int count = 0;

        /* Loopback test support rules:
         * mac: only supported in GE mode
         * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
         * phy: only supported when a PHY device is present on the board
         */
        if (stringset == ETH_SS_TEST) {
                /* clear loopback bit flags at first */
                handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
                if (hdev->pdev->revision >= 0x21 ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
                }

                count += 2;
                handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
                handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

                if (hdev->hw.mac.phydev) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
                }

        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
                        hclge_tqps_get_sset_count(handle, stringset);
        }

        return count;
}
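
/* The ethtool core expects the count returned here for ETH_SS_TEST to match
 * the number of strings emitted by hclge_get_strings() below, which is why
 * both functions key off the same HNAE3_SUPPORT_*_LOOPBACK flags.
 */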

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
                              u8 *data)
{
        u8 *p = data;
        int size;

        if (stringset == ETH_SS_STATS) {
                size = ARRAY_SIZE(g_mac_stats_string);
                p = hclge_comm_get_strings(stringset, g_mac_stats_string,
                                           size, p);
                p = hclge_tqps_get_strings(handle, p);
        } else if (stringset == ETH_SS_TEST) {
                if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
        }
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u64 *p;

        p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
                                 ARRAY_SIZE(g_mac_stats_string), data);
        p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
                               struct hns3_mac_stats *mac_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        hclge_update_stats(handle, NULL);

        mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
        mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
                                   struct hclge_func_status_cmd *status)
{
        if (!(status->pf_state & HCLGE_PF_STATE_DONE))
                return -EINVAL;

        /* Set the pf to main pf */
        if (status->pf_state & HCLGE_PF_STATE_MAIN)
                hdev->flag |= HCLGE_FLAG_MAIN;
        else
                hdev->flag &= ~HCLGE_FLAG_MAIN;

        return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT     5

        struct hclge_func_status_cmd *req;
        struct hclge_desc desc;
        int timeout = 0;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
        req = (struct hclge_func_status_cmd *)desc.data;

        do {
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "query function status failed %d.\n", ret);
                        return ret;
                }

                /* Check pf reset is done */
                if (req->pf_state)
                        break;
                usleep_range(1000, 2000);
        } while (timeout++ < HCLGE_QUERY_MAX_CNT);

        ret = hclge_parse_func_status(hdev, req);

        return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
        struct hclge_pf_res_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query pf resource failed %d.\n", ret);
                return ret;
        }

        req = (struct hclge_pf_res_cmd *)desc.data;
        hdev->num_tqps = __le16_to_cpu(req->tqp_num);
        hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

        if (req->tx_buf_size)
                hdev->tx_buf_size =
                        __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

        hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

        if (req->dv_buf_size)
                hdev->dv_buf_size =
                        __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->dv_buf_size = HCLGE_DEFAULT_DV;

        hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

        if (hnae3_dev_roce_supported(hdev)) {
                hdev->roce_base_msix_offset =
                hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
                                HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
                hdev->num_roce_msi =
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

                /* the NIC's MSI-X vector count always equals the RoCE's */
                hdev->num_nic_msi = hdev->num_roce_msi;

                /* The PF needs both NIC vectors and RoCE vectors;
                 * NIC vectors are laid out before the RoCE vectors.
                 */
                hdev->num_msi = hdev->num_roce_msi +
                                hdev->roce_base_msix_offset;
        } else {
                hdev->num_msi =
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

                hdev->num_nic_msi = hdev->num_msi;
        }

        if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
                dev_err(&hdev->pdev->dev,
                        "just %u MSI resources, not enough for the PF (min: 2).\n",
                        hdev->num_nic_msi);
                return -EINVAL;
        }

        return 0;
}
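
/* Resulting MSI-X layout when RoCE is supported (derived from the
 * assignments above): vectors [0, roce_base_msix_offset) belong to the
 * NIC, and vectors [roce_base_msix_offset, num_msi) belong to RoCE.
 */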

static int hclge_parse_speed(int speed_cmd, int *speed)
{
        switch (speed_cmd) {
        case 6:
                *speed = HCLGE_MAC_SPEED_10M;
                break;
        case 7:
                *speed = HCLGE_MAC_SPEED_100M;
                break;
        case 0:
                *speed = HCLGE_MAC_SPEED_1G;
                break;
        case 1:
                *speed = HCLGE_MAC_SPEED_10G;
                break;
        case 2:
                *speed = HCLGE_MAC_SPEED_25G;
                break;
        case 3:
                *speed = HCLGE_MAC_SPEED_40G;
                break;
        case 4:
                *speed = HCLGE_MAC_SPEED_50G;
                break;
        case 5:
                *speed = HCLGE_MAC_SPEED_100G;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
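
/* Firmware speed codes decoded above, for quick reference:
 *   0 -> 1G,  1 -> 10G,  2 -> 25G, 3 -> 40G,
 *   4 -> 50G, 5 -> 100G, 6 -> 10M, 7 -> 100M
 * Note the codes are not ordered by speed; 10M and 100M were evidently
 * appended after the gigabit rates.
 */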

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 speed_ability = hdev->hw.mac.speed_ability;
        u32 speed_bit = 0;

        switch (speed) {
        case HCLGE_MAC_SPEED_10M:
                speed_bit = HCLGE_SUPPORT_10M_BIT;
                break;
        case HCLGE_MAC_SPEED_100M:
                speed_bit = HCLGE_SUPPORT_100M_BIT;
                break;
        case HCLGE_MAC_SPEED_1G:
                speed_bit = HCLGE_SUPPORT_1G_BIT;
                break;
        case HCLGE_MAC_SPEED_10G:
                speed_bit = HCLGE_SUPPORT_10G_BIT;
                break;
        case HCLGE_MAC_SPEED_25G:
                speed_bit = HCLGE_SUPPORT_25G_BIT;
                break;
        case HCLGE_MAC_SPEED_40G:
                speed_bit = HCLGE_SUPPORT_40G_BIT;
                break;
        case HCLGE_MAC_SPEED_50G:
                speed_bit = HCLGE_SUPPORT_50G_BIT;
                break;
        case HCLGE_MAC_SPEED_100G:
                speed_bit = HCLGE_SUPPORT_100G_BIT;
                break;
        default:
                return -EINVAL;
        }

        if (speed_bit & speed_ability)
                return 0;

        return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
                                 mac->supported);
}
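
/* In hclge_convert_setting_lr() the 25G case sets the 25000baseSR link
 * mode; presumably this stands in for a 25G LR mode that ethtool does not
 * define (an assumption, not stated in the source).
 */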

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

        switch (mac->speed) {
        case HCLGE_MAC_SPEED_10G:
        case HCLGE_MAC_SPEED_40G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_25G:
        case HCLGE_MAC_SPEED_50G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
                        BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_100G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
                mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
                break;
        default:
                mac->fec_ability = 0;
                break;
        }
}
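
/* FEC ability per speed as encoded above:
 *   10G/40G -> BaseR (plus auto)
 *   25G/50G -> BaseR and RS (plus auto)
 *   100G    -> RS only (plus auto)
 * Other speeds advertise no FEC ability.
 */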

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
                                        u8 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
                                 mac->supported);

        hclge_convert_setting_sr(mac, speed_ability);
        hclge_convert_setting_lr(mac, speed_ability);
        hclge_convert_setting_cr(mac, speed_ability);
        if (hdev->pdev->revision >= 0x21)
                hclge_convert_setting_fec(mac);

        linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
                                            u8 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        hclge_convert_setting_kr(mac, speed_ability);
        if (hdev->pdev->revision >= 0x21)
                hclge_convert_setting_fec(mac);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
                                         u8 speed_ability)
{
        unsigned long *supported = hdev->hw.mac.supported;

        /* default to supporting all speeds for a GE port */
        if (!speed_ability)
                speed_ability = HCLGE_SUPPORT_GE;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                                 supported);

        if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                                 supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
                                 supported);
        }

        if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
        }

        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
        u8 media_type = hdev->hw.mac.media_type;

        if (media_type == HNAE3_MEDIA_TYPE_FIBER)
                hclge_parse_fiber_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
                hclge_parse_copper_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
                hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                return HCLGE_MAC_SPEED_100G;

        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                return HCLGE_MAC_SPEED_50G;

        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                return HCLGE_MAC_SPEED_40G;

        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                return HCLGE_MAC_SPEED_25G;

        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                return HCLGE_MAC_SPEED_10G;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                return HCLGE_MAC_SPEED_1G;

        if (speed_ability & HCLGE_SUPPORT_100M_BIT)
                return HCLGE_MAC_SPEED_100M;

        if (speed_ability & HCLGE_SUPPORT_10M_BIT)
                return HCLGE_MAC_SPEED_10M;

        return HCLGE_MAC_SPEED_1G;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
        struct hclge_cfg_param_cmd *req;
        u64 mac_addr_tmp_high;
        u64 mac_addr_tmp;
        unsigned int i;

        req = (struct hclge_cfg_param_cmd *)desc[0].data;

        /* get the configuration */
        cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                              HCLGE_CFG_VMDQ_M,
                                              HCLGE_CFG_VMDQ_S);
        cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
        cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                            HCLGE_CFG_TQP_DESC_N_M,
                                            HCLGE_CFG_TQP_DESC_N_S);

        cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                        HCLGE_CFG_PHY_ADDR_M,
                                        HCLGE_CFG_PHY_ADDR_S);
        cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_MEDIA_TP_M,
                                          HCLGE_CFG_MEDIA_TP_S);
        cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_RX_BUF_LEN_M,
                                          HCLGE_CFG_RX_BUF_LEN_S);
        /* get mac_address */
        mac_addr_tmp = __le32_to_cpu(req->param[2]);
        mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_MAC_ADDR_H_M,
                                            HCLGE_CFG_MAC_ADDR_H_S);

        mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

        cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                             HCLGE_CFG_DEFAULT_SPEED_M,
                                             HCLGE_CFG_DEFAULT_SPEED_S);
        cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_RSS_SIZE_M,
                                            HCLGE_CFG_RSS_SIZE_S);

        for (i = 0; i < ETH_ALEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

        req = (struct hclge_cfg_param_cmd *)desc[1].data;
        cfg->numa_node_map = __le32_to_cpu(req->param[0]);

        cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                             HCLGE_CFG_SPEED_ABILITY_M,
                                             HCLGE_CFG_SPEED_ABILITY_S);
        cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                         HCLGE_CFG_UMV_TBL_SPACE_M,
                                         HCLGE_CFG_UMV_TBL_SPACE_S);
        if (!cfg->umv_space)
                cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
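
/* MAC address assembly in hclge_parse_cfg(): param[2] carries the low
 * 32 bits and a field of param[3] carries the upper bits, merged with
 * (mac_addr_tmp_high << 31) << 1 -- arithmetically the same as a single
 * << 32 shift; the split into two shifts looks like a guard against
 * shift-width warnings on 32-bit quantities (an assumption). The bytes are
 * then copied out least-significant first into cfg->mac_addr[].
 */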

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
1290 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1291 {
1292         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1293         struct hclge_cfg_param_cmd *req;
1294         unsigned int i;
1295         int ret;
1296
1297         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1298                 u32 offset = 0;
1299
1300                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1301                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1302                                            true);
1303                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1304                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1305                 /* Len must be in units of 4 bytes when sent to hardware */
1306                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1307                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1308                 req->offset = cpu_to_le32(offset);
1309         }
1310
1311         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1312         if (ret) {
1313                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1314                 return ret;
1315         }
1316
1317         hclge_parse_cfg(hcfg, desc);
1318
1319         return 0;
1320 }
1321
1322 static int hclge_get_cap(struct hclge_dev *hdev)
1323 {
1324         int ret;
1325
1326         ret = hclge_query_function_status(hdev);
1327         if (ret) {
1328                 dev_err(&hdev->pdev->dev,
1329                         "query function status error %d.\n", ret);
1330                 return ret;
1331         }
1332
1333         /* get pf resource */
1334         ret = hclge_query_pf_resource(hdev);
1335         if (ret)
1336                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1337
1338         return ret;
1339 }
1340
1341 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1342 {
1343 #define HCLGE_MIN_TX_DESC       64
1344 #define HCLGE_MIN_RX_DESC       64
1345
1346         if (!is_kdump_kernel())
1347                 return;
1348
1349         dev_info(&hdev->pdev->dev,
1350                  "Running kdump kernel. Using minimal resources\n");
1351
1352         /* the minimal number of queue pairs equals the number of vports */
1353         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1354         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1355         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1356 }
1357
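/* Read the static config from flash and initialize the per-device fields
 * (TQP/TC/RSS/MAC/speed parameters) derived from it.
 */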
1358 static int hclge_configure(struct hclge_dev *hdev)
1359 {
1360         struct hclge_cfg cfg;
1361         unsigned int i;
1362         int ret;
1363
1364         ret = hclge_get_cfg(hdev, &cfg);
1365         if (ret) {
1366                 dev_err(&hdev->pdev->dev, "failed to get config, ret = %d.\n", ret);
1367                 return ret;
1368         }
1369
1370         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1371         hdev->base_tqp_pid = 0;
1372         hdev->rss_size_max = cfg.rss_size_max;
1373         hdev->rx_buf_len = cfg.rx_buf_len;
1374         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1375         hdev->hw.mac.media_type = cfg.media_type;
1376         hdev->hw.mac.phy_addr = cfg.phy_addr;
1377         hdev->num_tx_desc = cfg.tqp_desc_num;
1378         hdev->num_rx_desc = cfg.tqp_desc_num;
1379         hdev->tm_info.num_pg = 1;
1380         hdev->tc_max = cfg.tc_num;
1381         hdev->tm_info.hw_pfc_map = 0;
1382         hdev->wanted_umv_size = cfg.umv_space;
1383
1384         if (hnae3_dev_fd_supported(hdev)) {
1385                 hdev->fd_en = true;
1386                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1387         }
1388
1389         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1390         if (ret) {
1391                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1392                 return ret;
1393         }
1394
1395         hclge_parse_link_mode(hdev, cfg.speed_ability);
1396
1397         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1398
1399         if (hdev->tc_max > HNAE3_MAX_TC ||
1400             hdev->tc_max < 1) {
1401                 dev_warn(&hdev->pdev->dev, "invalid TC num %u, set to 1\n",
1402                          hdev->tc_max);
1403                 hdev->tc_max = 1;
1404         }
1405
1406         /* Dev does not support DCB */
1407         if (!hnae3_dev_dcb_supported(hdev)) {
1408                 hdev->tc_max = 1;
1409                 hdev->pfc_max = 0;
1410         } else {
1411                 hdev->pfc_max = hdev->tc_max;
1412         }
1413
1414         hdev->tm_info.num_tc = 1;
1415
1416         /* Discontiguous TCs are currently not supported */
1417         for (i = 0; i < hdev->tm_info.num_tc; i++)
1418                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1419
1420         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1421
1422         hclge_init_kdump_kernel_config(hdev);
1423
1424         /* Set the initial affinity based on the PCI function number */
1425         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1426         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1427         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1428                         &hdev->affinity_mask);
1429
1430         return ret;
1431 }
1432
1433 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1434                             unsigned int tso_mss_max)
1435 {
1436         struct hclge_cfg_tso_status_cmd *req;
1437         struct hclge_desc desc;
1438         u16 tso_mss;
1439
1440         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1441
1442         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1443
1444         tso_mss = 0;
1445         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1446                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1447         req->tso_mss_min = cpu_to_le16(tso_mss);
1448
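        /* Note: the max MSS field appears to share the same 14-bit layout
         * at bit 0 within its own 16-bit word as the min field, so the MIN
         * mask/shift are reused here.
         */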
1449         tso_mss = 0;
1450         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1451                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1452         req->tso_mss_max = cpu_to_le16(tso_mss);
1453
1454         return hclge_cmd_send(&hdev->hw, &desc, 1);
1455 }
1456
1457 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1458 {
1459         struct hclge_cfg_gro_status_cmd *req;
1460         struct hclge_desc desc;
1461         int ret;
1462
1463         if (!hnae3_dev_gro_supported(hdev))
1464                 return 0;
1465
1466         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1467         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1468
1469         req->gro_en = cpu_to_le16(en ? 1 : 0);
1470
1471         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1472         if (ret)
1473                 dev_err(&hdev->pdev->dev,
1474                         "GRO hardware config cmd failed, ret = %d\n", ret);
1475
1476         return ret;
1477 }
1478
1479 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1480 {
1481         struct hclge_tqp *tqp;
1482         int i;
1483
1484         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1485                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1486         if (!hdev->htqp)
1487                 return -ENOMEM;
1488
1489         tqp = hdev->htqp;
1490
1491         for (i = 0; i < hdev->num_tqps; i++) {
1492                 tqp->dev = &hdev->pdev->dev;
1493                 tqp->index = i;
1494
1495                 tqp->q.ae_algo = &ae_algo;
1496                 tqp->q.buf_size = hdev->rx_buf_len;
1497                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1498                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1499                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1500                         i * HCLGE_TQP_REG_SIZE;
1501
1502                 tqp++;
1503         }
1504
1505         return 0;
1506 }
1507
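/* hclge_map_tqps_to_func: bind one physical TQP to a function
 * @func_id: id of the PF/VF the queue is mapped to
 * @tqp_pid: physical (global) queue id
 * @tqp_vid: virtual queue id as seen by the function
 * @is_pf: the map-type bit is left clear for the PF and set for a VF
 */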
1508 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1509                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1510 {
1511         struct hclge_tqp_map_cmd *req;
1512         struct hclge_desc desc;
1513         int ret;
1514
1515         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1516
1517         req = (struct hclge_tqp_map_cmd *)desc.data;
1518         req->tqp_id = cpu_to_le16(tqp_pid);
1519         req->tqp_vf = func_id;
1520         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1521         if (!is_pf)
1522                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1523         req->tqp_vid = cpu_to_le16(tqp_vid);
1524
1525         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1526         if (ret)
1527                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1528
1529         return ret;
1530 }
1531
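/* Walk the global TQP pool and hand the first num_tqps free queues to the
 * vport, then derive its rss_size from the allocated queues, the TC count
 * and the available NIC MSI vectors.
 */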
1532 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1533 {
1534         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1535         struct hclge_dev *hdev = vport->back;
1536         int i, alloced;
1537
1538         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1539              alloced < num_tqps; i++) {
1540                 if (!hdev->htqp[i].alloced) {
1541                         hdev->htqp[i].q.handle = &vport->nic;
1542                         hdev->htqp[i].q.tqp_index = alloced;
1543                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1544                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1545                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1546                         hdev->htqp[i].alloced = true;
1547                         alloced++;
1548                 }
1549         }
1550         vport->alloc_tqps = alloced;
1551         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1552                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1553
1554         /* ensure a one-to-one mapping between IRQs and queues by default */
1555         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1556                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1557
1558         return 0;
1559 }
1560
1561 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1562                             u16 num_tx_desc, u16 num_rx_desc)
1564 {
1565         struct hnae3_handle *nic = &vport->nic;
1566         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1567         struct hclge_dev *hdev = vport->back;
1568         int ret;
1569
1570         kinfo->num_tx_desc = num_tx_desc;
1571         kinfo->num_rx_desc = num_rx_desc;
1572
1573         kinfo->rx_buf_len = hdev->rx_buf_len;
1574
1575         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1576                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1577         if (!kinfo->tqp)
1578                 return -ENOMEM;
1579
1580         ret = hclge_assign_tqp(vport, num_tqps);
1581         if (ret)
1582                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1583
1584         return ret;
1585 }
1586
1587 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1588                                   struct hclge_vport *vport)
1589 {
1590         struct hnae3_handle *nic = &vport->nic;
1591         struct hnae3_knic_private_info *kinfo;
1592         u16 i;
1593
1594         kinfo = &nic->kinfo;
1595         for (i = 0; i < vport->alloc_tqps; i++) {
1596                 struct hclge_tqp *q =
1597                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1598                 bool is_pf;
1599                 int ret;
1600
1601                 is_pf = !(vport->vport_id);
1602                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1603                                              i, is_pf);
1604                 if (ret)
1605                         return ret;
1606         }
1607
1608         return 0;
1609 }
1610
1611 static int hclge_map_tqp(struct hclge_dev *hdev)
1612 {
1613         struct hclge_vport *vport = hdev->vport;
1614         u16 i, num_vport;
1615
1616         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1617         for (i = 0; i < num_vport; i++) {
1618                 int ret;
1619
1620                 ret = hclge_map_tqp_to_vport(hdev, vport);
1621                 if (ret)
1622                         return ret;
1623
1624                 vport++;
1625         }
1626
1627         return 0;
1628 }
1629
1630 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1631 {
1632         struct hnae3_handle *nic = &vport->nic;
1633         struct hclge_dev *hdev = vport->back;
1634         int ret;
1635
1636         nic->pdev = hdev->pdev;
1637         nic->ae_algo = &ae_algo;
1638         nic->numa_node_mask = hdev->numa_node_mask;
1639
1640         ret = hclge_knic_setup(vport, num_tqps,
1641                                hdev->num_tx_desc, hdev->num_rx_desc);
1642         if (ret)
1643                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1644
1645         return ret;
1646 }
1647
1648 static int hclge_alloc_vport(struct hclge_dev *hdev)
1649 {
1650         struct pci_dev *pdev = hdev->pdev;
1651         struct hclge_vport *vport;
1652         u32 tqp_main_vport;
1653         u32 tqp_per_vport;
1654         int num_vport, i;
1655         int ret;
1656
1657         /* We need to alloc a vport for the main NIC of the PF */
1658         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1659
1660         if (hdev->num_tqps < num_vport) {
1661                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1662                         hdev->num_tqps, num_vport);
1663                 return -EINVAL;
1664         }
1665
1666         /* Alloc the same number of TQPs for every vport */
1667         tqp_per_vport = hdev->num_tqps / num_vport;
1668         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1669
1670         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1671                              GFP_KERNEL);
1672         if (!vport)
1673                 return -ENOMEM;
1674
1675         hdev->vport = vport;
1676         hdev->num_alloc_vport = num_vport;
1677
1678         if (IS_ENABLED(CONFIG_PCI_IOV))
1679                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1680
1681         for (i = 0; i < num_vport; i++) {
1682                 vport->back = hdev;
1683                 vport->vport_id = i;
1684                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1685                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1686                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1687                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1688                 INIT_LIST_HEAD(&vport->vlan_list);
1689                 INIT_LIST_HEAD(&vport->uc_mac_list);
1690                 INIT_LIST_HEAD(&vport->mc_mac_list);
1691
1692                 if (i == 0)
1693                         ret = hclge_vport_setup(vport, tqp_main_vport);
1694                 else
1695                         ret = hclge_vport_setup(vport, tqp_per_vport);
1696                 if (ret) {
1697                         dev_err(&pdev->dev,
1698                                 "vport setup failed for vport %d, %d\n",
1699                                 i, ret);
1700                         return ret;
1701                 }
1702
1703                 vport++;
1704         }
1705
1706         return 0;
1707 }
1708
1709 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1710                                    struct hclge_pkt_buf_alloc *buf_alloc)
1711 {
1712 /* TX buffer size is in units of 128 bytes */
1713 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1714 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1715         struct hclge_tx_buff_alloc_cmd *req;
1716         struct hclge_desc desc;
1717         int ret;
1718         u8 i;
1719
1720         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1721
1722         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1723         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1724                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1725
1726                 req->tx_pkt_buff[i] =
1727                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1728                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1729         }
1730
1731         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1732         if (ret)
1733                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1734                         ret);
1735
1736         return ret;
1737 }
1738
1739 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1740                                  struct hclge_pkt_buf_alloc *buf_alloc)
1741 {
1742         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1743
1744         if (ret)
1745                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1746
1747         return ret;
1748 }
1749
1750 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1751 {
1752         unsigned int i;
1753         u32 cnt = 0;
1754
1755         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1756                 if (hdev->hw_tc_map & BIT(i))
1757                         cnt++;
1758         return cnt;
1759 }
1760
1761 /* Get the number of PFC-enabled TCs that have a private buffer */
1762 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1763                                   struct hclge_pkt_buf_alloc *buf_alloc)
1764 {
1765         struct hclge_priv_buf *priv;
1766         unsigned int i;
1767         int cnt = 0;
1768
1769         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1770                 priv = &buf_alloc->priv_buf[i];
1771                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1772                     priv->enable)
1773                         cnt++;
1774         }
1775
1776         return cnt;
1777 }
1778
1779 /* Get the number of PFC-disabled TCs that have a private buffer */
1780 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1781                                      struct hclge_pkt_buf_alloc *buf_alloc)
1782 {
1783         struct hclge_priv_buf *priv;
1784         unsigned int i;
1785         int cnt = 0;
1786
1787         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1788                 priv = &buf_alloc->priv_buf[i];
1789                 if (hdev->hw_tc_map & BIT(i) &&
1790                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1791                     priv->enable)
1792                         cnt++;
1793         }
1794
1795         return cnt;
1796 }
1797
1798 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1799 {
1800         struct hclge_priv_buf *priv;
1801         u32 rx_priv = 0;
1802         int i;
1803
1804         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1805                 priv = &buf_alloc->priv_buf[i];
1806                 if (priv->enable)
1807                         rx_priv += priv->buf_size;
1808         }
1809         return rx_priv;
1810 }
1811
1812 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1813 {
1814         u32 i, total_tx_size = 0;
1815
1816         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1817                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1818
1819         return total_tx_size;
1820 }
1821
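/* Check whether rx_all leaves enough room for a shared buffer once the
 * private buffers are taken out; if so, size the shared buffer and fill
 * in its self and per-TC high/low waterlines.
 */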
1822 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1823                                struct hclge_pkt_buf_alloc *buf_alloc,
1824                                u32 rx_all)
1825 {
1826         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1827         u32 tc_num = hclge_get_tc_num(hdev);
1828         u32 shared_buf, aligned_mps;
1829         u32 rx_priv;
1830         int i;
1831
1832         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1833
1834         if (hnae3_dev_dcb_supported(hdev))
1835                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1836                                         hdev->dv_buf_size;
1837         else
1838                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1839                                         + hdev->dv_buf_size;
1840
1841         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1842         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1843                              HCLGE_BUF_SIZE_UNIT);
1844
1845         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1846         if (rx_all < rx_priv + shared_std)
1847                 return false;
1848
1849         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1850         buf_alloc->s_buf.buf_size = shared_buf;
1851         if (hnae3_dev_dcb_supported(hdev)) {
1852                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1853                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1854                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1855                                   HCLGE_BUF_SIZE_UNIT);
1856         } else {
1857                 buf_alloc->s_buf.self.high = aligned_mps +
1858                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1859                 buf_alloc->s_buf.self.low = aligned_mps;
1860         }
1861
1862         if (hnae3_dev_dcb_supported(hdev)) {
1863                 hi_thrd = shared_buf - hdev->dv_buf_size;
1864
1865                 if (tc_num <= NEED_RESERVE_TC_NUM)
1866                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1867                                         / BUF_MAX_PERCENT;
1868
1869                 if (tc_num)
1870                         hi_thrd = hi_thrd / tc_num;
1871
1872                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1873                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1874                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1875         } else {
1876                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1877                 lo_thrd = aligned_mps;
1878         }
1879
1880         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1881                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1882                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1883         }
1884
1885         return true;
1886 }
1887
1888 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1889                                 struct hclge_pkt_buf_alloc *buf_alloc)
1890 {
1891         u32 i, total_size;
1892
1893         total_size = hdev->pkt_buf_size;
1894
1895         /* alloc tx buffer for all enabled TCs */
1896         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1897                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1898
1899                 if (hdev->hw_tc_map & BIT(i)) {
1900                         if (total_size < hdev->tx_buf_size)
1901                                 return -ENOMEM;
1902
1903                         priv->tx_buf_size = hdev->tx_buf_size;
1904                 } else {
1905                         priv->tx_buf_size = 0;
1906                 }
1907
1908                 total_size -= priv->tx_buf_size;
1909         }
1910
1911         return 0;
1912 }
1913
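/* Compute the private rx buffer and waterlines for every enabled TC,
 * using larger (@max true) or smaller (@max false) watermarks, then
 * verify that the leftover space still fits a valid shared buffer.
 */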
1914 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1915                                   struct hclge_pkt_buf_alloc *buf_alloc)
1916 {
1917         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1918         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1919         unsigned int i;
1920
1921         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1922                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1923
1924                 priv->enable = 0;
1925                 priv->wl.low = 0;
1926                 priv->wl.high = 0;
1927                 priv->buf_size = 0;
1928
1929                 if (!(hdev->hw_tc_map & BIT(i)))
1930                         continue;
1931
1932                 priv->enable = 1;
1933
1934                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1935                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1936                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1937                                                 HCLGE_BUF_SIZE_UNIT);
1938                 } else {
1939                         priv->wl.low = 0;
1940                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1941                                         aligned_mps;
1942                 }
1943
1944                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1945         }
1946
1947         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1948 }
1949
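/* Free the private buffers of PFC-disabled TCs one by one until the
 * remaining rx buffer is large enough for the shared buffer.
 */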
1950 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1951                                           struct hclge_pkt_buf_alloc *buf_alloc)
1952 {
1953         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1954         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1955         int i;
1956
1957         /* clear the highest-numbered TC first */
1958         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1959                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1960                 unsigned int mask = BIT((unsigned int)i);
1961
1962                 if (hdev->hw_tc_map & mask &&
1963                     !(hdev->tm_info.hw_pfc_map & mask)) {
1964                         /* Clear this non-PFC TC's private buffer */
1965                         priv->wl.low = 0;
1966                         priv->wl.high = 0;
1967                         priv->buf_size = 0;
1968                         priv->enable = 0;
1969                         no_pfc_priv_num--;
1970                 }
1971
1972                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1973                     no_pfc_priv_num == 0)
1974                         break;
1975         }
1976
1977         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1978 }
1979
1980 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1981                                         struct hclge_pkt_buf_alloc *buf_alloc)
1982 {
1983         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1984         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1985         int i;
1986
1987         /* clear the highest-numbered TC first */
1988         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1989                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1990                 unsigned int mask = BIT((unsigned int)i);
1991
1992                 if (hdev->hw_tc_map & mask &&
1993                     hdev->tm_info.hw_pfc_map & mask) {
1994                         /* Reduce the number of PFC TCs with a private buffer */
1995                         priv->wl.low = 0;
1996                         priv->enable = 0;
1997                         priv->wl.high = 0;
1998                         priv->buf_size = 0;
1999                         pfc_priv_num--;
2000                 }
2001
2002                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2003                     pfc_priv_num == 0)
2004                         break;
2005         }
2006
2007         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2008 }
2009
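/* Try to split the whole rx buffer evenly into private buffers for the
 * enabled TCs, leaving no shared buffer; fails when the per-TC share
 * would drop below the minimum private buffer size.
 */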
2010 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2011                                        struct hclge_pkt_buf_alloc *buf_alloc)
2012 {
2013 #define COMPENSATE_BUFFER       0x3C00
2014 #define COMPENSATE_HALF_MPS_NUM 5
2015 #define PRIV_WL_GAP             0x1800
2016
2017         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2018         u32 tc_num = hclge_get_tc_num(hdev);
2019         u32 half_mps = hdev->mps >> 1;
2020         u32 min_rx_priv;
2021         unsigned int i;
2022
2023         if (tc_num)
2024                 rx_priv = rx_priv / tc_num;
2025
2026         if (tc_num <= NEED_RESERVE_TC_NUM)
2027                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2028
2029         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2030                         COMPENSATE_HALF_MPS_NUM * half_mps;
2031         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2032         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2033
2034         if (rx_priv < min_rx_priv)
2035                 return false;
2036
2037         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2038                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2039
2040                 priv->enable = 0;
2041                 priv->wl.low = 0;
2042                 priv->wl.high = 0;
2043                 priv->buf_size = 0;
2044
2045                 if (!(hdev->hw_tc_map & BIT(i)))
2046                         continue;
2047
2048                 priv->enable = 1;
2049                 priv->buf_size = rx_priv;
2050                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2051                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2052         }
2053
2054         buf_alloc->s_buf.buf_size = 0;
2055
2056         return true;
2057 }
2058
2059 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2060  * @hdev: pointer to struct hclge_dev
2061  * @buf_alloc: pointer to buffer calculation data
2062  * @return: 0: calculation successful, negative: fail
2063  */
2064 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2065                                 struct hclge_pkt_buf_alloc *buf_alloc)
2066 {
2067         /* When DCB is not supported, rx private buffer is not allocated. */
2068         if (!hnae3_dev_dcb_supported(hdev)) {
2069                 u32 rx_all = hdev->pkt_buf_size;
2070
2071                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2072                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2073                         return -ENOMEM;
2074
2075                 return 0;
2076         }
2077
2078         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2079                 return 0;
2080
2081         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2082                 return 0;
2083
2084         /* try to decrease the buffer size */
2085         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2086                 return 0;
2087
2088         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2089                 return 0;
2090
2091         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2092                 return 0;
2093
2094         return -ENOMEM;
2095 }
2096
2097 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2098                                    struct hclge_pkt_buf_alloc *buf_alloc)
2099 {
2100         struct hclge_rx_priv_buff_cmd *req;
2101         struct hclge_desc desc;
2102         int ret;
2103         int i;
2104
2105         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2106         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2107
2108         /* Alloc private buffer TCs */
2109         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2110                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2111
2112                 req->buf_num[i] =
2113                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2114                 req->buf_num[i] |=
2115                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2116         }
2117
2118         req->shared_buf =
2119                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2120                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2121
2122         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2123         if (ret)
2124                 dev_err(&hdev->pdev->dev,
2125                         "rx private buffer alloc cmd failed %d\n", ret);
2126
2127         return ret;
2128 }
2129
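/* Program the per-TC private buffer waterlines: the TCs are split over
 * two command descriptors chained via the NEXT flag.
 */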
2130 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2131                                    struct hclge_pkt_buf_alloc *buf_alloc)
2132 {
2133         struct hclge_rx_priv_wl_buf *req;
2134         struct hclge_priv_buf *priv;
2135         struct hclge_desc desc[2];
2136         int i, j;
2137         int ret;
2138
2139         for (i = 0; i < 2; i++) {
2140                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2141                                            false);
2142                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2143
2144                 /* The first descriptor sets the NEXT bit to 1 */
2145                 if (i == 0)
2146                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2147                 else
2148                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2149
2150                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2151                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2152
2153                         priv = &buf_alloc->priv_buf[idx];
2154                         req->tc_wl[j].high =
2155                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2156                         req->tc_wl[j].high |=
2157                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2158                         req->tc_wl[j].low =
2159                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2160                         req->tc_wl[j].low |=
2161                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2162                 }
2163         }
2164
2165         /* Send the 2 descriptors at one time */
2166         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2167         if (ret)
2168                 dev_err(&hdev->pdev->dev,
2169                         "rx private waterline config cmd failed %d\n",
2170                         ret);
2171         return ret;
2172 }
2173
2174 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2175                                     struct hclge_pkt_buf_alloc *buf_alloc)
2176 {
2177         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2178         struct hclge_rx_com_thrd *req;
2179         struct hclge_desc desc[2];
2180         struct hclge_tc_thrd *tc;
2181         int i, j;
2182         int ret;
2183
2184         for (i = 0; i < 2; i++) {
2185                 hclge_cmd_setup_basic_desc(&desc[i],
2186                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2187                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2188
2189                 /* The first descriptor sets the NEXT bit to 1 */
2190                 if (i == 0)
2191                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2192                 else
2193                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2194
2195                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2196                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2197
2198                         req->com_thrd[j].high =
2199                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2200                         req->com_thrd[j].high |=
2201                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2202                         req->com_thrd[j].low =
2203                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2204                         req->com_thrd[j].low |=
2205                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2206                 }
2207         }
2208
2209         /* Send 2 descriptors at one time */
2210         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2211         if (ret)
2212                 dev_err(&hdev->pdev->dev,
2213                         "common threshold config cmd failed %d\n", ret);
2214         return ret;
2215 }
2216
2217 static int hclge_common_wl_config(struct hclge_dev *hdev,
2218                                   struct hclge_pkt_buf_alloc *buf_alloc)
2219 {
2220         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2221         struct hclge_rx_com_wl *req;
2222         struct hclge_desc desc;
2223         int ret;
2224
2225         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2226
2227         req = (struct hclge_rx_com_wl *)desc.data;
2228         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2229         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2230
2231         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2232         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2233
2234         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2235         if (ret)
2236                 dev_err(&hdev->pdev->dev,
2237                         "common waterline config cmd failed %d\n", ret);
2238
2239         return ret;
2240 }
2241
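/* hclge_buffer_alloc: run the full tx/rx packet buffer setup: calculate
 * and allocate the tx buffers, calculate and allocate the rx private
 * buffers, then (on DCB-capable devices) program the private waterlines
 * and common thresholds before the common waterline.
 */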
2242 int hclge_buffer_alloc(struct hclge_dev *hdev)
2243 {
2244         struct hclge_pkt_buf_alloc *pkt_buf;
2245         int ret;
2246
2247         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2248         if (!pkt_buf)
2249                 return -ENOMEM;
2250
2251         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2252         if (ret) {
2253                 dev_err(&hdev->pdev->dev,
2254                         "could not calc tx buffer size for all TCs %d\n", ret);
2255                 goto out;
2256         }
2257
2258         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2259         if (ret) {
2260                 dev_err(&hdev->pdev->dev,
2261                         "could not alloc tx buffers %d\n", ret);
2262                 goto out;
2263         }
2264
2265         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2266         if (ret) {
2267                 dev_err(&hdev->pdev->dev,
2268                         "could not calc rx priv buffer size for all TCs %d\n",
2269                         ret);
2270                 goto out;
2271         }
2272
2273         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2274         if (ret) {
2275                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2276                         ret);
2277                 goto out;
2278         }
2279
2280         if (hnae3_dev_dcb_supported(hdev)) {
2281                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2282                 if (ret) {
2283                         dev_err(&hdev->pdev->dev,
2284                                 "could not configure rx private waterline %d\n",
2285                                 ret);
2286                         goto out;
2287                 }
2288
2289                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2290                 if (ret) {
2291                         dev_err(&hdev->pdev->dev,
2292                                 "could not configure common threshold %d\n",
2293                                 ret);
2294                         goto out;
2295                 }
2296         }
2297
2298         ret = hclge_common_wl_config(hdev, pkt_buf);
2299         if (ret)
2300                 dev_err(&hdev->pdev->dev,
2301                         "could not configure common waterline %d\n", ret);
2302
2303 out:
2304         kfree(pkt_buf);
2305         return ret;
2306 }
2307
2308 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2309 {
2310         struct hnae3_handle *roce = &vport->roce;
2311         struct hnae3_handle *nic = &vport->nic;
2312
2313         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2314
2315         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2316             vport->back->num_msi_left == 0)
2317                 return -EINVAL;
2318
2319         roce->rinfo.base_vector = vport->back->roce_base_vector;
2320
2321         roce->rinfo.netdev = nic->kinfo.netdev;
2322         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2323
2324         roce->pdev = nic->pdev;
2325         roce->ae_algo = nic->ae_algo;
2326         roce->numa_node_mask = nic->numa_node_mask;
2327
2328         return 0;
2329 }
2330
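/* Allocate MSI/MSI-X vectors for the PF (at least HNAE3_MIN_VECTOR_NUM),
 * record how many were actually granted, and set up the vector status and
 * IRQ bookkeeping arrays; the RoCE vectors start at roce_base_msix_offset.
 */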
2331 static int hclge_init_msi(struct hclge_dev *hdev)
2332 {
2333         struct pci_dev *pdev = hdev->pdev;
2334         int vectors;
2335         int i;
2336
2337         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2338                                         hdev->num_msi,
2339                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2340         if (vectors < 0) {
2341                 dev_err(&pdev->dev,
2342                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2343                         vectors);
2344                 return vectors;
2345         }
2346         if (vectors < hdev->num_msi)
2347                 dev_warn(&hdev->pdev->dev,
2348                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2349                          hdev->num_msi, vectors);
2350
2351         hdev->num_msi = vectors;
2352         hdev->num_msi_left = vectors;
2353
2354         hdev->base_msi_vector = pdev->irq;
2355         hdev->roce_base_vector = hdev->base_msi_vector +
2356                                 hdev->roce_base_msix_offset;
2357
2358         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2359                                            sizeof(u16), GFP_KERNEL);
2360         if (!hdev->vector_status) {
2361                 pci_free_irq_vectors(pdev);
2362                 return -ENOMEM;
2363         }
2364
2365         for (i = 0; i < hdev->num_msi; i++)
2366                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2367
2368         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2369                                         sizeof(int), GFP_KERNEL);
2370         if (!hdev->vector_irq) {
2371                 pci_free_irq_vectors(pdev);
2372                 return -ENOMEM;
2373         }
2374
2375         return 0;
2376 }
2377
2378 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2379 {
2380         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2381                 duplex = HCLGE_MAC_FULL;
2382
2383         return duplex;
2384 }
2385
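/* Translate the MAC speed into the firmware encoding before sending the
 * config command: 1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 10M=6,
 * 100M=7 (see the switch below).
 */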
2386 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2387                                       u8 duplex)
2388 {
2389         struct hclge_config_mac_speed_dup_cmd *req;
2390         struct hclge_desc desc;
2391         int ret;
2392
2393         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2394
2395         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2396
2397         if (duplex)
2398                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2399
2400         switch (speed) {
2401         case HCLGE_MAC_SPEED_10M:
2402                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403                                 HCLGE_CFG_SPEED_S, 6);
2404                 break;
2405         case HCLGE_MAC_SPEED_100M:
2406                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407                                 HCLGE_CFG_SPEED_S, 7);
2408                 break;
2409         case HCLGE_MAC_SPEED_1G:
2410                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411                                 HCLGE_CFG_SPEED_S, 0);
2412                 break;
2413         case HCLGE_MAC_SPEED_10G:
2414                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415                                 HCLGE_CFG_SPEED_S, 1);
2416                 break;
2417         case HCLGE_MAC_SPEED_25G:
2418                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419                                 HCLGE_CFG_SPEED_S, 2);
2420                 break;
2421         case HCLGE_MAC_SPEED_40G:
2422                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423                                 HCLGE_CFG_SPEED_S, 3);
2424                 break;
2425         case HCLGE_MAC_SPEED_50G:
2426                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427                                 HCLGE_CFG_SPEED_S, 4);
2428                 break;
2429         case HCLGE_MAC_SPEED_100G:
2430                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2431                                 HCLGE_CFG_SPEED_S, 5);
2432                 break;
2433         default:
2434                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2435                 return -EINVAL;
2436         }
2437
2438         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2439                       1);
2440
2441         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2442         if (ret) {
2443                 dev_err(&hdev->pdev->dev,
2444                         "mac speed/duplex config cmd failed %d.\n", ret);
2445                 return ret;
2446         }
2447
2448         return 0;
2449 }
2450
2451 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2452 {
2453         int ret;
2454
2455         duplex = hclge_check_speed_dup(duplex, speed);
2456         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2457                 return 0;
2458
2459         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2460         if (ret)
2461                 return ret;
2462
2463         hdev->hw.mac.speed = speed;
2464         hdev->hw.mac.duplex = duplex;
2465
2466         return 0;
2467 }
2468
2469 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2470                                      u8 duplex)
2471 {
2472         struct hclge_vport *vport = hclge_get_vport(handle);
2473         struct hclge_dev *hdev = vport->back;
2474
2475         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2476 }
2477
2478 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2479 {
2480         struct hclge_config_auto_neg_cmd *req;
2481         struct hclge_desc desc;
2482         u32 flag = 0;
2483         int ret;
2484
2485         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2486
2487         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2488         if (enable)
2489                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2490         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2491
2492         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2493         if (ret)
2494                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2495                         ret);
2496
2497         return ret;
2498 }
2499
2500 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2501 {
2502         struct hclge_vport *vport = hclge_get_vport(handle);
2503         struct hclge_dev *hdev = vport->back;
2504
2505         if (!hdev->hw.mac.support_autoneg) {
2506                 if (enable) {
2507                         dev_err(&hdev->pdev->dev,
2508                                 "autoneg is not supported by current port\n");
2509                         return -EOPNOTSUPP;
2510                 } else {
2511                         return 0;
2512                 }
2513         }
2514
2515         return hclge_set_autoneg_en(hdev, enable);
2516 }
2517
2518 static int hclge_get_autoneg(struct hnae3_handle *handle)
2519 {
2520         struct hclge_vport *vport = hclge_get_vport(handle);
2521         struct hclge_dev *hdev = vport->back;
2522         struct phy_device *phydev = hdev->hw.mac.phydev;
2523
2524         if (phydev)
2525                 return phydev->autoneg;
2526
2527         return hdev->hw.mac.autoneg;
2528 }
2529
2530 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2531 {
2532         struct hclge_vport *vport = hclge_get_vport(handle);
2533         struct hclge_dev *hdev = vport->back;
2534         int ret;
2535
2536         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2537
2538         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2539         if (ret)
2540                 return ret;
2541         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2542 }
2543
2544 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2545 {
2546         struct hclge_vport *vport = hclge_get_vport(handle);
2547         struct hclge_dev *hdev = vport->back;
2548
2549         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2550                 return hclge_set_autoneg_en(hdev, !halt);
2551
2552         return 0;
2553 }
2554
2555 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2556 {
2557         struct hclge_config_fec_cmd *req;
2558         struct hclge_desc desc;
2559         int ret;
2560
2561         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2562
2563         req = (struct hclge_config_fec_cmd *)desc.data;
2564         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2565                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2566         if (fec_mode & BIT(HNAE3_FEC_RS))
2567                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2568                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2569         if (fec_mode & BIT(HNAE3_FEC_BASER))
2570                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2571                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2572
2573         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2574         if (ret)
2575                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2576
2577         return ret;
2578 }
2579
2580 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2581 {
2582         struct hclge_vport *vport = hclge_get_vport(handle);
2583         struct hclge_dev *hdev = vport->back;
2584         struct hclge_mac *mac = &hdev->hw.mac;
2585         int ret;
2586
2587         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2588                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2589                 return -EINVAL;
2590         }
2591
2592         ret = hclge_set_fec_hw(hdev, fec_mode);
2593         if (ret)
2594                 return ret;
2595
2596         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2597         return 0;
2598 }
2599
2600 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2601                           u8 *fec_mode)
2602 {
2603         struct hclge_vport *vport = hclge_get_vport(handle);
2604         struct hclge_dev *hdev = vport->back;
2605         struct hclge_mac *mac = &hdev->hw.mac;
2606
2607         if (fec_ability)
2608                 *fec_ability = mac->fec_ability;
2609         if (fec_mode)
2610                 *fec_mode = mac->fec_mode;
2611 }
2612
2613 static int hclge_mac_init(struct hclge_dev *hdev)
2614 {
2615         struct hclge_mac *mac = &hdev->hw.mac;
2616         int ret;
2617
2618         hdev->support_sfp_query = true;
2619         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2620         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2621                                          hdev->hw.mac.duplex);
2622         if (ret) {
2623                 dev_err(&hdev->pdev->dev,
2624                         "Config mac speed dup fail ret=%d\n", ret);
2625                 return ret;
2626         }
2627
2628         if (hdev->hw.mac.support_autoneg) {
2629                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2630                 if (ret) {
2631                         dev_err(&hdev->pdev->dev,
2632                                 "Config mac autoneg fail ret=%d\n", ret);
2633                         return ret;
2634                 }
2635         }
2636
2637         mac->link = 0;
2638
2639         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2640                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2641                 if (ret) {
2642                         dev_err(&hdev->pdev->dev,
2643                                 "Fec mode init fail, ret = %d\n", ret);
2644                         return ret;
2645                 }
2646         }
2647
2648         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2649         if (ret) {
2650                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2651                 return ret;
2652         }
2653
2654         ret = hclge_set_default_loopback(hdev);
2655         if (ret)
2656                 return ret;
2657
2658         ret = hclge_buffer_alloc(hdev);
2659         if (ret)
2660                 dev_err(&hdev->pdev->dev,
2661                         "allocate buffer fail, ret=%d\n", ret);
2662
2663         return ret;
2664 }
2665
2666 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2667 {
2668         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2669             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2670                 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2671                               &hdev->mbx_service_task);
2672 }
2673
2674 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2675 {
2676         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2677             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2678                 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2679                               &hdev->rst_service_task);
2680 }
2681
2682 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2683 {
2684         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2685             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2686             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2687                 hdev->hw_stats.stats_timer++;
2688                 hdev->fd_arfs_expire_timer++;
2689                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2690                                     system_wq, &hdev->service_task,
2691                                     delay_time);
2692         }
2693 }
2694
2695 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2696 {
2697         struct hclge_link_status_cmd *req;
2698         struct hclge_desc desc;
2699         int link_status;
2700         int ret;
2701
2702         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2703         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2704         if (ret) {
2705                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2706                         ret);
2707                 return ret;
2708         }
2709
2710         req = (struct hclge_link_status_cmd *)desc.data;
2711         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2712
2713         return !!link_status;
2714 }
2715
2716 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2717 {
2718         unsigned int mac_state;
2719         int link_stat;
2720
2721         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2722                 return 0;
2723
2724         mac_state = hclge_get_mac_link_status(hdev);
2725
2726         if (hdev->hw.mac.phydev) {
2727                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2728                         link_stat = mac_state &
2729                                 hdev->hw.mac.phydev->link;
2730                 else
2731                         link_stat = 0;
2732
2733         } else {
2734                 link_stat = mac_state;
2735         }
2736
2737         return !!link_stat;
2738 }
2739
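/* Propagate a MAC/PHY link state change to the NIC and RoCE clients of
 * every vport and update the cached link state.
 */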
2740 static void hclge_update_link_status(struct hclge_dev *hdev)
2741 {
2742         struct hnae3_client *rclient = hdev->roce_client;
2743         struct hnae3_client *client = hdev->nic_client;
2744         struct hnae3_handle *rhandle;
2745         struct hnae3_handle *handle;
2746         int state;
2747         int i;
2748
2749         if (!client)
2750                 return;
2751         state = hclge_get_mac_phy_link(hdev);
2752         if (state != hdev->hw.mac.link) {
2753                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2754                         handle = &hdev->vport[i].nic;
2755                         client->ops->link_status_change(handle, state);
2756                         hclge_config_mac_tnl_int(hdev, state);
2757                         rhandle = &hdev->vport[i].roce;
2758                         if (rclient && rclient->ops->link_status_change)
2759                                 rclient->ops->link_status_change(rhandle,
2760                                                                  state);
2761                 }
2762                 hdev->hw.mac.link = state;
2763         }
2764 }
2765
2766 static void hclge_update_port_capability(struct hclge_mac *mac)
2767 {
2768         /* update fec ability by speed */
2769         hclge_convert_setting_fec(mac);
2770
2771         /* firmware cannot identify the backplane type; the media type
2772          * read from the configuration can help deal with it
2773          */
2774         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2775             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2776                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2777         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2778                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2779
2780         if (mac->support_autoneg) {
2781                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2782                 linkmode_copy(mac->advertising, mac->supported);
2783         } else {
2784                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2785                                    mac->supported);
2786                 linkmode_zero(mac->advertising);
2787         }
2788 }
2789
2790 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2791 {
2792         struct hclge_sfp_info_cmd *resp;
2793         struct hclge_desc desc;
2794         int ret;
2795
2796         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2797         resp = (struct hclge_sfp_info_cmd *)desc.data;
2798         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2799         if (ret == -EOPNOTSUPP) {
2800                 dev_warn(&hdev->pdev->dev,
2801                          "IMP does not support getting SFP speed %d\n", ret);
2802                 return ret;
2803         } else if (ret) {
2804                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2805                 return ret;
2806         }
2807
2808         *speed = le32_to_cpu(resp->speed);
2809
2810         return 0;
2811 }
2812
2813 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2814 {
2815         struct hclge_sfp_info_cmd *resp;
2816         struct hclge_desc desc;
2817         int ret;
2818
2819         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2820         resp = (struct hclge_sfp_info_cmd *)desc.data;
2821
2822         resp->query_type = QUERY_ACTIVE_SPEED;
2823
2824         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2825         if (ret == -EOPNOTSUPP) {
2826                 dev_warn(&hdev->pdev->dev,
2827                          "IMP does not support getting SFP info %d\n", ret);
2828                 return ret;
2829         } else if (ret) {
2830                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2831                 return ret;
2832         }
2833
2834         mac->speed = le32_to_cpu(resp->speed);
2835         /* if resp->speed_ability is 0, the firmware is an old version
2836          * that does not report these fields, so do not update them
2837          */
2838         if (resp->speed_ability) {
2839                 mac->module_type = le32_to_cpu(resp->module_type);
2840                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2841                 mac->autoneg = resp->autoneg;
2842                 mac->support_autoneg = resp->autoneg_ability;
2843                 mac->speed_type = QUERY_ACTIVE_SPEED;
2844                 if (!resp->active_fec)
2845                         mac->fec_mode = 0;
2846                 else
2847                         mac->fec_mode = BIT(resp->active_fec);
2848         } else {
2849                 mac->speed_type = QUERY_SFP_SPEED;
2850         }
2851
2852         return 0;
2853 }
2854
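/* hclge_update_port_info - refresh speed and capability information for
 * non-copper ports. Devices of revision 0x21 and later query the full SFP
 * info, older ones only the SFP speed; if firmware reports -EOPNOTSUPP,
 * further SFP queries are disabled via hdev->support_sfp_query.
 */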
2855 static int hclge_update_port_info(struct hclge_dev *hdev)
2856 {
2857         struct hclge_mac *mac = &hdev->hw.mac;
2858         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2859         int ret;
2860
2861         /* get the port info from SFP cmd if not copper port */
2862         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2863                 return 0;
2864
2865         /* if IMP does not support getting SFP/qSFP info, return directly */
2866         if (!hdev->support_sfp_query)
2867                 return 0;
2868
2869         if (hdev->pdev->revision >= 0x21)
2870                 ret = hclge_get_sfp_info(hdev, mac);
2871         else
2872                 ret = hclge_get_sfp_speed(hdev, &speed);
2873
2874         if (ret == -EOPNOTSUPP) {
2875                 hdev->support_sfp_query = false;
2876                 return ret;
2877         } else if (ret) {
2878                 return ret;
2879         }
2880
2881         if (hdev->pdev->revision >= 0x21) {
2882                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2883                         hclge_update_port_capability(mac);
2884                         return 0;
2885                 }
2886                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2887                                                HCLGE_MAC_FULL);
2888         } else {
2889                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2890                         return 0; /* do nothing if no SFP */
2891
2892                 /* must config full duplex for SFP */
2893                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2894         }
2895 }
2896
2897 static int hclge_get_status(struct hnae3_handle *handle)
2898 {
2899         struct hclge_vport *vport = hclge_get_vport(handle);
2900         struct hclge_dev *hdev = vport->back;
2901
2902         hclge_update_link_status(hdev);
2903
2904         return hdev->hw.mac.link;
2905 }
2906
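/* hclge_get_vf_vport - map a zero-based VF index to its vport entry. Since
 * vport 0 belongs to the PF, VF n lives at vport[n + HCLGE_VF_VPORT_START_NUM].
 * Returns NULL if SR-IOV is disabled or the index is out of range.
 */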
2907 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2908 {
2909         if (pci_num_vf(hdev->pdev) == 0) {
2910                 dev_err(&hdev->pdev->dev,
2911                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2912                 return NULL;
2913         }
2914
2915         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2916                 dev_err(&hdev->pdev->dev,
2917                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
2918                         vf, pci_num_vf(hdev->pdev));
2919                 return NULL;
2920         }
2921
2922         /* VFs start from 1 in vport; vport 0 belongs to the PF */
2923         vf += HCLGE_VF_VPORT_START_NUM;
2924         return &hdev->vport[vf];
2925 }
2926
2927 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2928                                struct ifla_vf_info *ivf)
2929 {
2930         struct hclge_vport *vport = hclge_get_vport(handle);
2931         struct hclge_dev *hdev = vport->back;
2932
2933         vport = hclge_get_vf_vport(hdev, vf);
2934         if (!vport)
2935                 return -EINVAL;
2936
2937         ivf->vf = vf;
2938         ivf->linkstate = vport->vf_info.link_state;
2939         ivf->spoofchk = vport->vf_info.spoofchk;
2940         ivf->trusted = vport->vf_info.trusted;
2941         ivf->min_tx_rate = 0;
2942         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2943         ether_addr_copy(ivf->mac, vport->vf_info.mac);
2944
2945         return 0;
2946 }
2947
2948 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2949                                    int link_state)
2950 {
2951         struct hclge_vport *vport = hclge_get_vport(handle);
2952         struct hclge_dev *hdev = vport->back;
2953
2954         vport = hclge_get_vf_vport(hdev, vf);
2955         if (!vport)
2956                 return -EINVAL;
2957
2958         vport->vf_info.link_state = link_state;
2959
2960         return 0;
2961 }
2962
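/* hclge_check_event_cause - read the vector0 interrupt source registers and
 * classify the pending event. Reset events have the highest priority,
 * followed by MSI-X (hardware error) events and then mailbox (CMDQ RX)
 * events; *clearval is set to the source bits the caller should clear.
 */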
2963 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2964 {
2965         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2966
2967         /* fetch the events from their corresponding regs */
2968         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2969         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2970         msix_src_reg = hclge_read_dev(&hdev->hw,
2971                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2972
2973         /* Assumption: if reset and mailbox events are reported together,
2974          * then we will only process the reset event in this pass and will
2975          * defer the processing of the mailbox events. Since we would not
2976          * have cleared the RX CMDQ event this time, we will receive another
2977          * interrupt from H/W just for the mailbox.
2978          *
2979          * check for vector0 reset event sources
2980          */
2981         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2982                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2983                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2984                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2985                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2986                 hdev->rst_stats.imp_rst_cnt++;
2987                 return HCLGE_VECTOR0_EVENT_RST;
2988         }
2989
2990         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2991                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2992                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2993                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2994                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2995                 hdev->rst_stats.global_rst_cnt++;
2996                 return HCLGE_VECTOR0_EVENT_RST;
2997         }
2998
2999         /* check for vector0 msix event source */
3000         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3001                 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
3002                          msix_src_reg);
3003                 *clearval = msix_src_reg;
3004                 return HCLGE_VECTOR0_EVENT_ERR;
3005         }
3006
3007         /* check for vector0 mailbox(=CMDQ RX) event source */
3008         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3009                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3010                 *clearval = cmdq_src_reg;
3011                 return HCLGE_VECTOR0_EVENT_MBX;
3012         }
3013
3014         /* print other vector0 event source */
3015         dev_info(&hdev->pdev->dev,
3016                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3017                  cmdq_src_reg, msix_src_reg);
3018         *clearval = msix_src_reg;
3019
3020         return HCLGE_VECTOR0_EVENT_OTHER;
3021 }
3022
3023 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3024                                     u32 regclr)
3025 {
3026         switch (event_type) {
3027         case HCLGE_VECTOR0_EVENT_RST:
3028                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3029                 break;
3030         case HCLGE_VECTOR0_EVENT_MBX:
3031                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3032                 break;
3033         default:
3034                 break;
3035         }
3036 }
3037
3038 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3039 {
3040         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3041                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3042                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3043                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3044         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3045 }
3046
3047 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3048 {
3049         writel(enable ? 1 : 0, vector->addr);
3050 }
3051
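/* hclge_misc_irq_handle - ISR for the shared vector0 interrupt. The vector
 * is masked while the event is classified and dispatched to the reset or
 * mailbox service task, and is unmasked again below unless a reset is in
 * progress.
 */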
3052 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3053 {
3054         struct hclge_dev *hdev = data;
3055         u32 clearval = 0;
3056         u32 event_cause;
3057
3058         hclge_enable_vector(&hdev->misc_vector, false);
3059         event_cause = hclge_check_event_cause(hdev, &clearval);
3060
3061         /* vector 0 interrupt is shared with reset and mailbox source events. */
3062         switch (event_cause) {
3063         case HCLGE_VECTOR0_EVENT_ERR:
3064                 /* we do not know what type of reset is required now. This could
3065                  * only be decided after we fetch the type of errors which
3066                  * caused this event. Therefore, we will do below for now:
3067                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3068                  *    have deferred the type of reset to be used.
3069                  * 2. Schedule the reset service task.
3070                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3071                  *    will fetch the correct type of reset. This would be done
3072                  *    by first decoding the types of errors.
3073                  */
3074                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3075                 /* fall through */
3076         case HCLGE_VECTOR0_EVENT_RST:
3077                 hclge_reset_task_schedule(hdev);
3078                 break;
3079         case HCLGE_VECTOR0_EVENT_MBX:
3080                 /* If we are here, then either:
3081                  * 1. we are not handling any mbx task and none is
3082                  *    scheduled,
3083                  *                        OR
3084                  * 2. we are handling a mbx task but nothing more is
3085                  *    scheduled.
3086                  * In both cases, we should schedule the mbx task, as there
3087                  * are more mbx messages reported by this interrupt.
3088                  */
3089                 hclge_mbx_task_schedule(hdev);
3090                 break;
3091         default:
3092                 dev_warn(&hdev->pdev->dev,
3093                          "received unknown or unhandled event of vector0\n");
3094                 break;
3095         }
3096
3097         hclge_clear_event_cause(hdev, event_cause, clearval);
3098
3099         /* Enable the interrupt if it was not caused by a reset. When
3100          * clearval equals 0, the interrupt status may have been cleared
3101          * by hardware before the driver read the status register; in
3102          * that case the vector0 interrupt should also be enabled.
3103          */
3104         if (!clearval || event_cause == HCLGE_VECTOR0_EVENT_MBX)
3105                 hclge_enable_vector(&hdev->misc_vector, true);
3108
3109         return IRQ_HANDLED;
3110 }
3111
3112 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3113 {
3114         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3115                 dev_warn(&hdev->pdev->dev,
3116                          "vector(vector_id %d) has been freed.\n", vector_id);
3117                 return;
3118         }
3119
3120         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3121         hdev->num_msi_left += 1;
3122         hdev->num_msi_used -= 1;
3123 }
3124
3125 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3126 {
3127         struct hclge_misc_vector *vector = &hdev->misc_vector;
3128
3129         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3130
3131         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3132         hdev->vector_status[0] = 0;
3133
3134         hdev->num_msi_left -= 1;
3135         hdev->num_msi_used += 1;
3136 }
3137
3138 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3139                                       const cpumask_t *mask)
3140 {
3141         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3142                                               affinity_notify);
3143
3144         cpumask_copy(&hdev->affinity_mask, mask);
3145 }
3146
3147 static void hclge_irq_affinity_release(struct kref *ref)
3148 {
3149 }
3150
3151 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3152 {
3153         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3154                               &hdev->affinity_mask);
3155
3156         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3157         hdev->affinity_notify.release = hclge_irq_affinity_release;
3158         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3159                                   &hdev->affinity_notify);
3160 }
3161
3162 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3163 {
3164         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3165         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3166 }
3167
3168 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3169 {
3170         int ret;
3171
3172         hclge_get_misc_vector(hdev);
3173
3174         /* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
3175         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3176                           0, "hclge_misc", hdev);
3177         if (ret) {
3178                 hclge_free_vector(hdev, 0);
3179                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3180                         hdev->misc_vector.vector_irq);
3181         }
3182
3183         return ret;
3184 }
3185
3186 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3187 {
3188         free_irq(hdev->misc_vector.vector_irq, hdev);
3189         hclge_free_vector(hdev, 0);
3190 }
3191
3192 int hclge_notify_client(struct hclge_dev *hdev,
3193                         enum hnae3_reset_notify_type type)
3194 {
3195         struct hnae3_client *client = hdev->nic_client;
3196         u16 i;
3197
3198         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3199                 return 0;
3200
3201         if (!client->ops->reset_notify)
3202                 return -EOPNOTSUPP;
3203
3204         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3205                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3206                 int ret;
3207
3208                 ret = client->ops->reset_notify(handle, type);
3209                 if (ret) {
3210                         dev_err(&hdev->pdev->dev,
3211                                 "notify nic client failed %d(%d)\n", type, ret);
3212                         return ret;
3213                 }
3214         }
3215
3216         return 0;
3217 }
3218
3219 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3220                                     enum hnae3_reset_notify_type type)
3221 {
3222         struct hnae3_client *client = hdev->roce_client;
3223         int ret = 0;
3224         u16 i;
3225
3226         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3227                 return 0;
3228
3229         if (!client->ops->reset_notify)
3230                 return -EOPNOTSUPP;
3231
3232         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3233                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3234
3235                 ret = client->ops->reset_notify(handle, type);
3236                 if (ret) {
3237                         dev_err(&hdev->pdev->dev,
3238                                 "notify roce client failed %d(%d)",
3239                                 type, ret);
3240                         return ret;
3241                 }
3242         }
3243
3244         return ret;
3245 }
3246
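/* hclge_reset_wait - poll until the hardware reports that the current reset
 * has completed. For FLR this waits on the HNAE3_FLR_DONE bit; for the other
 * reset types it polls the corresponding status register every
 * HCLGE_RESET_WAIT_MS milliseconds, up to HCLGE_RESET_WAIT_CNT times, and
 * returns -EBUSY on timeout.
 */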
3247 static int hclge_reset_wait(struct hclge_dev *hdev)
3248 {
3249 #define HCLGE_RESET_WAIT_MS     100
3250 #define HCLGE_RESET_WAIT_CNT    200
3251         u32 val, reg, reg_bit;
3252         u32 cnt = 0;
3253
3254         switch (hdev->reset_type) {
3255         case HNAE3_IMP_RESET:
3256                 reg = HCLGE_GLOBAL_RESET_REG;
3257                 reg_bit = HCLGE_IMP_RESET_BIT;
3258                 break;
3259         case HNAE3_GLOBAL_RESET:
3260                 reg = HCLGE_GLOBAL_RESET_REG;
3261                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3262                 break;
3263         case HNAE3_FUNC_RESET:
3264                 reg = HCLGE_FUN_RST_ING;
3265                 reg_bit = HCLGE_FUN_RST_ING_B;
3266                 break;
3267         case HNAE3_FLR_RESET:
3268                 break;
3269         default:
3270                 dev_err(&hdev->pdev->dev,
3271                         "Wait for unsupported reset type: %d\n",
3272                         hdev->reset_type);
3273                 return -EINVAL;
3274         }
3275
3276         if (hdev->reset_type == HNAE3_FLR_RESET) {
3277                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3278                        cnt++ < HCLGE_RESET_WAIT_CNT)
3279                         msleep(HCLGE_RESET_WAIT_MS);
3280
3281                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3282                         dev_err(&hdev->pdev->dev,
3283                                 "flr wait timeout: %u\n", cnt);
3284                         return -EBUSY;
3285                 }
3286
3287                 return 0;
3288         }
3289
3290         val = hclge_read_dev(&hdev->hw, reg);
3291         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3292                 msleep(HCLGE_RESET_WAIT_MS);
3293                 val = hclge_read_dev(&hdev->hw, reg);
3294                 cnt++;
3295         }
3296
3297         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3298                 dev_warn(&hdev->pdev->dev,
3299                          "Wait for reset timeout: %d\n", hdev->reset_type);
3300                 return -EBUSY;
3301         }
3302
3303         return 0;
3304 }
3305
3306 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3307 {
3308         struct hclge_vf_rst_cmd *req;
3309         struct hclge_desc desc;
3310
3311         req = (struct hclge_vf_rst_cmd *)desc.data;
3312         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3313         req->dest_vfid = func_id;
3314
3315         if (reset)
3316                 req->vf_rst = 0x1;
3317
3318         return hclge_cmd_send(&hdev->hw, &desc, 1);
3319 }
3320
3321 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3322 {
3323         int i;
3324
3325         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3326                 struct hclge_vport *vport = &hdev->vport[i];
3327                 int ret;
3328
3329                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3330                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3331                 if (ret) {
3332                         dev_err(&hdev->pdev->dev,
3333                                 "set vf(%u) rst failed %d!\n",
3334                                 vport->vport_id, ret);
3335                         return ret;
3336                 }
3337
3338                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3339                         continue;
3340
3341                 /* Inform VF to process the reset.
3342                  * hclge_inform_reset_assert_to_vf may fail if VF
3343                  * driver is not loaded.
3344                  */
3345                 ret = hclge_inform_reset_assert_to_vf(vport);
3346                 if (ret)
3347                         dev_warn(&hdev->pdev->dev,
3348                                  "inform reset to vf(%u) failed %d!\n",
3349                                  vport->vport_id, ret);
3350         }
3351
3352         return 0;
3353 }
3354
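/* hclge_func_reset_sync_vf - before asserting a PF reset, poll firmware
 * until it reports that all running VFs have stopped IO. Old firmware that
 * does not support the query (-EOPNOTSUPP) gets a fixed 100 ms grace period
 * instead; polling beyond HCLGE_PF_RESET_SYNC_CNT attempts fails with -ETIME.
 */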
3355 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3356 {
3357         struct hclge_pf_rst_sync_cmd *req;
3358         struct hclge_desc desc;
3359         int cnt = 0;
3360         int ret;
3361
3362         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3363         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3364
3365         do {
3366                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3367                 /* for compatibility with old firmware, wait
3368                  * 100 ms for the VF to stop IO
3369                  */
3370                 if (ret == -EOPNOTSUPP) {
3371                         msleep(HCLGE_RESET_SYNC_TIME);
3372                         return 0;
3373                 } else if (ret) {
3374                         dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3375                                 ret);
3376                         return ret;
3377                 } else if (req->all_vf_ready) {
3378                         return 0;
3379                 }
3380                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3381                 hclge_cmd_reuse_desc(&desc, true);
3382         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3383
3384         dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3385         return -ETIME;
3386 }
3387
3388 void hclge_report_hw_error(struct hclge_dev *hdev,
3389                            enum hnae3_hw_error_type type)
3390 {
3391         struct hnae3_client *client = hdev->nic_client;
3392         u16 i;
3393
3394         if (!client || !client->ops->process_hw_error ||
3395             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3396                 return;
3397
3398         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3399                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3400 }
3401
3402 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3403 {
3404         u32 reg_val;
3405
3406         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3407         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3408                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3409                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3410                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3411         }
3412
3413         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3414                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3415                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3416                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3417         }
3418 }
3419
3420 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3421 {
3422         struct hclge_desc desc;
3423         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3424         int ret;
3425
3426         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3427         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3428         req->fun_reset_vfid = func_id;
3429
3430         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3431         if (ret)
3432                 dev_err(&hdev->pdev->dev,
3433                         "send function reset cmd fail, status =%d\n", ret);
3434
3435         return ret;
3436 }
3437
3438 static void hclge_do_reset(struct hclge_dev *hdev)
3439 {
3440         struct hnae3_handle *handle = &hdev->vport[0].nic;
3441         struct pci_dev *pdev = hdev->pdev;
3442         u32 val;
3443
3444         if (hclge_get_hw_reset_stat(handle)) {
3445                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3446                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3447                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3448                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3449                 return;
3450         }
3451
3452         switch (hdev->reset_type) {
3453         case HNAE3_GLOBAL_RESET:
3454                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3455                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3456                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3457                 dev_info(&pdev->dev, "Global Reset requested\n");
3458                 break;
3459         case HNAE3_FUNC_RESET:
3460                 dev_info(&pdev->dev, "PF Reset requested\n");
3461                 /* schedule again to check later */
3462                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3463                 hclge_reset_task_schedule(hdev);
3464                 break;
3465         case HNAE3_FLR_RESET:
3466                 dev_info(&pdev->dev, "FLR requested\n");
3467                 /* schedule again to check later */
3468                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3469                 hclge_reset_task_schedule(hdev);
3470                 break;
3471         default:
3472                 dev_warn(&pdev->dev,
3473                          "Unsupported reset type: %d\n", hdev->reset_type);
3474                 break;
3475         }
3476 }
3477
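/* hclge_get_reset_level - resolve the pending reset requests in @addr to the
 * highest priority reset level (IMP > global > func > FLR), clearing the
 * chosen bit and any lower priority bits that the selected reset also covers.
 * Returns HNAE3_NONE_RESET if a higher level reset is already in progress.
 */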
3478 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3479                                                    unsigned long *addr)
3480 {
3481         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3482         struct hclge_dev *hdev = ae_dev->priv;
3483
3484         /* first, resolve any unknown reset type to the known type(s) */
3485         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3486                 /* we will intentionally ignore any errors from this function
3487                  * as we will end up in *some* reset request in any case
3488                  */
3489                 hclge_handle_hw_msix_error(hdev, addr);
3490                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3491                 /* We deferred the clearing of the error event which caused
3492                  * the interrupt, since it was not possible to do that in
3493                  * interrupt context (and this is the reason we introduced the
3494                  * new UNKNOWN reset type). Now that the errors have been
3495                  * handled and cleared in hardware, we can safely enable
3496                  * interrupts. This is an exception to the norm.
3497                  */
3498                 hclge_enable_vector(&hdev->misc_vector, true);
3499         }
3500
3501         /* return the highest priority reset level amongst all */
3502         if (test_bit(HNAE3_IMP_RESET, addr)) {
3503                 rst_level = HNAE3_IMP_RESET;
3504                 clear_bit(HNAE3_IMP_RESET, addr);
3505                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3506                 clear_bit(HNAE3_FUNC_RESET, addr);
3507         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3508                 rst_level = HNAE3_GLOBAL_RESET;
3509                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3510                 clear_bit(HNAE3_FUNC_RESET, addr);
3511         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3512                 rst_level = HNAE3_FUNC_RESET;
3513                 clear_bit(HNAE3_FUNC_RESET, addr);
3514         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3515                 rst_level = HNAE3_FLR_RESET;
3516                 clear_bit(HNAE3_FLR_RESET, addr);
3517         }
3518
3519         if (hdev->reset_type != HNAE3_NONE_RESET &&
3520             rst_level < hdev->reset_type)
3521                 return HNAE3_NONE_RESET;
3522
3523         return rst_level;
3524 }
3525
3526 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3527 {
3528         u32 clearval = 0;
3529
3530         switch (hdev->reset_type) {
3531         case HNAE3_IMP_RESET:
3532                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3533                 break;
3534         case HNAE3_GLOBAL_RESET:
3535                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3536                 break;
3537         default:
3538                 break;
3539         }
3540
3541         if (!clearval)
3542                 return;
3543
3544         /* For revision 0x20, the reset interrupt source
3545          * can only be cleared after the hardware reset is done
3546          */
3547         if (hdev->pdev->revision == 0x20)
3548                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3549                                 clearval);
3550
3551         hclge_enable_vector(&hdev->misc_vector, true);
3552 }
3553
3554 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3555 {
3556         int ret = 0;
3557
3558         switch (hdev->reset_type) {
3559         case HNAE3_FUNC_RESET:
3560                 /* fall through */
3561         case HNAE3_FLR_RESET:
3562                 ret = hclge_set_all_vf_rst(hdev, true);
3563                 break;
3564         default:
3565                 break;
3566         }
3567
3568         return ret;
3569 }
3570
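/* hclge_reset_handshake - set or clear the HCLGE_NIC_SW_RST_RDY bit in the
 * CSQ depth register to tell the hardware whether the driver has finished
 * its preparatory work and the reset may proceed.
 */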
3571 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3572 {
3573         u32 reg_val;
3574
3575         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3576         if (enable)
3577                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3578         else
3579                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3580
3581         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3582 }
3583
3584 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3585 {
3586         u32 reg_val;
3587         int ret = 0;
3588
3589         switch (hdev->reset_type) {
3590         case HNAE3_FUNC_RESET:
3591                 /* to confirm whether all running VFs are ready
3592                  * before requesting PF reset
3593                  */
3594                 ret = hclge_func_reset_sync_vf(hdev);
3595                 if (ret)
3596                         return ret;
3597
3598                 ret = hclge_func_reset_cmd(hdev, 0);
3599                 if (ret) {
3600                         dev_err(&hdev->pdev->dev,
3601                                 "asserting function reset fail %d!\n", ret);
3602                         return ret;
3603                 }
3604
3605                 /* After performing PF reset, it is not necessary to do the
3606                  * mailbox handling or send any command to firmware, because
3607                  * any mailbox handling or command to firmware is only valid
3608                  * after hclge_cmd_init is called.
3609                  */
3610                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3611                 hdev->rst_stats.pf_rst_cnt++;
3612                 break;
3613         case HNAE3_FLR_RESET:
3614                 /* to confirm whether all running VFs are ready
3615                  * before requesting PF reset
3616                  */
3617                 ret = hclge_func_reset_sync_vf(hdev);
3618                 if (ret)
3619                         return ret;
3620
3621                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3622                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3623                 hdev->rst_stats.flr_rst_cnt++;
3624                 break;
3625         case HNAE3_IMP_RESET:
3626                 hclge_handle_imp_error(hdev);
3627                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3628                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3629                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3630                 break;
3631         default:
3632                 break;
3633         }
3634
3635         /* inform hardware that preparatory work is done */
3636         msleep(HCLGE_RESET_SYNC_TIME);
3637         hclge_reset_handshake(hdev, true);
3638         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3639
3640         return ret;
3641 }
3642
3643 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3644 {
3645 #define MAX_RESET_FAIL_CNT 5
3646
3647         if (hdev->reset_pending) {
3648                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3649                          hdev->reset_pending);
3650                 return true;
3651         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3652                    HCLGE_RESET_INT_M) {
3653                 dev_info(&hdev->pdev->dev,
3654                          "reset failed because of a new reset interrupt\n");
3655                 hclge_clear_reset_cause(hdev);
3656                 return false;
3657         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3658                 hdev->rst_stats.reset_fail_cnt++;
3659                 set_bit(hdev->reset_type, &hdev->reset_pending);
3660                 dev_info(&hdev->pdev->dev,
3661                          "re-schedule reset task(%u)\n",
3662                          hdev->rst_stats.reset_fail_cnt);
3663                 return true;
3664         }
3665
3666         hclge_clear_reset_cause(hdev);
3667
3668         /* recover the handshake status when reset fails */
3669         hclge_reset_handshake(hdev, true);
3670
3671         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3672
3673         hclge_dbg_dump_rst_info(hdev);
3674
3675         return false;
3676 }
3677
3678 static int hclge_set_rst_done(struct hclge_dev *hdev)
3679 {
3680         struct hclge_pf_rst_done_cmd *req;
3681         struct hclge_desc desc;
3682         int ret;
3683
3684         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3685         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3686         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3687
3688         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3689         /* To be compatible with the old firmware, which does not support
3690          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3691          * return success
3692          */
3693         if (ret == -EOPNOTSUPP) {
3694                 dev_warn(&hdev->pdev->dev,
3695                          "current firmware does not support command(0x%x)!\n",
3696                          HCLGE_OPC_PF_RST_DONE);
3697                 return 0;
3698         } else if (ret) {
3699                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3700                         ret);
3701         }
3702
3703         return ret;
3704 }
3705
3706 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3707 {
3708         int ret = 0;
3709
3710         switch (hdev->reset_type) {
3711         case HNAE3_FUNC_RESET:
3712                 /* fall through */
3713         case HNAE3_FLR_RESET:
3714                 ret = hclge_set_all_vf_rst(hdev, false);
3715                 break;
3716         case HNAE3_GLOBAL_RESET:
3717                 /* fall through */
3718         case HNAE3_IMP_RESET:
3719                 ret = hclge_set_rst_done(hdev);
3720                 break;
3721         default:
3722                 break;
3723         }
3724
3725         /* clear the handshake status after re-initialization is done */
3726         hclge_reset_handshake(hdev, false);
3727
3728         return ret;
3729 }
3730
3731 static int hclge_reset_stack(struct hclge_dev *hdev)
3732 {
3733         int ret;
3734
3735         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3736         if (ret)
3737                 return ret;
3738
3739         ret = hclge_reset_ae_dev(hdev->ae_dev);
3740         if (ret)
3741                 return ret;
3742
3743         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3744         if (ret)
3745                 return ret;
3746
3747         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3748 }
3749
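/* hclge_reset - perform a full reset sequence: notify the RoCE and NIC
 * clients down, prepare and assert the reset, wait for hardware to finish,
 * re-initialize the stack via hclge_reset_stack(), and bring the clients
 * back up. On failure, hclge_reset_err_handle() decides whether the reset
 * task should be rescheduled.
 */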
3750 static void hclge_reset(struct hclge_dev *hdev)
3751 {
3752         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3753         enum hnae3_reset_type reset_level;
3754         int ret;
3755
3756         /* Initialize ae_dev reset status as well, in case the enet layer
3757          * wants to know if the device is undergoing reset
3758          */
3759         ae_dev->reset_type = hdev->reset_type;
3760         hdev->rst_stats.reset_cnt++;
3761         /* perform reset of the stack & ae device for a client */
3762         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3763         if (ret)
3764                 goto err_reset;
3765
3766         ret = hclge_reset_prepare_down(hdev);
3767         if (ret)
3768                 goto err_reset;
3769
3770         rtnl_lock();
3771         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3772         if (ret)
3773                 goto err_reset_lock;
3774
3775         rtnl_unlock();
3776
3777         ret = hclge_reset_prepare_wait(hdev);
3778         if (ret)
3779                 goto err_reset;
3780
3781         if (hclge_reset_wait(hdev))
3782                 goto err_reset;
3783
3784         hdev->rst_stats.hw_reset_done_cnt++;
3785
3786         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3787         if (ret)
3788                 goto err_reset;
3789
3790         rtnl_lock();
3791
3792         ret = hclge_reset_stack(hdev);
3793         if (ret)
3794                 goto err_reset_lock;
3795
3796         hclge_clear_reset_cause(hdev);
3797
3798         ret = hclge_reset_prepare_up(hdev);
3799         if (ret)
3800                 goto err_reset_lock;
3801
3802         rtnl_unlock();
3803
3804         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3805         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3806          * times
3807          */
3808         if (ret &&
3809             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3810                 goto err_reset;
3811
3812         rtnl_lock();
3813
3814         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3815         if (ret)
3816                 goto err_reset_lock;
3817
3818         rtnl_unlock();
3819
3820         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3821         if (ret)
3822                 goto err_reset;
3823
3824         hdev->last_reset_time = jiffies;
3825         hdev->rst_stats.reset_fail_cnt = 0;
3826         hdev->rst_stats.reset_done_cnt++;
3827         ae_dev->reset_type = HNAE3_NONE_RESET;
3828
3829         /* if default_reset_request has a higher level reset request,
3830          * it should be handled as soon as possible, since some errors
3831          * need this kind of reset to be fixed.
3832          */
3833         reset_level = hclge_get_reset_level(ae_dev,
3834                                             &hdev->default_reset_request);
3835         if (reset_level != HNAE3_NONE_RESET)
3836                 set_bit(reset_level, &hdev->reset_request);
3837
3838         return;
3839
3840 err_reset_lock:
3841         rtnl_unlock();
3842 err_reset:
3843         if (hclge_reset_err_handle(hdev))
3844                 hclge_reset_task_schedule(hdev);
3845 }
3846
3847 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3848 {
3849         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3850         struct hclge_dev *hdev = ae_dev->priv;
3851
3852         /* We might end up getting called broadly because of the 2 cases below:
3853          * 1. Recoverable error was conveyed through APEI and the only way
3854          *    to bring normalcy is to reset.
3855          * 2. A new reset request from the stack due to timeout
3856          *
3857          * For the first case, the error event might not have an ae handle
3858          * available. Check if this is a new reset request and we are not
3859          * here just because the last reset attempt did not succeed and the
3860          * watchdog hit us again. We will know this if the last reset request
3861          * did not occur very recently (watchdog timer = 5*HZ, let us check
3862          * after a sufficiently large time, say 4*5*HZ). In case of a new
3863          * request we reset the "reset level" to PF reset. If it is a repeat
3864          * of the most recent request then we throttle it: we will not allow
3865          * it again before 3*HZ has elapsed.
3866          */
3867         if (!handle)
3868                 handle = &hdev->vport[0].nic;
3869
3870         if (time_before(jiffies, (hdev->last_reset_time +
3871                                   HCLGE_RESET_INTERVAL))) {
3872                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3873                 return;
3874         } else if (hdev->default_reset_request) {
3875                 hdev->reset_level =
3876                         hclge_get_reset_level(ae_dev,
3877                                               &hdev->default_reset_request);
3878         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3879                 hdev->reset_level = HNAE3_FUNC_RESET;
3880         }
3881
3882         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3883                  hdev->reset_level);
3884
3885         /* request reset & schedule reset task */
3886         set_bit(hdev->reset_level, &hdev->reset_request);
3887         hclge_reset_task_schedule(hdev);
3888
3889         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3890                 hdev->reset_level++;
3891 }
3892
3893 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3894                                         enum hnae3_reset_type rst_type)
3895 {
3896         struct hclge_dev *hdev = ae_dev->priv;
3897
3898         set_bit(rst_type, &hdev->default_reset_request);
3899 }
3900
3901 static void hclge_reset_timer(struct timer_list *t)
3902 {
3903         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3904
3905         /* if default_reset_request has no value, it means that this reset
3906          * request has already been handled, so just return here
3907          */
3908         if (!hdev->default_reset_request)
3909                 return;
3910
3911         dev_info(&hdev->pdev->dev,
3912                  "triggering reset in reset timer\n");
3913         hclge_reset_event(hdev->pdev, NULL);
3914 }
3915
3916 static void hclge_reset_subtask(struct hclge_dev *hdev)
3917 {
3918         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3919
3920         /* check if there is any ongoing reset in the hardware. This status
3921          * can be checked from reset_pending. If there is, we need to wait
3922          * for hardware to complete the reset.
3923          *    a. If we are able to figure out in a reasonable time that the
3924          *       hardware has fully reset, we can proceed with the driver and
3925          *       client reset.
3926          *    b. else, we can come back later to check this status, so
3927          *       reschedule now.
3928          */
3929         hdev->last_reset_time = jiffies;
3930         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3931         if (hdev->reset_type != HNAE3_NONE_RESET)
3932                 hclge_reset(hdev);
3933
3934         /* check if we got any *new* reset requests to be honored */
3935         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3936         if (hdev->reset_type != HNAE3_NONE_RESET)
3937                 hclge_do_reset(hdev);
3938
3939         hdev->reset_type = HNAE3_NONE_RESET;
3940 }
3941
3942 static void hclge_reset_service_task(struct work_struct *work)
3943 {
3944         struct hclge_dev *hdev =
3945                 container_of(work, struct hclge_dev, rst_service_task);
3946
3947         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3948                 return;
3949
3950         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3951
3952         hclge_reset_subtask(hdev);
3953
3954         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3955 }
3956
3957 static void hclge_mailbox_service_task(struct work_struct *work)
3958 {
3959         struct hclge_dev *hdev =
3960                 container_of(work, struct hclge_dev, mbx_service_task);
3961
3962         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3963                 return;
3964
3965         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3966
3967         hclge_mbx_handler(hdev);
3968
3969         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3970 }
3971
3972 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3973 {
3974         int i;
3975
3976         /* start from vport 1, since the PF (vport 0) is always alive */
3977         for (i = 1; i < hdev->num_alloc_vport; i++) {
3978                 struct hclge_vport *vport = &hdev->vport[i];
3979
3980                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3981                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3982
3983                 /* If vf is not alive, set to default value */
3984                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3985                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3986         }
3987 }
3988
3989 static void hclge_service_task(struct work_struct *work)
3990 {
3991         struct hclge_dev *hdev =
3992                 container_of(work, struct hclge_dev, service_task.work);
3993
3994         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3995
3996         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3997                 hclge_update_stats_for_all(hdev);
3998                 hdev->hw_stats.stats_timer = 0;
3999         }
4000
4001         hclge_update_port_info(hdev);
4002         hclge_update_link_status(hdev);
4003         hclge_update_vport_alive(hdev);
4004         hclge_sync_vlan_filter(hdev);
4005
4006         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
4007                 hclge_rfs_filter_expire(hdev);
4008                 hdev->fd_arfs_expire_timer = 0;
4009         }
4010
4011         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
4012 }
4013
4014 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4015 {
4016         /* VF handle has no client */
4017         if (!handle->client)
4018                 return container_of(handle, struct hclge_vport, nic);
4019         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4020                 return container_of(handle, struct hclge_vport, roce);
4021         else
4022                 return container_of(handle, struct hclge_vport, nic);
4023 }
4024
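/* hclge_get_vector - allocate up to @vector_num MSI-X vectors for a vport by
 * scanning vector_status for free slots (slot 0 is reserved for the misc
 * vector) and computing each vector's doorbell address from its index and
 * the vport id. Returns the number of vectors actually allocated.
 */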
4025 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4026                             struct hnae3_vector_info *vector_info)
4027 {
4028         struct hclge_vport *vport = hclge_get_vport(handle);
4029         struct hnae3_vector_info *vector = vector_info;
4030         struct hclge_dev *hdev = vport->back;
4031         int alloc = 0;
4032         int i, j;
4033
4034         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4035         vector_num = min(hdev->num_msi_left, vector_num);
4036
4037         for (j = 0; j < vector_num; j++) {
4038                 for (i = 1; i < hdev->num_msi; i++) {
4039                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4040                                 vector->vector = pci_irq_vector(hdev->pdev, i);
4041                                 vector->io_addr = hdev->hw.io_base +
4042                                         HCLGE_VECTOR_REG_BASE +
4043                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4044                                         vport->vport_id *
4045                                         HCLGE_VECTOR_VF_OFFSET;
4046                                 hdev->vector_status[i] = vport->vport_id;
4047                                 hdev->vector_irq[i] = vector->vector;
4048
4049                                 vector++;
4050                                 alloc++;
4051
4052                                 break;
4053                         }
4054                 }
4055         }
4056         hdev->num_msi_left -= alloc;
4057         hdev->num_msi_used += alloc;
4058
4059         return alloc;
4060 }
4061
4062 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4063 {
4064         int i;
4065
4066         for (i = 0; i < hdev->num_msi; i++)
4067                 if (vector == hdev->vector_irq[i])
4068                         return i;
4069
4070         return -EINVAL;
4071 }
4072
4073 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4074 {
4075         struct hclge_vport *vport = hclge_get_vport(handle);
4076         struct hclge_dev *hdev = vport->back;
4077         int vector_id;
4078
4079         vector_id = hclge_get_vector_index(hdev, vector);
4080         if (vector_id < 0) {
4081                 dev_err(&hdev->pdev->dev,
4082                         "Get vector index fail. vector_id =%d\n", vector_id);
4083                 return vector_id;
4084         }
4085
4086         hclge_free_vector(hdev, vector_id);
4087
4088         return 0;
4089 }
4090
4091 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4092 {
4093         return HCLGE_RSS_KEY_SIZE;
4094 }
4095
4096 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4097 {
4098         return HCLGE_RSS_IND_TBL_SIZE;
4099 }
4100
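/* hclge_set_rss_algo_key - program the RSS hash algorithm and hash key. The
 * key is longer than a single descriptor can carry, so it is written in
 * chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command per chunk, with the
 * chunk index encoded at HCLGE_RSS_HASH_KEY_OFFSET_B of hash_config.
 */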
4101 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4102                                   const u8 hfunc, const u8 *key)
4103 {
4104         struct hclge_rss_config_cmd *req;
4105         unsigned int key_offset = 0;
4106         struct hclge_desc desc;
4107         int key_counts;
4108         int key_size;
4109         int ret;
4110
4111         key_counts = HCLGE_RSS_KEY_SIZE;
4112         req = (struct hclge_rss_config_cmd *)desc.data;
4113
4114         while (key_counts) {
4115                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4116                                            false);
4117
4118                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4119                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4120
4121                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4122                 memcpy(req->hash_key,
4123                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4124
4125                 key_counts -= key_size;
4126                 key_offset++;
4127                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4128                 if (ret) {
4129                         dev_err(&hdev->pdev->dev,
4130                                 "Configure RSS config fail, status = %d\n",
4131                                 ret);
4132                         return ret;
4133                 }
4134         }
4135         return 0;
4136 }
4137
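/* hclge_set_rss_indir_table - write the RSS indirection table to hardware,
 * HCLGE_RSS_CFG_TBL_SIZE entries per command descriptor.
 */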
4138 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4139 {
4140         struct hclge_rss_indirection_table_cmd *req;
4141         struct hclge_desc desc;
4142         int i, j;
4143         int ret;
4144
4145         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4146
4147         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4148                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
4149                                            false);
4150
4151                 req->start_table_index =
4152                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4153                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4154
4155                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4156                         req->rss_result[j] =
4157                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4158
4159                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4160                 if (ret) {
4161                         dev_err(&hdev->pdev->dev,
4162                                 "Configure rss indir table fail,status = %d\n",
4163                                 ret);
4164                         return ret;
4165                 }
4166         }
4167         return 0;
4168 }
4169
4170 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4171                                  u16 *tc_size, u16 *tc_offset)
4172 {
4173         struct hclge_rss_tc_mode_cmd *req;
4174         struct hclge_desc desc;
4175         int ret;
4176         int i;
4177
4178         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4179         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4180
4181         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4182                 u16 mode = 0;
4183
4184                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4185                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4186                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4187                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4188                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4189
4190                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4191         }
4192
4193         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4194         if (ret)
4195                 dev_err(&hdev->pdev->dev,
4196                         "Configure rss tc mode fail, status = %d\n", ret);
4197
4198         return ret;
4199 }
4200
4201 static void hclge_get_rss_type(struct hclge_vport *vport)
4202 {
4203         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4204             vport->rss_tuple_sets.ipv4_udp_en ||
4205             vport->rss_tuple_sets.ipv4_sctp_en ||
4206             vport->rss_tuple_sets.ipv6_tcp_en ||
4207             vport->rss_tuple_sets.ipv6_udp_en ||
4208             vport->rss_tuple_sets.ipv6_sctp_en)
4209                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4210         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4211                  vport->rss_tuple_sets.ipv6_fragment_en)
4212                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4213         else
4214                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4215 }
4216
4217 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4218 {
4219         struct hclge_rss_input_tuple_cmd *req;
4220         struct hclge_desc desc;
4221         int ret;
4222
4223         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4224
4225         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4226
4227         /* Get the tuple cfg from the PF */
4228         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4229         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4230         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4231         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4232         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4233         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4234         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4235         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4236         hclge_get_rss_type(&hdev->vport[0]);
4237         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4238         if (ret)
4239                 dev_err(&hdev->pdev->dev,
4240                         "Configure rss input fail, status = %d\n", ret);
4241         return ret;
4242 }
4243
4244 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4245                          u8 *key, u8 *hfunc)
4246 {
4247         struct hclge_vport *vport = hclge_get_vport(handle);
4248         int i;
4249
4250         /* Get hash algorithm */
4251         if (hfunc) {
4252                 switch (vport->rss_algo) {
4253                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4254                         *hfunc = ETH_RSS_HASH_TOP;
4255                         break;
4256                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4257                         *hfunc = ETH_RSS_HASH_XOR;
4258                         break;
4259                 default:
4260                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4261                         break;
4262                 }
4263         }
4264
4265         /* Get the RSS Key required by the user */
4266         if (key)
4267                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4268
4269         /* Get indirect table */
4270         if (indir)
4271                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4272                         indir[i] = vport->rss_indirection_tbl[i];
4273
4274         return 0;
4275 }
4276
4277 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4278                          const u8 *key, const u8 hfunc)
4279 {
4280         struct hclge_vport *vport = hclge_get_vport(handle);
4281         struct hclge_dev *hdev = vport->back;
4282         u8 hash_algo;
4283         int ret, i;
4284
4285         /* Set the RSS Hash Key if specified by the user */
4286         if (key) {
4287                 switch (hfunc) {
4288                 case ETH_RSS_HASH_TOP:
4289                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4290                         break;
4291                 case ETH_RSS_HASH_XOR:
4292                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4293                         break;
4294                 case ETH_RSS_HASH_NO_CHANGE:
4295                         hash_algo = vport->rss_algo;
4296                         break;
4297                 default:
4298                         return -EINVAL;
4299                 }
4300
4301                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4302                 if (ret)
4303                         return ret;
4304
4305                 /* Update the shadow RSS key with the user specified key */
4306                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4307                 vport->rss_algo = hash_algo;
4308         }
4309
4310         /* Update the shadow RSS table with user specified qids */
4311         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4312                 vport->rss_indirection_tbl[i] = indir[i];
4313
4314         /* Update the hardware */
4315         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4316 }
4317
4318 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4319 {
4320         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4321
4322         if (nfc->data & RXH_L4_B_2_3)
4323                 hash_sets |= HCLGE_D_PORT_BIT;
4324         else
4325                 hash_sets &= ~HCLGE_D_PORT_BIT;
4326
4327         if (nfc->data & RXH_IP_SRC)
4328                 hash_sets |= HCLGE_S_IP_BIT;
4329         else
4330                 hash_sets &= ~HCLGE_S_IP_BIT;
4331
4332         if (nfc->data & RXH_IP_DST)
4333                 hash_sets |= HCLGE_D_IP_BIT;
4334         else
4335                 hash_sets &= ~HCLGE_D_IP_BIT;
4336
4337         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4338                 hash_sets |= HCLGE_V_TAG_BIT;
4339
4340         return hash_sets;
4341 }
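
/* Illustration (standalone user-space sketch, not part of the driver):
 * hclge_get_rss_hash_bits() above translates ethtool RXH_* hash flags
 * into the hardware's per-flow-type tuple bitmap. The flag and bit
 * values below are assumed demo values.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_RXH_IP_SRC         0x1     /* hash on source IP */
#define DEMO_RXH_IP_DST         0x2     /* hash on destination IP */
#define DEMO_RXH_L4_SRC         0x4     /* hash on L4 source port */
#define DEMO_RXH_L4_DST         0x8     /* hash on L4 destination port */

#define DEMO_S_IP_BIT           0x1
#define DEMO_D_IP_BIT           0x2
#define DEMO_S_PORT_BIT         0x4
#define DEMO_D_PORT_BIT         0x8

static uint8_t demo_hash_bits(uint32_t data)
{
        uint8_t sets = 0;

        if (data & DEMO_RXH_IP_SRC)
                sets |= DEMO_S_IP_BIT;
        if (data & DEMO_RXH_IP_DST)
                sets |= DEMO_D_IP_BIT;
        if (data & DEMO_RXH_L4_SRC)
                sets |= DEMO_S_PORT_BIT;
        if (data & DEMO_RXH_L4_DST)
                sets |= DEMO_D_PORT_BIT;
        return sets;
}

int main(void)
{
        /* hash on src/dst IP plus both L4 ports -> all four bits set */
        printf("tuple bits = 0x%x\n",
               (unsigned)demo_hash_bits(DEMO_RXH_IP_SRC | DEMO_RXH_IP_DST |
                                        DEMO_RXH_L4_SRC | DEMO_RXH_L4_DST));
        return 0;
}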
4342
4343 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4344                                struct ethtool_rxnfc *nfc)
4345 {
4346         struct hclge_vport *vport = hclge_get_vport(handle);
4347         struct hclge_dev *hdev = vport->back;
4348         struct hclge_rss_input_tuple_cmd *req;
4349         struct hclge_desc desc;
4350         u8 tuple_sets;
4351         int ret;
4352
4353         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4354                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4355                 return -EINVAL;
4356
4357         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4358         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4359
4360         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4361         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4362         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4363         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4364         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4365         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4366         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4367         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4368
4369         tuple_sets = hclge_get_rss_hash_bits(nfc);
4370         switch (nfc->flow_type) {
4371         case TCP_V4_FLOW:
4372                 req->ipv4_tcp_en = tuple_sets;
4373                 break;
4374         case TCP_V6_FLOW:
4375                 req->ipv6_tcp_en = tuple_sets;
4376                 break;
4377         case UDP_V4_FLOW:
4378                 req->ipv4_udp_en = tuple_sets;
4379                 break;
4380         case UDP_V6_FLOW:
4381                 req->ipv6_udp_en = tuple_sets;
4382                 break;
4383         case SCTP_V4_FLOW:
4384                 req->ipv4_sctp_en = tuple_sets;
4385                 break;
4386         case SCTP_V6_FLOW:
4387                 if ((nfc->data & RXH_L4_B_0_1) ||
4388                     (nfc->data & RXH_L4_B_2_3))
4389                         return -EINVAL;
4390
4391                 req->ipv6_sctp_en = tuple_sets;
4392                 break;
4393         case IPV4_FLOW:
4394                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4395                 break;
4396         case IPV6_FLOW:
4397                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4398                 break;
4399         default:
4400                 return -EINVAL;
4401         }
4402
4403         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4404         if (ret) {
4405                 dev_err(&hdev->pdev->dev,
4406                         "Set rss tuple fail, status = %d\n", ret);
4407                 return ret;
4408         }
4409
4410         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4411         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4412         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4413         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4414         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4415         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4416         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4417         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4418         hclge_get_rss_type(vport);
4419         return 0;
4420 }
4421
4422 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4423                                struct ethtool_rxnfc *nfc)
4424 {
4425         struct hclge_vport *vport = hclge_get_vport(handle);
4426         u8 tuple_sets;
4427
4428         nfc->data = 0;
4429
4430         switch (nfc->flow_type) {
4431         case TCP_V4_FLOW:
4432                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4433                 break;
4434         case UDP_V4_FLOW:
4435                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4436                 break;
4437         case TCP_V6_FLOW:
4438                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4439                 break;
4440         case UDP_V6_FLOW:
4441                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4442                 break;
4443         case SCTP_V4_FLOW:
4444                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4445                 break;
4446         case SCTP_V6_FLOW:
4447                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4448                 break;
4449         case IPV4_FLOW:
4450         case IPV6_FLOW:
4451                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4452                 break;
4453         default:
4454                 return -EINVAL;
4455         }
4456
4457         if (!tuple_sets)
4458                 return 0;
4459
4460         if (tuple_sets & HCLGE_D_PORT_BIT)
4461                 nfc->data |= RXH_L4_B_2_3;
4462         if (tuple_sets & HCLGE_S_PORT_BIT)
4463                 nfc->data |= RXH_L4_B_0_1;
4464         if (tuple_sets & HCLGE_D_IP_BIT)
4465                 nfc->data |= RXH_IP_DST;
4466         if (tuple_sets & HCLGE_S_IP_BIT)
4467                 nfc->data |= RXH_IP_SRC;
4468
4469         return 0;
4470 }
4471
4472 static int hclge_get_tc_size(struct hnae3_handle *handle)
4473 {
4474         struct hclge_vport *vport = hclge_get_vport(handle);
4475         struct hclge_dev *hdev = vport->back;
4476
4477         return hdev->rss_size_max;
4478 }
4479
4480 int hclge_rss_init_hw(struct hclge_dev *hdev)
4481 {
4482         struct hclge_vport *vport = hdev->vport;
4483         u8 *rss_indir = vport[0].rss_indirection_tbl;
4484         u16 rss_size = vport[0].alloc_rss_size;
4485         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4486         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4487         u8 *key = vport[0].rss_hash_key;
4488         u8 hfunc = vport[0].rss_algo;
4489         u16 tc_valid[HCLGE_MAX_TC_NUM];
4490         u16 roundup_size;
4491         unsigned int i;
4492         int ret;
4493
4494         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4495         if (ret)
4496                 return ret;
4497
4498         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4499         if (ret)
4500                 return ret;
4501
4502         ret = hclge_set_rss_input_tuple(hdev);
4503         if (ret)
4504                 return ret;
4505
4506         /* Each TC has the same queue size, and the tc_size set to hardware
4507          * is the log2 of the roundup power of two of rss_size; the actual
4508          * queue size is limited by the indirection table.
4509          */
4510         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4511                 dev_err(&hdev->pdev->dev,
4512                         "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4513                         rss_size);
4514                 return -EINVAL;
4515         }
4516
4517         roundup_size = roundup_pow_of_two(rss_size);
4518         roundup_size = ilog2(roundup_size);
4519
4520         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4521                 tc_valid[i] = 0;
4522
4523                 if (!(hdev->hw_tc_map & BIT(i)))
4524                         continue;
4525
4526                 tc_valid[i] = 1;
4527                 tc_size[i] = roundup_size;
4528                 tc_offset[i] = rss_size * i;
4529         }
4530
4531         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4532 }
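
/* Illustration (standalone user-space sketch, not part of the driver):
 * a worked example of the TC sizing above. The hardware takes tc_size as
 * the log2 of the rounded-up power of two of rss_size, and each TC's
 * queues start at rss_size * i. The helper mimics the kernel's
 * roundup_pow_of_two()/ilog2() pair in user space.
 */
#include <stdio.h>

static unsigned int demo_log2_roundup(unsigned int n)
{
        unsigned int log = 0;

        while ((1U << log) < n)
                log++;
        return log;     /* e.g. n = 10 -> 4, since 2^4 = 16 >= 10 */
}

int main(void)
{
        unsigned int rss_size = 10;     /* assumed queues per TC */
        unsigned int i;

        for (i = 0; i < 4; i++)         /* pretend 4 TCs are enabled */
                printf("tc%u: tc_size=%u tc_offset=%u\n", i,
                       demo_log2_roundup(rss_size), rss_size * i);
        return 0;
}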
4533
4534 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4535 {
4536         struct hclge_vport *vport = hdev->vport;
4537         int i, j;
4538
4539         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4540                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4541                         vport[j].rss_indirection_tbl[i] =
4542                                 i % vport[j].alloc_rss_size;
4543         }
4544 }
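
/* Illustration (standalone user-space sketch, not part of the driver):
 * the default indirection table built above simply round-robins over the
 * allocated RSS queues, so table slot i maps to queue i % rss_size.
 */
#include <stdio.h>

int main(void)
{
        unsigned int rss_size = 4;      /* assumed alloc_rss_size */
        unsigned int i;

        for (i = 0; i < 16; i++)        /* first 16 table slots */
                printf("slot %2u -> queue %u\n", i, i % rss_size);
        return 0;
}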
4545
4546 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4547 {
4548         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4549         struct hclge_vport *vport = hdev->vport;
4550
4551         if (hdev->pdev->revision >= 0x21)
4552                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4553
4554         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4555                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4556                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4557                 vport[i].rss_tuple_sets.ipv4_udp_en =
4558                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4559                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4560                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4561                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4562                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4563                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4564                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4565                 vport[i].rss_tuple_sets.ipv6_udp_en =
4566                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4567                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4568                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4569                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4570                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4571
4572                 vport[i].rss_algo = rss_algo;
4573
4574                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4575                        HCLGE_RSS_KEY_SIZE);
4576         }
4577
4578         hclge_rss_indir_init_cfg(hdev);
4579 }
4580
4581 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4582                                 int vector_id, bool en,
4583                                 struct hnae3_ring_chain_node *ring_chain)
4584 {
4585         struct hclge_dev *hdev = vport->back;
4586         struct hnae3_ring_chain_node *node;
4587         struct hclge_desc desc;
4588         struct hclge_ctrl_vector_chain_cmd *req =
4589                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4590         enum hclge_cmd_status status;
4591         enum hclge_opcode_type op;
4592         u16 tqp_type_and_id;
4593         int i;
4594
4595         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4596         hclge_cmd_setup_basic_desc(&desc, op, false);
4597         req->int_vector_id = vector_id;
4598
4599         i = 0;
4600         for (node = ring_chain; node; node = node->next) {
4601                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4602                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4603                                 HCLGE_INT_TYPE_S,
4604                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4605                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4606                                 HCLGE_TQP_ID_S, node->tqp_index);
4607                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4608                                 HCLGE_INT_GL_IDX_S,
4609                                 hnae3_get_field(node->int_gl_idx,
4610                                                 HNAE3_RING_GL_IDX_M,
4611                                                 HNAE3_RING_GL_IDX_S));
4612                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4613                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4614                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4615                         req->vfid = vport->vport_id;
4616
4617                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4618                         if (status) {
4619                                 dev_err(&hdev->pdev->dev,
4620                                         "Map TQP fail, status is %d.\n",
4621                                         status);
4622                                 return -EIO;
4623                         }
4624                         i = 0;
4625
4626                         hclge_cmd_setup_basic_desc(&desc, op, false);
4629                         req->int_vector_id = vector_id;
4630                 }
4631         }
4632
4633         if (i > 0) {
4634                 req->int_cause_num = i;
4635                 req->vfid = vport->vport_id;
4636                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4637                 if (status) {
4638                         dev_err(&hdev->pdev->dev,
4639                                 "Map TQP fail, status is %d.\n", status);
4640                         return -EIO;
4641                 }
4642         }
4643
4644         return 0;
4645 }
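
/* Illustration (standalone user-space sketch, not part of the driver):
 * the flush-when-full pattern used above. Ring-chain nodes accumulate in
 * one command buffer; once it holds HCLGE_VECTOR_ELEMENTS_PER_CMD
 * entries the command is sent and the buffer is reused, and any final
 * partial batch is sent after the loop. The batch size is an assumed
 * demo value.
 */
#include <stdio.h>

#define DEMO_ELEMS_PER_CMD      4       /* assumed batch size */

int main(void)
{
        int total_rings = 10, i = 0, ring;

        for (ring = 0; ring < total_rings; ring++) {
                /* stage this ring into slot i of the command buffer */
                if (++i >= DEMO_ELEMS_PER_CMD) {
                        printf("send full cmd with %d entries\n", i);
                        i = 0;  /* start refilling the same buffer */
                }
        }
        if (i > 0)      /* remainder that never filled a whole batch */
                printf("send final cmd with %d entries\n", i);
        return 0;
}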
4646
4647 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4648                                     struct hnae3_ring_chain_node *ring_chain)
4649 {
4650         struct hclge_vport *vport = hclge_get_vport(handle);
4651         struct hclge_dev *hdev = vport->back;
4652         int vector_id;
4653
4654         vector_id = hclge_get_vector_index(hdev, vector);
4655         if (vector_id < 0) {
4656                 dev_err(&hdev->pdev->dev,
4657                         "Get vector index fail. vector_id = %d\n", vector_id);
4658                 return vector_id;
4659         }
4660
4661         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4662 }
4663
4664 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4665                                        struct hnae3_ring_chain_node *ring_chain)
4666 {
4667         struct hclge_vport *vport = hclge_get_vport(handle);
4668         struct hclge_dev *hdev = vport->back;
4669         int vector_id, ret;
4670
4671         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4672                 return 0;
4673
4674         vector_id = hclge_get_vector_index(hdev, vector);
4675         if (vector_id < 0) {
4676                 dev_err(&handle->pdev->dev,
4677                         "Get vector index fail. ret = %d\n", vector_id);
4678                 return vector_id;
4679         }
4680
4681         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4682         if (ret)
4683                 dev_err(&handle->pdev->dev,
4684                         "Unmap ring from vector fail. vector_id = %d, ret = %d\n",
4685                         vector_id, ret);
4686
4687         return ret;
4688 }
4689
4690 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4691                                       struct hclge_promisc_param *param)
4692 {
4693         struct hclge_promisc_cfg_cmd *req;
4694         struct hclge_desc desc;
4695         int ret;
4696
4697         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4698
4699         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4700         req->vf_id = param->vf_id;
4701
4702         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
4703          * on pdev revision(0x20); newer revisions support them. Setting
4704          * these two fields does not return an error when the driver sends
4705          * the command to firmware on revision(0x20).
4706          */
4707         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4708                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4709
4710         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4711         if (ret)
4712                 dev_err(&hdev->pdev->dev,
4713                         "Set promisc mode fail, status is %d.\n", ret);
4714
4715         return ret;
4716 }
4717
4718 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4719                                      bool en_uc, bool en_mc, bool en_bc,
4720                                      int vport_id)
4721 {
4722         if (!param)
4723                 return;
4724
4725         memset(param, 0, sizeof(struct hclge_promisc_param));
4726         if (en_uc)
4727                 param->enable = HCLGE_PROMISC_EN_UC;
4728         if (en_mc)
4729                 param->enable |= HCLGE_PROMISC_EN_MC;
4730         if (en_bc)
4731                 param->enable |= HCLGE_PROMISC_EN_BC;
4732         param->vf_id = vport_id;
4733 }
4734
4735 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4736                                  bool en_mc_pmc, bool en_bc_pmc)
4737 {
4738         struct hclge_dev *hdev = vport->back;
4739         struct hclge_promisc_param param;
4740
4741         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4742                                  vport->vport_id);
4743         return hclge_cmd_set_promisc_mode(hdev, &param);
4744 }
4745
4746 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4747                                   bool en_mc_pmc)
4748 {
4749         struct hclge_vport *vport = hclge_get_vport(handle);
4750         bool en_bc_pmc = true;
4751
4752         /* For revision 0x20, if broadcast promisc is enabled, the vlan
4753          * filter is always bypassed. So broadcast promisc should be
4754          * disabled until the user enables promisc mode.
4755          */
4756         if (handle->pdev->revision == 0x20)
4757                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4758
4759         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4760                                             en_bc_pmc);
4761 }
4762
4763 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4764 {
4765         struct hclge_get_fd_mode_cmd *req;
4766         struct hclge_desc desc;
4767         int ret;
4768
4769         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4770
4771         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4772
4773         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4774         if (ret) {
4775                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4776                 return ret;
4777         }
4778
4779         *fd_mode = req->mode;
4780
4781         return ret;
4782 }
4783
4784 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4785                                    u32 *stage1_entry_num,
4786                                    u32 *stage2_entry_num,
4787                                    u16 *stage1_counter_num,
4788                                    u16 *stage2_counter_num)
4789 {
4790         struct hclge_get_fd_allocation_cmd *req;
4791         struct hclge_desc desc;
4792         int ret;
4793
4794         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4795
4796         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4797
4798         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4799         if (ret) {
4800                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4801                         ret);
4802                 return ret;
4803         }
4804
4805         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4806         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4807         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4808         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4809
4810         return ret;
4811 }
4812
4813 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4814 {
4815         struct hclge_set_fd_key_config_cmd *req;
4816         struct hclge_fd_key_cfg *stage;
4817         struct hclge_desc desc;
4818         int ret;
4819
4820         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4821
4822         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4823         stage = &hdev->fd_cfg.key_cfg[stage_num];
4824         req->stage = stage_num;
4825         req->key_select = stage->key_sel;
4826         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4827         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4828         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4829         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4830         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4831         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4832
4833         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4834         if (ret)
4835                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4836
4837         return ret;
4838 }
4839
4840 static int hclge_init_fd_config(struct hclge_dev *hdev)
4841 {
4842 #define LOW_2_WORDS             0x03
4843         struct hclge_fd_key_cfg *key_cfg;
4844         int ret;
4845
4846         if (!hnae3_dev_fd_supported(hdev))
4847                 return 0;
4848
4849         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4850         if (ret)
4851                 return ret;
4852
4853         switch (hdev->fd_cfg.fd_mode) {
4854         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4855                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4856                 break;
4857         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4858                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4859                 break;
4860         default:
4861                 dev_err(&hdev->pdev->dev,
4862                         "Unsupported flow director mode %u\n",
4863                         hdev->fd_cfg.fd_mode);
4864                 return -EOPNOTSUPP;
4865         }
4866
4867         hdev->fd_cfg.proto_support =
4868                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4869                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4870         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4871         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4872         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4873         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4874         key_cfg->outer_sipv6_word_en = 0;
4875         key_cfg->outer_dipv6_word_en = 0;
4876
4877         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4878                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4879                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4880                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4881
4882         /* If the max 400bit key is used, tuples for ether type are supported */
4883         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4884                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4885                 key_cfg->tuple_active |=
4886                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4887         }
4888
4889         /* roce_type is used to filter roce frames
4890          * dst_vport is used to specify the rule
4891          */
4892         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4893
4894         ret = hclge_get_fd_allocation(hdev,
4895                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4896                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4897                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4898                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4899         if (ret)
4900                 return ret;
4901
4902         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4903 }
4904
4905 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4906                                 int loc, u8 *key, bool is_add)
4907 {
4908         struct hclge_fd_tcam_config_1_cmd *req1;
4909         struct hclge_fd_tcam_config_2_cmd *req2;
4910         struct hclge_fd_tcam_config_3_cmd *req3;
4911         struct hclge_desc desc[3];
4912         int ret;
4913
4914         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4915         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4916         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4917         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4918         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4919
4920         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4921         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4922         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4923
4924         req1->stage = stage;
4925         req1->xy_sel = sel_x ? 1 : 0;
4926         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4927         req1->index = cpu_to_le32(loc);
4928         req1->entry_vld = sel_x ? is_add : 0;
4929
4930         if (key) {
4931                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4932                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4933                        sizeof(req2->tcam_data));
4934                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4935                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4936         }
4937
4938         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4939         if (ret)
4940                 dev_err(&hdev->pdev->dev,
4941                         "config tcam key fail, ret=%d\n",
4942                         ret);
4943
4944         return ret;
4945 }
4946
4947 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4948                               struct hclge_fd_ad_data *action)
4949 {
4950         struct hclge_fd_ad_config_cmd *req;
4951         struct hclge_desc desc;
4952         u64 ad_data = 0;
4953         int ret;
4954
4955         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4956
4957         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4958         req->index = cpu_to_le32(loc);
4959         req->stage = stage;
4960
4961         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4962                       action->write_rule_id_to_bd);
4963         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4964                         action->rule_id);
4965         ad_data <<= 32;
4966         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4967         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4968                       action->forward_to_direct_queue);
4969         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4970                         action->queue_id);
4971         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4972         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4973                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4974         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4975         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4976                         action->counter_id);
4977
4978         req->ad_data = cpu_to_le64(ad_data);
4979         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4980         if (ret)
4981                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4982
4983         return ret;
4984 }
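
/* Illustration (standalone user-space sketch, not part of the driver):
 * the 64-bit action word above is built by writing the rule-id fields
 * first, moving them into the upper 32 bits with "ad_data <<= 32", then
 * writing the action bits (drop/queue/counter) into the lower 32 bits.
 * The field positions below are assumed demo values.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t ad_data = 0;
        unsigned int rule_id = 7;       /* assumed rule location */
        unsigned int queue_id = 3;      /* assumed destination queue */

        ad_data |= (rule_id << 1) | 1;  /* rule id + write-rule-id bit */
        ad_data <<= 32;                 /* shift them into the high word */
        ad_data |= (queue_id << 1) | 1; /* direct-queue bit + queue id */

        printf("ad_data = 0x%016llx\n", (unsigned long long)ad_data);
        return 0;
}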
4985
4986 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4987                                    struct hclge_fd_rule *rule)
4988 {
4989         u16 tmp_x_s, tmp_y_s;
4990         u32 tmp_x_l, tmp_y_l;
4991         int i;
4992
4993         if (rule->unused_tuple & tuple_bit)
4994                 return true;
4995
4996         switch (tuple_bit) {
4997         case 0:
4998                 return false;
4999         case BIT(INNER_DST_MAC):
5000                 for (i = 0; i < ETH_ALEN; i++) {
5001                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5002                                rule->tuples_mask.dst_mac[i]);
5003                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5004                                rule->tuples_mask.dst_mac[i]);
5005                 }
5006
5007                 return true;
5008         case BIT(INNER_SRC_MAC):
5009                 for (i = 0; i < ETH_ALEN; i++) {
5010                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5011                                rule->tuples_mask.src_mac[i]);
5012                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5013                                rule->tuples_mask.src_mac[i]);
5014                 }
5015
5016                 return true;
5017         case BIT(INNER_VLAN_TAG_FST):
5018                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5019                        rule->tuples_mask.vlan_tag1);
5020                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5021                        rule->tuples_mask.vlan_tag1);
5022                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5023                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5024
5025                 return true;
5026         case BIT(INNER_ETH_TYPE):
5027                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5028                        rule->tuples_mask.ether_proto);
5029                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5030                        rule->tuples_mask.ether_proto);
5031                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5032                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5033
5034                 return true;
5035         case BIT(INNER_IP_TOS):
5036                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5037                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5038
5039                 return true;
5040         case BIT(INNER_IP_PROTO):
5041                 calc_x(*key_x, rule->tuples.ip_proto,
5042                        rule->tuples_mask.ip_proto);
5043                 calc_y(*key_y, rule->tuples.ip_proto,
5044                        rule->tuples_mask.ip_proto);
5045
5046                 return true;
5047         case BIT(INNER_SRC_IP):
5048                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5049                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5050                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5051                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5052                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5053                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5054
5055                 return true;
5056         case BIT(INNER_DST_IP):
5057                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5058                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5059                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5060                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5061                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5062                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5063
5064                 return true;
5065         case BIT(INNER_SRC_PORT):
5066                 calc_x(tmp_x_s, rule->tuples.src_port,
5067                        rule->tuples_mask.src_port);
5068                 calc_y(tmp_y_s, rule->tuples.src_port,
5069                        rule->tuples_mask.src_port);
5070                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5071                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5072
5073                 return true;
5074         case BIT(INNER_DST_PORT):
5075                 calc_x(tmp_x_s, rule->tuples.dst_port,
5076                        rule->tuples_mask.dst_port);
5077                 calc_y(tmp_y_s, rule->tuples.dst_port,
5078                        rule->tuples_mask.dst_port);
5079                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5080                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5081
5082                 return true;
5083         default:
5084                 return false;
5085         }
5086 }
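
/* Illustration (standalone user-space sketch, not part of the driver):
 * hclge_fd_convert_tuple() above turns each value/mask tuple into a TCAM
 * key-X/key-Y pair via the calc_x()/calc_y() macros defined elsewhere in
 * this file. A common encoding of this kind, assumed by the demo, is
 * X = ~value & mask and Y = value & mask; bits outside the mask become
 * "don't care" positions.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t value = 0x1234, mask = 0xFF00; /* match the high byte only */
        uint16_t x = (uint16_t)(~value & mask);
        uint16_t y = value & mask;

        /* masked-out bit positions yield x = y = 0, i.e. wildcards */
        printf("value=0x%04x mask=0x%04x -> x=0x%04x y=0x%04x\n",
               (unsigned)value, (unsigned)mask, (unsigned)x, (unsigned)y);
        return 0;
}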
5087
5088 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5089                                  u8 vf_id, u8 network_port_id)
5090 {
5091         u32 port_number = 0;
5092
5093         if (port_type == HOST_PORT) {
5094                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5095                                 pf_id);
5096                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5097                                 vf_id);
5098                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5099         } else {
5100                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5101                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5102                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5103         }
5104
5105         return port_number;
5106 }
5107
5108 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5109                                        __le32 *key_x, __le32 *key_y,
5110                                        struct hclge_fd_rule *rule)
5111 {
5112         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5113         u8 cur_pos = 0, tuple_size, shift_bits;
5114         unsigned int i;
5115
5116         for (i = 0; i < MAX_META_DATA; i++) {
5117                 tuple_size = meta_data_key_info[i].key_length;
5118                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5119
5120                 switch (tuple_bit) {
5121                 case BIT(ROCE_TYPE):
5122                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5123                         cur_pos += tuple_size;
5124                         break;
5125                 case BIT(DST_VPORT):
5126                         port_number = hclge_get_port_number(HOST_PORT, 0,
5127                                                             rule->vf_id, 0);
5128                         hnae3_set_field(meta_data,
5129                                         GENMASK(cur_pos + tuple_size, cur_pos),
5130                                         cur_pos, port_number);
5131                         cur_pos += tuple_size;
5132                         break;
5133                 default:
5134                         break;
5135                 }
5136         }
5137
5138         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5139         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5140         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5141
5142         *key_x = cpu_to_le32(tmp_x << shift_bits);
5143         *key_y = cpu_to_le32(tmp_y << shift_bits);
5144 }
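
/* Illustration (standalone user-space sketch, not part of the driver):
 * the meta data word above is assembled from bit position 0 upward and
 * then left-shifted by the number of unused bits, so the packed fields
 * end up flush against the MSB end of the 32-bit word. The field widths
 * below are assumed demo values.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t meta = 0;
        unsigned int cur_pos = 0;

        meta |= 1u << cur_pos;          /* 1-bit field, e.g. packet type */
        cur_pos += 1;
        meta |= 5u << cur_pos;          /* assumed 8-bit port number = 5 */
        cur_pos += 8;

        meta <<= 32 - cur_pos;          /* push the fields up to the MSB */
        printf("meta = 0x%08x (used %u bits)\n", meta, cur_pos);
        return 0;
}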
5145
5146 /* A complete key consists of a meta data key and a tuple key.
5147  * The meta data key is stored in the MSB region, the tuple key is stored
5148  * in the LSB region, and unused bits are filled with 0.
5149  */
5150 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5151                             struct hclge_fd_rule *rule)
5152 {
5153         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5154         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5155         u8 *cur_key_x, *cur_key_y;
5156         unsigned int i;
5157         int ret, tuple_size;
5158         u8 meta_data_region;
5159
5160         memset(key_x, 0, sizeof(key_x));
5161         memset(key_y, 0, sizeof(key_y));
5162         cur_key_x = key_x;
5163         cur_key_y = key_y;
5164
5165         for (i = 0; i < MAX_TUPLE; i++) {
5166                 bool tuple_valid;
5167                 u32 check_tuple;
5168
5169                 tuple_size = tuple_key_info[i].key_length / 8;
5170                 check_tuple = key_cfg->tuple_active & BIT(i);
5171
5172                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5173                                                      cur_key_y, rule);
5174                 if (tuple_valid) {
5175                         cur_key_x += tuple_size;
5176                         cur_key_y += tuple_size;
5177                 }
5178         }
5179
5180         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5181                         MAX_META_DATA_LENGTH / 8;
5182
5183         hclge_fd_convert_meta_data(key_cfg,
5184                                    (__le32 *)(key_x + meta_data_region),
5185                                    (__le32 *)(key_y + meta_data_region),
5186                                    rule);
5187
5188         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5189                                    true);
5190         if (ret) {
5191                 dev_err(&hdev->pdev->dev,
5192                         "fd key_y config fail, loc=%u, ret=%d\n",
5193                         rule->location, ret);
5194                 return ret;
5195         }
5196
5197         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5198                                    true);
5199         if (ret)
5200                 dev_err(&hdev->pdev->dev,
5201                         "fd key_x config fail, loc=%u, ret=%d\n",
5202                         rule->location, ret);
5203         return ret;
5204 }
5205
5206 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5207                                struct hclge_fd_rule *rule)
5208 {
5209         struct hclge_fd_ad_data ad_data;
5210
5211         ad_data.ad_id = rule->location;
5212
5213         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5214                 ad_data.drop_packet = true;
5215                 ad_data.forward_to_direct_queue = false;
5216                 ad_data.queue_id = 0;
5217         } else {
5218                 ad_data.drop_packet = false;
5219                 ad_data.forward_to_direct_queue = true;
5220                 ad_data.queue_id = rule->queue_id;
5221         }
5222
5223         ad_data.use_counter = false;
5224         ad_data.counter_id = 0;
5225
5226         ad_data.use_next_stage = false;
5227         ad_data.next_input_key = 0;
5228
5229         ad_data.write_rule_id_to_bd = true;
5230         ad_data.rule_id = rule->location;
5231
5232         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5233 }
5234
5235 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5236                                struct ethtool_rx_flow_spec *fs, u32 *unused)
5237 {
5238         struct ethtool_tcpip4_spec *tcp_ip4_spec;
5239         struct ethtool_usrip4_spec *usr_ip4_spec;
5240         struct ethtool_tcpip6_spec *tcp_ip6_spec;
5241         struct ethtool_usrip6_spec *usr_ip6_spec;
5242         struct ethhdr *ether_spec;
5243
5244         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5245                 return -EINVAL;
5246
5247         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5248                 return -EOPNOTSUPP;
5249
5250         if ((fs->flow_type & FLOW_EXT) &&
5251             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5252                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5253                 return -EOPNOTSUPP;
5254         }
5255
5256         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5257         case SCTP_V4_FLOW:
5258         case TCP_V4_FLOW:
5259         case UDP_V4_FLOW:
5260                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5261                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5262
5263                 if (!tcp_ip4_spec->ip4src)
5264                         *unused |= BIT(INNER_SRC_IP);
5265
5266                 if (!tcp_ip4_spec->ip4dst)
5267                         *unused |= BIT(INNER_DST_IP);
5268
5269                 if (!tcp_ip4_spec->psrc)
5270                         *unused |= BIT(INNER_SRC_PORT);
5271
5272                 if (!tcp_ip4_spec->pdst)
5273                         *unused |= BIT(INNER_DST_PORT);
5274
5275                 if (!tcp_ip4_spec->tos)
5276                         *unused |= BIT(INNER_IP_TOS);
5277
5278                 break;
5279         case IP_USER_FLOW:
5280                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5281                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5282                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5283
5284                 if (!usr_ip4_spec->ip4src)
5285                         *unused |= BIT(INNER_SRC_IP);
5286
5287                 if (!usr_ip4_spec->ip4dst)
5288                         *unused |= BIT(INNER_DST_IP);
5289
5290                 if (!usr_ip4_spec->tos)
5291                         *unused |= BIT(INNER_IP_TOS);
5292
5293                 if (!usr_ip4_spec->proto)
5294                         *unused |= BIT(INNER_IP_PROTO);
5295
5296                 if (usr_ip4_spec->l4_4_bytes)
5297                         return -EOPNOTSUPP;
5298
5299                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5300                         return -EOPNOTSUPP;
5301
5302                 break;
5303         case SCTP_V6_FLOW:
5304         case TCP_V6_FLOW:
5305         case UDP_V6_FLOW:
5306                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5307                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5308                         BIT(INNER_IP_TOS);
5309
5310                 /* check whether src/dst ip addresses are used */
5311                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5312                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5313                         *unused |= BIT(INNER_SRC_IP);
5314
5315                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5316                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5317                         *unused |= BIT(INNER_DST_IP);
5318
5319                 if (!tcp_ip6_spec->psrc)
5320                         *unused |= BIT(INNER_SRC_PORT);
5321
5322                 if (!tcp_ip6_spec->pdst)
5323                         *unused |= BIT(INNER_DST_PORT);
5324
5325                 if (tcp_ip6_spec->tclass)
5326                         return -EOPNOTSUPP;
5327
5328                 break;
5329         case IPV6_USER_FLOW:
5330                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5331                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5332                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5333                         BIT(INNER_DST_PORT);
5334
5335                 /* check whether src/dst ip addresses are used */
5336                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5337                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5338                         *unused |= BIT(INNER_SRC_IP);
5339
5340                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5341                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5342                         *unused |= BIT(INNER_DST_IP);
5343
5344                 if (!usr_ip6_spec->l4_proto)
5345                         *unused |= BIT(INNER_IP_PROTO);
5346
5347                 if (usr_ip6_spec->tclass)
5348                         return -EOPNOTSUPP;
5349
5350                 if (usr_ip6_spec->l4_4_bytes)
5351                         return -EOPNOTSUPP;
5352
5353                 break;
5354         case ETHER_FLOW:
5355                 ether_spec = &fs->h_u.ether_spec;
5356                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5357                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5358                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5359
5360                 if (is_zero_ether_addr(ether_spec->h_source))
5361                         *unused |= BIT(INNER_SRC_MAC);
5362
5363                 if (is_zero_ether_addr(ether_spec->h_dest))
5364                         *unused |= BIT(INNER_DST_MAC);
5365
5366                 if (!ether_spec->h_proto)
5367                         *unused |= BIT(INNER_ETH_TYPE);
5368
5369                 break;
5370         default:
5371                 return -EOPNOTSUPP;
5372         }
5373
5374         if (fs->flow_type & FLOW_EXT) {
5375                 if (fs->h_ext.vlan_etype)
5376                         return -EOPNOTSUPP;
5377                 if (!fs->h_ext.vlan_tci)
5378                         *unused |= BIT(INNER_VLAN_TAG_FST);
5379
5380                 if (fs->m_ext.vlan_tci) {
5381                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5382                                 return -EINVAL;
5383                 }
5384         } else {
5385                 *unused |= BIT(INNER_VLAN_TAG_FST);
5386         }
5387
5388         if (fs->flow_type & FLOW_MAC_EXT) {
5389                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5390                         return -EOPNOTSUPP;
5391
5392                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5393                         *unused |= BIT(INNER_DST_MAC);
5394                 else
5395                         *unused &= ~(BIT(INNER_DST_MAC));
5396         }
5397
5398         return 0;
5399 }
5400
5401 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5402 {
5403         struct hclge_fd_rule *rule = NULL;
5404         struct hlist_node *node2;
5405
5406         spin_lock_bh(&hdev->fd_rule_lock);
5407         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5408                 if (rule->location >= location)
5409                         break;
5410         }
5411
5412         spin_unlock_bh(&hdev->fd_rule_lock);
5413
5414         return rule && rule->location == location;
5415 }
5416
5417 /* make sure being called after lock up with fd_rule_lock */
5418 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5419                                      struct hclge_fd_rule *new_rule,
5420                                      u16 location,
5421                                      bool is_add)
5422 {
5423         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5424         struct hlist_node *node2;
5425
5426         if (is_add && !new_rule)
5427                 return -EINVAL;
5428
5429         hlist_for_each_entry_safe(rule, node2,
5430                                   &hdev->fd_rule_list, rule_node) {
5431                 if (rule->location >= location)
5432                         break;
5433                 parent = rule;
5434         }
5435
5436         if (rule && rule->location == location) {
5437                 hlist_del(&rule->rule_node);
5438                 kfree(rule);
5439                 hdev->hclge_fd_rule_num--;
5440
5441                 if (!is_add) {
5442                         if (!hdev->hclge_fd_rule_num)
5443                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5444                         clear_bit(location, hdev->fd_bmap);
5445
5446                         return 0;
5447                 }
5448         } else if (!is_add) {
5449                 dev_err(&hdev->pdev->dev,
5450                         "delete fail, rule %u does not exist\n",
5451                         location);
5452                 return -EINVAL;
5453         }
5454
5455         INIT_HLIST_NODE(&new_rule->rule_node);
5456
5457         if (parent)
5458                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5459         else
5460                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5461
5462         set_bit(location, hdev->fd_bmap);
5463         hdev->hclge_fd_rule_num++;
5464         hdev->fd_active_type = new_rule->rule_type;
5465
5466         return 0;
5467 }
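
/* Illustration (standalone user-space sketch, not part of the driver):
 * the rule list above is kept sorted by location. Insertion walks to the
 * first node whose location is >= the new one, removes it first when the
 * locations match, and links the new node behind its predecessor. The
 * demo below keeps a plain singly-linked list sorted the same way.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_rule {
        unsigned int location;
        struct demo_rule *next;
};

static void demo_insert(struct demo_rule **head, unsigned int loc)
{
        struct demo_rule **pp = head, *n;

        while (*pp && (*pp)->location < loc)
                pp = &(*pp)->next;      /* stop at first location >= loc */

        if (*pp && (*pp)->location == loc) {
                n = *pp;                /* occupied slot: unlink old rule */
                *pp = n->next;
                free(n);
        }

        n = malloc(sizeof(*n));
        if (!n)
                return;
        n->location = loc;
        n->next = *pp;                  /* link behind the predecessor */
        *pp = n;
}

int main(void)
{
        struct demo_rule *head = NULL, *r;

        demo_insert(&head, 5);
        demo_insert(&head, 1);
        demo_insert(&head, 3);
        demo_insert(&head, 3);          /* replaces the existing slot 3 */
        for (r = head; r; r = r->next)
                printf("loc %u\n", r->location);
        return 0;
}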
5468
5469 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5470                               struct ethtool_rx_flow_spec *fs,
5471                               struct hclge_fd_rule *rule)
5472 {
5473         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5474
5475         switch (flow_type) {
5476         case SCTP_V4_FLOW:
5477         case TCP_V4_FLOW:
5478         case UDP_V4_FLOW:
5479                 rule->tuples.src_ip[IPV4_INDEX] =
5480                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5481                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5482                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5483
5484                 rule->tuples.dst_ip[IPV4_INDEX] =
5485                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5486                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5487                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5488
5489                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5490                 rule->tuples_mask.src_port =
5491                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5492
5493                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5494                 rule->tuples_mask.dst_port =
5495                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5496
5497                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5498                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5499
5500                 rule->tuples.ether_proto = ETH_P_IP;
5501                 rule->tuples_mask.ether_proto = 0xFFFF;
5502
5503                 break;
5504         case IP_USER_FLOW:
5505                 rule->tuples.src_ip[IPV4_INDEX] =
5506                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5507                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5508                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5509
5510                 rule->tuples.dst_ip[IPV4_INDEX] =
5511                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5512                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5513                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5514
5515                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5516                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5517
5518                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5519                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5520
5521                 rule->tuples.ether_proto = ETH_P_IP;
5522                 rule->tuples_mask.ether_proto = 0xFFFF;
5523
5524                 break;
5525         case SCTP_V6_FLOW:
5526         case TCP_V6_FLOW:
5527         case UDP_V6_FLOW:
5528                 be32_to_cpu_array(rule->tuples.src_ip,
5529                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5530                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5531                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5532
5533                 be32_to_cpu_array(rule->tuples.dst_ip,
5534                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5535                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5536                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5537
5538                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5539                 rule->tuples_mask.src_port =
5540                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5541
5542                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5543                 rule->tuples_mask.dst_port =
5544                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5545
5546                 rule->tuples.ether_proto = ETH_P_IPV6;
5547                 rule->tuples_mask.ether_proto = 0xFFFF;
5548
5549                 break;
5550         case IPV6_USER_FLOW:
5551                 be32_to_cpu_array(rule->tuples.src_ip,
5552                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5553                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5554                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5555
5556                 be32_to_cpu_array(rule->tuples.dst_ip,
5557                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5558                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5559                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5560
5561                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5562                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5563
5564                 rule->tuples.ether_proto = ETH_P_IPV6;
5565                 rule->tuples_mask.ether_proto = 0xFFFF;
5566
5567                 break;
5568         case ETHER_FLOW:
5569                 ether_addr_copy(rule->tuples.src_mac,
5570                                 fs->h_u.ether_spec.h_source);
5571                 ether_addr_copy(rule->tuples_mask.src_mac,
5572                                 fs->m_u.ether_spec.h_source);
5573
5574                 ether_addr_copy(rule->tuples.dst_mac,
5575                                 fs->h_u.ether_spec.h_dest);
5576                 ether_addr_copy(rule->tuples_mask.dst_mac,
5577                                 fs->m_u.ether_spec.h_dest);
5578
5579                 rule->tuples.ether_proto =
5580                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5581                 rule->tuples_mask.ether_proto =
5582                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5583
5584                 break;
5585         default:
5586                 return -EOPNOTSUPP;
5587         }
5588
5589         switch (flow_type) {
5590         case SCTP_V4_FLOW:
5591         case SCTP_V6_FLOW:
5592                 rule->tuples.ip_proto = IPPROTO_SCTP;
5593                 rule->tuples_mask.ip_proto = 0xFF;
5594                 break;
5595         case TCP_V4_FLOW:
5596         case TCP_V6_FLOW:
5597                 rule->tuples.ip_proto = IPPROTO_TCP;
5598                 rule->tuples_mask.ip_proto = 0xFF;
5599                 break;
5600         case UDP_V4_FLOW:
5601         case UDP_V6_FLOW:
5602                 rule->tuples.ip_proto = IPPROTO_UDP;
5603                 rule->tuples_mask.ip_proto = 0xFF;
5604                 break;
5605         default:
5606                 break;
5607         }
5608
5609         if (fs->flow_type & FLOW_EXT) {
5610                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5611                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5612         }
5613
5614         if (fs->flow_type & FLOW_MAC_EXT) {
5615                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5616                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5617         }
5618
5619         return 0;
5620 }
5621
5622 /* the caller must hold hdev->fd_rule_lock */
5623 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5624                                 struct hclge_fd_rule *rule)
5625 {
5626         int ret;
5627
5628         if (!rule) {
5629                 dev_err(&hdev->pdev->dev,
5630                         "The flow director rule is NULL\n");
5631                 return -EINVAL;
5632         }
5633
5634         /* updating the rule list never fails here, so skip the return value check */
5635         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5636
5637         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5638         if (ret)
5639                 goto clear_rule;
5640
5641         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5642         if (ret)
5643                 goto clear_rule;
5644
5645         return 0;
5646
5647 clear_rule:
5648         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5649         return ret;
5650 }
5651
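/* Add a flow director rule configured through ethtool, e.g. (illustrative)
 * "ethtool -N eth0 flow-type tcp4 dst-port 80 action 3". The ring_cookie
 * encodes the target queue and an optional VF id; RX_CLS_FLOW_DISC selects
 * the drop action instead.
 */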
5652 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5653                               struct ethtool_rxnfc *cmd)
5654 {
5655         struct hclge_vport *vport = hclge_get_vport(handle);
5656         struct hclge_dev *hdev = vport->back;
5657         u16 dst_vport_id = 0, q_index = 0;
5658         struct ethtool_rx_flow_spec *fs;
5659         struct hclge_fd_rule *rule;
5660         u32 unused = 0;
5661         u8 action;
5662         int ret;
5663
5664         if (!hnae3_dev_fd_supported(hdev))
5665                 return -EOPNOTSUPP;
5666
5667         if (!hdev->fd_en) {
5668                 dev_warn(&hdev->pdev->dev,
5669                          "Please enable flow director first\n");
5670                 return -EOPNOTSUPP;
5671         }
5672
5673         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5674
5675         ret = hclge_fd_check_spec(hdev, fs, &unused);
5676         if (ret) {
5677                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5678                 return ret;
5679         }
5680
5681         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5682                 action = HCLGE_FD_ACTION_DROP_PACKET;
5683         } else {
5684                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5685                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5686                 u16 tqps;
5687
5688                 if (vf > hdev->num_req_vfs) {
5689                         dev_err(&hdev->pdev->dev,
5690                                 "Error: vf id (%u) > max vf num (%u)\n",
5691                                 vf, hdev->num_req_vfs);
5692                         return -EINVAL;
5693                 }
5694
5695                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5696                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5697
5698                 if (ring >= tqps) {
5699                         dev_err(&hdev->pdev->dev,
5700                                 "Error: queue id (%u) > max tqp num (%u)\n",
5701                                 ring, tqps - 1);
5702                         return -EINVAL;
5703                 }
5704
5705                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5706                 q_index = ring;
5707         }
5708
5709         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5710         if (!rule)
5711                 return -ENOMEM;
5712
5713         ret = hclge_fd_get_tuple(hdev, fs, rule);
5714         if (ret) {
5715                 kfree(rule);
5716                 return ret;
5717         }
5718
5719         rule->flow_type = fs->flow_type;
5720
5721         rule->location = fs->location;
5722         rule->unused_tuple = unused;
5723         rule->vf_id = dst_vport_id;
5724         rule->queue_id = q_index;
5725         rule->action = action;
5726         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5727
5728         /* to avoid rule conflict, when the user configures a rule via
5729          * ethtool, we need to clear all arfs rules
5730          */
5731         hclge_clear_arfs_rules(handle);
5732
5733         spin_lock_bh(&hdev->fd_rule_lock);
5734         ret = hclge_fd_config_rule(hdev, rule);
5735
5736         spin_unlock_bh(&hdev->fd_rule_lock);
5737
5738         return ret;
5739 }
5740
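/* Delete the rule at fs->location: invalidate the TCAM entry first, then
 * drop the rule from the software list under fd_rule_lock.
 */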
5741 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5742                               struct ethtool_rxnfc *cmd)
5743 {
5744         struct hclge_vport *vport = hclge_get_vport(handle);
5745         struct hclge_dev *hdev = vport->back;
5746         struct ethtool_rx_flow_spec *fs;
5747         int ret;
5748
5749         if (!hnae3_dev_fd_supported(hdev))
5750                 return -EOPNOTSUPP;
5751
5752         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5753
5754         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5755                 return -EINVAL;
5756
5757         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5758                 dev_err(&hdev->pdev->dev,
5759                         "Delete fail, rule %u does not exist\n", fs->location);
5760                 return -ENOENT;
5761         }
5762
5763         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5764                                    NULL, false);
5765         if (ret)
5766                 return ret;
5767
5768         spin_lock_bh(&hdev->fd_rule_lock);
5769         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5770
5771         spin_unlock_bh(&hdev->fd_rule_lock);
5772
5773         return ret;
5774 }
5775
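/* Invalidate every TCAM entry tracked in fd_bmap; when clear_list is true,
 * also free the software rule list and reset the rule counters.
 */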
5776 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5777                                      bool clear_list)
5778 {
5779         struct hclge_vport *vport = hclge_get_vport(handle);
5780         struct hclge_dev *hdev = vport->back;
5781         struct hclge_fd_rule *rule;
5782         struct hlist_node *node;
5783         u16 location;
5784
5785         if (!hnae3_dev_fd_supported(hdev))
5786                 return;
5787
5788         spin_lock_bh(&hdev->fd_rule_lock);
5789         for_each_set_bit(location, hdev->fd_bmap,
5790                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5791                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5792                                      NULL, false);
5793
5794         if (clear_list) {
5795                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5796                                           rule_node) {
5797                         hlist_del(&rule->rule_node);
5798                         kfree(rule);
5799                 }
5800                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5801                 hdev->hclge_fd_rule_num = 0;
5802                 bitmap_zero(hdev->fd_bmap,
5803                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5804         }
5805
5806         spin_unlock_bh(&hdev->fd_rule_lock);
5807 }
5808
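/* Re-program all software-tracked rules into hardware, typically after a
 * reset has cleared the TCAM; rules that fail to restore are dropped from
 * the list with a warning.
 */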
5809 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5810 {
5811         struct hclge_vport *vport = hclge_get_vport(handle);
5812         struct hclge_dev *hdev = vport->back;
5813         struct hclge_fd_rule *rule;
5814         struct hlist_node *node;
5815         int ret;
5816
5817         /* Return 0 here, because the reset error handling will check this
5818          * return value. If an error is returned here, the reset process
5819          * will fail.
5820          */
5821         if (!hnae3_dev_fd_supported(hdev))
5822                 return 0;
5823
5824         /* if fd is disabled, the rules should not be restored during reset */
5825         if (!hdev->fd_en)
5826                 return 0;
5827
5828         spin_lock_bh(&hdev->fd_rule_lock);
5829         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5830                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5831                 if (!ret)
5832                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5833
5834                 if (ret) {
5835                         dev_warn(&hdev->pdev->dev,
5836                                  "Restore rule %u failed, remove it\n",
5837                                  rule->location);
5838                         clear_bit(rule->location, hdev->fd_bmap);
5839                         hlist_del(&rule->rule_node);
5840                         kfree(rule);
5841                         hdev->hclge_fd_rule_num--;
5842                 }
5843         }
5844
5845         if (hdev->hclge_fd_rule_num)
5846                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5847
5848         spin_unlock_bh(&hdev->fd_rule_lock);
5849
5850         return 0;
5851 }
5852
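/* Report the active rule count and table size for ethtool's
 * ETHTOOL_GRXCLSRLCNT query.
 */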
5853 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5854                                  struct ethtool_rxnfc *cmd)
5855 {
5856         struct hclge_vport *vport = hclge_get_vport(handle);
5857         struct hclge_dev *hdev = vport->back;
5858
5859         if (!hnae3_dev_fd_supported(hdev))
5860                 return -EOPNOTSUPP;
5861
5862         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5863         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5864
5865         return 0;
5866 }
5867
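/* Fill an ethtool_rx_flow_spec from the stored rule at fs->location,
 * reversing the conversion done by hclge_fd_get_tuple(); the reported
 * masks are synthesized from rule->unused_tuple.
 */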
5868 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5869                                   struct ethtool_rxnfc *cmd)
5870 {
5871         struct hclge_vport *vport = hclge_get_vport(handle);
5872         struct hclge_fd_rule *rule = NULL;
5873         struct hclge_dev *hdev = vport->back;
5874         struct ethtool_rx_flow_spec *fs;
5875         struct hlist_node *node2;
5876
5877         if (!hnae3_dev_fd_supported(hdev))
5878                 return -EOPNOTSUPP;
5879
5880         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5881
5882         spin_lock_bh(&hdev->fd_rule_lock);
5883
5884         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5885                 if (rule->location >= fs->location)
5886                         break;
5887         }
5888
5889         if (!rule || fs->location != rule->location) {
5890                 spin_unlock_bh(&hdev->fd_rule_lock);
5891
5892                 return -ENOENT;
5893         }
5894
5895         fs->flow_type = rule->flow_type;
5896         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5897         case SCTP_V4_FLOW:
5898         case TCP_V4_FLOW:
5899         case UDP_V4_FLOW:
5900                 fs->h_u.tcp_ip4_spec.ip4src =
5901                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5902                 fs->m_u.tcp_ip4_spec.ip4src =
5903                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5904                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5905
5906                 fs->h_u.tcp_ip4_spec.ip4dst =
5907                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5908                 fs->m_u.tcp_ip4_spec.ip4dst =
5909                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5910                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5911
5912                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5913                 fs->m_u.tcp_ip4_spec.psrc =
5914                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5915                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5916
5917                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5918                 fs->m_u.tcp_ip4_spec.pdst =
5919                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5920                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5921
5922                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5923                 fs->m_u.tcp_ip4_spec.tos =
5924                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5925                                 0 : rule->tuples_mask.ip_tos;
5926
5927                 break;
5928         case IP_USER_FLOW:
5929                 fs->h_u.usr_ip4_spec.ip4src =
5930                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5931                 fs->m_u.usr_ip4_spec.ip4src =
5932                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5933                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5934
5935                 fs->h_u.usr_ip4_spec.ip4dst =
5936                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5937                 fs->m_u.usr_ip4_spec.ip4dst =
5938                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5939                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5940
5941                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5942                 fs->m_u.usr_ip4_spec.tos =
5943                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5944                                 0 : rule->tuples_mask.ip_tos;
5945
5946                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5947                 fs->m_u.usr_ip4_spec.proto =
5948                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5949                                 0 : rule->tuples_mask.ip_proto;
5950
5951                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5952
5953                 break;
5954         case SCTP_V6_FLOW:
5955         case TCP_V6_FLOW:
5956         case UDP_V6_FLOW:
5957                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5958                                   rule->tuples.src_ip, IPV6_SIZE);
5959                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5960                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5961                                sizeof(int) * IPV6_SIZE);
5962                 else
5963                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5964                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5965
5966                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5967                                   rule->tuples.dst_ip, IPV6_SIZE);
5968                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5969                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5970                                sizeof(int) * IPV6_SIZE);
5971                 else
5972                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5973                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5974
5975                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5976                 fs->m_u.tcp_ip6_spec.psrc =
5977                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5978                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5979
5980                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5981                 fs->m_u.tcp_ip6_spec.pdst =
5982                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5983                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5984
5985                 break;
5986         case IPV6_USER_FLOW:
5987                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5988                                   rule->tuples.src_ip, IPV6_SIZE);
5989                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5990                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5991                                sizeof(int) * IPV6_SIZE);
5992                 else
5993                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5994                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5995
5996                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5997                                   rule->tuples.dst_ip, IPV6_SIZE);
5998                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5999                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6000                                sizeof(int) * IPV6_SIZE);
6001                 else
6002                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6003                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
6004
6005                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6006                 fs->m_u.usr_ip6_spec.l4_proto =
6007                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6008                                 0 : rule->tuples_mask.ip_proto;
6009
6010                 break;
6011         case ETHER_FLOW:
6012                 ether_addr_copy(fs->h_u.ether_spec.h_source,
6013                                 rule->tuples.src_mac);
6014                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6015                         eth_zero_addr(fs->m_u.ether_spec.h_source);
6016                 else
6017                         ether_addr_copy(fs->m_u.ether_spec.h_source,
6018                                         rule->tuples_mask.src_mac);
6019
6020                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6021                                 rule->tuples.dst_mac);
6022                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6023                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6024                 else
6025                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6026                                         rule->tuples_mask.dst_mac);
6027
6028                 fs->h_u.ether_spec.h_proto =
6029                                 cpu_to_be16(rule->tuples.ether_proto);
6030                 fs->m_u.ether_spec.h_proto =
6031                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6032                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6033
6034                 break;
6035         default:
6036                 spin_unlock_bh(&hdev->fd_rule_lock);
6037                 return -EOPNOTSUPP;
6038         }
6039
6040         if (fs->flow_type & FLOW_EXT) {
6041                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6042                 fs->m_ext.vlan_tci =
6043                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6044                                 cpu_to_be16(VLAN_VID_MASK) :
6045                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6046         }
6047
6048         if (fs->flow_type & FLOW_MAC_EXT) {
6049                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6050                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6051                         eth_zero_addr(fs->m_ext.h_dest);
6052                 else
6053                         ether_addr_copy(fs->m_ext.h_dest,
6054                                         rule->tuples_mask.dst_mac);
6055         }
6056
6057         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6058                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6059         } else {
6060                 u64 vf_id;
6061
6062                 fs->ring_cookie = rule->queue_id;
6063                 vf_id = rule->vf_id;
6064                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6065                 fs->ring_cookie |= vf_id;
6066         }
6067
6068         spin_unlock_bh(&hdev->fd_rule_lock);
6069
6070         return 0;
6071 }
6072
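/* Copy the location of every active rule into rule_locs for ethtool;
 * returns -EMSGSIZE if there are more rules than cmd->rule_cnt can hold.
 */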
6073 static int hclge_get_all_rules(struct hnae3_handle *handle,
6074                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6075 {
6076         struct hclge_vport *vport = hclge_get_vport(handle);
6077         struct hclge_dev *hdev = vport->back;
6078         struct hclge_fd_rule *rule;
6079         struct hlist_node *node2;
6080         int cnt = 0;
6081
6082         if (!hnae3_dev_fd_supported(hdev))
6083                 return -EOPNOTSUPP;
6084
6085         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6086
6087         spin_lock_bh(&hdev->fd_rule_lock);
6088         hlist_for_each_entry_safe(rule, node2,
6089                                   &hdev->fd_rule_list, rule_node) {
6090                 if (cnt == cmd->rule_cnt) {
6091                         spin_unlock_bh(&hdev->fd_rule_lock);
6092                         return -EMSGSIZE;
6093                 }
6094
6095                 rule_locs[cnt] = rule->location;
6096                 cnt++;
6097         }
6098
6099         spin_unlock_bh(&hdev->fd_rule_lock);
6100
6101         cmd->rule_cnt = cnt;
6102
6103         return 0;
6104 }
6105
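/* Extract the aRFS match tuples from the dissected flow keys. For IPv4
 * only the last word of the ip arrays is used; the source port is left
 * out on purpose, matching the unused-tuple mask set in
 * hclge_fd_build_arfs_rule().
 */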
6106 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6107                                      struct hclge_fd_rule_tuples *tuples)
6108 {
6109         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6110         tuples->ip_proto = fkeys->basic.ip_proto;
6111         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6112
6113         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6114                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6115                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6116         } else {
6117                 memcpy(tuples->src_ip,
6118                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6119                        sizeof(tuples->src_ip));
6120                 memcpy(tuples->dst_ip,
6121                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6122                        sizeof(tuples->dst_ip));
6123         }
6124 }
6125
6126 /* traverse all rules, checking whether an existing rule has the same tuples */
6127 static struct hclge_fd_rule *
6128 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6129                           const struct hclge_fd_rule_tuples *tuples)
6130 {
6131         struct hclge_fd_rule *rule = NULL;
6132         struct hlist_node *node;
6133
6134         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6135                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6136                         return rule;
6137         }
6138
6139         return NULL;
6140 }
6141
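/* Build an aRFS rule from the extracted tuples: fields not captured by
 * hclge_fd_get_flow_tuples() are marked unused, and the mask is all-ones
 * since aRFS matches exact flows.
 */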
6142 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6143                                      struct hclge_fd_rule *rule)
6144 {
6145         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6146                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6147                              BIT(INNER_SRC_PORT);
6148         rule->action = 0;
6149         rule->vf_id = 0;
6150         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6151         if (tuples->ether_proto == ETH_P_IP) {
6152                 if (tuples->ip_proto == IPPROTO_TCP)
6153                         rule->flow_type = TCP_V4_FLOW;
6154                 else
6155                         rule->flow_type = UDP_V4_FLOW;
6156         } else {
6157                 if (tuples->ip_proto == IPPROTO_TCP)
6158                         rule->flow_type = TCP_V6_FLOW;
6159                 else
6160                         rule->flow_type = UDP_V6_FLOW;
6161         }
6162         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6163         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6164 }
6165
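/* aRFS entry point (reached via the ndo_rx_flow_steer path): steer a flow
 * to the queue of the CPU consuming it. Returns the rule location on
 * success so the stack can later query whether the filter may expire.
 */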
6166 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6167                                       u16 flow_id, struct flow_keys *fkeys)
6168 {
6169         struct hclge_vport *vport = hclge_get_vport(handle);
6170         struct hclge_fd_rule_tuples new_tuples;
6171         struct hclge_dev *hdev = vport->back;
6172         struct hclge_fd_rule *rule;
6173         u16 tmp_queue_id;
6174         u16 bit_id;
6175         int ret;
6176
6177         if (!hnae3_dev_fd_supported(hdev))
6178                 return -EOPNOTSUPP;
6179
6180         memset(&new_tuples, 0, sizeof(new_tuples));
6181         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6182
6183         spin_lock_bh(&hdev->fd_rule_lock);
6184
6185         /* when an fd rule added by the user already exists,
6186          * arfs should not work
6187          */
6188         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6189                 spin_unlock_bh(&hdev->fd_rule_lock);
6190
6191                 return -EOPNOTSUPP;
6192         }
6193
6194         /* check whether a flow director filter already exists for this
6195          * flow: if not, create a new filter for it;
6196          * if a filter exists with a different queue id, modify the filter;
6197          * if a filter exists with the same queue id, do nothing
6198          */
6199         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6200         if (!rule) {
6201                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6202                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6203                         spin_unlock_bh(&hdev->fd_rule_lock);
6204
6205                         return -ENOSPC;
6206                 }
6207
6208                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6209                 if (!rule) {
6210                         spin_unlock_bh(&hdev->fd_rule_lock);
6211
6212                         return -ENOMEM;
6213                 }
6214
6215                 set_bit(bit_id, hdev->fd_bmap);
6216                 rule->location = bit_id;
6217                 rule->flow_id = flow_id;
6218                 rule->queue_id = queue_id;
6219                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6220                 ret = hclge_fd_config_rule(hdev, rule);
6221
6222                 spin_unlock_bh(&hdev->fd_rule_lock);
6223
6224                 if (ret)
6225                         return ret;
6226
6227                 return rule->location;
6228         }
6229
6230         spin_unlock_bh(&hdev->fd_rule_lock);
6231
6232         if (rule->queue_id == queue_id)
6233                 return rule->location;
6234
6235         tmp_queue_id = rule->queue_id;
6236         rule->queue_id = queue_id;
6237         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6238         if (ret) {
6239                 rule->queue_id = tmp_queue_id;
6240                 return ret;
6241         }
6242
6243         return rule->location;
6244 }
6245
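/* Expire stale aRFS filters: rules that rps_may_expire_flow() no longer
 * needs are unlinked under the lock, then their TCAM entries are
 * invalidated outside it.
 */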
6246 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6247 {
6248 #ifdef CONFIG_RFS_ACCEL
6249         struct hnae3_handle *handle = &hdev->vport[0].nic;
6250         struct hclge_fd_rule *rule;
6251         struct hlist_node *node;
6252         HLIST_HEAD(del_list);
6253
6254         spin_lock_bh(&hdev->fd_rule_lock);
6255         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6256                 spin_unlock_bh(&hdev->fd_rule_lock);
6257                 return;
6258         }
6259         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6260                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6261                                         rule->flow_id, rule->location)) {
6262                         hlist_del_init(&rule->rule_node);
6263                         hlist_add_head(&rule->rule_node, &del_list);
6264                         hdev->hclge_fd_rule_num--;
6265                         clear_bit(rule->location, hdev->fd_bmap);
6266                 }
6267         }
6268         spin_unlock_bh(&hdev->fd_rule_lock);
6269
6270         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6271                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6272                                      rule->location, NULL, false);
6273                 kfree(rule);
6274         }
6275 #endif
6276 }
6277
6278 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6279 {
6280 #ifdef CONFIG_RFS_ACCEL
6281         struct hclge_vport *vport = hclge_get_vport(handle);
6282         struct hclge_dev *hdev = vport->back;
6283
6284         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6285                 hclge_del_all_fd_entries(handle, true);
6286 #endif
6287 }
6288
6289 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6290 {
6291         struct hclge_vport *vport = hclge_get_vport(handle);
6292         struct hclge_dev *hdev = vport->back;
6293
6294         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6295                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6296 }
6297
6298 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6299 {
6300         struct hclge_vport *vport = hclge_get_vport(handle);
6301         struct hclge_dev *hdev = vport->back;
6302
6303         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6304 }
6305
6306 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6307 {
6308         struct hclge_vport *vport = hclge_get_vport(handle);
6309         struct hclge_dev *hdev = vport->back;
6310
6311         return hdev->rst_stats.hw_reset_done_cnt;
6312 }
6313
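/* Toggle flow director: on disable, flush the hardware entries, and free
 * the software list too if only arfs rules were active, so user-added
 * rules can still be restored when fd is re-enabled.
 */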
6314 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6315 {
6316         struct hclge_vport *vport = hclge_get_vport(handle);
6317         struct hclge_dev *hdev = vport->back;
6318         bool clear;
6319
6320         hdev->fd_en = enable;
6321         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6322         if (!enable)
6323                 hclge_del_all_fd_entries(handle, clear);
6324         else
6325                 hclge_restore_fd_entries(handle);
6326 }
6327
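/* Enable or disable the MAC: when enabling, turn on TX/RX, padding, FCS
 * insertion/stripping and oversize truncation in a single register write;
 * when disabling, clear them all.
 */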
6328 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6329 {
6330         struct hclge_desc desc;
6331         struct hclge_config_mac_mode_cmd *req =
6332                 (struct hclge_config_mac_mode_cmd *)desc.data;
6333         u32 loop_en = 0;
6334         int ret;
6335
6336         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6337
6338         if (enable) {
6339                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6340                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6341                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6342                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6343                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6344                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6345                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6346                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6347                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6348                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6349         }
6350
6351         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6352
6353         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6354         if (ret)
6355                 dev_err(&hdev->pdev->dev,
6356                         "mac enable fail, ret =%d.\n", ret);
6357 }
6358
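/* Read-modify-write of the per-function switch parameter: read the
 * current value, then rewrite only the bits selected by param_mask.
 */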
6359 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6360                                      u8 switch_param, u8 param_mask)
6361 {
6362         struct hclge_mac_vlan_switch_cmd *req;
6363         struct hclge_desc desc;
6364         u32 func_id;
6365         int ret;
6366
6367         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6368         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6369
6370         /* read current config parameter */
6371         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6372                                    true);
6373         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6374         req->func_id = cpu_to_le32(func_id);
6375
6376         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6377         if (ret) {
6378                 dev_err(&hdev->pdev->dev,
6379                         "read mac vlan switch parameter fail, ret = %d\n", ret);
6380                 return ret;
6381         }
6382
6383         /* modify and write new config parameter */
6384         hclge_cmd_reuse_desc(&desc, false);
6385         req->switch_param = (req->switch_param & param_mask) | switch_param;
6386         req->param_mask = param_mask;
6387
6388         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6389         if (ret)
6390                 dev_err(&hdev->pdev->dev,
6391                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6392         return ret;
6393 }
6394
6395 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6396                                        int link_ret)
6397 {
6398 #define HCLGE_PHY_LINK_STATUS_NUM  200
6399
6400         struct phy_device *phydev = hdev->hw.mac.phydev;
6401         int i = 0;
6402         int ret;
6403
6404         do {
6405                 ret = phy_read_status(phydev);
6406                 if (ret) {
6407                         dev_err(&hdev->pdev->dev,
6408                                 "phy update link status fail, ret = %d\n", ret);
6409                         return;
6410                 }
6411
6412                 if (phydev->link == link_ret)
6413                         break;
6414
6415                 msleep(HCLGE_LINK_STATUS_MS);
6416         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6417 }
6418
6419 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6420 {
6421 #define HCLGE_MAC_LINK_STATUS_NUM  100
6422
6423         int i = 0;
6424         int ret;
6425
6426         do {
6427                 ret = hclge_get_mac_link_status(hdev);
6428                 if (ret < 0)
6429                         return ret;
6430                 else if (ret == link_ret)
6431                         return 0;
6432
6433                 msleep(HCLGE_LINK_STATUS_MS);
6434         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6435         return -EBUSY;
6436 }
6437
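/* Wait for the PHY (if any) and then the MAC to report the expected
 * link state after a loopback change.
 */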
6438 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6439                                           bool is_phy)
6440 {
6441 #define HCLGE_LINK_STATUS_DOWN 0
6442 #define HCLGE_LINK_STATUS_UP   1
6443
6444         int link_ret;
6445
6446         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6447
6448         if (is_phy)
6449                 hclge_phy_link_status_wait(hdev, link_ret);
6450
6451         return hclge_mac_link_status_wait(hdev, link_ret);
6452 }
6453
6454 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6455 {
6456         struct hclge_config_mac_mode_cmd *req;
6457         struct hclge_desc desc;
6458         u32 loop_en;
6459         int ret;
6460
6461         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6462         /* 1 Read out the MAC mode config first */
6463         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6464         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6465         if (ret) {
6466                 dev_err(&hdev->pdev->dev,
6467                         "mac loopback get fail, ret =%d.\n", ret);
6468                 return ret;
6469         }
6470
6471         /* 2 Then set up the loopback flag */
6472         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6473         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6474         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6475         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6476
6477         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6478
6479         /* 3 Config the MAC work mode with the loopback flag
6480          * and its original configuration parameters
6481          */
6482         hclge_cmd_reuse_desc(&desc, false);
6483         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6484         if (ret)
6485                 dev_err(&hdev->pdev->dev,
6486                         "mac loopback set fail, ret =%d.\n", ret);
6487         return ret;
6488 }
6489
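/* Configure serial or parallel serdes loopback via a firmware command,
 * then poll up to HCLGE_SERDES_RETRY_NUM times for the DONE and SUCCESS
 * status bits.
 */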
6490 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6491                                      enum hnae3_loop loop_mode)
6492 {
6493 #define HCLGE_SERDES_RETRY_MS   10
6494 #define HCLGE_SERDES_RETRY_NUM  100
6495
6496         struct hclge_serdes_lb_cmd *req;
6497         struct hclge_desc desc;
6498         int ret, i = 0;
6499         u8 loop_mode_b;
6500
6501         req = (struct hclge_serdes_lb_cmd *)desc.data;
6502         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6503
6504         switch (loop_mode) {
6505         case HNAE3_LOOP_SERIAL_SERDES:
6506                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6507                 break;
6508         case HNAE3_LOOP_PARALLEL_SERDES:
6509                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6510                 break;
6511         default:
6512                 dev_err(&hdev->pdev->dev,
6513                         "unsupported serdes loopback mode %d\n", loop_mode);
6514                 return -ENOTSUPP;
6515         }
6516
6517         if (en) {
6518                 req->enable = loop_mode_b;
6519                 req->mask = loop_mode_b;
6520         } else {
6521                 req->mask = loop_mode_b;
6522         }
6523
6524         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6525         if (ret) {
6526                 dev_err(&hdev->pdev->dev,
6527                         "serdes loopback set fail, ret = %d\n", ret);
6528                 return ret;
6529         }
6530
6531         do {
6532                 msleep(HCLGE_SERDES_RETRY_MS);
6533                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6534                                            true);
6535                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6536                 if (ret) {
6537                         dev_err(&hdev->pdev->dev,
6538                                 "serdes loopback get fail, ret = %d\n", ret);
6539                         return ret;
6540                 }
6541         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6542                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6543
6544         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6545                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6546                 return -EBUSY;
6547         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6548                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6549                 return -EIO;
6550         }
6551         return ret;
6552 }
6553
6554 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6555                                      enum hnae3_loop loop_mode)
6556 {
6557         int ret;
6558
6559         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6560         if (ret)
6561                 return ret;
6562
6563         hclge_cfg_mac_mode(hdev, en);
6564
6565         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6566         if (ret)
6567                 dev_err(&hdev->pdev->dev,
6568                         "serdes loopback config mac mode timeout\n");
6569
6570         return ret;
6571 }
6572
6573 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6574                                      struct phy_device *phydev)
6575 {
6576         int ret;
6577
6578         if (!phydev->suspended) {
6579                 ret = phy_suspend(phydev);
6580                 if (ret)
6581                         return ret;
6582         }
6583
6584         ret = phy_resume(phydev);
6585         if (ret)
6586                 return ret;
6587
6588         return phy_loopback(phydev, true);
6589 }
6590
6591 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6592                                       struct phy_device *phydev)
6593 {
6594         int ret;
6595
6596         ret = phy_loopback(phydev, false);
6597         if (ret)
6598                 return ret;
6599
6600         return phy_suspend(phydev);
6601 }
6602
6603 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6604 {
6605         struct phy_device *phydev = hdev->hw.mac.phydev;
6606         int ret;
6607
6608         if (!phydev)
6609                 return -ENOTSUPP;
6610
6611         if (en)
6612                 ret = hclge_enable_phy_loopback(hdev, phydev);
6613         else
6614                 ret = hclge_disable_phy_loopback(hdev, phydev);
6615         if (ret) {
6616                 dev_err(&hdev->pdev->dev,
6617                         "set phy loopback fail, ret = %d\n", ret);
6618                 return ret;
6619         }
6620
6621         hclge_cfg_mac_mode(hdev, en);
6622
6623         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6624         if (ret)
6625                 dev_err(&hdev->pdev->dev,
6626                         "phy loopback config mac mode timeout\n");
6627
6628         return ret;
6629 }
6630
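/* Enable or disable a single TQP for the given stream id; used here to
 * gate the per-queue datapath when a loopback mode is toggled.
 */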
6631 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6632                             int stream_id, bool enable)
6633 {
6634         struct hclge_desc desc;
6635         struct hclge_cfg_com_tqp_queue_cmd *req =
6636                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6637         int ret;
6638
6639         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6640         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6641         req->stream_id = cpu_to_le16(stream_id);
6642         if (enable)
6643                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6644
6645         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6646         if (ret)
6647                 dev_err(&hdev->pdev->dev,
6648                         "Tqp enable fail, status =%d.\n", ret);
6649         return ret;
6650 }
6651
6652 static int hclge_set_loopback(struct hnae3_handle *handle,
6653                               enum hnae3_loop loop_mode, bool en)
6654 {
6655         struct hclge_vport *vport = hclge_get_vport(handle);
6656         struct hnae3_knic_private_info *kinfo;
6657         struct hclge_dev *hdev = vport->back;
6658         int i, ret;
6659
6660         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6661          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6662          * the same, the packets are looped back in the SSU. If SSU loopback
6663          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6664          */
6665         if (hdev->pdev->revision >= 0x21) {
6666                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6667
6668                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6669                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6670                 if (ret)
6671                         return ret;
6672         }
6673
6674         switch (loop_mode) {
6675         case HNAE3_LOOP_APP:
6676                 ret = hclge_set_app_loopback(hdev, en);
6677                 break;
6678         case HNAE3_LOOP_SERIAL_SERDES:
6679         case HNAE3_LOOP_PARALLEL_SERDES:
6680                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6681                 break;
6682         case HNAE3_LOOP_PHY:
6683                 ret = hclge_set_phy_loopback(hdev, en);
6684                 break;
6685         default:
6686                 ret = -ENOTSUPP;
6687                 dev_err(&hdev->pdev->dev,
6688                         "loop_mode %d is not supported\n", loop_mode);
6689                 break;
6690         }
6691
6692         if (ret)
6693                 return ret;
6694
6695         kinfo = &vport->nic.kinfo;
6696         for (i = 0; i < kinfo->num_tqps; i++) {
6697                 ret = hclge_tqp_enable(hdev, i, 0, en);
6698                 if (ret)
6699                         return ret;
6700         }
6701
6702         return 0;
6703 }
6704
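/* Disable the app loopback and both serdes loopback modes so the device
 * starts from a known non-loopback state.
 */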
6705 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6706 {
6707         int ret;
6708
6709         ret = hclge_set_app_loopback(hdev, false);
6710         if (ret)
6711                 return ret;
6712
6713         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6714         if (ret)
6715                 return ret;
6716
6717         return hclge_cfg_serdes_loopback(hdev, false,
6718                                          HNAE3_LOOP_PARALLEL_SERDES);
6719 }
6720
6721 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6722 {
6723         struct hclge_vport *vport = hclge_get_vport(handle);
6724         struct hnae3_knic_private_info *kinfo;
6725         struct hnae3_queue *queue;
6726         struct hclge_tqp *tqp;
6727         int i;
6728
6729         kinfo = &vport->nic.kinfo;
6730         for (i = 0; i < kinfo->num_tqps; i++) {
6731                 queue = handle->kinfo.tqp[i];
6732                 tqp = container_of(queue, struct hclge_tqp, q);
6733                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6734         }
6735 }
6736
6737 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6738 {
6739         struct hclge_vport *vport = hclge_get_vport(handle);
6740         struct hclge_dev *hdev = vport->back;
6741
6742         if (enable) {
6743                 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6744         } else {
6745                 /* Set the DOWN flag here to prevent the service task
6746                  * from being scheduled again
6747                  */
6748                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6749                 cancel_delayed_work_sync(&hdev->service_task);
6750                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6751         }
6752 }
6753
6754 static int hclge_ae_start(struct hnae3_handle *handle)
6755 {
6756         struct hclge_vport *vport = hclge_get_vport(handle);
6757         struct hclge_dev *hdev = vport->back;
6758
6759         /* mac enable */
6760         hclge_cfg_mac_mode(hdev, true);
6761         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6762         hdev->hw.mac.link = 0;
6763
6764         /* reset tqp stats */
6765         hclge_reset_tqp_stats(handle);
6766
6767         hclge_mac_start_phy(hdev);
6768
6769         return 0;
6770 }
6771
6772 static void hclge_ae_stop(struct hnae3_handle *handle)
6773 {
6774         struct hclge_vport *vport = hclge_get_vport(handle);
6775         struct hclge_dev *hdev = vport->back;
6776         int i;
6777
6778         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6779
6780         hclge_clear_arfs_rules(handle);
6781
6782         /* If it is not a PF reset, the firmware will disable the MAC,
6783          * so we only need to stop the PHY here.
6784          */
6785         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6786             hdev->reset_type != HNAE3_FUNC_RESET) {
6787                 hclge_mac_stop_phy(hdev);
6788                 hclge_update_link_status(hdev);
6789                 return;
6790         }
6791
6792         for (i = 0; i < handle->kinfo.num_tqps; i++)
6793                 hclge_reset_tqp(handle, i);
6794
6795         hclge_config_mac_tnl_int(hdev, false);
6796
6797         /* Mac disable */
6798         hclge_cfg_mac_mode(hdev, false);
6799
6800         hclge_mac_stop_phy(hdev);
6801
6802         /* reset tqp stats */
6803         hclge_reset_tqp_stats(handle);
6804         hclge_update_link_status(hdev);
6805 }
6806
6807 int hclge_vport_start(struct hclge_vport *vport)
6808 {
6809         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6810         vport->last_active_jiffies = jiffies;
6811         return 0;
6812 }
6813
6814 void hclge_vport_stop(struct hclge_vport *vport)
6815 {
6816         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6817 }
6818
6819 static int hclge_client_start(struct hnae3_handle *handle)
6820 {
6821         struct hclge_vport *vport = hclge_get_vport(handle);
6822
6823         return hclge_vport_start(vport);
6824 }
6825
6826 static void hclge_client_stop(struct hnae3_handle *handle)
6827 {
6828         struct hclge_vport *vport = hclge_get_vport(handle);
6829
6830         hclge_vport_stop(vport);
6831 }
6832
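/* Translate a MAC-VLAN table command response into an errno. The meaning
 * of resp_code depends on the opcode: for add, 1 still means success,
 * while for remove and lookup it means the entry was not found.
 */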
6833 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6834                                          u16 cmdq_resp, u8  resp_code,
6835                                          enum hclge_mac_vlan_tbl_opcode op)
6836 {
6837         struct hclge_dev *hdev = vport->back;
6838
6839         if (cmdq_resp) {
6840                 dev_err(&hdev->pdev->dev,
6841                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6842                         cmdq_resp);
6843                 return -EIO;
6844         }
6845
6846         if (op == HCLGE_MAC_VLAN_ADD) {
6847                 if (!resp_code || resp_code == 1) {
6848                         return 0;
6849                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6850                         dev_err(&hdev->pdev->dev,
6851                                 "add mac addr failed for uc_overflow.\n");
6852                         return -ENOSPC;
6853                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6854                         dev_err(&hdev->pdev->dev,
6855                                 "add mac addr failed for mc_overflow.\n");
6856                         return -ENOSPC;
6857                 }
6858
6859                 dev_err(&hdev->pdev->dev,
6860                         "add mac addr failed for undefined, code=%u.\n",
6861                         resp_code);
6862                 return -EIO;
6863         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6864                 if (!resp_code) {
6865                         return 0;
6866                 } else if (resp_code == 1) {
6867                         dev_dbg(&hdev->pdev->dev,
6868                                 "remove mac addr failed for miss.\n");
6869                         return -ENOENT;
6870                 }
6871
6872                 dev_err(&hdev->pdev->dev,
6873                         "remove mac addr failed for undefined, code=%u.\n",
6874                         resp_code);
6875                 return -EIO;
6876         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6877                 if (!resp_code) {
6878                         return 0;
6879                 } else if (resp_code == 1) {
6880                         dev_dbg(&hdev->pdev->dev,
6881                                 "lookup mac addr failed for miss.\n");
6882                         return -ENOENT;
6883                 }
6884
6885                 dev_err(&hdev->pdev->dev,
6886                         "lookup mac addr failed for undefined, code=%u.\n",
6887                         resp_code);
6888                 return -EIO;
6889         }
6890
6891         dev_err(&hdev->pdev->dev,
6892                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6893
6894         return -EINVAL;
6895 }
6896
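/* Set or clear the bit for vfid in the VF bitmap that spans desc[1] and
 * desc[2]; the first of the two descriptors covers VF ids 0..191.
 */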
6897 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6898 {
6899 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6900
6901         unsigned int word_num;
6902         unsigned int bit_num;
6903
6904         if (vfid > 255 || vfid < 0)
6905                 return -EIO;
6906
6907         if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6908                 word_num = vfid / 32;
6909                 bit_num  = vfid % 32;
6910                 if (clr)
6911                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6912                 else
6913                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6914         } else {
6915                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6916                 bit_num  = vfid % 32;
6917                 if (clr)
6918                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6919                 else
6920                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6921         }
6922
6923         return 0;
6924 }
6925
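/* Check whether the VF bitmap words in desc[1] and desc[2] are all zero,
 * i.e. no function references this MAC-VLAN entry any more.
 */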
6926 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6927 {
6928 #define HCLGE_DESC_NUMBER 3
6929 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6930         int i, j;
6931
6932         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6933                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6934                         if (desc[i].data[j])
6935                                 return false;
6936
6937         return true;
6938 }
6939
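/* Pack a 6-byte MAC address into the hi32/lo16 layout used by the
 * MAC-VLAN table entry; multicast entries also set the mc entry-type
 * bits.
 */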
6940 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6941                                    const u8 *addr, bool is_mc)
6942 {
6943         const unsigned char *mac_addr = addr;
6944         u32 high_val = (mac_addr[2] << 16) | (mac_addr[3] << 24) |
6945                        mac_addr[0] | (mac_addr[1] << 8);
6946         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6947
6948         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6949         if (is_mc) {
6950                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6951                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6952         }
6953
6954         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6955         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6956 }
6957
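/* Remove an entry from the MAC-VLAN table and translate the firmware
 * response via hclge_get_mac_vlan_cmd_status().
 */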
6958 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6959                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6960 {
6961         struct hclge_dev *hdev = vport->back;
6962         struct hclge_desc desc;
6963         u8 resp_code;
6964         u16 retval;
6965         int ret;
6966
6967         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6968
6969         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6970
6971         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6972         if (ret) {
6973                 dev_err(&hdev->pdev->dev,
6974                         "del mac addr failed for cmd_send, ret =%d.\n",
6975                         ret);
6976                 return ret;
6977         }
6978         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6979         retval = le16_to_cpu(desc.retval);
6980
6981         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6982                                              HCLGE_MAC_VLAN_REMOVE);
6983 }
6984
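/* Look up a MAC-VLAN table entry; a multicast lookup needs three chained
 * descriptors because the VF bitmap spans more than one descriptor.
 */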
6985 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6986                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6987                                      struct hclge_desc *desc,
6988                                      bool is_mc)
6989 {
6990         struct hclge_dev *hdev = vport->back;
6991         u8 resp_code;
6992         u16 retval;
6993         int ret;
6994
6995         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6996         if (is_mc) {
6997                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6998                 memcpy(desc[0].data,
6999                        req,
7000                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7001                 hclge_cmd_setup_basic_desc(&desc[1],
7002                                            HCLGE_OPC_MAC_VLAN_ADD,
7003                                            true);
7004                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7005                 hclge_cmd_setup_basic_desc(&desc[2],
7006                                            HCLGE_OPC_MAC_VLAN_ADD,
7007                                            true);
7008                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7009         } else {
7010                 memcpy(desc[0].data,
7011                        req,
7012                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7013                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7014         }
7015         if (ret) {
7016                 dev_err(&hdev->pdev->dev,
7017                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7018                         ret);
7019                 return ret;
7020         }
7021         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7022         retval = le16_to_cpu(desc[0].retval);
7023
7024         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7025                                              HCLGE_MAC_VLAN_LKUP);
7026 }
7027
7028 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7029                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7030                                   struct hclge_desc *mc_desc)
7031 {
7032         struct hclge_dev *hdev = vport->back;
7033         int cfg_status;
7034         u8 resp_code;
7035         u16 retval;
7036         int ret;
7037
7038         if (!mc_desc) {
7039                 struct hclge_desc desc;
7040
7041                 hclge_cmd_setup_basic_desc(&desc,
7042                                            HCLGE_OPC_MAC_VLAN_ADD,
7043                                            false);
7044                 memcpy(desc.data, req,
7045                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7046                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7047                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7048                 retval = le16_to_cpu(desc.retval);
7049
7050                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7051                                                            resp_code,
7052                                                            HCLGE_MAC_VLAN_ADD);
7053         } else {
7054                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7055                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7056                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7057                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7058                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7059                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7060                 memcpy(mc_desc[0].data, req,
7061                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7062                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7063                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7064                 retval = le16_to_cpu(mc_desc[0].retval);
7065
7066                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7067                                                            resp_code,
7068                                                            HCLGE_MAC_VLAN_ADD);
7069         }
7070
7071         if (ret) {
7072                 dev_err(&hdev->pdev->dev,
7073                         "add mac addr failed for cmd_send, ret =%d.\n",
7074                         ret);
7075                 return ret;
7076         }
7077
7078         return cfg_status;
7079 }
7080
7081 static int hclge_init_umv_space(struct hclge_dev *hdev)
7082 {
7083         u16 allocated_size = 0;
7084         int ret;
7085
7086         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7087                                   true);
7088         if (ret)
7089                 return ret;
7090
7091         if (allocated_size < hdev->wanted_umv_size)
7092                 dev_warn(&hdev->pdev->dev,
7093                          "Alloc umv space failed, want %u, get %u\n",
7094                          hdev->wanted_umv_size, allocated_size);
7095
7096         mutex_init(&hdev->umv_mutex);
7097         hdev->max_umv_size = allocated_size;
7098         /* Divide max_umv_size into (hdev->num_req_vfs + 2) equal parts:
7099          * the pf and each requested vf get one private part, and the
7100          * spare part plus the remainder form the pool shared by them.
7101          */
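             /* e.g. with max_umv_size = 256 and num_req_vfs = 6, each of the
              * pf and its 6 vfs gets priv_umv_size = 256 / 8 = 32 private
              * entries, and share_umv_size starts at 32 + 256 % 8 = 32.
              */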
7102         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7103         hdev->share_umv_size = hdev->priv_umv_size +
7104                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7105
7106         return 0;
7107 }
7108
7109 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7110 {
7111         int ret;
7112
7113         if (hdev->max_umv_size > 0) {
7114                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7115                                           false);
7116                 if (ret)
7117                         return ret;
7118                 hdev->max_umv_size = 0;
7119         }
7120         mutex_destroy(&hdev->umv_mutex);
7121
7122         return 0;
7123 }
7124
7125 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7126                                u16 *allocated_size, bool is_alloc)
7127 {
7128         struct hclge_umv_spc_alc_cmd *req;
7129         struct hclge_desc desc;
7130         int ret;
7131
7132         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7133         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7134         if (!is_alloc)
7135                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7136
7137         req->space_size = cpu_to_le32(space_size);
7138
7139         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7140         if (ret) {
7141                 dev_err(&hdev->pdev->dev,
7142                         "%s umv space failed for cmd_send, ret =%d\n",
7143                         is_alloc ? "allocate" : "free", ret);
7144                 return ret;
7145         }
7146
7147         if (is_alloc && allocated_size)
7148                 *allocated_size = le32_to_cpu(desc.data[1]);
7149
7150         return 0;
7151 }
7152
7153 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7154 {
7155         struct hclge_vport *vport;
7156         int i;
7157
7158         for (i = 0; i < hdev->num_alloc_vport; i++) {
7159                 vport = &hdev->vport[i];
7160                 vport->used_umv_num = 0;
7161         }
7162
7163         mutex_lock(&hdev->umv_mutex);
7164         hdev->share_umv_size = hdev->priv_umv_size +
7165                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7166         mutex_unlock(&hdev->umv_mutex);
7167 }
7168
7169 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7170 {
7171         struct hclge_dev *hdev = vport->back;
7172         bool is_full;
7173
7174         mutex_lock(&hdev->umv_mutex);
7175         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7176                    hdev->share_umv_size == 0);
7177         mutex_unlock(&hdev->umv_mutex);
7178
7179         return is_full;
7180 }
7181
7182 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7183 {
7184         struct hclge_dev *hdev = vport->back;
7185
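             /* Entries within a vport's private quota come out of
              * priv_umv_size; anything beyond it is accounted against the
              * pool shared by all functions (see hclge_init_umv_space()).
              */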
7186         mutex_lock(&hdev->umv_mutex);
7187         if (is_free) {
7188                 if (vport->used_umv_num > hdev->priv_umv_size)
7189                         hdev->share_umv_size++;
7190
7191                 if (vport->used_umv_num > 0)
7192                         vport->used_umv_num--;
7193         } else {
7194                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7195                     hdev->share_umv_size > 0)
7196                         hdev->share_umv_size--;
7197                 vport->used_umv_num++;
7198         }
7199         mutex_unlock(&hdev->umv_mutex);
7200 }
7201
7202 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7203                              const unsigned char *addr)
7204 {
7205         struct hclge_vport *vport = hclge_get_vport(handle);
7206
7207         return hclge_add_uc_addr_common(vport, addr);
7208 }
7209
7210 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7211                              const unsigned char *addr)
7212 {
7213         struct hclge_dev *hdev = vport->back;
7214         struct hclge_mac_vlan_tbl_entry_cmd req;
7215         struct hclge_desc desc;
7216         u16 egress_port = 0;
7217         int ret;
7218
7219         /* mac addr check */
7220         if (is_zero_ether_addr(addr) ||
7221             is_broadcast_ether_addr(addr) ||
7222             is_multicast_ether_addr(addr)) {
7223                 dev_err(&hdev->pdev->dev,
7224                         "Set_uc mac err! invalid mac:%pM. is_zero=%d, is_br=%d, is_mul=%d\n",
7225                          addr, is_zero_ether_addr(addr),
7226                          is_broadcast_ether_addr(addr),
7227                          is_multicast_ether_addr(addr));
7228                 return -EINVAL;
7229         }
7230
7231         memset(&req, 0, sizeof(req));
7232
7233         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7234                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7235
7236         req.egress_port = cpu_to_le16(egress_port);
7237
7238         hclge_prepare_mac_addr(&req, addr, false);
7239
7240         /* Look up the mac address in the mac_vlan table, and add
7241          * it if the entry does not exist. Duplicate unicast entries
7242          * are not allowed in the mac vlan table.
7243          */
7244         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7245         if (ret == -ENOENT) {
7246                 if (!hclge_is_umv_space_full(vport)) {
7247                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7248                         if (!ret)
7249                                 hclge_update_umv_space(vport, false);
7250                         return ret;
7251                 }
7252
7253                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7254                         hdev->priv_umv_size);
7255
7256                 return -ENOSPC;
7257         }
7258
7259         /* check if we just hit the duplicate */
7260         if (!ret) {
7261                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7262                          vport->vport_id, addr);
7263                 return 0;
7264         }
7265
7266         dev_err(&hdev->pdev->dev,
7267                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7268                 addr);
7269
7270         return ret;
7271 }
7272
7273 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7274                             const unsigned char *addr)
7275 {
7276         struct hclge_vport *vport = hclge_get_vport(handle);
7277
7278         return hclge_rm_uc_addr_common(vport, addr);
7279 }
7280
7281 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7282                             const unsigned char *addr)
7283 {
7284         struct hclge_dev *hdev = vport->back;
7285         struct hclge_mac_vlan_tbl_entry_cmd req;
7286         int ret;
7287
7288         /* mac addr check */
7289         if (is_zero_ether_addr(addr) ||
7290             is_broadcast_ether_addr(addr) ||
7291             is_multicast_ether_addr(addr)) {
7292                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7293                         addr);
7294                 return -EINVAL;
7295         }
7296
7297         memset(&req, 0, sizeof(req));
7298         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7299         hclge_prepare_mac_addr(&req, addr, false);
7300         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7301         if (!ret)
7302                 hclge_update_umv_space(vport, true);
7303
7304         return ret;
7305 }
7306
7307 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7308                              const unsigned char *addr)
7309 {
7310         struct hclge_vport *vport = hclge_get_vport(handle);
7311
7312         return hclge_add_mc_addr_common(vport, addr);
7313 }
7314
7315 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7316                              const unsigned char *addr)
7317 {
7318         struct hclge_dev *hdev = vport->back;
7319         struct hclge_mac_vlan_tbl_entry_cmd req;
7320         struct hclge_desc desc[3];
7321         int status;
7322
7323         /* mac addr check */
7324         if (!is_multicast_ether_addr(addr)) {
7325                 dev_err(&hdev->pdev->dev,
7326                         "Add mc mac err! invalid mac:%pM.\n",
7327                          addr);
7328                 return -EINVAL;
7329         }
7330         memset(&req, 0, sizeof(req));
7331         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7332         hclge_prepare_mac_addr(&req, addr, true);
7333         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7334         if (status) {
7335                 /* This mac addr does not exist, add a new entry for it */
7336                 memset(desc[0].data, 0, sizeof(desc[0].data));
7337                 memset(desc[1].data, 0, sizeof(desc[1].data));
7338                 memset(desc[2].data, 0, sizeof(desc[2].data));
7339         }
7340         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7341         if (status)
7342                 return status;
7343         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7344
7345         if (status == -ENOSPC)
7346                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7347
7348         return status;
7349 }
7350
7351 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7352                             const unsigned char *addr)
7353 {
7354         struct hclge_vport *vport = hclge_get_vport(handle);
7355
7356         return hclge_rm_mc_addr_common(vport, addr);
7357 }
7358
7359 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7360                             const unsigned char *addr)
7361 {
7362         struct hclge_dev *hdev = vport->back;
7363         struct hclge_mac_vlan_tbl_entry_cmd req;
7364         int status;
7365         struct hclge_desc desc[3];
7366
7367         /* mac addr check */
7368         if (!is_multicast_ether_addr(addr)) {
7369                 dev_dbg(&hdev->pdev->dev,
7370                         "Remove mc mac err! invalid mac:%pM.\n",
7371                          addr);
7372                 return -EINVAL;
7373         }
7374
7375         memset(&req, 0, sizeof(req));
7376         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7377         hclge_prepare_mac_addr(&req, addr, true);
7378         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7379         if (!status) {
7380                 /* This mac addr exists, remove this handle's VFID from it */
7381                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7382                 if (status)
7383                         return status;
7384
7385                 if (hclge_is_all_function_id_zero(desc))
7386                         /* All the vfids are zero, so delete this entry */
7387                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7388                 else
7389                         /* Not all the vfids are zero, so just update the entry */
7390                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7391
7392         } else {
7393                 /* This mac address may be in the mta table, but it cannot
7394                  * be deleted here because an mta entry represents an address
7395                  * range rather than a specific address. The delete action for
7396                  * all entries takes effect in update_mta_status, called by
7397                  * hns3_nic_set_rx_mode.
7398                  */
7399                 status = 0;
7400         }
7401
7402         return status;
7403 }
7404
7405 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7406                                enum HCLGE_MAC_ADDR_TYPE mac_type)
7407 {
7408         struct hclge_vport_mac_addr_cfg *mac_cfg;
7409         struct list_head *list;
7410
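             /* vport 0 is the PF itself; only VF vports are tracked here */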
7411         if (!vport->vport_id)
7412                 return;
7413
7414         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7415         if (!mac_cfg)
7416                 return;
7417
7418         mac_cfg->hd_tbl_status = true;
7419         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7420
7421         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7422                &vport->uc_mac_list : &vport->mc_mac_list;
7423
7424         list_add_tail(&mac_cfg->node, list);
7425 }
7426
7427 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7428                               bool is_write_tbl,
7429                               enum HCLGE_MAC_ADDR_TYPE mac_type)
7430 {
7431         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7432         struct list_head *list;
7433         bool uc_flag, mc_flag;
7434
7435         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7436                &vport->uc_mac_list : &vport->mc_mac_list;
7437
7438         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7439         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7440
7441         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7442                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7443                         if (uc_flag && mac_cfg->hd_tbl_status)
7444                                 hclge_rm_uc_addr_common(vport, mac_addr);
7445
7446                         if (mc_flag && mac_cfg->hd_tbl_status)
7447                                 hclge_rm_mc_addr_common(vport, mac_addr);
7448
7449                         list_del(&mac_cfg->node);
7450                         kfree(mac_cfg);
7451                         break;
7452                 }
7453         }
7454 }
7455
7456 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7457                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7458 {
7459         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7460         struct list_head *list;
7461
7462         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7463                &vport->uc_mac_list : &vport->mc_mac_list;
7464
7465         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7466                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7467                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7468
7469                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7470                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7471
7472                 mac_cfg->hd_tbl_status = false;
7473                 if (is_del_list) {
7474                         list_del(&mac_cfg->node);
7475                         kfree(mac_cfg);
7476                 }
7477         }
7478 }
7479
7480 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7481 {
7482         struct hclge_vport_mac_addr_cfg *mac, *tmp;
7483         struct hclge_vport *vport;
7484         int i;
7485
7486         mutex_lock(&hdev->vport_cfg_mutex);
7487         for (i = 0; i < hdev->num_alloc_vport; i++) {
7488                 vport = &hdev->vport[i];
7489                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7490                         list_del(&mac->node);
7491                         kfree(mac);
7492                 }
7493
7494                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7495                         list_del(&mac->node);
7496                         kfree(mac);
7497                 }
7498         }
7499         mutex_unlock(&hdev->vport_cfg_mutex);
7500 }
7501
7502 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7503                                               u16 cmdq_resp, u8 resp_code)
7504 {
7505 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7506 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7507 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7508 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7509
7510         int return_status;
7511
7512         if (cmdq_resp) {
7513                 dev_err(&hdev->pdev->dev,
7514                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7515                         cmdq_resp);
7516                 return -EIO;
7517         }
7518
7519         switch (resp_code) {
7520         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7521         case HCLGE_ETHERTYPE_ALREADY_ADD:
7522                 return_status = 0;
7523                 break;
7524         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7525                 dev_err(&hdev->pdev->dev,
7526                         "add mac ethertype failed for manager table overflow.\n");
7527                 return_status = -EIO;
7528                 break;
7529         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7530                 dev_err(&hdev->pdev->dev,
7531                         "add mac ethertype failed for key conflict.\n");
7532                 return_status = -EIO;
7533                 break;
7534         default:
7535                 dev_err(&hdev->pdev->dev,
7536                         "add mac ethertype failed for undefined, code=%u.\n",
7537                         resp_code);
7538                 return_status = -EIO;
7539         }
7540
7541         return return_status;
7542 }
7543
7544 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7545                                      u8 *mac_addr)
7546 {
7547         struct hclge_mac_vlan_tbl_entry_cmd req;
7548         struct hclge_dev *hdev = vport->back;
7549         struct hclge_desc desc;
7550         u16 egress_port = 0;
7551         int i;
7552
7553         if (is_zero_ether_addr(mac_addr))
7554                 return false;
7555
7556         memset(&req, 0, sizeof(req));
7557         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7558                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7559         req.egress_port = cpu_to_le16(egress_port);
7560         hclge_prepare_mac_addr(&req, mac_addr, false);
7561
7562         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7563                 return true;
7564
7565         vf_idx += HCLGE_VF_VPORT_START_NUM;
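             /* besides the hw table, check the MACs assigned to the other
              * VF vports (the PF and vmdq vports are skipped)
              */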
7566         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7567                 if (i != vf_idx &&
7568                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7569                         return true;
7570
7571         return false;
7572 }
7573
7574 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7575                             u8 *mac_addr)
7576 {
7577         struct hclge_vport *vport = hclge_get_vport(handle);
7578         struct hclge_dev *hdev = vport->back;
7579
7580         vport = hclge_get_vf_vport(hdev, vf);
7581         if (!vport)
7582                 return -EINVAL;
7583
7584         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7585                 dev_info(&hdev->pdev->dev,
7586                          "Specified MAC(=%pM) is the same as before, no change committed!\n",
7587                          mac_addr);
7588                 return 0;
7589         }
7590
7591         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7592                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7593                         mac_addr);
7594                 return -EEXIST;
7595         }
7596
7597         ether_addr_copy(vport->vf_info.mac, mac_addr);
7598         dev_info(&hdev->pdev->dev,
7599                  "MAC of VF %d has been set to %pM, and the VF will be reinitialized!\n",
7600                  vf, mac_addr);
7601
7602         return hclge_inform_reset_assert_to_vf(vport);
7603 }
7604
7605 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7606                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
7607 {
7608         struct hclge_desc desc;
7609         u8 resp_code;
7610         u16 retval;
7611         int ret;
7612
7613         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7614         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7615
7616         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7617         if (ret) {
7618                 dev_err(&hdev->pdev->dev,
7619                         "add mac ethertype failed for cmd_send, ret =%d.\n",
7620                         ret);
7621                 return ret;
7622         }
7623
7624         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7625         retval = le16_to_cpu(desc.retval);
7626
7627         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7628 }
7629
7630 static int init_mgr_tbl(struct hclge_dev *hdev)
7631 {
7632         int ret;
7633         int i;
7634
7635         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7636                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7637                 if (ret) {
7638                         dev_err(&hdev->pdev->dev,
7639                                 "add mac ethertype failed, ret =%d.\n",
7640                                 ret);
7641                         return ret;
7642                 }
7643         }
7644
7645         return 0;
7646 }
7647
7648 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7649 {
7650         struct hclge_vport *vport = hclge_get_vport(handle);
7651         struct hclge_dev *hdev = vport->back;
7652
7653         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7654 }
7655
7656 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7657                               bool is_first)
7658 {
7659         const unsigned char *new_addr = (const unsigned char *)p;
7660         struct hclge_vport *vport = hclge_get_vport(handle);
7661         struct hclge_dev *hdev = vport->back;
7662         int ret;
7663
7664         /* mac addr check */
7665         if (is_zero_ether_addr(new_addr) ||
7666             is_broadcast_ether_addr(new_addr) ||
7667             is_multicast_ether_addr(new_addr)) {
7668                 dev_err(&hdev->pdev->dev,
7669                         "Change uc mac err! invalid mac:%pM.\n",
7670                          new_addr);
7671                 return -EINVAL;
7672         }
7673
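             /* In a kdump kernel the MAC entry programmed by the previous
              * kernel may still exist in hardware, so the old address is
              * removed even on the first configuration.
              */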
7674         if ((!is_first || is_kdump_kernel()) &&
7675             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7676                 dev_warn(&hdev->pdev->dev,
7677                          "remove old uc mac address fail.\n");
7678
7679         ret = hclge_add_uc_addr(handle, new_addr);
7680         if (ret) {
7681                 dev_err(&hdev->pdev->dev,
7682                         "add uc mac address fail, ret =%d.\n",
7683                         ret);
7684
7685                 if (!is_first &&
7686                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7687                         dev_err(&hdev->pdev->dev,
7688                                 "restore uc mac address fail.\n");
7689
7690                 return -EIO;
7691         }
7692
7693         ret = hclge_pause_addr_cfg(hdev, new_addr);
7694         if (ret) {
7695                 dev_err(&hdev->pdev->dev,
7696                         "configure mac pause address fail, ret =%d.\n",
7697                         ret);
7698                 return -EIO;
7699         }
7700
7701         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7702
7703         return 0;
7704 }
7705
7706 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7707                           int cmd)
7708 {
7709         struct hclge_vport *vport = hclge_get_vport(handle);
7710         struct hclge_dev *hdev = vport->back;
7711
7712         if (!hdev->hw.mac.phydev)
7713                 return -EOPNOTSUPP;
7714
7715         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7716 }
7717
7718 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7719                                       u8 fe_type, bool filter_en, u8 vf_id)
7720 {
7721         struct hclge_vlan_filter_ctrl_cmd *req;
7722         struct hclge_desc desc;
7723         int ret;
7724
7725         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7726
7727         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7728         req->vlan_type = vlan_type;
7729         req->vlan_fe = filter_en ? fe_type : 0;
7730         req->vf_id = vf_id;
7731
7732         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7733         if (ret)
7734                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7735                         ret);
7736
7737         return ret;
7738 }
7739
7740 #define HCLGE_FILTER_TYPE_VF            0
7741 #define HCLGE_FILTER_TYPE_PORT          1
7742 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7743 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7744 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7745 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7746 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7747 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7748                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7749 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7750                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7751
7752 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7753 {
7754         struct hclge_vport *vport = hclge_get_vport(handle);
7755         struct hclge_dev *hdev = vport->back;
7756
7757         if (hdev->pdev->revision >= 0x21) {
7758                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7759                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7760                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7761                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7762         } else {
7763                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7764                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7765                                            0);
7766         }
7767         if (enable)
7768                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7769         else
7770                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7771 }
7772
7773 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7774                                     bool is_kill, u16 vlan,
7775                                     __be16 proto)
7776 {
7777         struct hclge_vport *vport = &hdev->vport[vfid];
7778         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7779         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7780         struct hclge_desc desc[2];
7781         u8 vf_byte_val;
7782         u8 vf_byte_off;
7783         int ret;
7784
7785         /* If the vf vlan table is full, firmware will disable the vf vlan
7786          * filter; adding a new vlan id is then neither possible nor needed.
7787          * But with spoof check enabled and the table full, a new vlan must
7788          * not be added, since tx packets with that vlan id would be dropped.
7789          */
7790         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7791                 if (vport->vf_info.spoofchk && vlan) {
7792                         dev_err(&hdev->pdev->dev,
7793                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7794                         return -EPERM;
7795                 }
7796                 return 0;
7797         }
7798
7799         hclge_cmd_setup_basic_desc(&desc[0],
7800                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7801         hclge_cmd_setup_basic_desc(&desc[1],
7802                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7803
7804         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7805
7806         vf_byte_off = vfid / 8;
7807         vf_byte_val = 1 << (vfid % 8);
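             /* e.g. vfid = 10 selects byte 1, bit 2 (vf_byte_val = 0x04) of
              * the vf bitmap spread across the two descriptors
              */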
7808
7809         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7810         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7811
7812         req0->vlan_id  = cpu_to_le16(vlan);
7813         req0->vlan_cfg = is_kill;
7814
7815         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7816                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7817         else
7818                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7819
7820         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7821         if (ret) {
7822                 dev_err(&hdev->pdev->dev,
7823                         "Send vf vlan command fail, ret =%d.\n",
7824                         ret);
7825                 return ret;
7826         }
7827
7828         if (!is_kill) {
7829 #define HCLGE_VF_VLAN_NO_ENTRY  2
7830                 if (!req0->resp_code || req0->resp_code == 1)
7831                         return 0;
7832
7833                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7834                         set_bit(vfid, hdev->vf_vlan_full);
7835                         dev_warn(&hdev->pdev->dev,
7836                                  "vf vlan table is full, vf vlan filter is disabled\n");
7837                         return 0;
7838                 }
7839
7840                 dev_err(&hdev->pdev->dev,
7841                         "Add vf vlan filter fail, ret =%u.\n",
7842                         req0->resp_code);
7843         } else {
7844 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7845                 if (!req0->resp_code)
7846                         return 0;
7847
7848                 /* The vf vlan filter is disabled once the vf vlan table is
7849                  * full, so later vlan ids were never added to the table.
7850                  * Just return 0 without a warning to avoid massive verbose
7851                  * logs during unload.
7852                  */
7853                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7854                         return 0;
7855
7856                 dev_err(&hdev->pdev->dev,
7857                         "Kill vf vlan filter fail, ret =%u.\n",
7858                         req0->resp_code);
7859         }
7860
7861         return -EIO;
7862 }
7863
7864 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7865                                       u16 vlan_id, bool is_kill)
7866 {
7867         struct hclge_vlan_filter_pf_cfg_cmd *req;
7868         struct hclge_desc desc;
7869         u8 vlan_offset_byte_val;
7870         u8 vlan_offset_byte;
7871         u8 vlan_offset_160;
7872         int ret;
7873
7874         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7875
7876         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7877         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7878                            HCLGE_VLAN_BYTE_SIZE;
7879         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
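             /* e.g. assuming HCLGE_VLAN_ID_OFFSET_STEP is 160 and
              * HCLGE_VLAN_BYTE_SIZE is 8: vlan_id = 1000 maps to
              * vlan_offset_160 = 6, byte 5, bit 0
              */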
7880
7881         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7882         req->vlan_offset = vlan_offset_160;
7883         req->vlan_cfg = is_kill;
7884         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7885
7886         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7887         if (ret)
7888                 dev_err(&hdev->pdev->dev,
7889                         "port vlan command, send fail, ret =%d.\n", ret);
7890         return ret;
7891 }
7892
7893 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7894                                     u16 vport_id, u16 vlan_id,
7895                                     bool is_kill)
7896 {
7897         u16 vport_idx, vport_num = 0;
7898         int ret;
7899
7900         if (is_kill && !vlan_id)
7901                 return 0;
7902
7903         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7904                                        proto);
7905         if (ret) {
7906                 dev_err(&hdev->pdev->dev,
7907                         "Set %u vport vlan filter config fail, ret =%d.\n",
7908                         vport_id, ret);
7909                 return ret;
7910         }
7911
7912         /* vlan 0 may be added twice when 8021q module is enabled */
7913         if (!is_kill && !vlan_id &&
7914             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7915                 return 0;
7916
7917         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7918                 dev_err(&hdev->pdev->dev,
7919                         "Add port vlan failed, vport %u is already in vlan %u\n",
7920                         vport_id, vlan_id);
7921                 return -EINVAL;
7922         }
7923
7924         if (is_kill &&
7925             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7926                 dev_err(&hdev->pdev->dev,
7927                         "Delete port vlan failed, vport %u is not in vlan %u\n",
7928                         vport_id, vlan_id);
7929                 return -EINVAL;
7930         }
7931
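             /* touch the port-level filter only when the first vport joins
              * or the last vport leaves this vlan
              */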
7932         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7933                 vport_num++;
7934
7935         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7936                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7937                                                  is_kill);
7938
7939         return ret;
7940 }
7941
7942 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7943 {
7944         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7945         struct hclge_vport_vtag_tx_cfg_cmd *req;
7946         struct hclge_dev *hdev = vport->back;
7947         struct hclge_desc desc;
7948         u16 bmap_index;
7949         int status;
7950
7951         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7952
7953         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7954         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7955         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7956         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7957                       vcfg->accept_tag1 ? 1 : 0);
7958         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7959                       vcfg->accept_untag1 ? 1 : 0);
7960         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7961                       vcfg->accept_tag2 ? 1 : 0);
7962         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7963                       vcfg->accept_untag2 ? 1 : 0);
7964         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7965                       vcfg->insert_tag1_en ? 1 : 0);
7966         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7967                       vcfg->insert_tag2_en ? 1 : 0);
7968         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7969
7970         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7971         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7972                         HCLGE_VF_NUM_PER_BYTE;
7973         req->vf_bitmap[bmap_index] =
7974                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
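             /* e.g. assuming HCLGE_VF_NUM_PER_CMD is 64 and
              * HCLGE_VF_NUM_PER_BYTE is 8: vport_id = 10 selects
              * vf_offset 0, bmap_index 1, bit 2
              */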
7975
7976         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7977         if (status)
7978                 dev_err(&hdev->pdev->dev,
7979                         "Send port txvlan cfg command fail, ret =%d\n",
7980                         status);
7981
7982         return status;
7983 }
7984
7985 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7986 {
7987         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7988         struct hclge_vport_vtag_rx_cfg_cmd *req;
7989         struct hclge_dev *hdev = vport->back;
7990         struct hclge_desc desc;
7991         u16 bmap_index;
7992         int status;
7993
7994         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7995
7996         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7997         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7998                       vcfg->strip_tag1_en ? 1 : 0);
7999         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8000                       vcfg->strip_tag2_en ? 1 : 0);
8001         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8002                       vcfg->vlan1_vlan_prionly ? 1 : 0);
8003         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8004                       vcfg->vlan2_vlan_prionly ? 1 : 0);
8005
8006         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8007         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8008                         HCLGE_VF_NUM_PER_BYTE;
8009         req->vf_bitmap[bmap_index] =
8010                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8011
8012         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8013         if (status)
8014                 dev_err(&hdev->pdev->dev,
8015                         "Send port rxvlan cfg command fail, ret =%d\n",
8016                         status);
8017
8018         return status;
8019 }
8020
8021 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8022                                   u16 port_base_vlan_state,
8023                                   u16 vlan_tag)
8024 {
8025         int ret;
8026
8027         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8028                 vport->txvlan_cfg.accept_tag1 = true;
8029                 vport->txvlan_cfg.insert_tag1_en = false;
8030                 vport->txvlan_cfg.default_tag1 = 0;
8031         } else {
8032                 vport->txvlan_cfg.accept_tag1 = false;
8033                 vport->txvlan_cfg.insert_tag1_en = true;
8034                 vport->txvlan_cfg.default_tag1 = vlan_tag;
8035         }
8036
8037         vport->txvlan_cfg.accept_untag1 = true;
8038
8039         /* accept_tag2 and accept_untag2 are not supported on
8040          * pdev revision 0x20; newer revisions support them, but
8041          * these two fields cannot be configured by the user.
8042          */
8043         vport->txvlan_cfg.accept_tag2 = true;
8044         vport->txvlan_cfg.accept_untag2 = true;
8045         vport->txvlan_cfg.insert_tag2_en = false;
8046         vport->txvlan_cfg.default_tag2 = 0;
8047
8048         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8049                 vport->rxvlan_cfg.strip_tag1_en = false;
8050                 vport->rxvlan_cfg.strip_tag2_en =
8051                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8052         } else {
8053                 vport->rxvlan_cfg.strip_tag1_en =
8054                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8055                 vport->rxvlan_cfg.strip_tag2_en = true;
8056         }
8057         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8058         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8059
8060         ret = hclge_set_vlan_tx_offload_cfg(vport);
8061         if (ret)
8062                 return ret;
8063
8064         return hclge_set_vlan_rx_offload_cfg(vport);
8065 }
8066
8067 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8068 {
8069         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8070         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8071         struct hclge_desc desc;
8072         int status;
8073
8074         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8075         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8076         rx_req->ot_fst_vlan_type =
8077                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8078         rx_req->ot_sec_vlan_type =
8079                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8080         rx_req->in_fst_vlan_type =
8081                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8082         rx_req->in_sec_vlan_type =
8083                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8084
8085         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8086         if (status) {
8087                 dev_err(&hdev->pdev->dev,
8088                         "Send rxvlan protocol type command fail, ret =%d\n",
8089                         status);
8090                 return status;
8091         }
8092
8093         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8094
8095         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8096         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8097         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8098
8099         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8100         if (status)
8101                 dev_err(&hdev->pdev->dev,
8102                         "Send txvlan protocol type command fail, ret =%d\n",
8103                         status);
8104
8105         return status;
8106 }
8107
8108 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8109 {
8110 #define HCLGE_DEF_VLAN_TYPE             0x8100
8111
8112         struct hnae3_handle *handle = &hdev->vport[0].nic;
8113         struct hclge_vport *vport;
8114         int ret;
8115         int i;
8116
8117         if (hdev->pdev->revision >= 0x21) {
8118                 /* for revision 0x21, vf vlan filter is per function */
8119                 for (i = 0; i < hdev->num_alloc_vport; i++) {
8120                         vport = &hdev->vport[i];
8121                         ret = hclge_set_vlan_filter_ctrl(hdev,
8122                                                          HCLGE_FILTER_TYPE_VF,
8123                                                          HCLGE_FILTER_FE_EGRESS,
8124                                                          true,
8125                                                          vport->vport_id);
8126                         if (ret)
8127                                 return ret;
8128                 }
8129
8130                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8131                                                  HCLGE_FILTER_FE_INGRESS, true,
8132                                                  0);
8133                 if (ret)
8134                         return ret;
8135         } else {
8136                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8137                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
8138                                                  true, 0);
8139                 if (ret)
8140                         return ret;
8141         }
8142
8143         handle->netdev_flags |= HNAE3_VLAN_FLTR;
8144
8145         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8146         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8147         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8148         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8149         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8150         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8151
8152         ret = hclge_set_vlan_protocol_type(hdev);
8153         if (ret)
8154                 return ret;
8155
8156         for (i = 0; i < hdev->num_alloc_vport; i++) {
8157                 u16 vlan_tag;
8158
8159                 vport = &hdev->vport[i];
8160                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8161
8162                 ret = hclge_vlan_offload_cfg(vport,
8163                                              vport->port_base_vlan_cfg.state,
8164                                              vlan_tag);
8165                 if (ret)
8166                         return ret;
8167         }
8168
8169         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8170 }
8171
8172 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8173                                        bool written_to_tbl)
8174 {
8175         struct hclge_vport_vlan_cfg *vlan;
8176
8177         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8178         if (!vlan)
8179                 return;
8180
8181         vlan->hd_tbl_status = written_to_tbl;
8182         vlan->vlan_id = vlan_id;
8183
8184         list_add_tail(&vlan->node, &vport->vlan_list);
8185 }
8186
8187 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8188 {
8189         struct hclge_vport_vlan_cfg *vlan, *tmp;
8190         struct hclge_dev *hdev = vport->back;
8191         int ret;
8192
8193         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8194                 if (!vlan->hd_tbl_status) {
8195                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8196                                                        vport->vport_id,
8197                                                        vlan->vlan_id, false);
8198                         if (ret) {
8199                                 dev_err(&hdev->pdev->dev,
8200                                         "restore vport vlan list failed, ret=%d\n",
8201                                         ret);
8202                                 return ret;
8203                         }
8204                 }
8205                 vlan->hd_tbl_status = true;
8206         }
8207
8208         return 0;
8209 }
8210
8211 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8212                                       bool is_write_tbl)
8213 {
8214         struct hclge_vport_vlan_cfg *vlan, *tmp;
8215         struct hclge_dev *hdev = vport->back;
8216
8217         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8218                 if (vlan->vlan_id == vlan_id) {
8219                         if (is_write_tbl && vlan->hd_tbl_status)
8220                                 hclge_set_vlan_filter_hw(hdev,
8221                                                          htons(ETH_P_8021Q),
8222                                                          vport->vport_id,
8223                                                          vlan_id,
8224                                                          true);
8225
8226                         list_del(&vlan->node);
8227                         kfree(vlan);
8228                         break;
8229                 }
8230         }
8231 }
8232
8233 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8234 {
8235         struct hclge_vport_vlan_cfg *vlan, *tmp;
8236         struct hclge_dev *hdev = vport->back;
8237
8238         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8239                 if (vlan->hd_tbl_status)
8240                         hclge_set_vlan_filter_hw(hdev,
8241                                                  htons(ETH_P_8021Q),
8242                                                  vport->vport_id,
8243                                                  vlan->vlan_id,
8244                                                  true);
8245
8246                 vlan->hd_tbl_status = false;
8247                 if (is_del_list) {
8248                         list_del(&vlan->node);
8249                         kfree(vlan);
8250                 }
8251         }
8252 }
8253
8254 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8255 {
8256         struct hclge_vport_vlan_cfg *vlan, *tmp;
8257         struct hclge_vport *vport;
8258         int i;
8259
8260         mutex_lock(&hdev->vport_cfg_mutex);
8261         for (i = 0; i < hdev->num_alloc_vport; i++) {
8262                 vport = &hdev->vport[i];
8263                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8264                         list_del(&vlan->node);
8265                         kfree(vlan);
8266                 }
8267         }
8268         mutex_unlock(&hdev->vport_cfg_mutex);
8269 }
8270
8271 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8272 {
8273         struct hclge_vport *vport = hclge_get_vport(handle);
8274         struct hclge_vport_vlan_cfg *vlan, *tmp;
8275         struct hclge_dev *hdev = vport->back;
8276         u16 vlan_proto;
8277         u16 state, vlan_id;
8278         int i;
8279
8280         mutex_lock(&hdev->vport_cfg_mutex);
8281         for (i = 0; i < hdev->num_alloc_vport; i++) {
8282                 vport = &hdev->vport[i];
8283                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8284                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8285                 state = vport->port_base_vlan_cfg.state;
8286
8287                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8288                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8289                                                  vport->vport_id, vlan_id,
8290                                                  false);
8291                         continue;
8292                 }
8293
8294                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8295                         int ret;
8296
8297                         if (!vlan->hd_tbl_status)
8298                                 continue;
8299                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8300                                                        vport->vport_id,
8301                                                        vlan->vlan_id, false);
8302                         if (ret)
8303                                 break;
8304                 }
8305         }
8306
8307         mutex_unlock(&hdev->vport_cfg_mutex);
8308 }
8309
8310 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8311 {
8312         struct hclge_vport *vport = hclge_get_vport(handle);
8313
8314         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8315                 vport->rxvlan_cfg.strip_tag1_en = false;
8316                 vport->rxvlan_cfg.strip_tag2_en = enable;
8317         } else {
8318                 vport->rxvlan_cfg.strip_tag1_en = enable;
8319                 vport->rxvlan_cfg.strip_tag2_en = true;
8320         }
8321         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8322         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8323         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8324
8325         return hclge_set_vlan_rx_offload_cfg(vport);
8326 }
8327
8328 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8329                                             u16 port_base_vlan_state,
8330                                             struct hclge_vlan_info *new_info,
8331                                             struct hclge_vlan_info *old_info)
8332 {
8333         struct hclge_dev *hdev = vport->back;
8334         int ret;
8335
8336         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8337                 hclge_rm_vport_all_vlan_table(vport, false);
8338                 return hclge_set_vlan_filter_hw(hdev,
8339                                                  htons(new_info->vlan_proto),
8340                                                  vport->vport_id,
8341                                                  new_info->vlan_tag,
8342                                                  false);
8343         }
8344
8345         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8346                                        vport->vport_id, old_info->vlan_tag,
8347                                        true);
8348         if (ret)
8349                 return ret;
8350
8351         return hclge_add_vport_all_vlan_table(vport);
8352 }
8353
8354 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8355                                     struct hclge_vlan_info *vlan_info)
8356 {
8357         struct hnae3_handle *nic = &vport->nic;
8358         struct hclge_vlan_info *old_vlan_info;
8359         struct hclge_dev *hdev = vport->back;
8360         int ret;
8361
8362         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8363
8364         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8365         if (ret)
8366                 return ret;
8367
8368         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8369                 /* add new VLAN tag */
8370                 ret = hclge_set_vlan_filter_hw(hdev,
8371                                                htons(vlan_info->vlan_proto),
8372                                                vport->vport_id,
8373                                                vlan_info->vlan_tag,
8374                                                false);
8375                 if (ret)
8376                         return ret;
8377
8378                 /* remove old VLAN tag */
8379                 ret = hclge_set_vlan_filter_hw(hdev,
8380                                                htons(old_vlan_info->vlan_proto),
8381                                                vport->vport_id,
8382                                                old_vlan_info->vlan_tag,
8383                                                true);
8384                 if (ret)
8385                         return ret;
8386
8387                 goto update;
8388         }
8389
8390         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8391                                                old_vlan_info);
8392         if (ret)
8393                 return ret;
8394
8395         /* update state only when disabling/enabling port-based VLAN */
8396         vport->port_base_vlan_cfg.state = state;
8397         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8398                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8399         else
8400                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8401
8402 update:
8403         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8404         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8405         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8406
8407         return 0;
8408 }
8409
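/* Derive the required transition from the requested VLAN and the
 * current port-based VLAN state. VLAN 0 means "no port VLAN": it maps
 * to NOCHANGE when already disabled and to DISABLE when enabled; a
 * different non-zero tag while enabled is a MODIFY.
 */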
8410 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8411                                           enum hnae3_port_base_vlan_state state,
8412                                           u16 vlan)
8413 {
8414         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8415                 if (!vlan)
8416                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8417                 else
8418                         return HNAE3_PORT_BASE_VLAN_ENABLE;
8419         } else {
8420                 if (!vlan)
8421                         return HNAE3_PORT_BASE_VLAN_DISABLE;
8422                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8423                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8424                 else
8425                         return HNAE3_PORT_BASE_VLAN_MODIFY;
8426         }
8427 }
8428
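/* Backend for a host-side VF VLAN request (e.g. "ip link set <dev> vf
 * <n> vlan <id> qos <q>"). Validates the parameters, then either
 * programs the vport directly while the VF is down, or pushes the new
 * config to the running VF through the mailbox.
 */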
8429 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8430                                     u16 vlan, u8 qos, __be16 proto)
8431 {
8432         struct hclge_vport *vport = hclge_get_vport(handle);
8433         struct hclge_dev *hdev = vport->back;
8434         struct hclge_vlan_info vlan_info;
8435         u16 state;
8436         int ret;
8437
8438         if (hdev->pdev->revision == 0x20)
8439                 return -EOPNOTSUPP;
8440
8441         vport = hclge_get_vf_vport(hdev, vfid);
8442         if (!vport)
8443                 return -EINVAL;
8444
8445         /* qos is a 3-bit value, so it cannot be bigger than 7 */
8446         if (vlan > VLAN_N_VID - 1 || qos > 7)
8447                 return -EINVAL;
8448         if (proto != htons(ETH_P_8021Q))
8449                 return -EPROTONOSUPPORT;
8450
8451         state = hclge_get_port_base_vlan_state(vport,
8452                                                vport->port_base_vlan_cfg.state,
8453                                                vlan);
8454         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8455                 return 0;
8456
8457         vlan_info.vlan_tag = vlan;
8458         vlan_info.qos = qos;
8459         vlan_info.vlan_proto = ntohs(proto);
8460
8461         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
8462                 return hclge_update_port_base_vlan_cfg(vport, state,
8463                                                        &vlan_info);
8464
8465         ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8466                                                 vport->vport_id, state,
8467                                                 vlan, qos, ntohs(proto));
8468         return ret;
8471 }
8472
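/* Add or remove (@is_kill) one VLAN filter entry for a vport, keeping
 * the software VLAN list in step with the hardware table.
 */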
8473 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8474                           u16 vlan_id, bool is_kill)
8475 {
8476         struct hclge_vport *vport = hclge_get_vport(handle);
8477         struct hclge_dev *hdev = vport->back;
8478         bool written_to_tbl = false;
8479         int ret = 0;
8480
8481         /* When the device is resetting, the firmware cannot handle the
8482          * mailbox. Just record the VLAN id and remove it after the reset
8483          * has finished.
8484          */
8485         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8486                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8487                 return -EBUSY;
8488         }
8489
8490         /* When port-based VLAN is enabled, it is used as the VLAN filter
8491          * entry. In this case the VLAN filter table is not updated when the
8492          * user adds or removes a VLAN; only the vport VLAN list is. The
8493          * VLAN ids in that list are written to the VLAN filter table once
8494          * port-based VLAN is disabled.
8495          */
8496         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8497                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8498                                                vlan_id, is_kill);
8499                 written_to_tbl = true;
8500         }
8501
8502         if (!ret) {
8503                 if (is_kill)
8504                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8505                 else
8506                         hclge_add_vport_vlan_table(vport, vlan_id,
8507                                                    written_to_tbl);
8508         } else if (is_kill) {
8509                 /* When removing a hw VLAN filter entry fails, record the
8510                  * VLAN id and try to remove it from hw later, to stay
8511                  * consistent with the stack.
8512                  */
8513                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8514         }
8515         return ret;
8516 }
8517
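/* Retry VLAN deletions recorded in vlan_del_fail_bmap when an earlier
 * attempt failed (e.g. during reset). HCLGE_MAX_SYNC_COUNT bounds the
 * work done in a single pass.
 */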
8518 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8519 {
8520 #define HCLGE_MAX_SYNC_COUNT    60
8521
8522         int i, ret, sync_cnt = 0;
8523         u16 vlan_id;
8524
8525         /* walk every vport, including the PF, and retry its failed deletions */
8526         for (i = 0; i < hdev->num_alloc_vport; i++) {
8527                 struct hclge_vport *vport = &hdev->vport[i];
8528
8529                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8530                                          VLAN_N_VID);
8531                 while (vlan_id != VLAN_N_VID) {
8532                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8533                                                        vport->vport_id, vlan_id,
8534                                                        true);
8535                         if (ret && ret != -EINVAL)
8536                                 return;
8537
8538                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8539                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8540
8541                         sync_cnt++;
8542                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8543                                 return;
8544
8545                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8546                                                  VLAN_N_VID);
8547                 }
8548         }
8549 }
8550
8551 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8552 {
8553         struct hclge_config_max_frm_size_cmd *req;
8554         struct hclge_desc desc;
8555
8556         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8557
8558         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8559         req->max_frm_size = cpu_to_le16(new_mps);
8560         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8561
8562         return hclge_cmd_send(&hdev->hw, &desc, 1);
8563 }
8564
8565 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8566 {
8567         struct hclge_vport *vport = hclge_get_vport(handle);
8568
8569         return hclge_set_vport_mtu(vport, new_mtu);
8570 }
8571
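/* Convert the requested MTU to a hardware maximum frame size. For the
 * standard 1500-byte MTU this is 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN)
 * + 8 (two VLAN headers) = 1526 bytes.
 */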
8572 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8573 {
8574         struct hclge_dev *hdev = vport->back;
8575         int i, max_frm_size, ret;
8576
8577         /* HW supports 2 layers of VLAN tags */
8578         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8579         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8580             max_frm_size > HCLGE_MAC_MAX_FRAME)
8581                 return -EINVAL;
8582
8583         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8584         mutex_lock(&hdev->vport_lock);
8585         /* VF's mps must fit within hdev->mps */
8586         if (vport->vport_id && max_frm_size > hdev->mps) {
8587                 mutex_unlock(&hdev->vport_lock);
8588                 return -EINVAL;
8589         } else if (vport->vport_id) {
8590                 vport->mps = max_frm_size;
8591                 mutex_unlock(&hdev->vport_lock);
8592                 return 0;
8593         }
8594
8595         /* PF's mps must not be less than any VF's mps */
8596         for (i = 1; i < hdev->num_alloc_vport; i++)
8597                 if (max_frm_size < hdev->vport[i].mps) {
8598                         mutex_unlock(&hdev->vport_lock);
8599                         return -EINVAL;
8600                 }
8601
8602         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8603
8604         ret = hclge_set_mac_mtu(hdev, max_frm_size);
8605         if (ret) {
8606                 dev_err(&hdev->pdev->dev,
8607                         "Change mtu fail, ret =%d\n", ret);
8608                 goto out;
8609         }
8610
8611         hdev->mps = max_frm_size;
8612         vport->mps = max_frm_size;
8613
8614         ret = hclge_buffer_alloc(hdev);
8615         if (ret)
8616                 dev_err(&hdev->pdev->dev,
8617                         "Allocate buffer fail, ret =%d\n", ret);
8618
8619 out:
8620         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8621         mutex_unlock(&hdev->vport_lock);
8622         return ret;
8623 }
8624
8625 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8626                                     bool enable)
8627 {
8628         struct hclge_reset_tqp_queue_cmd *req;
8629         struct hclge_desc desc;
8630         int ret;
8631
8632         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8633
8634         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8635         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8636         if (enable)
8637                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8638
8639         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8640         if (ret) {
8641                 dev_err(&hdev->pdev->dev,
8642                         "Send tqp reset cmd error, status =%d\n", ret);
8643                 return ret;
8644         }
8645
8646         return 0;
8647 }
8648
8649 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8650 {
8651         struct hclge_reset_tqp_queue_cmd *req;
8652         struct hclge_desc desc;
8653         int ret;
8654
8655         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8656
8657         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8658         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8659
8660         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8661         if (ret) {
8662                 dev_err(&hdev->pdev->dev,
8663                         "Get reset status error, status =%d\n", ret);
8664                 return ret;
8665         }
8666
8667         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8668 }
8669
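/* Translate a queue id local to this handle into the global TQP index
 * used by the queue reset commands below. ("covert" is a long-standing
 * typo for "convert", kept here since the symbol is visible outside
 * this file.)
 */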
8670 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8671 {
8672         struct hnae3_queue *queue;
8673         struct hclge_tqp *tqp;
8674
8675         queue = handle->kinfo.tqp[queue_id];
8676         tqp = container_of(queue, struct hclge_tqp, q);
8677
8678         return tqp->index;
8679 }
8680
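/* Full TQP reset sequence: quiesce the queue, assert the per-queue
 * reset, poll the ready bit for up to HCLGE_TQP_RESET_TRY_TIMES tries
 * (roughly 1 ms apart), then deassert the reset.
 */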
8681 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8682 {
8683         struct hclge_vport *vport = hclge_get_vport(handle);
8684         struct hclge_dev *hdev = vport->back;
8685         int reset_try_times = 0;
8686         int reset_status;
8687         u16 queue_gid;
8688         int ret;
8689
8690         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8691
8692         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8693         if (ret) {
8694                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8695                 return ret;
8696         }
8697
8698         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8699         if (ret) {
8700                 dev_err(&hdev->pdev->dev,
8701                         "Send reset tqp cmd fail, ret = %d\n", ret);
8702                 return ret;
8703         }
8704
8705         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8706                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8707                 if (reset_status)
8708                         break;
8709
8710                 /* Wait for tqp hw reset */
8711                 usleep_range(1000, 1200);
8712         }
8713
8714         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8715                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8716                 return -ETIME;
8717         }
8718
8719         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8720         if (ret)
8721                 dev_err(&hdev->pdev->dev,
8722                         "Deassert the soft reset fail, ret = %d\n", ret);
8723
8724         return ret;
8725 }
8726
8727 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8728 {
8729         struct hclge_dev *hdev = vport->back;
8730         int reset_try_times = 0;
8731         int reset_status;
8732         u16 queue_gid;
8733         int ret;
8734
8735         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8736
8737         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8738         if (ret) {
8739                 dev_warn(&hdev->pdev->dev,
8740                          "Send reset tqp cmd fail, ret = %d\n", ret);
8741                 return;
8742         }
8743
8744         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8745                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8746                 if (reset_status)
8747                         break;
8748
8749                 /* Wait for tqp hw reset */
8750                 usleep_range(1000, 1200);
8751         }
8752
8753         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8754                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8755                 return;
8756         }
8757
8758         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8759         if (ret)
8760                 dev_warn(&hdev->pdev->dev,
8761                          "Deassert the soft reset fail, ret = %d\n", ret);
8762 }
8763
8764 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8765 {
8766         struct hclge_vport *vport = hclge_get_vport(handle);
8767         struct hclge_dev *hdev = vport->back;
8768
8769         return hdev->fw_version;
8770 }
8771
8772 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8773 {
8774         struct phy_device *phydev = hdev->hw.mac.phydev;
8775
8776         if (!phydev)
8777                 return;
8778
8779         phy_set_asym_pause(phydev, rx_en, tx_en);
8780 }
8781
8782 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8783 {
8784         int ret;
8785
8786         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8787                 return 0;
8788
8789         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8790         if (ret)
8791                 dev_err(&hdev->pdev->dev,
8792                         "configure pauseparam error, ret = %d.\n", ret);
8793
8794         return ret;
8795 }
8796
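/* Resolve flow control after autonegotiation on a PHY link: combine
 * the local and link-partner pause advertisements with
 * mii_resolve_flowctrl_fdx(), force pause off on half duplex, and
 * program the MAC with the result.
 */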
8797 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8798 {
8799         struct phy_device *phydev = hdev->hw.mac.phydev;
8800         u16 remote_advertising = 0;
8801         u16 local_advertising;
8802         u32 rx_pause, tx_pause;
8803         u8 flowctl;
8804
8805         if (!phydev->link || !phydev->autoneg)
8806                 return 0;
8807
8808         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8809
8810         if (phydev->pause)
8811                 remote_advertising = LPA_PAUSE_CAP;
8812
8813         if (phydev->asym_pause)
8814                 remote_advertising |= LPA_PAUSE_ASYM;
8815
8816         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8817                                            remote_advertising);
8818         tx_pause = flowctl & FLOW_CTRL_TX;
8819         rx_pause = flowctl & FLOW_CTRL_RX;
8820
8821         if (phydev->duplex == HCLGE_MAC_HALF) {
8822                 tx_pause = 0;
8823                 rx_pause = 0;
8824         }
8825
8826         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8827 }
8828
8829 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8830                                  u32 *rx_en, u32 *tx_en)
8831 {
8832         struct hclge_vport *vport = hclge_get_vport(handle);
8833         struct hclge_dev *hdev = vport->back;
8834         struct phy_device *phydev = hdev->hw.mac.phydev;
8835
8836         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8837
8838         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8839                 *rx_en = 0;
8840                 *tx_en = 0;
8841                 return;
8842         }
8843
8844         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8845                 *rx_en = 1;
8846                 *tx_en = 0;
8847         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8848                 *tx_en = 1;
8849                 *rx_en = 0;
8850         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8851                 *rx_en = 1;
8852                 *tx_en = 1;
8853         } else {
8854                 *rx_en = 0;
8855                 *tx_en = 0;
8856         }
8857 }
8858
8859 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8860                                          u32 rx_en, u32 tx_en)
8861 {
8862         if (rx_en && tx_en)
8863                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8864         else if (rx_en && !tx_en)
8865                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8866         else if (!rx_en && tx_en)
8867                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8868         else
8869                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8870
8871         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8872 }
8873
8874 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8875                                 u32 rx_en, u32 tx_en)
8876 {
8877         struct hclge_vport *vport = hclge_get_vport(handle);
8878         struct hclge_dev *hdev = vport->back;
8879         struct phy_device *phydev = hdev->hw.mac.phydev;
8880         u32 fc_autoneg;
8881
8882         if (phydev) {
8883                 fc_autoneg = hclge_get_autoneg(handle);
8884                 if (auto_neg != fc_autoneg) {
8885                         dev_info(&hdev->pdev->dev,
8886                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8887                         return -EOPNOTSUPP;
8888                 }
8889         }
8890
8891         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8892                 dev_info(&hdev->pdev->dev,
8893                          "Priority flow control enabled. Cannot set link flow control.\n");
8894                 return -EOPNOTSUPP;
8895         }
8896
8897         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8898
8899         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8900
8901         if (!auto_neg)
8902                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8903
8904         if (phydev)
8905                 return phy_start_aneg(phydev);
8906
8907         return -EOPNOTSUPP;
8908 }
8909
8910 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8911                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8912 {
8913         struct hclge_vport *vport = hclge_get_vport(handle);
8914         struct hclge_dev *hdev = vport->back;
8915
8916         if (speed)
8917                 *speed = hdev->hw.mac.speed;
8918         if (duplex)
8919                 *duplex = hdev->hw.mac.duplex;
8920         if (auto_neg)
8921                 *auto_neg = hdev->hw.mac.autoneg;
8922 }
8923
8924 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8925                                  u8 *module_type)
8926 {
8927         struct hclge_vport *vport = hclge_get_vport(handle);
8928         struct hclge_dev *hdev = vport->back;
8929
8930         if (media_type)
8931                 *media_type = hdev->hw.mac.media_type;
8932
8933         if (module_type)
8934                 *module_type = hdev->hw.mac.module_type;
8935 }
8936
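/* Report the MDI/MDI-X control and status of the PHY. The MDIX
 * registers live on a separate register page, so the page-select
 * register is switched to the MDIX page around the reads and restored
 * to the copper page afterwards.
 */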
8937 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8938                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8939 {
8940         struct hclge_vport *vport = hclge_get_vport(handle);
8941         struct hclge_dev *hdev = vport->back;
8942         struct phy_device *phydev = hdev->hw.mac.phydev;
8943         int mdix_ctrl, mdix, is_resolved;
8944         unsigned int retval;
8945
8946         if (!phydev) {
8947                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8948                 *tp_mdix = ETH_TP_MDI_INVALID;
8949                 return;
8950         }
8951
8952         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8953
8954         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8955         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8956                                     HCLGE_PHY_MDIX_CTRL_S);
8957
8958         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8959         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8960         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8961
8962         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8963
8964         switch (mdix_ctrl) {
8965         case 0x0:
8966                 *tp_mdix_ctrl = ETH_TP_MDI;
8967                 break;
8968         case 0x1:
8969                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8970                 break;
8971         case 0x3:
8972                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8973                 break;
8974         default:
8975                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8976                 break;
8977         }
8978
8979         if (!is_resolved)
8980                 *tp_mdix = ETH_TP_MDI_INVALID;
8981         else if (mdix)
8982                 *tp_mdix = ETH_TP_MDI_X;
8983         else
8984                 *tp_mdix = ETH_TP_MDI;
8985 }
8986
8987 static void hclge_info_show(struct hclge_dev *hdev)
8988 {
8989         struct device *dev = &hdev->pdev->dev;
8990
8991         dev_info(dev, "PF info begin:\n");
8992
8993         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
8994         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
8995         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
8996         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
8997         dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
8998         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
8999         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9000         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9001         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9002         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9003         dev_info(dev, "This is %s PF\n",
9004                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9005         dev_info(dev, "DCB %s\n",
9006                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9007         dev_info(dev, "MQPRIO %s\n",
9008                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9009
9010         dev_info(dev, "PF info end.\n");
9011 }
9012
9013 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9014                                           struct hclge_vport *vport)
9015 {
9016         struct hnae3_client *client = vport->nic.client;
9017         struct hclge_dev *hdev = ae_dev->priv;
9018         int rst_cnt = hdev->rst_stats.reset_cnt;
9019         int ret;
9020
9021         ret = client->ops->init_instance(&vport->nic);
9022         if (ret)
9023                 return ret;
9024
9025         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9026         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9027             rst_cnt != hdev->rst_stats.reset_cnt) {
9028                 ret = -EBUSY;
9029                 goto init_nic_err;
9030         }
9031
9032         /* Enable nic hw error interrupts */
9033         ret = hclge_config_nic_hw_error(hdev, true);
9034         if (ret) {
9035                 dev_err(&ae_dev->pdev->dev,
9036                         "fail(%d) to enable hw error interrupts\n", ret);
9037                 goto init_nic_err;
9038         }
9039
9040         hnae3_set_client_init_flag(client, ae_dev, 1);
9041
9042         if (netif_msg_drv(&hdev->vport->nic))
9043                 hclge_info_show(hdev);
9044
9045         return ret;
9046
9047 init_nic_err:
9048         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9049         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9050                 msleep(HCLGE_WAIT_RESET_DONE);
9051
9052         client->ops->uninit_instance(&vport->nic, 0);
9053
9054         return ret;
9055 }
9056
9057 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9058                                            struct hclge_vport *vport)
9059 {
9060         struct hnae3_client *client = vport->roce.client;
9061         struct hclge_dev *hdev = ae_dev->priv;
9062         int rst_cnt;
9063         int ret;
9064
9065         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9066             !hdev->nic_client)
9067                 return 0;
9068
9069         client = hdev->roce_client;
9070         ret = hclge_init_roce_base_info(vport);
9071         if (ret)
9072                 return ret;
9073
9074         rst_cnt = hdev->rst_stats.reset_cnt;
9075         ret = client->ops->init_instance(&vport->roce);
9076         if (ret)
9077                 return ret;
9078
9079         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9080         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9081             rst_cnt != hdev->rst_stats.reset_cnt) {
9082                 ret = -EBUSY;
9083                 goto init_roce_err;
9084         }
9085
9086         /* Enable roce ras interrupts */
9087         ret = hclge_config_rocee_ras_interrupt(hdev, true);
9088         if (ret) {
9089                 dev_err(&ae_dev->pdev->dev,
9090                         "fail(%d) to enable roce ras interrupts\n", ret);
9091                 goto init_roce_err;
9092         }
9093
9094         hnae3_set_client_init_flag(client, ae_dev, 1);
9095
9096         return 0;
9097
9098 init_roce_err:
9099         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9100         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9101                 msleep(HCLGE_WAIT_RESET_DONE);
9102
9103         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9104
9105         return ret;
9106 }
9107
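/* Entry point for hnae3 client registration: walk the vports and bind
 * the NIC and/or RoCE client instance to each. The RoCE instance can
 * only initialize once the NIC client exists, which is why the KNIC
 * case also attempts the RoCE init.
 */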
9108 static int hclge_init_client_instance(struct hnae3_client *client,
9109                                       struct hnae3_ae_dev *ae_dev)
9110 {
9111         struct hclge_dev *hdev = ae_dev->priv;
9112         struct hclge_vport *vport;
9113         int i, ret;
9114
9115         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9116                 vport = &hdev->vport[i];
9117
9118                 switch (client->type) {
9119                 case HNAE3_CLIENT_KNIC:
9120                         hdev->nic_client = client;
9121                         vport->nic.client = client;
9122                         ret = hclge_init_nic_client_instance(ae_dev, vport);
9123                         if (ret)
9124                                 goto clear_nic;
9125
9126                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9127                         if (ret)
9128                                 goto clear_roce;
9129
9130                         break;
9131                 case HNAE3_CLIENT_ROCE:
9132                         if (hnae3_dev_roce_supported(hdev)) {
9133                                 hdev->roce_client = client;
9134                                 vport->roce.client = client;
9135                         }
9136
9137                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9138                         if (ret)
9139                                 goto clear_roce;
9140
9141                         break;
9142                 default:
9143                         return -EINVAL;
9144                 }
9145         }
9146
9147         return 0;
9148
9149 clear_nic:
9150         hdev->nic_client = NULL;
9151         vport->nic.client = NULL;
9152         return ret;
9153 clear_roce:
9154         hdev->roce_client = NULL;
9155         vport->roce.client = NULL;
9156         return ret;
9157 }
9158
9159 static void hclge_uninit_client_instance(struct hnae3_client *client,
9160                                          struct hnae3_ae_dev *ae_dev)
9161 {
9162         struct hclge_dev *hdev = ae_dev->priv;
9163         struct hclge_vport *vport;
9164         int i;
9165
9166         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9167                 vport = &hdev->vport[i];
9168                 if (hdev->roce_client) {
9169                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9170                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9171                                 msleep(HCLGE_WAIT_RESET_DONE);
9172
9173                         hdev->roce_client->ops->uninit_instance(&vport->roce,
9174                                                                 0);
9175                         hdev->roce_client = NULL;
9176                         vport->roce.client = NULL;
9177                 }
9178                 if (client->type == HNAE3_CLIENT_ROCE)
9179                         return;
9180                 if (hdev->nic_client && client->ops->uninit_instance) {
9181                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9182                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9183                                 msleep(HCLGE_WAIT_RESET_DONE);
9184
9185                         client->ops->uninit_instance(&vport->nic, 0);
9186                         hdev->nic_client = NULL;
9187                         vport->nic.client = NULL;
9188                 }
9189         }
9190 }
9191
9192 static int hclge_pci_init(struct hclge_dev *hdev)
9193 {
9194         struct pci_dev *pdev = hdev->pdev;
9195         struct hclge_hw *hw;
9196         int ret;
9197
9198         ret = pci_enable_device(pdev);
9199         if (ret) {
9200                 dev_err(&pdev->dev, "failed to enable PCI device\n");
9201                 return ret;
9202         }
9203
9204         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9205         if (ret) {
9206                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9207                 if (ret) {
9208                         dev_err(&pdev->dev,
9209                                 "can't set consistent PCI DMA\n");
9210                         goto err_disable_device;
9211                 }
9212                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9213         }
9214
9215         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9216         if (ret) {
9217                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9218                 goto err_disable_device;
9219         }
9220
9221         pci_set_master(pdev);
9222         hw = &hdev->hw;
9223         hw->io_base = pcim_iomap(pdev, 2, 0);
9224         if (!hw->io_base) {
9225                 dev_err(&pdev->dev, "Can't map configuration register space\n");
9226                 ret = -ENOMEM;
9227                 goto err_clr_master;
9228         }
9229
9230         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9231
9232         return 0;
9233 err_clr_master:
9234         pci_clear_master(pdev);
9235         pci_release_regions(pdev);
9236 err_disable_device:
9237         pci_disable_device(pdev);
9238
9239         return ret;
9240 }
9241
9242 static void hclge_pci_uninit(struct hclge_dev *hdev)
9243 {
9244         struct pci_dev *pdev = hdev->pdev;
9245
9246         pcim_iounmap(pdev, hdev->hw.io_base);
9247         pci_free_irq_vectors(pdev);
9248         pci_clear_master(pdev);
9249         pci_release_mem_regions(pdev);
9250         pci_disable_device(pdev);
9251 }
9252
9253 static void hclge_state_init(struct hclge_dev *hdev)
9254 {
9255         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9256         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9257         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9258         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9259         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9260         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9261 }
9262
9263 static void hclge_state_uninit(struct hclge_dev *hdev)
9264 {
9265         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9266         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9267
9268         if (hdev->reset_timer.function)
9269                 del_timer_sync(&hdev->reset_timer);
9270         if (hdev->service_task.work.func)
9271                 cancel_delayed_work_sync(&hdev->service_task);
9272         if (hdev->rst_service_task.func)
9273                 cancel_work_sync(&hdev->rst_service_task);
9274         if (hdev->mbx_service_task.func)
9275                 cancel_work_sync(&hdev->mbx_service_task);
9276 }
9277
9278 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9279 {
9280 #define HCLGE_FLR_WAIT_MS       100
9281 #define HCLGE_FLR_WAIT_CNT      50
9282         struct hclge_dev *hdev = ae_dev->priv;
9283         int cnt = 0;
9284
9285         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9286         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9287         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9288         hclge_reset_event(hdev->pdev, NULL);
9289
9290         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9291                cnt++ < HCLGE_FLR_WAIT_CNT)
9292                 msleep(HCLGE_FLR_WAIT_MS);
9293
9294         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9295                 dev_err(&hdev->pdev->dev,
9296                         "flr wait down timeout: %d\n", cnt);
9297 }
9298
9299 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9300 {
9301         struct hclge_dev *hdev = ae_dev->priv;
9302
9303         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9304 }
9305
9306 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9307 {
9308         u16 i;
9309
9310         for (i = 0; i < hdev->num_alloc_vport; i++) {
9311                 struct hclge_vport *vport = &hdev->vport[i];
9312                 int ret;
9313
9314                 /* Send cmd to clear VF's FUNC_RST_ING */
9315                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9316                 if (ret)
9317                         dev_warn(&hdev->pdev->dev,
9318                                  "clear vf(%u) rst failed %d!\n",
9319                                  vport->vport_id, ret);
9320         }
9321 }
9322
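/* Probe-time initialization of the PF: bring up PCI, the firmware
 * command queue, MSI-X, the TQP/vport layout, MAC, VLAN, TM scheduling,
 * RSS and the flow director, then arm the reset timer and service
 * tasks. The error labels below unwind in reverse order of setup.
 */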
9323 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9324 {
9325         struct pci_dev *pdev = ae_dev->pdev;
9326         struct hclge_dev *hdev;
9327         int ret;
9328
9329         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9330         if (!hdev) {
9331                 ret = -ENOMEM;
9332                 goto out;
9333         }
9334
9335         hdev->pdev = pdev;
9336         hdev->ae_dev = ae_dev;
9337         hdev->reset_type = HNAE3_NONE_RESET;
9338         hdev->reset_level = HNAE3_FUNC_RESET;
9339         ae_dev->priv = hdev;
9340
9341         /* HW supports 2 layers of VLAN tags */
9342         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9343
9344         mutex_init(&hdev->vport_lock);
9345         mutex_init(&hdev->vport_cfg_mutex);
9346         spin_lock_init(&hdev->fd_rule_lock);
9347
9348         ret = hclge_pci_init(hdev);
9349         if (ret) {
9350                 dev_err(&pdev->dev, "PCI init failed\n");
9351                 goto out;
9352         }
9353
9354         /* Firmware command queue initialize */
9355         ret = hclge_cmd_queue_init(hdev);
9356         if (ret) {
9357                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9358                 goto err_pci_uninit;
9359         }
9360
9361         /* Firmware command initialize */
9362         ret = hclge_cmd_init(hdev);
9363         if (ret)
9364                 goto err_cmd_uninit;
9365
9366         ret = hclge_get_cap(hdev);
9367         if (ret) {
9368                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9369                         ret);
9370                 goto err_cmd_uninit;
9371         }
9372
9373         ret = hclge_configure(hdev);
9374         if (ret) {
9375                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9376                 goto err_cmd_uninit;
9377         }
9378
9379         ret = hclge_init_msi(hdev);
9380         if (ret) {
9381                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9382                 goto err_cmd_uninit;
9383         }
9384
9385         ret = hclge_misc_irq_init(hdev);
9386         if (ret) {
9387                 dev_err(&pdev->dev,
9388                         "Misc IRQ(vector0) init error, ret = %d.\n",
9389                         ret);
9390                 goto err_msi_uninit;
9391         }
9392
9393         ret = hclge_alloc_tqps(hdev);
9394         if (ret) {
9395                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9396                 goto err_msi_irq_uninit;
9397         }
9398
9399         ret = hclge_alloc_vport(hdev);
9400         if (ret) {
9401                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9402                 goto err_msi_irq_uninit;
9403         }
9404
9405         ret = hclge_map_tqp(hdev);
9406         if (ret) {
9407                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9408                 goto err_msi_irq_uninit;
9409         }
9410
9411         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9412                 ret = hclge_mac_mdio_config(hdev);
9413                 if (ret) {
9414                         dev_err(&hdev->pdev->dev,
9415                                 "mdio config fail ret=%d\n", ret);
9416                         goto err_msi_irq_uninit;
9417                 }
9418         }
9419
9420         ret = hclge_init_umv_space(hdev);
9421         if (ret) {
9422                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9423                 goto err_mdiobus_unreg;
9424         }
9425
9426         ret = hclge_mac_init(hdev);
9427         if (ret) {
9428                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9429                 goto err_mdiobus_unreg;
9430         }
9431
9432         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9433         if (ret) {
9434                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9435                 goto err_mdiobus_unreg;
9436         }
9437
9438         ret = hclge_config_gro(hdev, true);
9439         if (ret)
9440                 goto err_mdiobus_unreg;
9441
9442         ret = hclge_init_vlan_config(hdev);
9443         if (ret) {
9444                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9445                 goto err_mdiobus_unreg;
9446         }
9447
9448         ret = hclge_tm_schd_init(hdev);
9449         if (ret) {
9450                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9451                 goto err_mdiobus_unreg;
9452         }
9453
9454         hclge_rss_init_cfg(hdev);
9455         ret = hclge_rss_init_hw(hdev);
9456         if (ret) {
9457                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9458                 goto err_mdiobus_unreg;
9459         }
9460
9461         ret = init_mgr_tbl(hdev);
9462         if (ret) {
9463                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9464                 goto err_mdiobus_unreg;
9465         }
9466
9467         ret = hclge_init_fd_config(hdev);
9468         if (ret) {
9469                 dev_err(&pdev->dev,
9470                         "fd table init fail, ret=%d\n", ret);
9471                 goto err_mdiobus_unreg;
9472         }
9473
9474         INIT_KFIFO(hdev->mac_tnl_log);
9475
9476         hclge_dcb_ops_set(hdev);
9477
9478         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9479         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9480         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
9481         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
9482
9483         /* Set up affinity after the service timer setup, because
9484          * add_timer_on is called from the affinity notifier.
9485          */
9486         hclge_misc_affinity_setup(hdev);
9487
9488         hclge_clear_all_event_cause(hdev);
9489         hclge_clear_resetting_state(hdev);
9490
9491         /* Log and clear the hw errors that have already occurred */
9492         hclge_handle_all_hns_hw_errors(ae_dev);
9493
9494         /* Request a delayed reset for error recovery: an immediate global
9495          * reset on one PF would disturb the pending initialization of others.
9496          */
9497         if (ae_dev->hw_err_reset_req) {
9498                 enum hnae3_reset_type reset_level;
9499
9500                 reset_level = hclge_get_reset_level(ae_dev,
9501                                                     &ae_dev->hw_err_reset_req);
9502                 hclge_set_def_reset_request(ae_dev, reset_level);
9503                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9504         }
9505
9506         /* Enable MISC vector(vector0) */
9507         hclge_enable_vector(&hdev->misc_vector, true);
9508
9509         hclge_state_init(hdev);
9510         hdev->last_reset_time = jiffies;
9511
9512         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9513                  HCLGE_DRIVER_NAME);
9514
9515         return 0;
9516
9517 err_mdiobus_unreg:
9518         if (hdev->hw.mac.phydev)
9519                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9520 err_msi_irq_uninit:
9521         hclge_misc_irq_uninit(hdev);
9522 err_msi_uninit:
9523         pci_free_irq_vectors(pdev);
9524 err_cmd_uninit:
9525         hclge_cmd_uninit(hdev);
9526 err_pci_uninit:
9527         pcim_iounmap(pdev, hdev->hw.io_base);
9528         pci_clear_master(pdev);
9529         pci_release_regions(pdev);
9530         pci_disable_device(pdev);
9531 out:
9532         return ret;
9533 }
9534
9535 static void hclge_stats_clear(struct hclge_dev *hdev)
9536 {
9537         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
9538 }
9539
9540 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9541 {
9542         return hclge_config_switch_param(hdev, vf, enable,
9543                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
9544 }
9545
9546 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9547 {
9548         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9549                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
9550                                           enable, vf);
9551 }
9552
9553 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9554 {
9555         int ret;
9556
9557         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9558         if (ret) {
9559                 dev_err(&hdev->pdev->dev,
9560                         "Set vf %d mac spoof check %s failed, ret=%d\n",
9561                         vf, enable ? "on" : "off", ret);
9562                 return ret;
9563         }
9564
9565         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9566         if (ret)
9567                 dev_err(&hdev->pdev->dev,
9568                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
9569                         vf, enable ? "on" : "off", ret);
9570
9571         return ret;
9572 }
9573
9574 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9575                                  bool enable)
9576 {
9577         struct hclge_vport *vport = hclge_get_vport(handle);
9578         struct hclge_dev *hdev = vport->back;
9579         u32 new_spoofchk = enable ? 1 : 0;
9580         int ret;
9581
9582         if (hdev->pdev->revision == 0x20)
9583                 return -EOPNOTSUPP;
9584
9585         vport = hclge_get_vf_vport(hdev, vf);
9586         if (!vport)
9587                 return -EINVAL;
9588
9589         if (vport->vf_info.spoofchk == new_spoofchk)
9590                 return 0;
9591
9592         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9593                 dev_warn(&hdev->pdev->dev,
9594                          "vf %d vlan table is full, enabling spoof check may cause its packet transmission to fail\n",
9595                          vf);
9596         else if (enable && hclge_is_umv_space_full(vport))
9597                 dev_warn(&hdev->pdev->dev,
9598                          "vf %d mac table is full, enabling spoof check may cause its packet transmission to fail\n",
9599                          vf);
9600
9601         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9602         if (ret)
9603                 return ret;
9604
9605         vport->vf_info.spoofchk = new_spoofchk;
9606         return 0;
9607 }
9608
9609 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9610 {
9611         struct hclge_vport *vport = hdev->vport;
9612         int ret;
9613         int i;
9614
9615         if (hdev->pdev->revision == 0x20)
9616                 return 0;
9617
9618         /* resume the vf spoof check state after reset */
9619         for (i = 0; i < hdev->num_alloc_vport; i++) {
9620                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9621                                                vport->vf_info.spoofchk);
9622                 if (ret)
9623                         return ret;
9624
9625                 vport++;
9626         }
9627
9628         return 0;
9629 }
9630
9631 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9632 {
9633         struct hclge_vport *vport = hclge_get_vport(handle);
9634         struct hclge_dev *hdev = vport->back;
9635         u32 new_trusted = enable ? 1 : 0;
9636         bool en_bc_pmc;
9637         int ret;
9638
9639         vport = hclge_get_vf_vport(hdev, vf);
9640         if (!vport)
9641                 return -EINVAL;
9642
9643         if (vport->vf_info.trusted == new_trusted)
9644                 return 0;
9645
9646         /* Disable promisc mode for VF if it is not trusted any more. */
9647         if (!enable && vport->vf_info.promisc_enable) {
9648                 en_bc_pmc = hdev->pdev->revision != 0x20;
9649                 ret = hclge_set_vport_promisc_mode(vport, false, false,
9650                                                    en_bc_pmc);
9651                 if (ret)
9652                         return ret;
9653                 vport->vf_info.promisc_enable = 0;
9654                 hclge_inform_vf_promisc_info(vport);
9655         }
9656
9657         vport->vf_info.trusted = new_trusted;
9658
9659         return 0;
9660 }
9661
9662 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9663 {
9664         int ret;
9665         int vf;
9666
9667         /* reset vf rate to default value */
9668         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9669                 struct hclge_vport *vport = &hdev->vport[vf];
9670
9671                 vport->vf_info.max_tx_rate = 0;
9672                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9673                 if (ret)
9674                         dev_err(&hdev->pdev->dev,
9675                                 "vf%d failed to reset to default, ret=%d\n",
9676                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9677         }
9678 }
9679
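/* The qs shaper only limits a VF's maximum rate: min_tx_rate must be 0,
 * and max_tx_rate must lie within [0, max_speed], where 0 means "no
 * limit" (see hclge_resume_vf_rate()).
 */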
9680 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9681                                      int min_tx_rate, int max_tx_rate)
9682 {
9683         if (min_tx_rate != 0 ||
9684             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9685                 dev_err(&hdev->pdev->dev,
9686                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9687                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9688                 return -EINVAL;
9689         }
9690
9691         return 0;
9692 }
9693
9694 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9695                              int min_tx_rate, int max_tx_rate, bool force)
9696 {
9697         struct hclge_vport *vport = hclge_get_vport(handle);
9698         struct hclge_dev *hdev = vport->back;
9699         int ret;
9700
9701         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9702         if (ret)
9703                 return ret;
9704
9705         vport = hclge_get_vf_vport(hdev, vf);
9706         if (!vport)
9707                 return -EINVAL;
9708
9709         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9710                 return 0;
9711
9712         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9713         if (ret)
9714                 return ret;
9715
9716         vport->vf_info.max_tx_rate = max_tx_rate;
9717
9718         return 0;
9719 }
9720
9721 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9722 {
9723         struct hnae3_handle *handle = &hdev->vport->nic;
9724         struct hclge_vport *vport;
9725         int ret;
9726         int vf;
9727
9728         /* resume the vf max_tx_rate after reset */
9729         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9730                 vport = hclge_get_vf_vport(hdev, vf);
9731                 if (!vport)
9732                         return -EINVAL;
9733
9734                 /* zero means max rate; after reset, the firmware has already
9735                  * set it to max rate, so just continue.
9736                  */
9737                 if (!vport->vf_info.max_tx_rate)
9738                         continue;
9739
9740                 ret = hclge_set_vf_rate(handle, vf, 0,
9741                                         vport->vf_info.max_tx_rate, true);
9742                 if (ret) {
9743                         dev_err(&hdev->pdev->dev,
9744                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9745                                 vf, vport->vf_info.max_tx_rate, ret);
9746                         return ret;
9747                 }
9748         }
9749
9750         return 0;
9751 }
9752
9753 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9754 {
9755         struct hclge_vport *vport = hdev->vport;
9756         int i;
9757
9758         for (i = 0; i < hdev->num_alloc_vport; i++) {
9759                 hclge_vport_stop(vport);
9760                 vport++;
9761         }
9762 }
9763
9764 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9765 {
9766         struct hclge_dev *hdev = ae_dev->priv;
9767         struct pci_dev *pdev = ae_dev->pdev;
9768         int ret;
9769
9770         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9771
9772         hclge_stats_clear(hdev);
9773         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9774         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9775
9776         ret = hclge_cmd_init(hdev);
9777         if (ret) {
9778                 dev_err(&pdev->dev, "Cmd queue init failed\n");
9779                 return ret;
9780         }
9781
9782         ret = hclge_map_tqp(hdev);
9783         if (ret) {
9784                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9785                 return ret;
9786         }
9787
9788         hclge_reset_umv_space(hdev);
9789
9790         ret = hclge_mac_init(hdev);
9791         if (ret) {
9792                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9793                 return ret;
9794         }
9795
9796         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9797         if (ret) {
9798                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9799                 return ret;
9800         }
9801
9802         ret = hclge_config_gro(hdev, true);
9803         if (ret)
9804                 return ret;
9805
9806         ret = hclge_init_vlan_config(hdev);
9807         if (ret) {
9808                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9809                 return ret;
9810         }
9811
9812         ret = hclge_tm_init_hw(hdev, true);
9813         if (ret) {
9814                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9815                 return ret;
9816         }
9817
9818         ret = hclge_rss_init_hw(hdev);
9819         if (ret) {
9820                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9821                 return ret;
9822         }
9823
9824         ret = hclge_init_fd_config(hdev);
9825         if (ret) {
9826                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9827                 return ret;
9828         }
9829
9830         /* Log and clear the hw errors that have already occurred */
9831         hclge_handle_all_hns_hw_errors(ae_dev);
9832
9833         /* Re-enable the hw error interrupts because
9834          * the interrupts get disabled on global reset.
9835          */
9836         ret = hclge_config_nic_hw_error(hdev, true);
9837         if (ret) {
9838                 dev_err(&pdev->dev,
9839                         "fail(%d) to re-enable NIC hw error interrupts\n",
9840                         ret);
9841                 return ret;
9842         }
9843
9844         if (hdev->roce_client) {
9845                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9846                 if (ret) {
9847                         dev_err(&pdev->dev,
9848                                 "fail(%d) to re-enable roce ras interrupts\n",
9849                                 ret);
9850                         return ret;
9851                 }
9852         }
9853
9854         hclge_reset_vport_state(hdev);
9855         ret = hclge_reset_vport_spoofchk(hdev);
9856         if (ret)
9857                 return ret;
9858
9859         ret = hclge_resume_vf_rate(hdev);
9860         if (ret)
9861                 return ret;
9862
9863         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9864                  HCLGE_DRIVER_NAME);
9865
9866         return 0;
9867 }
9868
9869 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9870 {
9871         struct hclge_dev *hdev = ae_dev->priv;
9872         struct hclge_mac *mac = &hdev->hw.mac;
9873
9874         hclge_reset_vf_rate(hdev);
9875         hclge_misc_affinity_teardown(hdev);
9876         hclge_state_uninit(hdev);
9877
9878         if (mac->phydev)
9879                 mdiobus_unregister(mac->mdio_bus);
9880
9881         hclge_uninit_umv_space(hdev);
9882
9883         /* Disable MISC vector (vector0) */
9884         hclge_enable_vector(&hdev->misc_vector, false);
9885         synchronize_irq(hdev->misc_vector.vector_irq);
9886
9887         /* Disable all hw interrupts */
9888         hclge_config_mac_tnl_int(hdev, false);
9889         hclge_config_nic_hw_error(hdev, false);
9890         hclge_config_rocee_ras_interrupt(hdev, false);
9891
9892         hclge_cmd_uninit(hdev);
9893         hclge_misc_irq_uninit(hdev);
9894         hclge_pci_uninit(hdev);
9895         mutex_destroy(&hdev->vport_lock);
9896         hclge_uninit_vport_mac_table(hdev);
9897         hclge_uninit_vport_vlan_table(hdev);
9898         mutex_destroy(&hdev->vport_cfg_mutex);
9899         ae_dev->priv = NULL;
9900 }
9901
9902 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9903 {
9904         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9905         struct hclge_vport *vport = hclge_get_vport(handle);
9906         struct hclge_dev *hdev = vport->back;
9907
9908         return min_t(u32, hdev->rss_size_max,
9909                      vport->alloc_tqps / kinfo->num_tc);
9910 }
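
/* A minimal sketch of the bound computed above, with illustrative numbers:
 * the combined-channel ceiling is the smaller of the device-wide RSS limit
 * and the TQPs available per TC. example_max_channels() is a hypothetical
 * helper for illustration only, not part of this driver.
 */
static u32 example_max_channels(u32 rss_size_max, u16 alloc_tqps, u16 num_tc)
{
        /* e.g. rss_size_max = 16, 16 TQPs over 2 TCs -> min(16, 8) = 8 */
        return min_t(u32, rss_size_max, alloc_tqps / num_tc);
}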
9911
9912 static void hclge_get_channels(struct hnae3_handle *handle,
9913                                struct ethtool_channels *ch)
9914 {
9915         ch->max_combined = hclge_get_max_channels(handle);
9916         ch->other_count = 1;
9917         ch->max_other = 1;
9918         ch->combined_count = handle->kinfo.rss_size;
9919 }
9920
9921 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9922                                         u16 *alloc_tqps, u16 *max_rss_size)
9923 {
9924         struct hclge_vport *vport = hclge_get_vport(handle);
9925         struct hclge_dev *hdev = vport->back;
9926
9927         *alloc_tqps = vport->alloc_tqps;
9928         *max_rss_size = hdev->rss_size_max;
9929 }
9930
9931 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9932                               bool rxfh_configured)
9933 {
9934         struct hclge_vport *vport = hclge_get_vport(handle);
9935         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9936         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9937         struct hclge_dev *hdev = vport->back;
9938         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9939         u16 cur_rss_size = kinfo->rss_size;
9940         u16 cur_tqps = kinfo->num_tqps;
9941         u16 tc_valid[HCLGE_MAX_TC_NUM];
9942         u16 roundup_size;
9943         u32 *rss_indir;
9944         unsigned int i;
9945         int ret;
9946
9947         kinfo->req_rss_size = new_tqps_num;
9948
9949         ret = hclge_tm_vport_map_update(hdev);
9950         if (ret) {
9951                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
9952                 return ret;
9953         }
9954
9955         roundup_size = roundup_pow_of_two(kinfo->rss_size);
9956         roundup_size = ilog2(roundup_size);
9957         /* Set the RSS TC mode according to the new RSS size */
9958         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9959                 tc_valid[i] = 0;
9960
9961                 if (!(hdev->hw_tc_map & BIT(i)))
9962                         continue;
9963
9964                 tc_valid[i] = 1;
9965                 tc_size[i] = roundup_size;
9966                 tc_offset[i] = kinfo->rss_size * i;
9967         }
9968         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9969         if (ret)
9970                 return ret;
9971
9972         /* RSS indirection table has been configured by user */
9973         if (rxfh_configured)
9974                 goto out;
9975
9976         /* Reinitialize the RSS indirection table according to the new RSS size */
9977         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9978         if (!rss_indir)
9979                 return -ENOMEM;
9980
9981         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9982                 rss_indir[i] = i % kinfo->rss_size;
9983
9984         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9985         if (ret)
9986                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9987                         ret);
9988
9989         kfree(rss_indir);
9990
9991 out:
9992         if (!ret)
9993                 dev_info(&hdev->pdev->dev,
9994                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
9995                          cur_rss_size, kinfo->rss_size,
9996                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
9997
9998         return ret;
9999 }
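
/* A self-contained sketch of the round-robin indirection fill performed
 * above: every entry of the indirection table maps to a queue index modulo
 * the active RSS size, spreading flows evenly across the resized queue set.
 * example_fill_rss_indir() is a hypothetical helper for illustration only.
 */
static void example_fill_rss_indir(u32 *rss_indir, u32 tbl_size, u16 rss_size)
{
        u32 i;

        /* e.g. tbl_size = 512, rss_size = 8 -> 0, 1, ..., 7, 0, 1, ... */
        for (i = 0; i < tbl_size; i++)
                rss_indir[i] = i % rss_size;
}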
10000
10001 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10002                               u32 *regs_num_64_bit)
10003 {
10004         struct hclge_desc desc;
10005         u32 total_num;
10006         int ret;
10007
10008         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10009         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10010         if (ret) {
10011                 dev_err(&hdev->pdev->dev,
10012                         "Query register number cmd failed, ret = %d.\n", ret);
10013                 return ret;
10014         }
10015
10016         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10017         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10018
10019         total_num = *regs_num_32_bit + *regs_num_64_bit;
10020         if (!total_num)
10021                 return -EINVAL;
10022
10023         return 0;
10024 }
10025
10026 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10027                                  void *data)
10028 {
10029 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10030 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10031
10032         struct hclge_desc *desc;
10033         u32 *reg_val = data;
10034         __le32 *desc_data;
10035         int nodata_num;
10036         int cmd_num;
10037         int i, k, n;
10038         int ret;
10039
10040         if (regs_num == 0)
10041                 return 0;
10042
10043         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10044         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10045                                HCLGE_32_BIT_REG_RTN_DATANUM);
10046         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10047         if (!desc)
10048                 return -ENOMEM;
10049
10050         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10051         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10052         if (ret) {
10053                 dev_err(&hdev->pdev->dev,
10054                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
10055                 kfree(desc);
10056                 return ret;
10057         }
10058
10059         for (i = 0; i < cmd_num; i++) {
10060                 if (i == 0) {
10061                         desc_data = (__le32 *)(&desc[i].data[0]);
10062                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10063                 } else {
10064                         desc_data = (__le32 *)(&desc[i]);
10065                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
10066                 }
10067                 for (k = 0; k < n; k++) {
10068                         *reg_val++ = le32_to_cpu(*desc_data++);
10069
10070                         regs_num--;
10071                         if (!regs_num)
10072                                 break;
10073                 }
10074         }
10075
10076         kfree(desc);
10077         return 0;
10078 }
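
/* Worked sketch of the BD-count arithmetic used by the 32/64 bit register
 * dumps: each BD returns "datanum" data words, but the first BD loses
 * "nodata" of them to command metadata, which is why "nodata" is added to
 * the register count before rounding up. example_reg_cmd_num() is a
 * hypothetical helper for illustration only.
 */
static int example_reg_cmd_num(int regs_num, int datanum, int nodata)
{
        /* e.g. 70 regs, 8 words per BD, 2 metadata words:
         * (70 + 2 + 8 - 1) / 8 = 9 BDs (BD0 carries 6 values, BD1-8 carry 8)
         */
        return (regs_num + nodata + datanum - 1) / datanum;
}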
10079
10080 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10081                                  void *data)
10082 {
10083 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10084 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10085
10086         struct hclge_desc *desc;
10087         u64 *reg_val = data;
10088         __le64 *desc_data;
10089         int nodata_len;
10090         int cmd_num;
10091         int i, k, n;
10092         int ret;
10093
10094         if (regs_num == 0)
10095                 return 0;
10096
10097         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10098         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10099                                HCLGE_64_BIT_REG_RTN_DATANUM);
10100         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10101         if (!desc)
10102                 return -ENOMEM;
10103
10104         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10105         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10106         if (ret) {
10107                 dev_err(&hdev->pdev->dev,
10108                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
10109                 kfree(desc);
10110                 return ret;
10111         }
10112
10113         for (i = 0; i < cmd_num; i++) {
10114                 if (i == 0) {
10115                         desc_data = (__le64 *)(&desc[i].data[0]);
10116                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10117                 } else {
10118                         desc_data = (__le64 *)(&desc[i]);
10119                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
10120                 }
10121                 for (k = 0; k < n; k++) {
10122                         *reg_val++ = le64_to_cpu(*desc_data++);
10123
10124                         regs_num--;
10125                         if (!regs_num)
10126                                 break;
10127                 }
10128         }
10129
10130         kfree(desc);
10131         return 0;
10132 }
10133
10134 #define MAX_SEPARATE_NUM        4
10135 #define SEPARATOR_VALUE         0xFDFCFBFA
10136 #define REG_NUM_PER_LINE        4
10137 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
10138 #define REG_SEPARATOR_LINE      1
10139 #define REG_NUM_REMAIN_MASK     3
10140 #define BD_LIST_MAX_NUM         30
10141
10142 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10143 {
10144         /* prepare 4 commands to query DFX BD number */
10145         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10146         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10147         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10148         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10149         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10150         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10151         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10152
10153         return hclge_cmd_send(&hdev->hw, desc, 4);
10154 }
10155
10156 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10157                                     int *bd_num_list,
10158                                     u32 type_num)
10159 {
10160 #define HCLGE_DFX_REG_BD_NUM    4
10161
10162         u32 entries_per_desc, desc_index, index, offset, i;
10163         struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
10164         int ret;
10165
10166         ret = hclge_query_bd_num_cmd_send(hdev, desc);
10167         if (ret) {
10168                 dev_err(&hdev->pdev->dev,
10169                         "Get dfx bd num fail, status is %d.\n", ret);
10170                 return ret;
10171         }
10172
10173         entries_per_desc = ARRAY_SIZE(desc[0].data);
10174         for (i = 0; i < type_num; i++) {
10175                 offset = hclge_dfx_bd_offset_list[i];
10176                 index = offset % entries_per_desc;
10177                 desc_index = offset / entries_per_desc;
10178                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10179         }
10180
10181         return ret;
10182 }
10183
10184 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10185                                   struct hclge_desc *desc_src, int bd_num,
10186                                   enum hclge_opcode_type cmd)
10187 {
10188         struct hclge_desc *desc = desc_src;
10189         int i, ret;
10190
10191         hclge_cmd_setup_basic_desc(desc, cmd, true);
10192         for (i = 0; i < bd_num - 1; i++) {
10193                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10194                 desc++;
10195                 hclge_cmd_setup_basic_desc(desc, cmd, true);
10196         }
10197
10198         desc = desc_src;
10199         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10200         if (ret)
10201                 dev_err(&hdev->pdev->dev,
10202                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10203                         cmd, ret);
10204
10205         return ret;
10206 }
10207
10208 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10209                                     void *data)
10210 {
10211         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10212         struct hclge_desc *desc = desc_src;
10213         u32 *reg = data;
10214
10215         entries_per_desc = ARRAY_SIZE(desc->data);
10216         reg_num = entries_per_desc * bd_num;
10217         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10218         for (i = 0; i < reg_num; i++) {
10219                 index = i % entries_per_desc;
10220                 desc_index = i / entries_per_desc;
10221                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10222         }
10223         for (i = 0; i < separator_num; i++)
10224                 *reg++ = SEPARATOR_VALUE;
10225
10226         return reg_num + separator_num;
10227 }
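
/* Sketch of the separator padding above: the block is padded with
 * SEPARATOR_VALUE words up to the next REG_NUM_PER_LINE boundary, and a
 * count that is already a multiple of four still receives a full marker
 * line, so every block ends with at least one separator word.
 * example_separator_num() is a hypothetical helper for illustration only.
 */
static int example_separator_num(int reg_num)
{
        /* e.g. reg_num = 10 -> 4 - (10 & 3) = 2 pad words;
         *      reg_num = 12 -> 4 - (12 & 3) = 4 pad words (a full line)
         */
        return REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
}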
10228
10229 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10230 {
10231         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10232         int data_len_per_desc, data_len, bd_num, i;
10233         int bd_num_list[BD_LIST_MAX_NUM];
10234         int ret;
10235
10236         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10237         if (ret) {
10238                 dev_err(&hdev->pdev->dev,
10239                         "Get dfx reg bd num fail, status is %d.\n", ret);
10240                 return ret;
10241         }
10242
10243         data_len_per_desc = sizeof_field(struct hclge_desc, data);
10244         *len = 0;
10245         for (i = 0; i < dfx_reg_type_num; i++) {
10246                 bd_num = bd_num_list[i];
10247                 data_len = data_len_per_desc * bd_num;
10248                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10249         }
10250
10251         return ret;
10252 }
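
/* Worked sketch of the per-type sizing above: each DFX block is rounded up
 * to whole REG_LEN_PER_LINE lines plus one extra line that covers the
 * separator words appended by hclge_dfx_reg_fetch_data().
 * example_dfx_block_len() is a hypothetical helper for illustration only.
 */
static int example_dfx_block_len(int data_len)
{
        /* e.g. data_len = 24 bytes with 16-byte lines:
         * (24 / 16 + 1) * 16 = 32 bytes, i.e. six data words plus the
         * two separator words the fetch routine appends
         */
        return (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
}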
10253
10254 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10255 {
10256         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10257         int bd_num, bd_num_max, buf_len, i;
10258         int bd_num_list[BD_LIST_MAX_NUM];
10259         struct hclge_desc *desc_src;
10260         u32 *reg = data;
10261         int ret;
10262
10263         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10264         if (ret) {
10265                 dev_err(&hdev->pdev->dev,
10266                         "Get dfx reg bd num fail, status is %d.\n", ret);
10267                 return ret;
10268         }
10269
10270         bd_num_max = bd_num_list[0];
10271         for (i = 1; i < dfx_reg_type_num; i++)
10272                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10273
10274         buf_len = sizeof(*desc_src) * bd_num_max;
10275         desc_src = kzalloc(buf_len, GFP_KERNEL);
10276         if (!desc_src) {
10277                 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
10278                 return -ENOMEM;
10279         }
10280
10281         for (i = 0; i < dfx_reg_type_num; i++) {
10282                 bd_num = bd_num_list[i];
10283                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10284                                              hclge_dfx_reg_opcode_list[i]);
10285                 if (ret) {
10286                         dev_err(&hdev->pdev->dev,
10287                                 "Get dfx reg fail, status is %d.\n", ret);
10288                         break;
10289                 }
10290
10291                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10292         }
10293
10294         kfree(desc_src);
10295         return ret;
10296 }
10297
10298 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10299                               struct hnae3_knic_private_info *kinfo)
10300 {
10301 #define HCLGE_RING_REG_OFFSET           0x200
10302 #define HCLGE_RING_INT_REG_OFFSET       0x4
10303
10304         int i, j, reg_num, separator_num;
10305         int data_num_sum;
10306         u32 *reg = data;
10307
10308         /* fetch per-PF register values from PF PCIe register space */
10309         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10310         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10311         for (i = 0; i < reg_num; i++)
10312                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10313         for (i = 0; i < separator_num; i++)
10314                 *reg++ = SEPARATOR_VALUE;
10315         data_num_sum = reg_num + separator_num;
10316
10317         reg_num = ARRAY_SIZE(common_reg_addr_list);
10318         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10319         for (i = 0; i < reg_num; i++)
10320                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10321         for (i = 0; i < separator_num; i++)
10322                 *reg++ = SEPARATOR_VALUE;
10323         data_num_sum += reg_num + separator_num;
10324
10325         reg_num = ARRAY_SIZE(ring_reg_addr_list);
10326         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10327         for (j = 0; j < kinfo->num_tqps; j++) {
10328                 for (i = 0; i < reg_num; i++)
10329                         *reg++ = hclge_read_dev(&hdev->hw,
10330                                                 ring_reg_addr_list[i] +
10331                                                 HCLGE_RING_REG_OFFSET * j);
10332                 for (i = 0; i < separator_num; i++)
10333                         *reg++ = SEPARATOR_VALUE;
10334         }
10335         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10336
10337         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10338         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10339         for (j = 0; j < hdev->num_msi_used - 1; j++) {
10340                 for (i = 0; i < reg_num; i++)
10341                         *reg++ = hclge_read_dev(&hdev->hw,
10342                                                 tqp_intr_reg_addr_list[i] +
10343                                                 HCLGE_RING_INT_REG_OFFSET * j);
10344                 for (i = 0; i < separator_num; i++)
10345                         *reg++ = SEPARATOR_VALUE;
10346         }
10347         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10348
10349         return data_num_sum;
10350 }
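
/* Sketch of the per-ring addressing used above: ring j's copy of a direct
 * register lives at a fixed HCLGE_RING_REG_OFFSET stride from the base
 * address (per-vector interrupt registers use HCLGE_RING_INT_REG_OFFSET
 * the same way). example_ring_reg_addr() is a hypothetical helper for
 * illustration only.
 */
static u32 example_ring_reg_addr(u32 base, int ring_id)
{
        /* e.g. base 0x10000, ring 3 -> 0x10000 + 3 * 0x200 = 0x10600 */
        return base + ring_id * HCLGE_RING_REG_OFFSET;
}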
10351
10352 static int hclge_get_regs_len(struct hnae3_handle *handle)
10353 {
10354         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10355         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10356         struct hclge_vport *vport = hclge_get_vport(handle);
10357         struct hclge_dev *hdev = vport->back;
10358         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10359         int regs_lines_32_bit, regs_lines_64_bit;
10360         int ret;
10361
10362         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10363         if (ret) {
10364                 dev_err(&hdev->pdev->dev,
10365                         "Get register number failed, ret = %d.\n", ret);
10366                 return ret;
10367         }
10368
10369         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10370         if (ret) {
10371                 dev_err(&hdev->pdev->dev,
10372                         "Get dfx reg len failed, ret = %d.\n", ret);
10373                 return ret;
10374         }
10375
10376         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10377                 REG_SEPARATOR_LINE;
10378         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10379                 REG_SEPARATOR_LINE;
10380         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10381                 REG_SEPARATOR_LINE;
10382         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10383                 REG_SEPARATOR_LINE;
10384         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10385                 REG_SEPARATOR_LINE;
10386         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10387                 REG_SEPARATOR_LINE;
10388
10389         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10390                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10391                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10392 }
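
/* Sketch of the per-section line accounting above: a section occupies its
 * register bytes divided into REG_LEN_PER_LINE lines, plus one separator
 * line; the ring and interrupt sections are then multiplied by their
 * instance counts. example_section_lines() is a hypothetical helper for
 * illustration only.
 */
static int example_section_lines(int reg_bytes)
{
        /* e.g. 48 bytes of registers -> 48 / 16 + 1 = 4 lines */
        return reg_bytes / REG_LEN_PER_LINE + REG_SEPARATOR_LINE;
}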
10393
10394 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10395                            void *data)
10396 {
10397         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10398         struct hclge_vport *vport = hclge_get_vport(handle);
10399         struct hclge_dev *hdev = vport->back;
10400         u32 regs_num_32_bit, regs_num_64_bit;
10401         int i, reg_num, separator_num, ret;
10402         u32 *reg = data;
10403
10404         *version = hdev->fw_version;
10405
10406         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10407         if (ret) {
10408                 dev_err(&hdev->pdev->dev,
10409                         "Get register number failed, ret = %d.\n", ret);
10410                 return;
10411         }
10412
10413         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10414
10415         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10416         if (ret) {
10417                 dev_err(&hdev->pdev->dev,
10418                         "Get 32 bit register failed, ret = %d.\n", ret);
10419                 return;
10420         }
10421         reg_num = regs_num_32_bit;
10422         reg += reg_num;
10423         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10424         for (i = 0; i < separator_num; i++)
10425                 *reg++ = SEPARATOR_VALUE;
10426
10427         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10428         if (ret) {
10429                 dev_err(&hdev->pdev->dev,
10430                         "Get 64 bit register failed, ret = %d.\n", ret);
10431                 return;
10432         }
10433         reg_num = regs_num_64_bit * 2;
10434         reg += reg_num;
10435         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10436         for (i = 0; i < separator_num; i++)
10437                 *reg++ = SEPARATOR_VALUE;
10438
10439         ret = hclge_get_dfx_reg(hdev, reg);
10440         if (ret)
10441                 dev_err(&hdev->pdev->dev,
10442                         "Get dfx register failed, ret = %d.\n", ret);
10443 }
10444
10445 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10446 {
10447         struct hclge_set_led_state_cmd *req;
10448         struct hclge_desc desc;
10449         int ret;
10450
10451         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10452
10453         req = (struct hclge_set_led_state_cmd *)desc.data;
10454         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10455                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10456
10457         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10458         if (ret)
10459                 dev_err(&hdev->pdev->dev,
10460                         "Send set led state cmd error, ret = %d\n", ret);
10461
10462         return ret;
10463 }
10464
10465 enum hclge_led_status {
10466         HCLGE_LED_OFF,
10467         HCLGE_LED_ON,
10468         HCLGE_LED_NO_CHANGE = 0xFF,
10469 };
10470
10471 static int hclge_set_led_id(struct hnae3_handle *handle,
10472                             enum ethtool_phys_id_state status)
10473 {
10474         struct hclge_vport *vport = hclge_get_vport(handle);
10475         struct hclge_dev *hdev = vport->back;
10476
10477         switch (status) {
10478         case ETHTOOL_ID_ACTIVE:
10479                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10480         case ETHTOOL_ID_INACTIVE:
10481                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10482         default:
10483                 return -EINVAL;
10484         }
10485 }
10486
10487 static void hclge_get_link_mode(struct hnae3_handle *handle,
10488                                 unsigned long *supported,
10489                                 unsigned long *advertising)
10490 {
10491         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10492         struct hclge_vport *vport = hclge_get_vport(handle);
10493         struct hclge_dev *hdev = vport->back;
10494         unsigned int idx;
10495
10496         for (idx = 0; idx < size; idx++) {
10497                 supported[idx] = hdev->hw.mac.supported[idx];
10498                 advertising[idx] = hdev->hw.mac.advertising[idx];
10499         }
10500 }
10501
10502 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10503 {
10504         struct hclge_vport *vport = hclge_get_vport(handle);
10505         struct hclge_dev *hdev = vport->back;
10506
10507         return hclge_config_gro(hdev, enable);
10508 }
10509
10510 static const struct hnae3_ae_ops hclge_ops = {
10511         .init_ae_dev = hclge_init_ae_dev,
10512         .uninit_ae_dev = hclge_uninit_ae_dev,
10513         .flr_prepare = hclge_flr_prepare,
10514         .flr_done = hclge_flr_done,
10515         .init_client_instance = hclge_init_client_instance,
10516         .uninit_client_instance = hclge_uninit_client_instance,
10517         .map_ring_to_vector = hclge_map_ring_to_vector,
10518         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10519         .get_vector = hclge_get_vector,
10520         .put_vector = hclge_put_vector,
10521         .set_promisc_mode = hclge_set_promisc_mode,
10522         .set_loopback = hclge_set_loopback,
10523         .start = hclge_ae_start,
10524         .stop = hclge_ae_stop,
10525         .client_start = hclge_client_start,
10526         .client_stop = hclge_client_stop,
10527         .get_status = hclge_get_status,
10528         .get_ksettings_an_result = hclge_get_ksettings_an_result,
10529         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10530         .get_media_type = hclge_get_media_type,
10531         .check_port_speed = hclge_check_port_speed,
10532         .get_fec = hclge_get_fec,
10533         .set_fec = hclge_set_fec,
10534         .get_rss_key_size = hclge_get_rss_key_size,
10535         .get_rss_indir_size = hclge_get_rss_indir_size,
10536         .get_rss = hclge_get_rss,
10537         .set_rss = hclge_set_rss,
10538         .set_rss_tuple = hclge_set_rss_tuple,
10539         .get_rss_tuple = hclge_get_rss_tuple,
10540         .get_tc_size = hclge_get_tc_size,
10541         .get_mac_addr = hclge_get_mac_addr,
10542         .set_mac_addr = hclge_set_mac_addr,
10543         .do_ioctl = hclge_do_ioctl,
10544         .add_uc_addr = hclge_add_uc_addr,
10545         .rm_uc_addr = hclge_rm_uc_addr,
10546         .add_mc_addr = hclge_add_mc_addr,
10547         .rm_mc_addr = hclge_rm_mc_addr,
10548         .set_autoneg = hclge_set_autoneg,
10549         .get_autoneg = hclge_get_autoneg,
10550         .restart_autoneg = hclge_restart_autoneg,
10551         .halt_autoneg = hclge_halt_autoneg,
10552         .get_pauseparam = hclge_get_pauseparam,
10553         .set_pauseparam = hclge_set_pauseparam,
10554         .set_mtu = hclge_set_mtu,
10555         .reset_queue = hclge_reset_tqp,
10556         .get_stats = hclge_get_stats,
10557         .get_mac_stats = hclge_get_mac_stat,
10558         .update_stats = hclge_update_stats,
10559         .get_strings = hclge_get_strings,
10560         .get_sset_count = hclge_get_sset_count,
10561         .get_fw_version = hclge_get_fw_version,
10562         .get_mdix_mode = hclge_get_mdix_mode,
10563         .enable_vlan_filter = hclge_enable_vlan_filter,
10564         .set_vlan_filter = hclge_set_vlan_filter,
10565         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10566         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10567         .reset_event = hclge_reset_event,
10568         .get_reset_level = hclge_get_reset_level,
10569         .set_default_reset_request = hclge_set_def_reset_request,
10570         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10571         .set_channels = hclge_set_channels,
10572         .get_channels = hclge_get_channels,
10573         .get_regs_len = hclge_get_regs_len,
10574         .get_regs = hclge_get_regs,
10575         .set_led_id = hclge_set_led_id,
10576         .get_link_mode = hclge_get_link_mode,
10577         .add_fd_entry = hclge_add_fd_entry,
10578         .del_fd_entry = hclge_del_fd_entry,
10579         .del_all_fd_entries = hclge_del_all_fd_entries,
10580         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10581         .get_fd_rule_info = hclge_get_fd_rule_info,
10582         .get_fd_all_rules = hclge_get_all_rules,
10583         .restore_fd_rules = hclge_restore_fd_entries,
10584         .enable_fd = hclge_enable_fd,
10585         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10586         .dbg_run_cmd = hclge_dbg_run_cmd,
10587         .handle_hw_ras_error = hclge_handle_hw_ras_error,
10588         .get_hw_reset_stat = hclge_get_hw_reset_stat,
10589         .ae_dev_resetting = hclge_ae_dev_resetting,
10590         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10591         .set_gro_en = hclge_gro_en,
10592         .get_global_queue_id = hclge_covert_handle_qid_global,
10593         .set_timer_task = hclge_set_timer_task,
10594         .mac_connect_phy = hclge_mac_connect_phy,
10595         .mac_disconnect_phy = hclge_mac_disconnect_phy,
10596         .restore_vlan_table = hclge_restore_vlan_table,
10597         .get_vf_config = hclge_get_vf_config,
10598         .set_vf_link_state = hclge_set_vf_link_state,
10599         .set_vf_spoofchk = hclge_set_vf_spoofchk,
10600         .set_vf_trust = hclge_set_vf_trust,
10601         .set_vf_rate = hclge_set_vf_rate,
10602         .set_vf_mac = hclge_set_vf_mac,
10603 };
10604
10605 static struct hnae3_ae_algo ae_algo = {
10606         .ops = &hclge_ops,
10607         .pdev_id_table = ae_algo_pci_tbl,
10608 };
10609
10610 static int hclge_init(void)
10611 {
10612         pr_info("%s is initializing\n", HCLGE_NAME);
10613
10614         hnae3_register_ae_algo(&ae_algo);
10615
10616         return 0;
10617 }
10618
10619 static void hclge_exit(void)
10620 {
10621         hnae3_unregister_ae_algo(&ae_algo);
10622 }
10623 module_init(hclge_init);
10624 module_exit(hclge_exit);
10625
10626 MODULE_LICENSE("GPL");
10627 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10628 MODULE_DESCRIPTION("HCLGE Driver");
10629 MODULE_VERSION(HCLGE_MOD_VERSION);