drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (tomoyo/tomoyo-test1.git, commit 6deeb964ea6b057dbd908a6c563ddada90cb2a60)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
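/* HCLGE_MAC_STATS_FIELD_OFF() yields the byte offset of a counter inside
 * struct hclge_mac_stats, and HCLGE_STATS_READ() reads a u64 at that offset,
 * e.g. HCLGE_STATS_READ(&hdev->mac_stats,
 *                       HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 * g_mac_stats_string[] below pairs each printable stat name with its offset.
 */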
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38 #define HCLGE_RESET_SYNC_TIME           100
39 #define HCLGE_PF_RESET_SYNC_TIME        20
40 #define HCLGE_PF_RESET_SYNC_CNT         1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55
56 #define HCLGE_LINK_STATUS_MS    10
57
58 #define HCLGE_VF_VPORT_START_NUM        1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66                                u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70                                                    unsigned long *addr);
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
72
73 static struct hnae3_ae_algo ae_algo;
74
75 static struct workqueue_struct *hclge_wq;
76
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85         /* required last entry */
86         {0, }
87 };
88
89 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
90
91 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92                                          HCLGE_CMDQ_TX_ADDR_H_REG,
93                                          HCLGE_CMDQ_TX_DEPTH_REG,
94                                          HCLGE_CMDQ_TX_TAIL_REG,
95                                          HCLGE_CMDQ_TX_HEAD_REG,
96                                          HCLGE_CMDQ_RX_ADDR_L_REG,
97                                          HCLGE_CMDQ_RX_ADDR_H_REG,
98                                          HCLGE_CMDQ_RX_DEPTH_REG,
99                                          HCLGE_CMDQ_RX_TAIL_REG,
100                                          HCLGE_CMDQ_RX_HEAD_REG,
101                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
102                                          HCLGE_CMDQ_INTR_STS_REG,
103                                          HCLGE_CMDQ_INTR_EN_REG,
104                                          HCLGE_CMDQ_INTR_GEN_REG};
105
106 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107                                            HCLGE_VECTOR0_OTER_EN_REG,
108                                            HCLGE_MISC_RESET_STS_REG,
109                                            HCLGE_MISC_VECTOR_INT_STS,
110                                            HCLGE_GLOBAL_RESET_REG,
111                                            HCLGE_FUN_RST_ING,
112                                            HCLGE_GRO_EN_REG};
113
114 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115                                          HCLGE_RING_RX_ADDR_H_REG,
116                                          HCLGE_RING_RX_BD_NUM_REG,
117                                          HCLGE_RING_RX_BD_LENGTH_REG,
118                                          HCLGE_RING_RX_MERGE_EN_REG,
119                                          HCLGE_RING_RX_TAIL_REG,
120                                          HCLGE_RING_RX_HEAD_REG,
121                                          HCLGE_RING_RX_FBD_NUM_REG,
122                                          HCLGE_RING_RX_OFFSET_REG,
123                                          HCLGE_RING_RX_FBD_OFFSET_REG,
124                                          HCLGE_RING_RX_STASH_REG,
125                                          HCLGE_RING_RX_BD_ERR_REG,
126                                          HCLGE_RING_TX_ADDR_L_REG,
127                                          HCLGE_RING_TX_ADDR_H_REG,
128                                          HCLGE_RING_TX_BD_NUM_REG,
129                                          HCLGE_RING_TX_PRIORITY_REG,
130                                          HCLGE_RING_TX_TC_REG,
131                                          HCLGE_RING_TX_MERGE_EN_REG,
132                                          HCLGE_RING_TX_TAIL_REG,
133                                          HCLGE_RING_TX_HEAD_REG,
134                                          HCLGE_RING_TX_FBD_NUM_REG,
135                                          HCLGE_RING_TX_OFFSET_REG,
136                                          HCLGE_RING_TX_EBD_NUM_REG,
137                                          HCLGE_RING_TX_EBD_OFFSET_REG,
138                                          HCLGE_RING_TX_BD_ERR_REG,
139                                          HCLGE_RING_EN_REG};
140
141 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142                                              HCLGE_TQP_INTR_GL0_REG,
143                                              HCLGE_TQP_INTR_GL1_REG,
144                                              HCLGE_TQP_INTR_GL2_REG,
145                                              HCLGE_TQP_INTR_RL_REG};
146
147 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
148         "App    Loopback test",
149         "Serdes serial Loopback test",
150         "Serdes parallel Loopback test",
151         "Phy    Loopback test"
152 };
153
154 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155         {"mac_tx_mac_pause_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157         {"mac_rx_mac_pause_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159         {"mac_tx_control_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161         {"mac_rx_control_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163         {"mac_tx_pfc_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165         {"mac_tx_pfc_pri0_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167         {"mac_tx_pfc_pri1_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169         {"mac_tx_pfc_pri2_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171         {"mac_tx_pfc_pri3_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173         {"mac_tx_pfc_pri4_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175         {"mac_tx_pfc_pri5_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177         {"mac_tx_pfc_pri6_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179         {"mac_tx_pfc_pri7_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181         {"mac_rx_pfc_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183         {"mac_rx_pfc_pri0_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185         {"mac_rx_pfc_pri1_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187         {"mac_rx_pfc_pri2_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189         {"mac_rx_pfc_pri3_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191         {"mac_rx_pfc_pri4_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193         {"mac_rx_pfc_pri5_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195         {"mac_rx_pfc_pri6_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197         {"mac_rx_pfc_pri7_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199         {"mac_tx_total_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201         {"mac_tx_total_oct_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203         {"mac_tx_good_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205         {"mac_tx_bad_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207         {"mac_tx_good_oct_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209         {"mac_tx_bad_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211         {"mac_tx_uni_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213         {"mac_tx_multi_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215         {"mac_tx_broad_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217         {"mac_tx_undersize_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219         {"mac_tx_oversize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221         {"mac_tx_64_oct_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223         {"mac_tx_65_127_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225         {"mac_tx_128_255_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227         {"mac_tx_256_511_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229         {"mac_tx_512_1023_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231         {"mac_tx_1024_1518_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233         {"mac_tx_1519_2047_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235         {"mac_tx_2048_4095_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237         {"mac_tx_4096_8191_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239         {"mac_tx_8192_9216_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241         {"mac_tx_9217_12287_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243         {"mac_tx_12288_16383_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245         {"mac_tx_1519_max_good_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247         {"mac_tx_1519_max_bad_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249         {"mac_rx_total_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251         {"mac_rx_total_oct_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253         {"mac_rx_good_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255         {"mac_rx_bad_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257         {"mac_rx_good_oct_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259         {"mac_rx_bad_oct_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261         {"mac_rx_uni_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263         {"mac_rx_multi_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265         {"mac_rx_broad_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267         {"mac_rx_undersize_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269         {"mac_rx_oversize_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271         {"mac_rx_64_oct_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273         {"mac_rx_65_127_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275         {"mac_rx_128_255_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277         {"mac_rx_256_511_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279         {"mac_rx_512_1023_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281         {"mac_rx_1024_1518_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283         {"mac_rx_1519_2047_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285         {"mac_rx_2048_4095_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287         {"mac_rx_4096_8191_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289         {"mac_rx_8192_9216_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291         {"mac_rx_9217_12287_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293         {"mac_rx_12288_16383_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295         {"mac_rx_1519_max_good_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297         {"mac_rx_1519_max_bad_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
299
300         {"mac_tx_fragment_pkt_num",
301                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302         {"mac_tx_undermin_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304         {"mac_tx_jabber_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306         {"mac_tx_err_all_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308         {"mac_tx_from_app_good_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310         {"mac_tx_from_app_bad_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312         {"mac_rx_fragment_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314         {"mac_rx_undermin_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316         {"mac_rx_jabber_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318         {"mac_rx_fcs_err_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320         {"mac_rx_send_app_good_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322         {"mac_rx_send_app_bad_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
324 };
325
326 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
327         {
328                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
330                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331                 .i_port_bitmap = 0x1,
332         },
333 };
334
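/* default RSS hash key; this appears to be the commonly used Toeplitz sample key */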
335 static const u8 hclge_hash_key[] = {
336         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
341 };
342
343 static const u32 hclge_dfx_bd_offset_list[] = {
344         HCLGE_DFX_BIOS_BD_OFFSET,
345         HCLGE_DFX_SSU_0_BD_OFFSET,
346         HCLGE_DFX_SSU_1_BD_OFFSET,
347         HCLGE_DFX_IGU_BD_OFFSET,
348         HCLGE_DFX_RPU_0_BD_OFFSET,
349         HCLGE_DFX_RPU_1_BD_OFFSET,
350         HCLGE_DFX_NCSI_BD_OFFSET,
351         HCLGE_DFX_RTC_BD_OFFSET,
352         HCLGE_DFX_PPP_BD_OFFSET,
353         HCLGE_DFX_RCB_BD_OFFSET,
354         HCLGE_DFX_TQP_BD_OFFSET,
355         HCLGE_DFX_SSU_2_BD_OFFSET
356 };
357
358 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359         HCLGE_OPC_DFX_BIOS_COMMON_REG,
360         HCLGE_OPC_DFX_SSU_REG_0,
361         HCLGE_OPC_DFX_SSU_REG_1,
362         HCLGE_OPC_DFX_IGU_EGU_REG,
363         HCLGE_OPC_DFX_RPU_REG_0,
364         HCLGE_OPC_DFX_RPU_REG_1,
365         HCLGE_OPC_DFX_NCSI_REG,
366         HCLGE_OPC_DFX_RTC_REG,
367         HCLGE_OPC_DFX_PPP_REG,
368         HCLGE_OPC_DFX_RCB_REG,
369         HCLGE_OPC_DFX_TQP_REG,
370         HCLGE_OPC_DFX_SSU_REG_2
371 };
372
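/* widths (in bits) of the meta-data and tuple fields used when composing
 * flow director (FD) lookup keys
 */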
373 static const struct key_info meta_data_key_info[] = {
374         { PACKET_TYPE_ID, 6},
375         { IP_FRAGEMENT, 1},
376         { ROCE_TYPE, 1},
377         { NEXT_KEY, 5},
378         { VLAN_NUMBER, 2},
379         { SRC_VPORT, 12},
380         { DST_VPORT, 12},
381         { TUNNEL_PACKET, 1},
382 };
383
384 static const struct key_info tuple_key_info[] = {
385         { OUTER_DST_MAC, 48},
386         { OUTER_SRC_MAC, 48},
387         { OUTER_VLAN_TAG_FST, 16},
388         { OUTER_VLAN_TAG_SEC, 16},
389         { OUTER_ETH_TYPE, 16},
390         { OUTER_L2_RSV, 16},
391         { OUTER_IP_TOS, 8},
392         { OUTER_IP_PROTO, 8},
393         { OUTER_SRC_IP, 32},
394         { OUTER_DST_IP, 32},
395         { OUTER_L3_RSV, 16},
396         { OUTER_SRC_PORT, 16},
397         { OUTER_DST_PORT, 16},
398         { OUTER_L4_RSV, 32},
399         { OUTER_TUN_VNI, 24},
400         { OUTER_TUN_FLOW_ID, 8},
401         { INNER_DST_MAC, 48},
402         { INNER_SRC_MAC, 48},
403         { INNER_VLAN_TAG_FST, 16},
404         { INNER_VLAN_TAG_SEC, 16},
405         { INNER_ETH_TYPE, 16},
406         { INNER_L2_RSV, 16},
407         { INNER_IP_TOS, 8},
408         { INNER_IP_PROTO, 8},
409         { INNER_SRC_IP, 32},
410         { INNER_DST_IP, 32},
411         { INNER_L3_RSV, 16},
412         { INNER_SRC_PORT, 16},
413         { INNER_DST_PORT, 16},
414         { INNER_L4_RSV, 32},
415 };
416
417 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
418 {
419 #define HCLGE_MAC_CMD_NUM 21
420
421         u64 *data = (u64 *)(&hdev->mac_stats);
422         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
423         __le64 *desc_data;
424         int i, k, n;
425         int ret;
426
427         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
429         if (ret) {
430                 dev_err(&hdev->pdev->dev,
431                         "Get MAC pkt stats fail, status = %d.\n", ret);
432
433                 return ret;
434         }
435
436         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437                 /* for special opcode 0032, only the first desc has the head */
438                 if (unlikely(i == 0)) {
439                         desc_data = (__le64 *)(&desc[i].data[0]);
440                         n = HCLGE_RD_FIRST_STATS_NUM;
441                 } else {
442                         desc_data = (__le64 *)(&desc[i]);
443                         n = HCLGE_RD_OTHER_STATS_NUM;
444                 }
445
446                 for (k = 0; k < n; k++) {
447                         *data += le64_to_cpu(*desc_data);
448                         data++;
449                         desc_data++;
450                 }
451         }
452
453         return 0;
454 }
455
456 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
457 {
458         u64 *data = (u64 *)(&hdev->mac_stats);
459         struct hclge_desc *desc;
460         __le64 *desc_data;
461         u16 i, k, n;
462         int ret;
463
464         /* This may be called inside atomic sections,
465          * so GFP_ATOMIC is more suitable here
466          */
467         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
468         if (!desc)
469                 return -ENOMEM;
470
471         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
473         if (ret) {
474                 kfree(desc);
475                 return ret;
476         }
477
478         for (i = 0; i < desc_num; i++) {
479                 /* for special opcode 0034, only the first desc has the head */
480                 if (i == 0) {
481                         desc_data = (__le64 *)(&desc[i].data[0]);
482                         n = HCLGE_RD_FIRST_STATS_NUM;
483                 } else {
484                         desc_data = (__le64 *)(&desc[i]);
485                         n = HCLGE_RD_OTHER_STATS_NUM;
486                 }
487
488                 for (k = 0; k < n; k++) {
489                         *data += le64_to_cpu(*desc_data);
490                         data++;
491                         desc_data++;
492                 }
493         }
494
495         kfree(desc);
496
497         return 0;
498 }
499
500 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
501 {
502         struct hclge_desc desc;
503         __le32 *desc_data;
504         u32 reg_num;
505         int ret;
506
507         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
509         if (ret)
510                 return ret;
511
512         desc_data = (__le32 *)(&desc.data[0]);
513         reg_num = le32_to_cpu(*desc_data);
514
515         *desc_num = 1 + ((reg_num - 3) >> 2) +
516                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
517
518         return 0;
519 }
520
521 static int hclge_mac_update_stats(struct hclge_dev *hdev)
522 {
523         u32 desc_num;
524         int ret;
525
526         ret = hclge_mac_query_reg_num(hdev, &desc_num);
527
528         /* The firmware supports the new statistics acquisition method */
529         if (!ret)
530                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
531         else if (ret == -EOPNOTSUPP)
532                 ret = hclge_mac_update_stats_defective(hdev);
533         else
534                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
535
536         return ret;
537 }
538
539 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
540 {
541         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542         struct hclge_vport *vport = hclge_get_vport(handle);
543         struct hclge_dev *hdev = vport->back;
544         struct hnae3_queue *queue;
545         struct hclge_desc desc[1];
546         struct hclge_tqp *tqp;
547         int ret, i;
548
549         for (i = 0; i < kinfo->num_tqps; i++) {
550                 queue = handle->kinfo.tqp[i];
551                 tqp = container_of(queue, struct hclge_tqp, q);
552                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
553                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554                                            true);
555
556                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
558                 if (ret) {
559                         dev_err(&hdev->pdev->dev,
560                                 "Query tqp stat fail, status = %d,queue = %d\n",
561                                 ret, i);
562                         return ret;
563                 }
564                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565                         le32_to_cpu(desc[0].data[1]);
566         }
567
568         for (i = 0; i < kinfo->num_tqps; i++) {
569                 queue = handle->kinfo.tqp[i];
570                 tqp = container_of(queue, struct hclge_tqp, q);
571                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
572                 hclge_cmd_setup_basic_desc(&desc[0],
573                                            HCLGE_OPC_QUERY_TX_STATUS,
574                                            true);
575
576                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
578                 if (ret) {
579                         dev_err(&hdev->pdev->dev,
580                                 "Query tqp stat fail, status = %d,queue = %d\n",
581                                 ret, i);
582                         return ret;
583                 }
584                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585                         le32_to_cpu(desc[0].data[1]);
586         }
587
588         return 0;
589 }
590
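/* report per-queue TX counters first, then RX; this order must match the
 * string order produced by hclge_tqps_get_strings() below
 */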
591 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
592 {
593         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594         struct hclge_tqp *tqp;
595         u64 *buff = data;
596         int i;
597
598         for (i = 0; i < kinfo->num_tqps; i++) {
599                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601         }
602
603         for (i = 0; i < kinfo->num_tqps; i++) {
604                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
606         }
607
608         return buff;
609 }
610
611 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
614
615         /* each tqp has both a TX and an RX queue */
616         return kinfo->num_tqps * (2);
617 }
618
619 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
620 {
621         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622         u8 *buff = data;
623         int i = 0;
624
625         for (i = 0; i < kinfo->num_tqps; i++) {
626                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627                         struct hclge_tqp, q);
628                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
629                          tqp->index);
630                 buff = buff + ETH_GSTRING_LEN;
631         }
632
633         for (i = 0; i < kinfo->num_tqps; i++) {
634                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635                         struct hclge_tqp, q);
636                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
637                          tqp->index);
638                 buff = buff + ETH_GSTRING_LEN;
639         }
640
641         return buff;
642 }
643
644 static u64 *hclge_comm_get_stats(const void *comm_stats,
645                                  const struct hclge_comm_stats_str strs[],
646                                  int size, u64 *data)
647 {
648         u64 *buf = data;
649         u32 i;
650
651         for (i = 0; i < size; i++)
652                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
653
654         return buf + size;
655 }
656
657 static u8 *hclge_comm_get_strings(u32 stringset,
658                                   const struct hclge_comm_stats_str strs[],
659                                   int size, u8 *data)
660 {
661         char *buff = (char *)data;
662         u32 i;
663
664         if (stringset != ETH_SS_STATS)
665                 return buff;
666
667         for (i = 0; i < size; i++) {
668                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669                 buff = buff + ETH_GSTRING_LEN;
670         }
671
672         return (u8 *)buff;
673 }
674
675 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
676 {
677         struct hnae3_handle *handle;
678         int status;
679
680         handle = &hdev->vport[0].nic;
681         if (handle->client) {
682                 status = hclge_tqps_update_stats(handle);
683                 if (status) {
684                         dev_err(&hdev->pdev->dev,
685                                 "Update TQPS stats fail, status = %d.\n",
686                                 status);
687                 }
688         }
689
690         status = hclge_mac_update_stats(hdev);
691         if (status)
692                 dev_err(&hdev->pdev->dev,
693                         "Update MAC stats fail, status = %d.\n", status);
694 }
695
696 static void hclge_update_stats(struct hnae3_handle *handle,
697                                struct net_device_stats *net_stats)
698 {
699         struct hclge_vport *vport = hclge_get_vport(handle);
700         struct hclge_dev *hdev = vport->back;
701         int status;
702
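        /* test_and_set_bit() acts as a simple lock here: if another stats
         * update is already in progress, skip this one
         */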
703         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704                 return;
705
706         status = hclge_mac_update_stats(hdev);
707         if (status)
708                 dev_err(&hdev->pdev->dev,
709                         "Update MAC stats fail, status = %d.\n",
710                         status);
711
712         status = hclge_tqps_update_stats(handle);
713         if (status)
714                 dev_err(&hdev->pdev->dev,
715                         "Update TQPS stats fail, status = %d.\n",
716                         status);
717
718         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
719 }
720
721 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722 {
723 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724                 HNAE3_SUPPORT_PHY_LOOPBACK |\
725                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
727
728         struct hclge_vport *vport = hclge_get_vport(handle);
729         struct hclge_dev *hdev = vport->back;
730         int count = 0;
731
732         /* Loopback test support rules:
733          * mac: supported only in GE mode
734          * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
735          * phy: supported only when a PHY device exists on the board
736          */
737         if (stringset == ETH_SS_TEST) {
738                 /* clear loopback bit flags at first */
739                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
740                 if (hdev->pdev->revision >= 0x21 ||
741                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
744                         count += 1;
745                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
746                 }
747
748                 count += 2;
749                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
751
752                 if (hdev->hw.mac.phydev) {
753                         count += 1;
754                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755                 }
756
757         } else if (stringset == ETH_SS_STATS) {
758                 count = ARRAY_SIZE(g_mac_stats_string) +
759                         hclge_tqps_get_sset_count(handle, stringset);
760         }
761
762         return count;
763 }
764
765 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766                               u8 *data)
767 {
768         u8 *p = (char *)data;
769         int size;
770
771         if (stringset == ETH_SS_STATS) {
772                 size = ARRAY_SIZE(g_mac_stats_string);
773                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
774                                            size, p);
775                 p = hclge_tqps_get_strings(handle, p);
776         } else if (stringset == ETH_SS_TEST) {
777                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
779                                ETH_GSTRING_LEN);
780                         p += ETH_GSTRING_LEN;
781                 }
782                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
784                                ETH_GSTRING_LEN);
785                         p += ETH_GSTRING_LEN;
786                 }
787                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
788                         memcpy(p,
789                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
790                                ETH_GSTRING_LEN);
791                         p += ETH_GSTRING_LEN;
792                 }
793                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
795                                ETH_GSTRING_LEN);
796                         p += ETH_GSTRING_LEN;
797                 }
798         }
799 }
800
801 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802 {
803         struct hclge_vport *vport = hclge_get_vport(handle);
804         struct hclge_dev *hdev = vport->back;
805         u64 *p;
806
807         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808                                  ARRAY_SIZE(g_mac_stats_string), data);
809         p = hclge_tqps_get_stats(handle, p);
810 }
811
812 static void hclge_get_mac_stat(struct hnae3_handle *handle,
813                                struct hns3_mac_stats *mac_stats)
814 {
815         struct hclge_vport *vport = hclge_get_vport(handle);
816         struct hclge_dev *hdev = vport->back;
817
818         hclge_update_stats(handle, NULL);
819
820         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
822 }
823
824 static int hclge_parse_func_status(struct hclge_dev *hdev,
825                                    struct hclge_func_status_cmd *status)
826 {
827         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
828                 return -EINVAL;
829
830         /* Record whether this PF is the main PF */
831         if (status->pf_state & HCLGE_PF_STATE_MAIN)
832                 hdev->flag |= HCLGE_FLAG_MAIN;
833         else
834                 hdev->flag &= ~HCLGE_FLAG_MAIN;
835
836         return 0;
837 }
838
839 static int hclge_query_function_status(struct hclge_dev *hdev)
840 {
841 #define HCLGE_QUERY_MAX_CNT     5
842
843         struct hclge_func_status_cmd *req;
844         struct hclge_desc desc;
845         int timeout = 0;
846         int ret;
847
848         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
849         req = (struct hclge_func_status_cmd *)desc.data;
850
851         do {
852                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
853                 if (ret) {
854                         dev_err(&hdev->pdev->dev,
855                                 "query function status failed %d.\n", ret);
856                         return ret;
857                 }
858
859                 /* Check if PF reset is done */
860                 if (req->pf_state)
861                         break;
862                 usleep_range(1000, 2000);
863         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
864
865         return hclge_parse_func_status(hdev, req);
866 }
867
868 static int hclge_query_pf_resource(struct hclge_dev *hdev)
869 {
870         struct hclge_pf_res_cmd *req;
871         struct hclge_desc desc;
872         int ret;
873
874         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
875         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
876         if (ret) {
877                 dev_err(&hdev->pdev->dev,
878                         "query pf resource failed %d.\n", ret);
879                 return ret;
880         }
881
882         req = (struct hclge_pf_res_cmd *)desc.data;
883         hdev->num_tqps = le16_to_cpu(req->tqp_num);
884         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
885
886         if (req->tx_buf_size)
887                 hdev->tx_buf_size =
888                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
889         else
890                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
891
892         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
893
894         if (req->dv_buf_size)
895                 hdev->dv_buf_size =
896                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
897         else
898                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
899
900         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
901
902         if (hnae3_dev_roce_supported(hdev)) {
903                 hdev->roce_base_msix_offset =
904                 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
905                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
906                 hdev->num_roce_msi =
907                 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
908                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
909
910                 /* The NIC's MSI-X vector count always equals the RoCE's. */
911                 hdev->num_nic_msi = hdev->num_roce_msi;
912
913                 /* PF should have both NIC and RoCE vectors;
914                  * NIC vectors are queued before RoCE vectors.
915                  */
916                 hdev->num_msi = hdev->num_roce_msi +
917                                 hdev->roce_base_msix_offset;
918         } else {
919                 hdev->num_msi =
920                 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
921                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
922
923                 hdev->num_nic_msi = hdev->num_msi;
924         }
925
926         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
927                 dev_err(&hdev->pdev->dev,
928                         "Just %u msi resources, not enough for pf(min:2).\n",
929                         hdev->num_nic_msi);
930                 return -EINVAL;
931         }
932
933         return 0;
934 }
935
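/* translate the speed code reported in the device configuration into an
 * HCLGE_MAC_SPEED_* value
 */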
936 static int hclge_parse_speed(int speed_cmd, int *speed)
937 {
938         switch (speed_cmd) {
939         case 6:
940                 *speed = HCLGE_MAC_SPEED_10M;
941                 break;
942         case 7:
943                 *speed = HCLGE_MAC_SPEED_100M;
944                 break;
945         case 0:
946                 *speed = HCLGE_MAC_SPEED_1G;
947                 break;
948         case 1:
949                 *speed = HCLGE_MAC_SPEED_10G;
950                 break;
951         case 2:
952                 *speed = HCLGE_MAC_SPEED_25G;
953                 break;
954         case 3:
955                 *speed = HCLGE_MAC_SPEED_40G;
956                 break;
957         case 4:
958                 *speed = HCLGE_MAC_SPEED_50G;
959                 break;
960         case 5:
961                 *speed = HCLGE_MAC_SPEED_100G;
962                 break;
963         default:
964                 return -EINVAL;
965         }
966
967         return 0;
968 }
969
970 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
971 {
972         struct hclge_vport *vport = hclge_get_vport(handle);
973         struct hclge_dev *hdev = vport->back;
974         u32 speed_ability = hdev->hw.mac.speed_ability;
975         u32 speed_bit = 0;
976
977         switch (speed) {
978         case HCLGE_MAC_SPEED_10M:
979                 speed_bit = HCLGE_SUPPORT_10M_BIT;
980                 break;
981         case HCLGE_MAC_SPEED_100M:
982                 speed_bit = HCLGE_SUPPORT_100M_BIT;
983                 break;
984         case HCLGE_MAC_SPEED_1G:
985                 speed_bit = HCLGE_SUPPORT_1G_BIT;
986                 break;
987         case HCLGE_MAC_SPEED_10G:
988                 speed_bit = HCLGE_SUPPORT_10G_BIT;
989                 break;
990         case HCLGE_MAC_SPEED_25G:
991                 speed_bit = HCLGE_SUPPORT_25G_BIT;
992                 break;
993         case HCLGE_MAC_SPEED_40G:
994                 speed_bit = HCLGE_SUPPORT_40G_BIT;
995                 break;
996         case HCLGE_MAC_SPEED_50G:
997                 speed_bit = HCLGE_SUPPORT_50G_BIT;
998                 break;
999         case HCLGE_MAC_SPEED_100G:
1000                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1001                 break;
1002         default:
1003                 return -EINVAL;
1004         }
1005
1006         if (speed_bit & speed_ability)
1007                 return 0;
1008
1009         return -EINVAL;
1010 }
1011
1012 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1013 {
1014         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1015                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1016                                  mac->supported);
1017         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1018                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1019                                  mac->supported);
1020         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1021                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1022                                  mac->supported);
1023         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1024                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1025                                  mac->supported);
1026         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1027                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1028                                  mac->supported);
1029 }
1030
1031 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1032 {
1033         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1034                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1035                                  mac->supported);
1036         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1037                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1038                                  mac->supported);
1039         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1040                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1041                                  mac->supported);
1042         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1043                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1044                                  mac->supported);
1045         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1046                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1047                                  mac->supported);
1048 }
1049
1050 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1051 {
1052         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1053                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1054                                  mac->supported);
1055         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1056                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1057                                  mac->supported);
1058         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1060                                  mac->supported);
1061         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1062                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1063                                  mac->supported);
1064         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1065                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1066                                  mac->supported);
1067 }
1068
1069 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1070 {
1071         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1072                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1073                                  mac->supported);
1074         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1075                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1076                                  mac->supported);
1077         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1078                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1079                                  mac->supported);
1080         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1081                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1082                                  mac->supported);
1083         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1084                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1085                                  mac->supported);
1086         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1087                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1088                                  mac->supported);
1089 }
1090
1091 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1092 {
1093         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1094         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1095
1096         switch (mac->speed) {
1097         case HCLGE_MAC_SPEED_10G:
1098         case HCLGE_MAC_SPEED_40G:
1099                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1100                                  mac->supported);
1101                 mac->fec_ability =
1102                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1103                 break;
1104         case HCLGE_MAC_SPEED_25G:
1105         case HCLGE_MAC_SPEED_50G:
1106                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1107                                  mac->supported);
1108                 mac->fec_ability =
1109                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1110                         BIT(HNAE3_FEC_AUTO);
1111                 break;
1112         case HCLGE_MAC_SPEED_100G:
1113                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1114                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1115                 break;
1116         default:
1117                 mac->fec_ability = 0;
1118                 break;
1119         }
1120 }
1121
1122 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1123                                         u8 speed_ability)
1124 {
1125         struct hclge_mac *mac = &hdev->hw.mac;
1126
1127         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1128                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1129                                  mac->supported);
1130
1131         hclge_convert_setting_sr(mac, speed_ability);
1132         hclge_convert_setting_lr(mac, speed_ability);
1133         hclge_convert_setting_cr(mac, speed_ability);
1134         if (hdev->pdev->revision >= 0x21)
1135                 hclge_convert_setting_fec(mac);
1136
1137         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1138         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1139         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1140 }
1141
1142 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1143                                             u8 speed_ability)
1144 {
1145         struct hclge_mac *mac = &hdev->hw.mac;
1146
1147         hclge_convert_setting_kr(mac, speed_ability);
1148         if (hdev->pdev->revision >= 0x21)
1149                 hclge_convert_setting_fec(mac);
1150         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1151         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1152         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1153 }
1154
1155 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1156                                          u8 speed_ability)
1157 {
1158         unsigned long *supported = hdev->hw.mac.supported;
1159
1160         /* default to supporting all speeds for a GE port */
1161         if (!speed_ability)
1162                 speed_ability = HCLGE_SUPPORT_GE;
1163
1164         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1165                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1166                                  supported);
1167
1168         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1169                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1170                                  supported);
1171                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1172                                  supported);
1173         }
1174
1175         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1176                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1177                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1178         }
1179
1180         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1181         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1183         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1184 }
1185
1186 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1187 {
1188         u8 media_type = hdev->hw.mac.media_type;
1189
1190         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1191                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1192         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1193                 hclge_parse_copper_link_mode(hdev, speed_ability);
1194         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1195                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1196 }
1197
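/* return the highest speed set in speed_ability, defaulting to 1G */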
1198 static u32 hclge_get_max_speed(u8 speed_ability)
1199 {
1200         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1201                 return HCLGE_MAC_SPEED_100G;
1202
1203         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1204                 return HCLGE_MAC_SPEED_50G;
1205
1206         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1207                 return HCLGE_MAC_SPEED_40G;
1208
1209         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1210                 return HCLGE_MAC_SPEED_25G;
1211
1212         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1213                 return HCLGE_MAC_SPEED_10G;
1214
1215         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1216                 return HCLGE_MAC_SPEED_1G;
1217
1218         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1219                 return HCLGE_MAC_SPEED_100M;
1220
1221         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1222                 return HCLGE_MAC_SPEED_10M;
1223
1224         return HCLGE_MAC_SPEED_1G;
1225 }
1226
1227 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1228 {
1229         struct hclge_cfg_param_cmd *req;
1230         u64 mac_addr_tmp_high;
1231         u64 mac_addr_tmp;
1232         unsigned int i;
1233
1234         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1235
1236         /* get the configuration */
1237         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1238                                               HCLGE_CFG_VMDQ_M,
1239                                               HCLGE_CFG_VMDQ_S);
1240         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1241                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1242         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243                                             HCLGE_CFG_TQP_DESC_N_M,
1244                                             HCLGE_CFG_TQP_DESC_N_S);
1245
1246         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1247                                         HCLGE_CFG_PHY_ADDR_M,
1248                                         HCLGE_CFG_PHY_ADDR_S);
1249         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250                                           HCLGE_CFG_MEDIA_TP_M,
1251                                           HCLGE_CFG_MEDIA_TP_S);
1252         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1253                                           HCLGE_CFG_RX_BUF_LEN_M,
1254                                           HCLGE_CFG_RX_BUF_LEN_S);
1255         /* get mac_address */
1256         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1257         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1258                                             HCLGE_CFG_MAC_ADDR_H_M,
1259                                             HCLGE_CFG_MAC_ADDR_H_S);
1260
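        /* the low 32 bits of the MAC address come from param[2] and the high
         * 16 bits from param[3]; the two-step shift below is simply "<< 32"
         */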
1261         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1262
1263         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1264                                              HCLGE_CFG_DEFAULT_SPEED_M,
1265                                              HCLGE_CFG_DEFAULT_SPEED_S);
1266         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1267                                             HCLGE_CFG_RSS_SIZE_M,
1268                                             HCLGE_CFG_RSS_SIZE_S);
1269
1270         for (i = 0; i < ETH_ALEN; i++)
1271                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1272
1273         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1274         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1275
1276         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1277                                              HCLGE_CFG_SPEED_ABILITY_M,
1278                                              HCLGE_CFG_SPEED_ABILITY_S);
1279         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1281                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1282         if (!cfg->umv_space)
1283                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1284 }
1285
1286 /* hclge_get_cfg: query the static parameters from flash
1287  * @hdev: pointer to struct hclge_dev
1288  * @hcfg: the config structure to be filled
1289  */
1290 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1291 {
1292         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1293         struct hclge_cfg_param_cmd *req;
1294         unsigned int i;
1295         int ret;
1296
1297         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1298                 u32 offset = 0;
1299
1300                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1301                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1302                                            true);
1303                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1304                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1305                 /* Length should be in units of 4 bytes when sent to hardware */
1306                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1307                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1308                 req->offset = cpu_to_le32(offset);
1309         }
1310
1311         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1312         if (ret) {
1313                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1314                 return ret;
1315         }
1316
1317         hclge_parse_cfg(hcfg, desc);
1318
1319         return 0;
1320 }
1321
1322 static int hclge_get_cap(struct hclge_dev *hdev)
1323 {
1324         int ret;
1325
1326         ret = hclge_query_function_status(hdev);
1327         if (ret) {
1328                 dev_err(&hdev->pdev->dev,
1329                         "query function status error %d.\n", ret);
1330                 return ret;
1331         }
1332
1333         /* get pf resource */
1334         return hclge_query_pf_resource(hdev);
1335 }
1336
1337 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1338 {
1339 #define HCLGE_MIN_TX_DESC       64
1340 #define HCLGE_MIN_RX_DESC       64
1341
1342         if (!is_kdump_kernel())
1343                 return;
1344
1345         dev_info(&hdev->pdev->dev,
1346                  "Running kdump kernel. Using minimal resources\n");
1347
1348         /* the minimal number of queue pairs equals the number of vports */
1349         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1350         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1351         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1352 }
1353
1354 static int hclge_configure(struct hclge_dev *hdev)
1355 {
1356         struct hclge_cfg cfg;
1357         unsigned int i;
1358         int ret;
1359
1360         ret = hclge_get_cfg(hdev, &cfg);
1361         if (ret) {
1362                 dev_err(&hdev->pdev->dev, "get config error %d.\n", ret);
1363                 return ret;
1364         }
1365
1366         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1367         hdev->base_tqp_pid = 0;
1368         hdev->rss_size_max = cfg.rss_size_max;
1369         hdev->rx_buf_len = cfg.rx_buf_len;
1370         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1371         hdev->hw.mac.media_type = cfg.media_type;
1372         hdev->hw.mac.phy_addr = cfg.phy_addr;
1373         hdev->num_tx_desc = cfg.tqp_desc_num;
1374         hdev->num_rx_desc = cfg.tqp_desc_num;
1375         hdev->tm_info.num_pg = 1;
1376         hdev->tc_max = cfg.tc_num;
1377         hdev->tm_info.hw_pfc_map = 0;
1378         hdev->wanted_umv_size = cfg.umv_space;
1379
1380         if (hnae3_dev_fd_supported(hdev)) {
1381                 hdev->fd_en = true;
1382                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1383         }
1384
1385         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1386         if (ret) {
1387                 dev_err(&hdev->pdev->dev, "parse speed failed, ret = %d.\n", ret);
1388                 return ret;
1389         }
1390
1391         hclge_parse_link_mode(hdev, cfg.speed_ability);
1392
1393         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1394
1395         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1396             (hdev->tc_max < 1)) {
1397                 dev_warn(&hdev->pdev->dev, "invalid TC num = %u, set to 1.\n",
1398                          hdev->tc_max);
1399                 hdev->tc_max = 1;
1400         }
1401
1402         /* Dev does not support DCB */
1403         if (!hnae3_dev_dcb_supported(hdev)) {
1404                 hdev->tc_max = 1;
1405                 hdev->pfc_max = 0;
1406         } else {
1407                 hdev->pfc_max = hdev->tc_max;
1408         }
1409
1410         hdev->tm_info.num_tc = 1;
1411
1412         /* Non-contiguous TCs are currently not supported */
1413         for (i = 0; i < hdev->tm_info.num_tc; i++)
1414                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1415
1416         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1417
1418         hclge_init_kdump_kernel_config(hdev);
1419
1420         /* Set the initial affinity based on the PCI function number */
1421         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1422         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1423         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1424                         &hdev->affinity_mask);
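        /* e.g. assuming the local NUMA node has 8 CPUs, PCI function 3 is
         * pinned to the node's CPU at local index 3 % 8 = 3.
         */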
1425
1426         return ret;
1427 }
1428
1429 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1430                             unsigned int tso_mss_max)
1431 {
1432         struct hclge_cfg_tso_status_cmd *req;
1433         struct hclge_desc desc;
1434         u16 tso_mss;
1435
1436         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1437
1438         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1439
1440         tso_mss = 0;
1441         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1442                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1443         req->tso_mss_min = cpu_to_le16(tso_mss);
1444
1445         tso_mss = 0;
1446         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1447                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1448         req->tso_mss_max = cpu_to_le16(tso_mss);
1449
1450         return hclge_cmd_send(&hdev->hw, &desc, 1);
1451 }
1452
1453 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1454 {
1455         struct hclge_cfg_gro_status_cmd *req;
1456         struct hclge_desc desc;
1457         int ret;
1458
1459         if (!hnae3_dev_gro_supported(hdev))
1460                 return 0;
1461
1462         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1463         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1464
1465         req->gro_en = cpu_to_le16(en ? 1 : 0);
1466
1467         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1468         if (ret)
1469                 dev_err(&hdev->pdev->dev,
1470                         "GRO hardware config cmd failed, ret = %d\n", ret);
1471
1472         return ret;
1473 }
1474
1475 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1476 {
1477         struct hclge_tqp *tqp;
1478         int i;
1479
1480         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1481                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1482         if (!hdev->htqp)
1483                 return -ENOMEM;
1484
1485         tqp = hdev->htqp;
1486
1487         for (i = 0; i < hdev->num_tqps; i++) {
1488                 tqp->dev = &hdev->pdev->dev;
1489                 tqp->index = i;
1490
1491                 tqp->q.ae_algo = &ae_algo;
1492                 tqp->q.buf_size = hdev->rx_buf_len;
1493                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1494                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1495                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1496                         i * HCLGE_TQP_REG_SIZE;
1497
1498                 tqp++;
1499         }
1500
1501         return 0;
1502 }
1503
1504 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1505                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1506 {
1507         struct hclge_tqp_map_cmd *req;
1508         struct hclge_desc desc;
1509         int ret;
1510
1511         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1512
1513         req = (struct hclge_tqp_map_cmd *)desc.data;
1514         req->tqp_id = cpu_to_le16(tqp_pid);
1515         req->tqp_vf = func_id;
1516         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1517         if (!is_pf)
1518                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1519         req->tqp_vid = cpu_to_le16(tqp_vid);
1520
1521         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1522         if (ret)
1523                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1524
1525         return ret;
1526 }
1527
1528 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1529 {
1530         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1531         struct hclge_dev *hdev = vport->back;
1532         int i, alloced;
1533
1534         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1535              alloced < num_tqps; i++) {
1536                 if (!hdev->htqp[i].alloced) {
1537                         hdev->htqp[i].q.handle = &vport->nic;
1538                         hdev->htqp[i].q.tqp_index = alloced;
1539                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1540                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1541                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1542                         hdev->htqp[i].alloced = true;
1543                         alloced++;
1544                 }
1545         }
1546         vport->alloc_tqps = alloced;
1547         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1548                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1549
1550         /* ensure a one-to-one mapping between IRQ and queue by default */
1551         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1552                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
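        /* e.g. assuming 16 allocated TQPs, rss_size_max = 64, one TC and
         * 33 NIC MSI vectors: rss_size = min(64, 16 / 1, (33 - 1) / 1) = 16.
         */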
1553
1554         return 0;
1555 }
1556
1557 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1558                             u16 num_tx_desc, u16 num_rx_desc)
1559
1560 {
1561         struct hnae3_handle *nic = &vport->nic;
1562         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1563         struct hclge_dev *hdev = vport->back;
1564         int ret;
1565
1566         kinfo->num_tx_desc = num_tx_desc;
1567         kinfo->num_rx_desc = num_rx_desc;
1568
1569         kinfo->rx_buf_len = hdev->rx_buf_len;
1570
1571         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1572                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1573         if (!kinfo->tqp)
1574                 return -ENOMEM;
1575
1576         ret = hclge_assign_tqp(vport, num_tqps);
1577         if (ret)
1578                 dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1579
1580         return ret;
1581 }
1582
1583 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1584                                   struct hclge_vport *vport)
1585 {
1586         struct hnae3_handle *nic = &vport->nic;
1587         struct hnae3_knic_private_info *kinfo;
1588         u16 i;
1589
1590         kinfo = &nic->kinfo;
1591         for (i = 0; i < vport->alloc_tqps; i++) {
1592                 struct hclge_tqp *q =
1593                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1594                 bool is_pf;
1595                 int ret;
1596
1597                 is_pf = !(vport->vport_id);
1598                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1599                                              i, is_pf);
1600                 if (ret)
1601                         return ret;
1602         }
1603
1604         return 0;
1605 }
1606
1607 static int hclge_map_tqp(struct hclge_dev *hdev)
1608 {
1609         struct hclge_vport *vport = hdev->vport;
1610         u16 i, num_vport;
1611
1612         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1613         for (i = 0; i < num_vport; i++) {
1614                 int ret;
1615
1616                 ret = hclge_map_tqp_to_vport(hdev, vport);
1617                 if (ret)
1618                         return ret;
1619
1620                 vport++;
1621         }
1622
1623         return 0;
1624 }
1625
1626 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1627 {
1628         struct hnae3_handle *nic = &vport->nic;
1629         struct hclge_dev *hdev = vport->back;
1630         int ret;
1631
1632         nic->pdev = hdev->pdev;
1633         nic->ae_algo = &ae_algo;
1634         nic->numa_node_mask = hdev->numa_node_mask;
1635
1636         ret = hclge_knic_setup(vport, num_tqps,
1637                                hdev->num_tx_desc, hdev->num_rx_desc);
1638         if (ret)
1639                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1640
1641         return ret;
1642 }
1643
1644 static int hclge_alloc_vport(struct hclge_dev *hdev)
1645 {
1646         struct pci_dev *pdev = hdev->pdev;
1647         struct hclge_vport *vport;
1648         u32 tqp_main_vport;
1649         u32 tqp_per_vport;
1650         int num_vport, i;
1651         int ret;
1652
1653         /* We need to alloc a vport for the main NIC of the PF */
1654         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1655
1656         if (hdev->num_tqps < num_vport) {
1657                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1658                         hdev->num_tqps, num_vport);
1659                 return -EINVAL;
1660         }
1661
1662         /* Alloc the same number of TQPs for every vport */
1663         tqp_per_vport = hdev->num_tqps / num_vport;
1664         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1665
1666         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1667                              GFP_KERNEL);
1668         if (!vport)
1669                 return -ENOMEM;
1670
1671         hdev->vport = vport;
1672         hdev->num_alloc_vport = num_vport;
1673
1674         if (IS_ENABLED(CONFIG_PCI_IOV))
1675                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1676
1677         for (i = 0; i < num_vport; i++) {
1678                 vport->back = hdev;
1679                 vport->vport_id = i;
1680                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1681                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1682                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1683                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1684                 INIT_LIST_HEAD(&vport->vlan_list);
1685                 INIT_LIST_HEAD(&vport->uc_mac_list);
1686                 INIT_LIST_HEAD(&vport->mc_mac_list);
1687
1688                 if (i == 0)
1689                         ret = hclge_vport_setup(vport, tqp_main_vport);
1690                 else
1691                         ret = hclge_vport_setup(vport, tqp_per_vport);
1692                 if (ret) {
1693                         dev_err(&pdev->dev,
1694                                 "vport setup failed for vport %d, %d\n",
1695                                 i, ret);
1696                         return ret;
1697                 }
1698
1699                 vport++;
1700         }
1701
1702         return 0;
1703 }
1704
1705 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1706                                     struct hclge_pkt_buf_alloc *buf_alloc)
1707 {
1708 /* TX buffer size is in units of 128 bytes */
1709 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1710 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
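/* e.g. a 32768-byte TC buffer is encoded as (32768 >> 7) | BIT(15) = 0x8100 */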
1711         struct hclge_tx_buff_alloc_cmd *req;
1712         struct hclge_desc desc;
1713         int ret;
1714         u8 i;
1715
1716         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1717
1718         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1719         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1720                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1721
1722                 req->tx_pkt_buff[i] =
1723                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1724                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1725         }
1726
1727         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1728         if (ret)
1729                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1730                         ret);
1731
1732         return ret;
1733 }
1734
1735 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1736                                  struct hclge_pkt_buf_alloc *buf_alloc)
1737 {
1738         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1739
1740         if (ret)
1741                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1742
1743         return ret;
1744 }
1745
1746 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1747 {
1748         unsigned int i;
1749         u32 cnt = 0;
1750
1751         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1752                 if (hdev->hw_tc_map & BIT(i))
1753                         cnt++;
1754         return cnt;
1755 }
1756
1757 /* Get the number of PFC-enabled TCs that have a private buffer */
1758 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1759                                   struct hclge_pkt_buf_alloc *buf_alloc)
1760 {
1761         struct hclge_priv_buf *priv;
1762         unsigned int i;
1763         int cnt = 0;
1764
1765         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1766                 priv = &buf_alloc->priv_buf[i];
1767                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1768                     priv->enable)
1769                         cnt++;
1770         }
1771
1772         return cnt;
1773 }
1774
1775 /* Get the number of PFC-disabled TCs that have a private buffer */
1776 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1777                                      struct hclge_pkt_buf_alloc *buf_alloc)
1778 {
1779         struct hclge_priv_buf *priv;
1780         unsigned int i;
1781         int cnt = 0;
1782
1783         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1784                 priv = &buf_alloc->priv_buf[i];
1785                 if (hdev->hw_tc_map & BIT(i) &&
1786                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1787                     priv->enable)
1788                         cnt++;
1789         }
1790
1791         return cnt;
1792 }
1793
1794 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1795 {
1796         struct hclge_priv_buf *priv;
1797         u32 rx_priv = 0;
1798         int i;
1799
1800         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1801                 priv = &buf_alloc->priv_buf[i];
1802                 if (priv->enable)
1803                         rx_priv += priv->buf_size;
1804         }
1805         return rx_priv;
1806 }
1807
1808 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1809 {
1810         u32 i, total_tx_size = 0;
1811
1812         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1813                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1814
1815         return total_tx_size;
1816 }
1817
1818 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1819                                 struct hclge_pkt_buf_alloc *buf_alloc,
1820                                 u32 rx_all)
1821 {
1822         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1823         u32 tc_num = hclge_get_tc_num(hdev);
1824         u32 shared_buf, aligned_mps;
1825         u32 rx_priv;
1826         int i;
1827
1828         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
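        /* e.g. with HCLGE_BUF_SIZE_UNIT = 256 and an MPS of 1500 bytes,
         * aligned_mps = roundup(1500, 256) = 1536.
         */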
1829
1830         if (hnae3_dev_dcb_supported(hdev))
1831                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1832                                         hdev->dv_buf_size;
1833         else
1834                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1835                                         + hdev->dv_buf_size;
1836
1837         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1838         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1839                              HCLGE_BUF_SIZE_UNIT);
1840
1841         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1842         if (rx_all < rx_priv + shared_std)
1843                 return false;
1844
1845         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1846         buf_alloc->s_buf.buf_size = shared_buf;
1847         if (hnae3_dev_dcb_supported(hdev)) {
1848                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1849                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1850                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1851                                   HCLGE_BUF_SIZE_UNIT);
1852         } else {
1853                 buf_alloc->s_buf.self.high = aligned_mps +
1854                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1855                 buf_alloc->s_buf.self.low = aligned_mps;
1856         }
1857
1858         if (hnae3_dev_dcb_supported(hdev)) {
1859                 hi_thrd = shared_buf - hdev->dv_buf_size;
1860
1861                 if (tc_num <= NEED_RESERVE_TC_NUM)
1862                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1863                                         / BUF_MAX_PERCENT;
1864
1865                 if (tc_num)
1866                         hi_thrd = hi_thrd / tc_num;
1867
1868                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1869                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1870                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1871         } else {
1872                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1873                 lo_thrd = aligned_mps;
1874         }
1875
1876         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1877                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1878                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1879         }
1880
1881         return true;
1882 }
1883
1884 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1885                                 struct hclge_pkt_buf_alloc *buf_alloc)
1886 {
1887         u32 i, total_size;
1888
1889         total_size = hdev->pkt_buf_size;
1890
1891         /* alloc tx buffer for all enabled tc */
1892         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1893                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1894
1895                 if (hdev->hw_tc_map & BIT(i)) {
1896                         if (total_size < hdev->tx_buf_size)
1897                                 return -ENOMEM;
1898
1899                         priv->tx_buf_size = hdev->tx_buf_size;
1900                 } else {
1901                         priv->tx_buf_size = 0;
1902                 }
1903
1904                 total_size -= priv->tx_buf_size;
1905         }
1906
1907         return 0;
1908 }
1909
1910 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1911                                   struct hclge_pkt_buf_alloc *buf_alloc)
1912 {
1913         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1914         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1915         unsigned int i;
1916
1917         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1918                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1919
1920                 priv->enable = 0;
1921                 priv->wl.low = 0;
1922                 priv->wl.high = 0;
1923                 priv->buf_size = 0;
1924
1925                 if (!(hdev->hw_tc_map & BIT(i)))
1926                         continue;
1927
1928                 priv->enable = 1;
1929
1930                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1931                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1932                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1933                                                 HCLGE_BUF_SIZE_UNIT);
1934                 } else {
1935                         priv->wl.low = 0;
1936                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1937                                         aligned_mps;
1938                 }
1939
1940                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1941         }
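        /* e.g. with aligned_mps = 1536 and max = true, a PFC-enabled TC gets
         * wl.low = 1536, wl.high = roundup(1536 + 1536, 256) = 3072 and
         * buf_size = 3072 + dv_buf_size.
         */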
1942
1943         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1944 }
1945
1946 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1947                                           struct hclge_pkt_buf_alloc *buf_alloc)
1948 {
1949         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1950         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1951         int i;
1952
1953         /* clear the highest-numbered TCs first */
1954         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1955                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1956                 unsigned int mask = BIT((unsigned int)i);
1957
1958                 if (hdev->hw_tc_map & mask &&
1959                     !(hdev->tm_info.hw_pfc_map & mask)) {
1960                         /* Clear the private buffer of this non-PFC TC */
1961                         priv->wl.low = 0;
1962                         priv->wl.high = 0;
1963                         priv->buf_size = 0;
1964                         priv->enable = 0;
1965                         no_pfc_priv_num--;
1966                 }
1967
1968                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1969                     no_pfc_priv_num == 0)
1970                         break;
1971         }
1972
1973         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1974 }
1975
1976 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1977                                         struct hclge_pkt_buf_alloc *buf_alloc)
1978 {
1979         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1980         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1981         int i;
1982
1983         /* clear the highest-numbered TCs first */
1984         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1985                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1986                 unsigned int mask = BIT((unsigned int)i);
1987
1988                 if (hdev->hw_tc_map & mask &&
1989                     hdev->tm_info.hw_pfc_map & mask) {
1990                         /* Reduce the number of PFC TCs with a private buffer */
1991                         priv->wl.low = 0;
1992                         priv->enable = 0;
1993                         priv->wl.high = 0;
1994                         priv->buf_size = 0;
1995                         pfc_priv_num--;
1996                 }
1997
1998                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1999                     pfc_priv_num == 0)
2000                         break;
2001         }
2002
2003         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2004 }
2005
2006 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2007                                       struct hclge_pkt_buf_alloc *buf_alloc)
2008 {
2009 #define COMPENSATE_BUFFER       0x3C00
2010 #define COMPENSATE_HALF_MPS_NUM 5
2011 #define PRIV_WL_GAP             0x1800
2012
2013         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2014         u32 tc_num = hclge_get_tc_num(hdev);
2015         u32 half_mps = hdev->mps >> 1;
2016         u32 min_rx_priv;
2017         unsigned int i;
2018
2019         if (tc_num)
2020                 rx_priv = rx_priv / tc_num;
2021
2022         if (tc_num <= NEED_RESERVE_TC_NUM)
2023                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2024
2025         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2026                         COMPENSATE_HALF_MPS_NUM * half_mps;
2027         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2028         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
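        /* e.g. two active TCs sharing 204800 bytes of remaining buffer:
         * rx_priv = 204800 / 2 = 102400, scaled to 90% = 92160, which is
         * already a multiple of HCLGE_BUF_SIZE_UNIT.
         */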
2029
2030         if (rx_priv < min_rx_priv)
2031                 return false;
2032
2033         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2034                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2035
2036                 priv->enable = 0;
2037                 priv->wl.low = 0;
2038                 priv->wl.high = 0;
2039                 priv->buf_size = 0;
2040
2041                 if (!(hdev->hw_tc_map & BIT(i)))
2042                         continue;
2043
2044                 priv->enable = 1;
2045                 priv->buf_size = rx_priv;
2046                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2047                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2048         }
2049
2050         buf_alloc->s_buf.buf_size = 0;
2051
2052         return true;
2053 }
2054
2055 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2056  * @hdev: pointer to struct hclge_dev
2057  * @buf_alloc: pointer to buffer calculation data
2058  * @return: 0: calculation successful, negative: failure
2059  */
2060 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2061                                 struct hclge_pkt_buf_alloc *buf_alloc)
2062 {
2063         /* When DCB is not supported, rx private buffer is not allocated. */
2064         if (!hnae3_dev_dcb_supported(hdev)) {
2065                 u32 rx_all = hdev->pkt_buf_size;
2066
2067                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2068                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2069                         return -ENOMEM;
2070
2071                 return 0;
2072         }
2073
2074         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2075                 return 0;
2076
2077         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2078                 return 0;
2079
2080         /* try to decrease the buffer size */
2081         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2082                 return 0;
2083
2084         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2085                 return 0;
2086
2087         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2088                 return 0;
2089
2090         return -ENOMEM;
2091 }
2092
2093 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2094                                    struct hclge_pkt_buf_alloc *buf_alloc)
2095 {
2096         struct hclge_rx_priv_buff_cmd *req;
2097         struct hclge_desc desc;
2098         int ret;
2099         int i;
2100
2101         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2102         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2103
2104         /* Alloc private buffer TCs */
2105         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2106                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2107
2108                 req->buf_num[i] =
2109                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2110                 req->buf_num[i] |=
2111                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2112         }
2113
2114         req->shared_buf =
2115                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2116                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2117
2118         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2119         if (ret)
2120                 dev_err(&hdev->pdev->dev,
2121                         "rx private buffer alloc cmd failed %d\n", ret);
2122
2123         return ret;
2124 }
2125
2126 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2127                                    struct hclge_pkt_buf_alloc *buf_alloc)
2128 {
2129         struct hclge_rx_priv_wl_buf *req;
2130         struct hclge_priv_buf *priv;
2131         struct hclge_desc desc[2];
2132         int i, j;
2133         int ret;
2134
2135         for (i = 0; i < 2; i++) {
2136                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2137                                            false);
2138                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2139
2140                 /* The first descriptor sets the NEXT bit to 1 */
2141                 if (i == 0)
2142                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2143                 else
2144                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2145
2146                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2147                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2148
2149                         priv = &buf_alloc->priv_buf[idx];
2150                         req->tc_wl[j].high =
2151                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2152                         req->tc_wl[j].high |=
2153                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2154                         req->tc_wl[j].low =
2155                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2156                         req->tc_wl[j].low |=
2157                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2158                 }
2159         }
2160
2161         /* Send 2 descriptors at one time */
2162         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2163         if (ret)
2164                 dev_err(&hdev->pdev->dev,
2165                         "rx private waterline config cmd failed %d\n",
2166                         ret);
2167         return ret;
2168 }
2169
2170 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2171                                     struct hclge_pkt_buf_alloc *buf_alloc)
2172 {
2173         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2174         struct hclge_rx_com_thrd *req;
2175         struct hclge_desc desc[2];
2176         struct hclge_tc_thrd *tc;
2177         int i, j;
2178         int ret;
2179
2180         for (i = 0; i < 2; i++) {
2181                 hclge_cmd_setup_basic_desc(&desc[i],
2182                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2183                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2184
2185                 /* The first descriptor sets the NEXT bit to 1 */
2186                 if (i == 0)
2187                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2188                 else
2189                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2190
2191                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2192                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2193
2194                         req->com_thrd[j].high =
2195                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2196                         req->com_thrd[j].high |=
2197                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2198                         req->com_thrd[j].low =
2199                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2200                         req->com_thrd[j].low |=
2201                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2202                 }
2203         }
2204
2205         /* Send 2 descriptors at one time */
2206         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2207         if (ret)
2208                 dev_err(&hdev->pdev->dev,
2209                         "common threshold config cmd failed %d\n", ret);
2210         return ret;
2211 }
2212
2213 static int hclge_common_wl_config(struct hclge_dev *hdev,
2214                                   struct hclge_pkt_buf_alloc *buf_alloc)
2215 {
2216         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2217         struct hclge_rx_com_wl *req;
2218         struct hclge_desc desc;
2219         int ret;
2220
2221         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2222
2223         req = (struct hclge_rx_com_wl *)desc.data;
2224         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2225         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2226
2227         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2228         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2229
2230         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2231         if (ret)
2232                 dev_err(&hdev->pdev->dev,
2233                         "common waterline config cmd failed %d\n", ret);
2234
2235         return ret;
2236 }
2237
2238 int hclge_buffer_alloc(struct hclge_dev *hdev)
2239 {
2240         struct hclge_pkt_buf_alloc *pkt_buf;
2241         int ret;
2242
2243         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2244         if (!pkt_buf)
2245                 return -ENOMEM;
2246
2247         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2248         if (ret) {
2249                 dev_err(&hdev->pdev->dev,
2250                         "could not calc tx buffer size for all TCs %d\n", ret);
2251                 goto out;
2252         }
2253
2254         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2255         if (ret) {
2256                 dev_err(&hdev->pdev->dev,
2257                         "could not alloc tx buffers %d\n", ret);
2258                 goto out;
2259         }
2260
2261         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2262         if (ret) {
2263                 dev_err(&hdev->pdev->dev,
2264                         "could not calc rx priv buffer size for all TCs %d\n",
2265                         ret);
2266                 goto out;
2267         }
2268
2269         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2270         if (ret) {
2271                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2272                         ret);
2273                 goto out;
2274         }
2275
2276         if (hnae3_dev_dcb_supported(hdev)) {
2277                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2278                 if (ret) {
2279                         dev_err(&hdev->pdev->dev,
2280                                 "could not configure rx private waterline %d\n",
2281                                 ret);
2282                         goto out;
2283                 }
2284
2285                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2286                 if (ret) {
2287                         dev_err(&hdev->pdev->dev,
2288                                 "could not configure common threshold %d\n",
2289                                 ret);
2290                         goto out;
2291                 }
2292         }
2293
2294         ret = hclge_common_wl_config(hdev, pkt_buf);
2295         if (ret)
2296                 dev_err(&hdev->pdev->dev,
2297                         "could not configure common waterline %d\n", ret);
2298
2299 out:
2300         kfree(pkt_buf);
2301         return ret;
2302 }
2303
2304 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2305 {
2306         struct hnae3_handle *roce = &vport->roce;
2307         struct hnae3_handle *nic = &vport->nic;
2308
2309         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2310
2311         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2312             vport->back->num_msi_left == 0)
2313                 return -EINVAL;
2314
2315         roce->rinfo.base_vector = vport->back->roce_base_vector;
2316
2317         roce->rinfo.netdev = nic->kinfo.netdev;
2318         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2319
2320         roce->pdev = nic->pdev;
2321         roce->ae_algo = nic->ae_algo;
2322         roce->numa_node_mask = nic->numa_node_mask;
2323
2324         return 0;
2325 }
2326
2327 static int hclge_init_msi(struct hclge_dev *hdev)
2328 {
2329         struct pci_dev *pdev = hdev->pdev;
2330         int vectors;
2331         int i;
2332
2333         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2334                                         hdev->num_msi,
2335                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2336         if (vectors < 0) {
2337                 dev_err(&pdev->dev,
2338                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2339                         vectors);
2340                 return vectors;
2341         }
2342         if (vectors < hdev->num_msi)
2343                 dev_warn(&hdev->pdev->dev,
2344                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2345                          hdev->num_msi, vectors);
2346
2347         hdev->num_msi = vectors;
2348         hdev->num_msi_left = vectors;
2349
2350         hdev->base_msi_vector = pdev->irq;
2351         hdev->roce_base_vector = hdev->base_msi_vector +
2352                                 hdev->roce_base_msix_offset;
2353
2354         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2355                                            sizeof(u16), GFP_KERNEL);
2356         if (!hdev->vector_status) {
2357                 pci_free_irq_vectors(pdev);
2358                 return -ENOMEM;
2359         }
2360
2361         for (i = 0; i < hdev->num_msi; i++)
2362                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2363
2364         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2365                                         sizeof(int), GFP_KERNEL);
2366         if (!hdev->vector_irq) {
2367                 pci_free_irq_vectors(pdev);
2368                 return -ENOMEM;
2369         }
2370
2371         return 0;
2372 }
2373
2374 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2375 {
2376         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2377                 duplex = HCLGE_MAC_FULL;
2378
2379         return duplex;
2380 }
2381
2382 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2383                                       u8 duplex)
2384 {
2385         struct hclge_config_mac_speed_dup_cmd *req;
2386         struct hclge_desc desc;
2387         int ret;
2388
2389         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2390
2391         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2392
2393         if (duplex)
2394                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2395
2396         switch (speed) {
2397         case HCLGE_MAC_SPEED_10M:
2398                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2399                                 HCLGE_CFG_SPEED_S, 6);
2400                 break;
2401         case HCLGE_MAC_SPEED_100M:
2402                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403                                 HCLGE_CFG_SPEED_S, 7);
2404                 break;
2405         case HCLGE_MAC_SPEED_1G:
2406                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407                                 HCLGE_CFG_SPEED_S, 0);
2408                 break;
2409         case HCLGE_MAC_SPEED_10G:
2410                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411                                 HCLGE_CFG_SPEED_S, 1);
2412                 break;
2413         case HCLGE_MAC_SPEED_25G:
2414                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415                                 HCLGE_CFG_SPEED_S, 2);
2416                 break;
2417         case HCLGE_MAC_SPEED_40G:
2418                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419                                 HCLGE_CFG_SPEED_S, 3);
2420                 break;
2421         case HCLGE_MAC_SPEED_50G:
2422                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423                                 HCLGE_CFG_SPEED_S, 4);
2424                 break;
2425         case HCLGE_MAC_SPEED_100G:
2426                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427                                 HCLGE_CFG_SPEED_S, 5);
2428                 break;
2429         default:
2430                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2431                 return -EINVAL;
2432         }
2433
2434         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2435                       1);
2436
2437         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2438         if (ret) {
2439                 dev_err(&hdev->pdev->dev,
2440                         "mac speed/duplex config cmd failed %d.\n", ret);
2441                 return ret;
2442         }
2443
2444         return 0;
2445 }
2446
2447 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2448 {
2449         struct hclge_mac *mac = &hdev->hw.mac;
2450         int ret;
2451
2452         duplex = hclge_check_speed_dup(duplex, speed);
2453         if (!mac->support_autoneg && mac->speed == speed &&
2454             mac->duplex == duplex)
2455                 return 0;
2456
2457         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2458         if (ret)
2459                 return ret;
2460
2461         hdev->hw.mac.speed = speed;
2462         hdev->hw.mac.duplex = duplex;
2463
2464         return 0;
2465 }
2466
2467 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2468                                      u8 duplex)
2469 {
2470         struct hclge_vport *vport = hclge_get_vport(handle);
2471         struct hclge_dev *hdev = vport->back;
2472
2473         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2474 }
2475
2476 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2477 {
2478         struct hclge_config_auto_neg_cmd *req;
2479         struct hclge_desc desc;
2480         u32 flag = 0;
2481         int ret;
2482
2483         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2484
2485         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2486         if (enable)
2487                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2488         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2489
2490         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2491         if (ret)
2492                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2493                         ret);
2494
2495         return ret;
2496 }
2497
2498 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2499 {
2500         struct hclge_vport *vport = hclge_get_vport(handle);
2501         struct hclge_dev *hdev = vport->back;
2502
2503         if (!hdev->hw.mac.support_autoneg) {
2504                 if (enable) {
2505                         dev_err(&hdev->pdev->dev,
2506                                 "autoneg is not supported by current port\n");
2507                         return -EOPNOTSUPP;
2508                 } else {
2509                         return 0;
2510                 }
2511         }
2512
2513         return hclge_set_autoneg_en(hdev, enable);
2514 }
2515
2516 static int hclge_get_autoneg(struct hnae3_handle *handle)
2517 {
2518         struct hclge_vport *vport = hclge_get_vport(handle);
2519         struct hclge_dev *hdev = vport->back;
2520         struct phy_device *phydev = hdev->hw.mac.phydev;
2521
2522         if (phydev)
2523                 return phydev->autoneg;
2524
2525         return hdev->hw.mac.autoneg;
2526 }
2527
2528 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2529 {
2530         struct hclge_vport *vport = hclge_get_vport(handle);
2531         struct hclge_dev *hdev = vport->back;
2532         int ret;
2533
2534         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2535
2536         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2537         if (ret)
2538                 return ret;
2539         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2540 }
2541
2542 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2543 {
2544         struct hclge_vport *vport = hclge_get_vport(handle);
2545         struct hclge_dev *hdev = vport->back;
2546
2547         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2548                 return hclge_set_autoneg_en(hdev, !halt);
2549
2550         return 0;
2551 }
2552
2553 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2554 {
2555         struct hclge_config_fec_cmd *req;
2556         struct hclge_desc desc;
2557         int ret;
2558
2559         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2560
2561         req = (struct hclge_config_fec_cmd *)desc.data;
2562         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2563                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2564         if (fec_mode & BIT(HNAE3_FEC_RS))
2565                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2566                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2567         if (fec_mode & BIT(HNAE3_FEC_BASER))
2568                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2569                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2570
2571         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572         if (ret)
2573                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2574
2575         return ret;
2576 }
2577
2578 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2579 {
2580         struct hclge_vport *vport = hclge_get_vport(handle);
2581         struct hclge_dev *hdev = vport->back;
2582         struct hclge_mac *mac = &hdev->hw.mac;
2583         int ret;
2584
2585         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2586                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2587                 return -EINVAL;
2588         }
2589
2590         ret = hclge_set_fec_hw(hdev, fec_mode);
2591         if (ret)
2592                 return ret;
2593
2594         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2595         return 0;
2596 }
2597
2598 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2599                           u8 *fec_mode)
2600 {
2601         struct hclge_vport *vport = hclge_get_vport(handle);
2602         struct hclge_dev *hdev = vport->back;
2603         struct hclge_mac *mac = &hdev->hw.mac;
2604
2605         if (fec_ability)
2606                 *fec_ability = mac->fec_ability;
2607         if (fec_mode)
2608                 *fec_mode = mac->fec_mode;
2609 }
2610
2611 static int hclge_mac_init(struct hclge_dev *hdev)
2612 {
2613         struct hclge_mac *mac = &hdev->hw.mac;
2614         int ret;
2615
2616         hdev->support_sfp_query = true;
2617         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2618         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2619                                          hdev->hw.mac.duplex);
2620         if (ret)
2621                 return ret;
2622
2623         if (hdev->hw.mac.support_autoneg) {
2624                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2625                 if (ret)
2626                         return ret;
2627         }
2628
2629         mac->link = 0;
2630
2631         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2632                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2633                 if (ret)
2634                         return ret;
2635         }
2636
2637         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2638         if (ret) {
2639                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2640                 return ret;
2641         }
2642
2643         ret = hclge_set_default_loopback(hdev);
2644         if (ret)
2645                 return ret;
2646
2647         ret = hclge_buffer_alloc(hdev);
2648         if (ret)
2649                 dev_err(&hdev->pdev->dev,
2650                         "allocate buffer fail, ret=%d\n", ret);
2651
2652         return ret;
2653 }
2654
2655 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2656 {
2657         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2658             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2659                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2660                                     hclge_wq, &hdev->service_task, 0);
2661 }
2662
2663 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2664 {
2665         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2666             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2667                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2668                                     hclge_wq, &hdev->service_task, 0);
2669 }
2670
2671 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2672 {
2673         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2674             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2675                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2676                                     hclge_wq, &hdev->service_task,
2677                                     delay_time);
2678 }
2679
2680 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2681 {
2682         struct hclge_link_status_cmd *req;
2683         struct hclge_desc desc;
2684         int link_status;
2685         int ret;
2686
2687         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2688         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2689         if (ret) {
2690                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2691                         ret);
2692                 return ret;
2693         }
2694
2695         req = (struct hclge_link_status_cmd *)desc.data;
2696         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2697
2698         return !!link_status;
2699 }
2700
2701 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2702 {
2703         unsigned int mac_state;
2704         int link_stat;
2705
2706         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2707                 return 0;
2708
2709         mac_state = hclge_get_mac_link_status(hdev);
2710
2711         if (hdev->hw.mac.phydev) {
2712                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2713                         link_stat = mac_state &
2714                                 hdev->hw.mac.phydev->link;
2715                 else
2716                         link_stat = 0;
2717
2718         } else {
2719                 link_stat = mac_state;
2720         }
2721
2722         return !!link_stat;
2723 }
2724
2725 static void hclge_update_link_status(struct hclge_dev *hdev)
2726 {
2727         struct hnae3_client *rclient = hdev->roce_client;
2728         struct hnae3_client *client = hdev->nic_client;
2729         struct hnae3_handle *rhandle;
2730         struct hnae3_handle *handle;
2731         int state;
2732         int i;
2733
2734         if (!client)
2735                 return;
2736
2737         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2738                 return;
2739
2740         state = hclge_get_mac_phy_link(hdev);
2741         if (state != hdev->hw.mac.link) {
2742                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2743                         handle = &hdev->vport[i].nic;
2744                         client->ops->link_status_change(handle, state);
2745                         hclge_config_mac_tnl_int(hdev, state);
2746                         rhandle = &hdev->vport[i].roce;
2747                         if (rclient && rclient->ops->link_status_change)
2748                                 rclient->ops->link_status_change(rhandle,
2749                                                                  state);
2750                 }
2751                 hdev->hw.mac.link = state;
2752         }
2753
2754         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2755 }
2756
2757 static void hclge_update_port_capability(struct hclge_mac *mac)
2758 {
2759         /* update fec ability by speed */
2760         hclge_convert_setting_fec(mac);
2761
2762         /* firmware cannot identify the backplane type; the media type
2763          * read from the configuration helps to handle it
2764          */
2765         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2766             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2767                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2768         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2769                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2770
2771         if (mac->support_autoneg) {
2772                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2773                 linkmode_copy(mac->advertising, mac->supported);
2774         } else {
2775                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2776                                    mac->supported);
2777                 linkmode_zero(mac->advertising);
2778         }
2779 }
2780
2781 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2782 {
2783         struct hclge_sfp_info_cmd *resp;
2784         struct hclge_desc desc;
2785         int ret;
2786
2787         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2788         resp = (struct hclge_sfp_info_cmd *)desc.data;
2789         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2790         if (ret == -EOPNOTSUPP) {
2791                 dev_warn(&hdev->pdev->dev,
2792                          "IMP does not support get SFP speed %d\n", ret);
2793                 return ret;
2794         } else if (ret) {
2795                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2796                 return ret;
2797         }
2798
2799         *speed = le32_to_cpu(resp->speed);
2800
2801         return 0;
2802 }
2803
2804 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2805 {
2806         struct hclge_sfp_info_cmd *resp;
2807         struct hclge_desc desc;
2808         int ret;
2809
2810         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2811         resp = (struct hclge_sfp_info_cmd *)desc.data;
2812
2813         resp->query_type = QUERY_ACTIVE_SPEED;
2814
2815         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2816         if (ret == -EOPNOTSUPP) {
2817                 dev_warn(&hdev->pdev->dev,
2818                          "IMP does not support get SFP info %d\n", ret);
2819                 return ret;
2820         } else if (ret) {
2821                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2822                 return ret;
2823         }
2824
2825         /* In some cases, the mac speed got from IMP may be 0, it shouldn't be
2826          * set to mac->speed.
2827          */
2828         if (!le32_to_cpu(resp->speed))
2829                 return 0;
2830
2831         mac->speed = le32_to_cpu(resp->speed);
2832         /* if resp->speed_ability is 0, it means the firmware is an old
2833          * version, so do not update these params
2834          */
2835         if (resp->speed_ability) {
2836                 mac->module_type = le32_to_cpu(resp->module_type);
2837                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2838                 mac->autoneg = resp->autoneg;
2839                 mac->support_autoneg = resp->autoneg_ability;
2840                 mac->speed_type = QUERY_ACTIVE_SPEED;
2841                 if (!resp->active_fec)
2842                         mac->fec_mode = 0;
2843                 else
2844                         mac->fec_mode = BIT(resp->active_fec);
2845         } else {
2846                 mac->speed_type = QUERY_SFP_SPEED;
2847         }
2848
2849         return 0;
2850 }
2851
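/* Refresh the port speed/capability for non-copper ports: revision 0x21 and
 * later can query the active SFP info, while older revisions only report the
 * SFP speed, which is then configured with full duplex.
 */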
2852 static int hclge_update_port_info(struct hclge_dev *hdev)
2853 {
2854         struct hclge_mac *mac = &hdev->hw.mac;
2855         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2856         int ret;
2857
2858         /* get the port info from SFP cmd if not copper port */
2859         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2860                 return 0;
2861
2862         /* if IMP does not support get SFP/qSFP info, return directly */
2863         if (!hdev->support_sfp_query)
2864                 return 0;
2865
2866         if (hdev->pdev->revision >= 0x21)
2867                 ret = hclge_get_sfp_info(hdev, mac);
2868         else
2869                 ret = hclge_get_sfp_speed(hdev, &speed);
2870
2871         if (ret == -EOPNOTSUPP) {
2872                 hdev->support_sfp_query = false;
2873                 return ret;
2874         } else if (ret) {
2875                 return ret;
2876         }
2877
2878         if (hdev->pdev->revision >= 0x21) {
2879                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2880                         hclge_update_port_capability(mac);
2881                         return 0;
2882                 }
2883                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2884                                                HCLGE_MAC_FULL);
2885         } else {
2886                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2887                         return 0; /* do nothing if no SFP */
2888
2889                 /* must config full duplex for SFP */
2890                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2891         }
2892 }
2893
2894 static int hclge_get_status(struct hnae3_handle *handle)
2895 {
2896         struct hclge_vport *vport = hclge_get_vport(handle);
2897         struct hclge_dev *hdev = vport->back;
2898
2899         hclge_update_link_status(hdev);
2900
2901         return hdev->hw.mac.link;
2902 }
2903
2904 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2905 {
2906         if (!pci_num_vf(hdev->pdev)) {
2907                 dev_err(&hdev->pdev->dev,
2908                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2909                 return NULL;
2910         }
2911
2912         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2913                 dev_err(&hdev->pdev->dev,
2914                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
2915                         vf, pci_num_vf(hdev->pdev));
2916                 return NULL;
2917         }
2918
2919         /* VFs start from 1 in vport */
2920         vf += HCLGE_VF_VPORT_START_NUM;
2921         return &hdev->vport[vf];
2922 }
2923
2924 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2925                                struct ifla_vf_info *ivf)
2926 {
2927         struct hclge_vport *vport = hclge_get_vport(handle);
2928         struct hclge_dev *hdev = vport->back;
2929
2930         vport = hclge_get_vf_vport(hdev, vf);
2931         if (!vport)
2932                 return -EINVAL;
2933
2934         ivf->vf = vf;
2935         ivf->linkstate = vport->vf_info.link_state;
2936         ivf->spoofchk = vport->vf_info.spoofchk;
2937         ivf->trusted = vport->vf_info.trusted;
2938         ivf->min_tx_rate = 0;
2939         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2940         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2941         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2942         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2943         ether_addr_copy(ivf->mac, vport->vf_info.mac);
2944
2945         return 0;
2946 }
2947
2948 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2949                                    int link_state)
2950 {
2951         struct hclge_vport *vport = hclge_get_vport(handle);
2952         struct hclge_dev *hdev = vport->back;
2953
2954         vport = hclge_get_vf_vport(hdev, vf);
2955         if (!vport)
2956                 return -EINVAL;
2957
2958         vport->vf_info.link_state = link_state;
2959
2960         return 0;
2961 }
2962
2963 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2964 {
2965         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2966
2967         /* fetch the events from their corresponding regs */
2968         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2969         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2970         msix_src_reg = hclge_read_dev(&hdev->hw,
2971                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2972
2973         /* Assumption: If by any chance reset and mailbox events are reported
2974          * together then we will only process the reset event in this go and
2975          * will defer the processing of the mailbox events. Since we would not
2976          * have cleared the RX CMDQ event this time, we would receive another
2977          * interrupt from H/W just for the mailbox.
2978          *
2979          * check for vector0 reset event sources
2980          */
2981         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2982                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2983                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2984                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2985                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2986                 hdev->rst_stats.imp_rst_cnt++;
2987                 return HCLGE_VECTOR0_EVENT_RST;
2988         }
2989
2990         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2991                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2992                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2993                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2994                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2995                 hdev->rst_stats.global_rst_cnt++;
2996                 return HCLGE_VECTOR0_EVENT_RST;
2997         }
2998
2999         /* check for vector0 msix event source */
3000         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3001                 *clearval = msix_src_reg;
3002                 return HCLGE_VECTOR0_EVENT_ERR;
3003         }
3004
3005         /* check for vector0 mailbox(=CMDQ RX) event source */
3006         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3007                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3008                 *clearval = cmdq_src_reg;
3009                 return HCLGE_VECTOR0_EVENT_MBX;
3010         }
3011
3012         /* print other vector0 event source */
3013         dev_info(&hdev->pdev->dev,
3014                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3015                  cmdq_src_reg, msix_src_reg);
3016         *clearval = msix_src_reg;
3017
3018         return HCLGE_VECTOR0_EVENT_OTHER;
3019 }
3020
3021 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3022                                     u32 regclr)
3023 {
3024         switch (event_type) {
3025         case HCLGE_VECTOR0_EVENT_RST:
3026                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3027                 break;
3028         case HCLGE_VECTOR0_EVENT_MBX:
3029                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3030                 break;
3031         default:
3032                 break;
3033         }
3034 }
3035
3036 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3037 {
3038         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3039                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3040                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3041                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3042         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3043 }
3044
3045 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3046 {
3047         writel(enable ? 1 : 0, vector->addr);
3048 }
3049
3050 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3051 {
3052         struct hclge_dev *hdev = data;
3053         u32 clearval = 0;
3054         u32 event_cause;
3055
3056         hclge_enable_vector(&hdev->misc_vector, false);
3057         event_cause = hclge_check_event_cause(hdev, &clearval);
3058
3059         /* vector 0 interrupt is shared with reset and mailbox source events. */
3060         switch (event_cause) {
3061         case HCLGE_VECTOR0_EVENT_ERR:
3062                 /* we do not know what type of reset is required now. This could
3063                  * only be decided after we fetch the type of errors which
3064                  * caused this event. Therefore, we will do the below for now:
3065                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3066                  *    have deferred the type of reset to be used.
3067                  * 2. Schedule the reset service task.
3068                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3069                  *    will fetch the correct type of reset. This would be done
3070                  *    by first decoding the types of errors.
3071                  */
3072                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3073                 /* fall through */
3074         case HCLGE_VECTOR0_EVENT_RST:
3075                 hclge_reset_task_schedule(hdev);
3076                 break;
3077         case HCLGE_VECTOR0_EVENT_MBX:
3078                 /* If we are here then,
3079                  * 1. Either we are not handling any mbx task and we are not
3080                  *    scheduled as well
3081                  *                        OR
3082                  * 2. We could be handling an mbx task but nothing more is
3083                  *    scheduled.
3084                  * In both cases, we should schedule the mbx task as there are
3085                  * more mbx messages reported by this interrupt.
3086                  */
3087                 hclge_mbx_task_schedule(hdev);
3088                 break;
3089         default:
3090                 dev_warn(&hdev->pdev->dev,
3091                          "received unknown or unhandled event of vector0\n");
3092                 break;
3093         }
3094
3095         hclge_clear_event_cause(hdev, event_cause, clearval);
3096
3097         /* Enable the interrupt if it is not caused by reset. And when
3098          * clearval is equal to 0, it means the interrupt status may be
3099          * cleared by hardware before the driver reads the status register.
3100          * For this case, the vector0 interrupt should also be enabled.
3101          */
3102         if (!clearval ||
3103             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3104                 hclge_enable_vector(&hdev->misc_vector, true);
3105         }
3106
3107         return IRQ_HANDLED;
3108 }
3109
3110 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3111 {
3112         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3113                 dev_warn(&hdev->pdev->dev,
3114                          "vector(vector_id %d) has been freed.\n", vector_id);
3115                 return;
3116         }
3117
3118         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3119         hdev->num_msi_left += 1;
3120         hdev->num_msi_used -= 1;
3121 }
3122
3123 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3124 {
3125         struct hclge_misc_vector *vector = &hdev->misc_vector;
3126
3127         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3128
3129         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3130         hdev->vector_status[0] = 0;
3131
3132         hdev->num_msi_left -= 1;
3133         hdev->num_msi_used += 1;
3134 }
3135
3136 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3137                                       const cpumask_t *mask)
3138 {
3139         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3140                                               affinity_notify);
3141
3142         cpumask_copy(&hdev->affinity_mask, mask);
3143 }
3144
3145 static void hclge_irq_affinity_release(struct kref *ref)
3146 {
3147 }
3148
3149 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3150 {
3151         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3152                               &hdev->affinity_mask);
3153
3154         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3155         hdev->affinity_notify.release = hclge_irq_affinity_release;
3156         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3157                                   &hdev->affinity_notify);
3158 }
3159
3160 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3161 {
3162         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3163         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3164 }
3165
3166 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3167 {
3168         int ret;
3169
3170         hclge_get_misc_vector(hdev);
3171
3172         /* this would be explicitly freed in the end */
3173         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3174                  HCLGE_NAME, pci_name(hdev->pdev));
3175         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3176                           0, hdev->misc_vector.name, hdev);
3177         if (ret) {
3178                 hclge_free_vector(hdev, 0);
3179                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3180                         hdev->misc_vector.vector_irq);
3181         }
3182
3183         return ret;
3184 }
3185
3186 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3187 {
3188         free_irq(hdev->misc_vector.vector_irq, hdev);
3189         hclge_free_vector(hdev, 0);
3190 }
3191
3192 int hclge_notify_client(struct hclge_dev *hdev,
3193                         enum hnae3_reset_notify_type type)
3194 {
3195         struct hnae3_client *client = hdev->nic_client;
3196         u16 i;
3197
3198         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3199                 return 0;
3200
3201         if (!client->ops->reset_notify)
3202                 return -EOPNOTSUPP;
3203
3204         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3205                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3206                 int ret;
3207
3208                 ret = client->ops->reset_notify(handle, type);
3209                 if (ret) {
3210                         dev_err(&hdev->pdev->dev,
3211                                 "notify nic client failed %d(%d)\n", type, ret);
3212                         return ret;
3213                 }
3214         }
3215
3216         return 0;
3217 }
3218
3219 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3220                                     enum hnae3_reset_notify_type type)
3221 {
3222         struct hnae3_client *client = hdev->roce_client;
3223         int ret = 0;
3224         u16 i;
3225
3226         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3227                 return 0;
3228
3229         if (!client->ops->reset_notify)
3230                 return -EOPNOTSUPP;
3231
3232         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3233                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3234
3235                 ret = client->ops->reset_notify(handle, type);
3236                 if (ret) {
3237                         dev_err(&hdev->pdev->dev,
3238                                 "notify roce client failed %d(%d)",
3239                                 type, ret);
3240                         return ret;
3241                 }
3242         }
3243
3244         return ret;
3245 }
3246
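/* Poll the reset status register every HCLGE_RESET_WATI_MS milliseconds until
 * the reset-in-progress bit clears, giving up after HCLGE_RESET_WAIT_CNT
 * polls (about 35 seconds in total).
 */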
3247 static int hclge_reset_wait(struct hclge_dev *hdev)
3248 {
3249 #define HCLGE_RESET_WATI_MS     100
3250 #define HCLGE_RESET_WAIT_CNT    350
3251
3252         u32 val, reg, reg_bit;
3253         u32 cnt = 0;
3254
3255         switch (hdev->reset_type) {
3256         case HNAE3_IMP_RESET:
3257                 reg = HCLGE_GLOBAL_RESET_REG;
3258                 reg_bit = HCLGE_IMP_RESET_BIT;
3259                 break;
3260         case HNAE3_GLOBAL_RESET:
3261                 reg = HCLGE_GLOBAL_RESET_REG;
3262                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3263                 break;
3264         case HNAE3_FUNC_RESET:
3265                 reg = HCLGE_FUN_RST_ING;
3266                 reg_bit = HCLGE_FUN_RST_ING_B;
3267                 break;
3268         default:
3269                 dev_err(&hdev->pdev->dev,
3270                         "Wait for unsupported reset type: %d\n",
3271                         hdev->reset_type);
3272                 return -EINVAL;
3273         }
3274
3275         val = hclge_read_dev(&hdev->hw, reg);
3276         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3277                 msleep(HCLGE_RESET_WATI_MS);
3278                 val = hclge_read_dev(&hdev->hw, reg);
3279                 cnt++;
3280         }
3281
3282         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3283                 dev_warn(&hdev->pdev->dev,
3284                          "Wait for reset timeout: %d\n", hdev->reset_type);
3285                 return -EBUSY;
3286         }
3287
3288         return 0;
3289 }
3290
3291 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3292 {
3293         struct hclge_vf_rst_cmd *req;
3294         struct hclge_desc desc;
3295
3296         req = (struct hclge_vf_rst_cmd *)desc.data;
3297         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3298         req->dest_vfid = func_id;
3299
3300         if (reset)
3301                 req->vf_rst = 0x1;
3302
3303         return hclge_cmd_send(&hdev->hw, &desc, 1);
3304 }
3305
3306 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3307 {
3308         int i;
3309
3310         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3311                 struct hclge_vport *vport = &hdev->vport[i];
3312                 int ret;
3313
3314                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3315                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3316                 if (ret) {
3317                         dev_err(&hdev->pdev->dev,
3318                                 "set vf(%u) rst failed %d!\n",
3319                                 vport->vport_id, ret);
3320                         return ret;
3321                 }
3322
3323                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3324                         continue;
3325
3326                 /* Inform VF to process the reset.
3327                  * hclge_inform_reset_assert_to_vf may fail if VF
3328                  * driver is not loaded.
3329                  */
3330                 ret = hclge_inform_reset_assert_to_vf(vport);
3331                 if (ret)
3332                         dev_warn(&hdev->pdev->dev,
3333                                  "inform reset to vf(%u) failed %d!\n",
3334                                  vport->vport_id, ret);
3335         }
3336
3337         return 0;
3338 }
3339
3340 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3341 {
3342         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3343             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3344             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3345                 return;
3346
3347         hclge_mbx_handler(hdev);
3348
3349         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3350 }
3351
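/* Wait for all VFs to report that they are ready for the PF reset, while
 * still servicing mailbox requests so the VFs can bring their netdevs down;
 * give up after HCLGE_PF_RESET_SYNC_CNT polls.
 */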
3352 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3353 {
3354         struct hclge_pf_rst_sync_cmd *req;
3355         struct hclge_desc desc;
3356         int cnt = 0;
3357         int ret;
3358
3359         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3360         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3361
3362         do {
3363                 /* the VF needs to down its netdev by mbx during PF or FLR reset */
3364                 hclge_mailbox_service_task(hdev);
3365
3366                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3367                 /* for compatibility with old firmware, wait
3368                  * 100 ms for the VF to stop IO
3369                  */
3370                 if (ret == -EOPNOTSUPP) {
3371                         msleep(HCLGE_RESET_SYNC_TIME);
3372                         return;
3373                 } else if (ret) {
3374                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3375                                  ret);
3376                         return;
3377                 } else if (req->all_vf_ready) {
3378                         return;
3379                 }
3380                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3381                 hclge_cmd_reuse_desc(&desc, true);
3382         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3383
3384         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3385 }
3386
3387 void hclge_report_hw_error(struct hclge_dev *hdev,
3388                            enum hnae3_hw_error_type type)
3389 {
3390         struct hnae3_client *client = hdev->nic_client;
3391         u16 i;
3392
3393         if (!client || !client->ops->process_hw_error ||
3394             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3395                 return;
3396
3397         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3398                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3399 }
3400
3401 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3402 {
3403         u32 reg_val;
3404
3405         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3406         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3407                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3408                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3409                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3410         }
3411
3412         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3413                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3414                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3415                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3416         }
3417 }
3418
3419 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3420 {
3421         struct hclge_desc desc;
3422         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3423         int ret;
3424
3425         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3426         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3427         req->fun_reset_vfid = func_id;
3428
3429         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3430         if (ret)
3431                 dev_err(&hdev->pdev->dev,
3432                         "send function reset cmd fail, status =%d\n", ret);
3433
3434         return ret;
3435 }
3436
3437 static void hclge_do_reset(struct hclge_dev *hdev)
3438 {
3439         struct hnae3_handle *handle = &hdev->vport[0].nic;
3440         struct pci_dev *pdev = hdev->pdev;
3441         u32 val;
3442
3443         if (hclge_get_hw_reset_stat(handle)) {
3444                 dev_info(&pdev->dev, "Hardware reset not finish\n");
3445                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3446                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3447                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3448                 return;
3449         }
3450
3451         switch (hdev->reset_type) {
3452         case HNAE3_GLOBAL_RESET:
3453                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3454                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3455                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3456                 dev_info(&pdev->dev, "Global Reset requested\n");
3457                 break;
3458         case HNAE3_FUNC_RESET:
3459                 dev_info(&pdev->dev, "PF Reset requested\n");
3460                 /* schedule again to check later */
3461                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3462                 hclge_reset_task_schedule(hdev);
3463                 break;
3464         default:
3465                 dev_warn(&pdev->dev,
3466                          "Unsupported reset type: %d\n", hdev->reset_type);
3467                 break;
3468         }
3469 }
3470
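/* Resolve the pending reset bits in @addr into the highest priority reset
 * level (IMP > global > func > FLR) and clear the bits covered by that level;
 * an UNKNOWN reset source is first resolved by handling the MSI-X errors.
 */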
3471 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3472                                                    unsigned long *addr)
3473 {
3474         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3475         struct hclge_dev *hdev = ae_dev->priv;
3476
3477         /* first, resolve any unknown reset type to the known type(s) */
3478         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3479                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3480                                         HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3481                 /* we will intentionally ignore any errors from this function
3482                  *  as we will end up in *some* reset request in any case
3483                  */
3484                 if (hclge_handle_hw_msix_error(hdev, addr))
3485                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3486                                  msix_sts_reg);
3487
3488                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3489                 /* We deferred the clearing of the error event which caused
3490                  * the interrupt since it was not possible to do that in
3491                  * interrupt context (and this is the reason we introduced the
3492                  * new UNKNOWN reset type). Now that the errors have been
3493                  * handled and cleared in hardware, we can safely enable
3494                  * interrupts. This is an exception to the norm.
3495                  */
3496                 hclge_enable_vector(&hdev->misc_vector, true);
3497         }
3498
3499         /* return the highest priority reset level amongst all */
3500         if (test_bit(HNAE3_IMP_RESET, addr)) {
3501                 rst_level = HNAE3_IMP_RESET;
3502                 clear_bit(HNAE3_IMP_RESET, addr);
3503                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3504                 clear_bit(HNAE3_FUNC_RESET, addr);
3505         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3506                 rst_level = HNAE3_GLOBAL_RESET;
3507                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3508                 clear_bit(HNAE3_FUNC_RESET, addr);
3509         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3510                 rst_level = HNAE3_FUNC_RESET;
3511                 clear_bit(HNAE3_FUNC_RESET, addr);
3512         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3513                 rst_level = HNAE3_FLR_RESET;
3514                 clear_bit(HNAE3_FLR_RESET, addr);
3515         }
3516
3517         if (hdev->reset_type != HNAE3_NONE_RESET &&
3518             rst_level < hdev->reset_type)
3519                 return HNAE3_NONE_RESET;
3520
3521         return rst_level;
3522 }
3523
3524 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3525 {
3526         u32 clearval = 0;
3527
3528         switch (hdev->reset_type) {
3529         case HNAE3_IMP_RESET:
3530                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3531                 break;
3532         case HNAE3_GLOBAL_RESET:
3533                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3534                 break;
3535         default:
3536                 break;
3537         }
3538
3539         if (!clearval)
3540                 return;
3541
3542         /* For revision 0x20, the reset interrupt source
3543          * can only be cleared after the hardware reset is done
3544          */
3545         if (hdev->pdev->revision == 0x20)
3546                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3547                                 clearval);
3548
3549         hclge_enable_vector(&hdev->misc_vector, true);
3550 }
3551
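/* Set or clear the HCLGE_NIC_SW_RST_RDY handshake bit: set it once the
 * driver's preparatory work is done so the hardware reset may proceed, and
 * clear it again after re-initialization.
 */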
3552 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3553 {
3554         u32 reg_val;
3555
3556         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3557         if (enable)
3558                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3559         else
3560                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3561
3562         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3563 }
3564
3565 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3566 {
3567         int ret;
3568
3569         ret = hclge_set_all_vf_rst(hdev, true);
3570         if (ret)
3571                 return ret;
3572
3573         hclge_func_reset_sync_vf(hdev);
3574
3575         return 0;
3576 }
3577
3578 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3579 {
3580         u32 reg_val;
3581         int ret = 0;
3582
3583         switch (hdev->reset_type) {
3584         case HNAE3_FUNC_RESET:
3585                 ret = hclge_func_reset_notify_vf(hdev);
3586                 if (ret)
3587                         return ret;
3588
3589                 ret = hclge_func_reset_cmd(hdev, 0);
3590                 if (ret) {
3591                         dev_err(&hdev->pdev->dev,
3592                                 "asserting function reset fail %d!\n", ret);
3593                         return ret;
3594                 }
3595
3596                 /* After performing PF reset, it is not necessary to do the
3597                  * mailbox handling or send any command to firmware, because
3598                  * any mailbox handling or command to firmware is only valid
3599                  * after hclge_cmd_init is called.
3600                  */
3601                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3602                 hdev->rst_stats.pf_rst_cnt++;
3603                 break;
3604         case HNAE3_FLR_RESET:
3605                 ret = hclge_func_reset_notify_vf(hdev);
3606                 if (ret)
3607                         return ret;
3608                 break;
3609         case HNAE3_IMP_RESET:
3610                 hclge_handle_imp_error(hdev);
3611                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3612                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3613                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3614                 break;
3615         default:
3616                 break;
3617         }
3618
3619         /* inform hardware that preparatory work is done */
3620         msleep(HCLGE_RESET_SYNC_TIME);
3621         hclge_reset_handshake(hdev, true);
3622         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3623
3624         return ret;
3625 }
3626
3627 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3628 {
3629 #define MAX_RESET_FAIL_CNT 5
3630
3631         if (hdev->reset_pending) {
3632                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3633                          hdev->reset_pending);
3634                 return true;
3635         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3636                    HCLGE_RESET_INT_M) {
3637                 dev_info(&hdev->pdev->dev,
3638                          "reset failed because new reset interrupt\n");
3639                 hclge_clear_reset_cause(hdev);
3640                 return false;
3641         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3642                 hdev->rst_stats.reset_fail_cnt++;
3643                 set_bit(hdev->reset_type, &hdev->reset_pending);
3644                 dev_info(&hdev->pdev->dev,
3645                          "re-schedule reset task(%u)\n",
3646                          hdev->rst_stats.reset_fail_cnt);
3647                 return true;
3648         }
3649
3650         hclge_clear_reset_cause(hdev);
3651
3652         /* recover the handshake status when reset fails */
3653         hclge_reset_handshake(hdev, true);
3654
3655         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3656
3657         hclge_dbg_dump_rst_info(hdev);
3658
3659         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3660
3661         return false;
3662 }
3663
3664 static int hclge_set_rst_done(struct hclge_dev *hdev)
3665 {
3666         struct hclge_pf_rst_done_cmd *req;
3667         struct hclge_desc desc;
3668         int ret;
3669
3670         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3671         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3672         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3673
3674         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3675         /* To be compatible with the old firmware, which does not support
3676          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3677          * return success
3678          */
3679         if (ret == -EOPNOTSUPP) {
3680                 dev_warn(&hdev->pdev->dev,
3681                          "current firmware does not support command(0x%x)!\n",
3682                          HCLGE_OPC_PF_RST_DONE);
3683                 return 0;
3684         } else if (ret) {
3685                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3686                         ret);
3687         }
3688
3689         return ret;
3690 }
3691
3692 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3693 {
3694         int ret = 0;
3695
3696         switch (hdev->reset_type) {
3697         case HNAE3_FUNC_RESET:
3698                 /* fall through */
3699         case HNAE3_FLR_RESET:
3700                 ret = hclge_set_all_vf_rst(hdev, false);
3701                 break;
3702         case HNAE3_GLOBAL_RESET:
3703                 /* fall through */
3704         case HNAE3_IMP_RESET:
3705                 ret = hclge_set_rst_done(hdev);
3706                 break;
3707         default:
3708                 break;
3709         }
3710
3711         /* clear up the handshake status after re-initialization is done */
3712         hclge_reset_handshake(hdev, false);
3713
3714         return ret;
3715 }
3716
3717 static int hclge_reset_stack(struct hclge_dev *hdev)
3718 {
3719         int ret;
3720
3721         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3722         if (ret)
3723                 return ret;
3724
3725         ret = hclge_reset_ae_dev(hdev->ae_dev);
3726         if (ret)
3727                 return ret;
3728
3729         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3730         if (ret)
3731                 return ret;
3732
3733         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3734 }
3735
3736 static int hclge_reset_prepare(struct hclge_dev *hdev)
3737 {
3738         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3739         int ret;
3740
3741         /* Initialize ae_dev reset status as well, in case enet layer wants to
3742          * know if device is undergoing reset
3743          */
3744         ae_dev->reset_type = hdev->reset_type;
3745         hdev->rst_stats.reset_cnt++;
3746         /* perform reset of the stack & ae device for a client */
3747         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3748         if (ret)
3749                 return ret;
3750
3751         rtnl_lock();
3752         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3753         rtnl_unlock();
3754         if (ret)
3755                 return ret;
3756
3757         return hclge_reset_prepare_wait(hdev);
3758 }
3759
3760 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3761 {
3762         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3763         enum hnae3_reset_type reset_level;
3764         int ret;
3765
3766         hdev->rst_stats.hw_reset_done_cnt++;
3767
3768         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3769         if (ret)
3770                 return ret;
3771
3772         rtnl_lock();
3773         ret = hclge_reset_stack(hdev);
3774         rtnl_unlock();
3775         if (ret)
3776                 return ret;
3777
3778         hclge_clear_reset_cause(hdev);
3779
3780         ret = hclge_reset_prepare_up(hdev);
3781         if (ret)
3782                 return ret;
3783
3784
3785         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3786         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3787          * times
3788          */
3789         if (ret &&
3790             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3791                 return ret;
3792
3793         rtnl_lock();
3794         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3795         rtnl_unlock();
3796         if (ret)
3797                 return ret;
3798
3799         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3800         if (ret)
3801                 return ret;
3802
3803         hdev->last_reset_time = jiffies;
3804         hdev->rst_stats.reset_fail_cnt = 0;
3805         hdev->rst_stats.reset_done_cnt++;
3806         ae_dev->reset_type = HNAE3_NONE_RESET;
3807         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3808
3809         /* if default_reset_request has a higher level reset request,
3810          * it should be handled as soon as possible, since some errors
3811          * need this kind of reset to be fixed.
3812          */
3813         reset_level = hclge_get_reset_level(ae_dev,
3814                                             &hdev->default_reset_request);
3815         if (reset_level != HNAE3_NONE_RESET)
3816                 set_bit(reset_level, &hdev->reset_request);
3817
3818         return 0;
3819 }
3820
3821 static void hclge_reset(struct hclge_dev *hdev)
3822 {
3823         if (hclge_reset_prepare(hdev))
3824                 goto err_reset;
3825
3826         if (hclge_reset_wait(hdev))
3827                 goto err_reset;
3828
3829         if (hclge_reset_rebuild(hdev))
3830                 goto err_reset;
3831
3832         return;
3833
3834 err_reset:
3835         if (hclge_reset_err_handle(hdev))
3836                 hclge_reset_task_schedule(hdev);
3837 }
3838
3839 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3840 {
3841         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3842         struct hclge_dev *hdev = ae_dev->priv;
3843
3844         /* We might end up getting called broadly in the two cases below:
3845          * 1. A recoverable error was conveyed through APEI and the only way to
3846          *    bring back normalcy is to reset.
3847          * 2. A new reset request from the stack due to timeout
3848          *
3849          * For the first case, the error event might not have an ae handle.
3850          * Check if this is a new reset request and we are not here just because
3851          * the last reset attempt did not succeed and the watchdog hit us again.
3852          * We will know this if the last reset request did not occur very recently
3853          * (watchdog timer = 5*HZ, let us check after a sufficiently large time,
3854          * say 4*5*HZ). In case of a new request we reset the "reset level" to PF
3855          * reset. And if it is a repeat reset request of the most recent one then
3856          * we want to make sure we throttle the reset request. Therefore, we will
3857          * not allow it again before 3*HZ time has elapsed.
3858          */
3859         if (!handle)
3860                 handle = &hdev->vport[0].nic;
3861
3862         if (time_before(jiffies, (hdev->last_reset_time +
3863                                   HCLGE_RESET_INTERVAL))) {
3864                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3865                 return;
3866         } else if (hdev->default_reset_request) {
3867                 hdev->reset_level =
3868                         hclge_get_reset_level(ae_dev,
3869                                               &hdev->default_reset_request);
3870         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3871                 hdev->reset_level = HNAE3_FUNC_RESET;
3872         }
3873
3874         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3875                  hdev->reset_level);
3876
3877         /* request reset & schedule reset task */
3878         set_bit(hdev->reset_level, &hdev->reset_request);
3879         hclge_reset_task_schedule(hdev);
3880
3881         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3882                 hdev->reset_level++;
3883 }
3884
3885 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3886                                         enum hnae3_reset_type rst_type)
3887 {
3888         struct hclge_dev *hdev = ae_dev->priv;
3889
3890         set_bit(rst_type, &hdev->default_reset_request);
3891 }
3892
3893 static void hclge_reset_timer(struct timer_list *t)
3894 {
3895         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3896
3897         /* if default_reset_request has no value, it means that this reset
3898          * request has already been handled, so just return here
3899          */
3900         if (!hdev->default_reset_request)
3901                 return;
3902
3903         dev_info(&hdev->pdev->dev,
3904                  "triggering reset in reset timer\n");
3905         hclge_reset_event(hdev->pdev, NULL);
3906 }
3907
3908 static void hclge_reset_subtask(struct hclge_dev *hdev)
3909 {
3910         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3911
3912         /* check if there is any ongoing reset in the hardware. This status can
3913          * be checked from reset_pending. If there is, then we need to wait for
3914          * the hardware to complete the reset.
3915          *    a. If we are able to figure out in a reasonable time that the
3916          *       hardware has fully reset, then we can proceed with the driver and
3917          *       client reset.
3918          *    b. else, we can come back later to check this status so re-schedule
3919          *       now.
3920          */
3921         hdev->last_reset_time = jiffies;
3922         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3923         if (hdev->reset_type != HNAE3_NONE_RESET)
3924                 hclge_reset(hdev);
3925
3926         /* check if we got any *new* reset requests to be honored */
3927         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3928         if (hdev->reset_type != HNAE3_NONE_RESET)
3929                 hclge_do_reset(hdev);
3930
3931         hdev->reset_type = HNAE3_NONE_RESET;
3932 }
3933
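/* Handle a scheduled reset request; reset_sem and HCLGE_STATE_RST_HANDLING
 * serialize the reset handling.
 */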
3934 static void hclge_reset_service_task(struct hclge_dev *hdev)
3935 {
3936         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3937                 return;
3938
3939         down(&hdev->reset_sem);
3940         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3941
3942         hclge_reset_subtask(hdev);
3943
3944         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3945         up(&hdev->reset_sem);
3946 }
3947
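/* A VF is considered not alive if its last keep-alive is more than 8 seconds
 * old; in that case clear its ALIVE state and restore the default frame size.
 */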
3948 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3949 {
3950         int i;
3951
3952         /* start from vport 1, since the PF is always alive */
3953         for (i = 1; i < hdev->num_alloc_vport; i++) {
3954                 struct hclge_vport *vport = &hdev->vport[i];
3955
3956                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3957                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3958
3959                 /* If vf is not alive, set to default value */
3960                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3961                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3962         }
3963 }
3964
3965 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3966 {
3967         unsigned long delta = round_jiffies_relative(HZ);
3968
3969         /* Always handle the link updating to make sure link state is
3970          * updated when it is triggered by mbx.
3971          */
3972         hclge_update_link_status(hdev);
3973
3974         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3975                 delta = jiffies - hdev->last_serv_processed;
3976
3977                 if (delta < round_jiffies_relative(HZ)) {
3978                         delta = round_jiffies_relative(HZ) - delta;
3979                         goto out;
3980                 }
3981         }
3982
3983         hdev->serv_processed_cnt++;
3984         hclge_update_vport_alive(hdev);
3985
3986         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3987                 hdev->last_serv_processed = jiffies;
3988                 goto out;
3989         }
3990
3991         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3992                 hclge_update_stats_for_all(hdev);
3993
3994         hclge_update_port_info(hdev);
3995         hclge_sync_vlan_filter(hdev);
3996
3997         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3998                 hclge_rfs_filter_expire(hdev);
3999
4000         hdev->last_serv_processed = jiffies;
4001
4002 out:
4003         hclge_task_schedule(hdev, delta);
4004 }
4005
4006 static void hclge_service_task(struct work_struct *work)
4007 {
4008         struct hclge_dev *hdev =
4009                 container_of(work, struct hclge_dev, service_task.work);
4010
4011         hclge_reset_service_task(hdev);
4012         hclge_mailbox_service_task(hdev);
4013         hclge_periodic_service_task(hdev);
4014
4015         /* Handle reset and mbx again in case periodical task delays the
4016          * handling by calling hclge_task_schedule() in
4017          * hclge_periodic_service_task().
4018          */
4019         hclge_reset_service_task(hdev);
4020         hclge_mailbox_service_task(hdev);
4021 }
4022
4023 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4024 {
4025         /* VF handle has no client */
4026         if (!handle->client)
4027                 return container_of(handle, struct hclge_vport, nic);
4028         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4029                 return container_of(handle, struct hclge_vport, roce);
4030         else
4031                 return container_of(handle, struct hclge_vport, nic);
4032 }
4033
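/* Allocate up to vector_num MSI-X vectors for the requesting vport, skipping
 * vector 0 which is reserved for the misc interrupt, and fill vector_info
 * with each vector's irq number and per-vport control register address;
 * returns the number of vectors actually allocated.
 */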
4034 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4035                             struct hnae3_vector_info *vector_info)
4036 {
4037         struct hclge_vport *vport = hclge_get_vport(handle);
4038         struct hnae3_vector_info *vector = vector_info;
4039         struct hclge_dev *hdev = vport->back;
4040         int alloc = 0;
4041         int i, j;
4042
4043         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4044         vector_num = min(hdev->num_msi_left, vector_num);
4045
4046         for (j = 0; j < vector_num; j++) {
4047                 for (i = 1; i < hdev->num_msi; i++) {
4048                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4049                                 vector->vector = pci_irq_vector(hdev->pdev, i);
4050                                 vector->io_addr = hdev->hw.io_base +
4051                                         HCLGE_VECTOR_REG_BASE +
4052                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4053                                         vport->vport_id *
4054                                         HCLGE_VECTOR_VF_OFFSET;
4055                                 hdev->vector_status[i] = vport->vport_id;
4056                                 hdev->vector_irq[i] = vector->vector;
4057
4058                                 vector++;
4059                                 alloc++;
4060
4061                                 break;
4062                         }
4063                 }
4064         }
4065         hdev->num_msi_left -= alloc;
4066         hdev->num_msi_used += alloc;
4067
4068         return alloc;
4069 }
4070
4071 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4072 {
4073         int i;
4074
4075         for (i = 0; i < hdev->num_msi; i++)
4076                 if (vector == hdev->vector_irq[i])
4077                         return i;
4078
4079         return -EINVAL;
4080 }
4081
4082 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4083 {
4084         struct hclge_vport *vport = hclge_get_vport(handle);
4085         struct hclge_dev *hdev = vport->back;
4086         int vector_id;
4087
4088         vector_id = hclge_get_vector_index(hdev, vector);
4089         if (vector_id < 0) {
4090                 dev_err(&hdev->pdev->dev,
4091                         "Get vector index fail. vector = %d\n", vector);
4092                 return vector_id;
4093         }
4094
4095         hclge_free_vector(hdev, vector_id);
4096
4097         return 0;
4098 }
4099
4100 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4101 {
4102         return HCLGE_RSS_KEY_SIZE;
4103 }
4104
4105 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4106 {
4107         return HCLGE_RSS_IND_TBL_SIZE;
4108 }
4109
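/* Program the RSS hash algorithm and hash key; the key is written in chunks
 * of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per chunk, with
 * the chunk offset encoded in hash_config.
 */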
4110 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4111                                   const u8 hfunc, const u8 *key)
4112 {
4113         struct hclge_rss_config_cmd *req;
4114         unsigned int key_offset = 0;
4115         struct hclge_desc desc;
4116         int key_counts;
4117         int key_size;
4118         int ret;
4119
4120         key_counts = HCLGE_RSS_KEY_SIZE;
4121         req = (struct hclge_rss_config_cmd *)desc.data;
4122
4123         while (key_counts) {
4124                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4125                                            false);
4126
4127                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4128                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4129
4130                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4131                 memcpy(req->hash_key,
4132                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4133
4134                 key_counts -= key_size;
4135                 key_offset++;
4136                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4137                 if (ret) {
4138                         dev_err(&hdev->pdev->dev,
4139                                 "Configure RSS config fail, status = %d\n",
4140                                 ret);
4141                         return ret;
4142                 }
4143         }
4144         return 0;
4145 }
4146
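/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE entries
 * per command descriptor.
 */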
4147 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4148 {
4149         struct hclge_rss_indirection_table_cmd *req;
4150         struct hclge_desc desc;
4151         int i, j;
4152         int ret;
4153
4154         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4155
4156         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4157                 hclge_cmd_setup_basic_desc
4158                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4159
4160                 req->start_table_index =
4161                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4162                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4163
4164                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4165                         req->rss_result[j] =
4166                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4167
4168                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4169                 if (ret) {
4170                         dev_err(&hdev->pdev->dev,
4171                                 "Configure rss indir table fail,status = %d\n",
4172                                 ret);
4173                         return ret;
4174                 }
4175         }
4176         return 0;
4177 }
4178
4179 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4180                                  u16 *tc_size, u16 *tc_offset)
4181 {
4182         struct hclge_rss_tc_mode_cmd *req;
4183         struct hclge_desc desc;
4184         int ret;
4185         int i;
4186
4187         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4188         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4189
4190         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4191                 u16 mode = 0;
4192
4193                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4194                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4195                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4196                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4197                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4198
4199                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4200         }
4201
4202         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4203         if (ret)
4204                 dev_err(&hdev->pdev->dev,
4205                         "Configure rss tc mode fail, status = %d\n", ret);
4206
4207         return ret;
4208 }
4209
4210 static void hclge_get_rss_type(struct hclge_vport *vport)
4211 {
4212         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4213             vport->rss_tuple_sets.ipv4_udp_en ||
4214             vport->rss_tuple_sets.ipv4_sctp_en ||
4215             vport->rss_tuple_sets.ipv6_tcp_en ||
4216             vport->rss_tuple_sets.ipv6_udp_en ||
4217             vport->rss_tuple_sets.ipv6_sctp_en)
4218                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4219         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4220                  vport->rss_tuple_sets.ipv6_fragment_en)
4221                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4222         else
4223                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4224 }
4225
4226 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4227 {
4228         struct hclge_rss_input_tuple_cmd *req;
4229         struct hclge_desc desc;
4230         int ret;
4231
4232         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4233
4234         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4235
4236         /* Get the tuple cfg from pf */
4237         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4238         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4239         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4240         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4241         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4242         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4243         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4244         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4245         hclge_get_rss_type(&hdev->vport[0]);
4246         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4247         if (ret)
4248                 dev_err(&hdev->pdev->dev,
4249                         "Configure rss input fail, status = %d\n", ret);
4250         return ret;
4251 }
4252
4253 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4254                          u8 *key, u8 *hfunc)
4255 {
4256         struct hclge_vport *vport = hclge_get_vport(handle);
4257         int i;
4258
4259         /* Get hash algorithm */
4260         if (hfunc) {
4261                 switch (vport->rss_algo) {
4262                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4263                         *hfunc = ETH_RSS_HASH_TOP;
4264                         break;
4265                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4266                         *hfunc = ETH_RSS_HASH_XOR;
4267                         break;
4268                 default:
4269                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4270                         break;
4271                 }
4272         }
4273
4274         /* Get the RSS Key required by the user */
4275         if (key)
4276                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4277
4278         /* Get indirect table */
4279         if (indir)
4280                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4281                         indir[i] = vport->rss_indirection_tbl[i];
4282
4283         return 0;
4284 }
4285
4286 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4287                          const u8 *key, const u8 hfunc)
4288 {
4289         struct hclge_vport *vport = hclge_get_vport(handle);
4290         struct hclge_dev *hdev = vport->back;
4291         u8 hash_algo;
4292         int ret, i;
4293
4294         /* Set the RSS Hash Key if specified by the user */
4295         if (key) {
4296                 switch (hfunc) {
4297                 case ETH_RSS_HASH_TOP:
4298                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4299                         break;
4300                 case ETH_RSS_HASH_XOR:
4301                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4302                         break;
4303                 case ETH_RSS_HASH_NO_CHANGE:
4304                         hash_algo = vport->rss_algo;
4305                         break;
4306                 default:
4307                         return -EINVAL;
4308                 }
4309
4310                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4311                 if (ret)
4312                         return ret;
4313
4314                 /* Update the shadow RSS key with the key specified by the user */
4315                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4316                 vport->rss_algo = hash_algo;
4317         }
4318
4319         /* Update the shadow RSS table with user specified qids */
4320         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4321                 vport->rss_indirection_tbl[i] = indir[i];
4322
4323         /* Update the hardware */
4324         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4325 }
4326
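     /* Translate the ethtool RXH_* hash fields of the rxnfc request into the
      * driver's tuple bits; SCTP flows additionally enable HCLGE_V_TAG_BIT.
      */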
4327 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4328 {
4329         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4330
4331         if (nfc->data & RXH_L4_B_2_3)
4332                 hash_sets |= HCLGE_D_PORT_BIT;
4333         else
4334                 hash_sets &= ~HCLGE_D_PORT_BIT;
4335
4336         if (nfc->data & RXH_IP_SRC)
4337                 hash_sets |= HCLGE_S_IP_BIT;
4338         else
4339                 hash_sets &= ~HCLGE_S_IP_BIT;
4340
4341         if (nfc->data & RXH_IP_DST)
4342                 hash_sets |= HCLGE_D_IP_BIT;
4343         else
4344                 hash_sets &= ~HCLGE_D_IP_BIT;
4345
4346         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4347                 hash_sets |= HCLGE_V_TAG_BIT;
4348
4349         return hash_sets;
4350 }
4351
4352 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4353                                struct ethtool_rxnfc *nfc)
4354 {
4355         struct hclge_vport *vport = hclge_get_vport(handle);
4356         struct hclge_dev *hdev = vport->back;
4357         struct hclge_rss_input_tuple_cmd *req;
4358         struct hclge_desc desc;
4359         u8 tuple_sets;
4360         int ret;
4361
4362         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4363                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4364                 return -EINVAL;
4365
4366         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4367         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4368
4369         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4370         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4371         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4372         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4373         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4374         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4375         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4376         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4377
4378         tuple_sets = hclge_get_rss_hash_bits(nfc);
4379         switch (nfc->flow_type) {
4380         case TCP_V4_FLOW:
4381                 req->ipv4_tcp_en = tuple_sets;
4382                 break;
4383         case TCP_V6_FLOW:
4384                 req->ipv6_tcp_en = tuple_sets;
4385                 break;
4386         case UDP_V4_FLOW:
4387                 req->ipv4_udp_en = tuple_sets;
4388                 break;
4389         case UDP_V6_FLOW:
4390                 req->ipv6_udp_en = tuple_sets;
4391                 break;
4392         case SCTP_V4_FLOW:
4393                 req->ipv4_sctp_en = tuple_sets;
4394                 break;
4395         case SCTP_V6_FLOW:
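                     /* hashing SCTP over IPv6 flows on the L4 ports is not
                      * supported, so reject such a request
                      */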
4396                 if ((nfc->data & RXH_L4_B_0_1) ||
4397                     (nfc->data & RXH_L4_B_2_3))
4398                         return -EINVAL;
4399
4400                 req->ipv6_sctp_en = tuple_sets;
4401                 break;
4402         case IPV4_FLOW:
4403                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4404                 break;
4405         case IPV6_FLOW:
4406                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4407                 break;
4408         default:
4409                 return -EINVAL;
4410         }
4411
4412         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4413         if (ret) {
4414                 dev_err(&hdev->pdev->dev,
4415                         "Set rss tuple fail, status = %d\n", ret);
4416                 return ret;
4417         }
4418
4419         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4420         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4421         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4422         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4423         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4424         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4425         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4426         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4427         hclge_get_rss_type(vport);
4428         return 0;
4429 }
4430
4431 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4432                                struct ethtool_rxnfc *nfc)
4433 {
4434         struct hclge_vport *vport = hclge_get_vport(handle);
4435         u8 tuple_sets;
4436
4437         nfc->data = 0;
4438
4439         switch (nfc->flow_type) {
4440         case TCP_V4_FLOW:
4441                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4442                 break;
4443         case UDP_V4_FLOW:
4444                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4445                 break;
4446         case TCP_V6_FLOW:
4447                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4448                 break;
4449         case UDP_V6_FLOW:
4450                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4451                 break;
4452         case SCTP_V4_FLOW:
4453                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4454                 break;
4455         case SCTP_V6_FLOW:
4456                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4457                 break;
4458         case IPV4_FLOW:
4459         case IPV6_FLOW:
4460                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4461                 break;
4462         default:
4463                 return -EINVAL;
4464         }
4465
4466         if (!tuple_sets)
4467                 return 0;
4468
4469         if (tuple_sets & HCLGE_D_PORT_BIT)
4470                 nfc->data |= RXH_L4_B_2_3;
4471         if (tuple_sets & HCLGE_S_PORT_BIT)
4472                 nfc->data |= RXH_L4_B_0_1;
4473         if (tuple_sets & HCLGE_D_IP_BIT)
4474                 nfc->data |= RXH_IP_DST;
4475         if (tuple_sets & HCLGE_S_IP_BIT)
4476                 nfc->data |= RXH_IP_SRC;
4477
4478         return 0;
4479 }
4480
4481 static int hclge_get_tc_size(struct hnae3_handle *handle)
4482 {
4483         struct hclge_vport *vport = hclge_get_vport(handle);
4484         struct hclge_dev *hdev = vport->back;
4485
4486         return hdev->rss_size_max;
4487 }
4488
4489 int hclge_rss_init_hw(struct hclge_dev *hdev)
4490 {
4491         struct hclge_vport *vport = hdev->vport;
4492         u8 *rss_indir = vport[0].rss_indirection_tbl;
4493         u16 rss_size = vport[0].alloc_rss_size;
4494         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4495         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4496         u8 *key = vport[0].rss_hash_key;
4497         u8 hfunc = vport[0].rss_algo;
4498         u16 tc_valid[HCLGE_MAX_TC_NUM];
4499         u16 roundup_size;
4500         unsigned int i;
4501         int ret;
4502
4503         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4504         if (ret)
4505                 return ret;
4506
4507         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4508         if (ret)
4509                 return ret;
4510
4511         ret = hclge_set_rss_input_tuple(hdev);
4512         if (ret)
4513                 return ret;
4514
4515         /* Each TC has the same queue size, and the tc_size set to hardware
4516          * is the log2 of the roundup power of two of rss_size; the actual
4517          * queue size is limited by the indirection table.
4518          */
4519         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4520                 dev_err(&hdev->pdev->dev,
4521                         "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4522                         rss_size);
4523                 return -EINVAL;
4524         }
4525
4526         roundup_size = roundup_pow_of_two(rss_size);
4527         roundup_size = ilog2(roundup_size);
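             /* e.g. rss_size = 24: roundup_pow_of_two(24) = 32 and ilog2(32) = 5,
              * so the tc_size written to hardware is 5
              */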
4528
4529         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4530                 tc_valid[i] = 0;
4531
4532                 if (!(hdev->hw_tc_map & BIT(i)))
4533                         continue;
4534
4535                 tc_valid[i] = 1;
4536                 tc_size[i] = roundup_size;
4537                 tc_offset[i] = rss_size * i;
4538         }
4539
4540         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4541 }
4542
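     /* Fill each vport's RSS indirection table by assigning the table entries
      * round-robin over the vport's allocated RSS queues.
      */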
4543 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4544 {
4545         struct hclge_vport *vport = hdev->vport;
4546         int i, j;
4547
4548         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4549                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4550                         vport[j].rss_indirection_tbl[i] =
4551                                 i % vport[j].alloc_rss_size;
4552         }
4553 }
4554
4555 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4556 {
4557         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4558         struct hclge_vport *vport = hdev->vport;
4559
4560         if (hdev->pdev->revision >= 0x21)
4561                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4562
4563         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4564                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4565                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4566                 vport[i].rss_tuple_sets.ipv4_udp_en =
4567                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4568                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4569                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4570                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4571                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4572                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4573                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4574                 vport[i].rss_tuple_sets.ipv6_udp_en =
4575                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4576                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4577                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4578                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4579                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4580
4581                 vport[i].rss_algo = rss_algo;
4582
4583                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4584                        HCLGE_RSS_KEY_SIZE);
4585         }
4586
4587         hclge_rss_indir_init_cfg(hdev);
4588 }
4589
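     /* Map (en == true) or unmap (en == false) every ring in ring_chain to the
      * given vector. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD rings are packed into
      * one command descriptor; a full descriptor is sent immediately and any
      * remaining entries are flushed after the loop.
      */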
4590 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4591                                 int vector_id, bool en,
4592                                 struct hnae3_ring_chain_node *ring_chain)
4593 {
4594         struct hclge_dev *hdev = vport->back;
4595         struct hnae3_ring_chain_node *node;
4596         struct hclge_desc desc;
4597         struct hclge_ctrl_vector_chain_cmd *req =
4598                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4599         enum hclge_cmd_status status;
4600         enum hclge_opcode_type op;
4601         u16 tqp_type_and_id;
4602         int i;
4603
4604         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4605         hclge_cmd_setup_basic_desc(&desc, op, false);
4606         req->int_vector_id = vector_id;
4607
4608         i = 0;
4609         for (node = ring_chain; node; node = node->next) {
4610                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4611                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4612                                 HCLGE_INT_TYPE_S,
4613                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4614                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4615                                 HCLGE_TQP_ID_S, node->tqp_index);
4616                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4617                                 HCLGE_INT_GL_IDX_S,
4618                                 hnae3_get_field(node->int_gl_idx,
4619                                                 HNAE3_RING_GL_IDX_M,
4620                                                 HNAE3_RING_GL_IDX_S));
4621                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4622                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4623                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4624                         req->vfid = vport->vport_id;
4625
4626                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4627                         if (status) {
4628                                 dev_err(&hdev->pdev->dev,
4629                                         "Map TQP fail, status is %d.\n",
4630                                         status);
4631                                 return -EIO;
4632                         }
4633                         i = 0;
4634
4635                         hclge_cmd_setup_basic_desc(&desc,
4636                                                    op,
4637                                                    false);
4638                         req->int_vector_id = vector_id;
4639                 }
4640         }
4641
4642         if (i > 0) {
4643                 req->int_cause_num = i;
4644                 req->vfid = vport->vport_id;
4645                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4646                 if (status) {
4647                         dev_err(&hdev->pdev->dev,
4648                                 "Map TQP fail, status is %d.\n", status);
4649                         return -EIO;
4650                 }
4651         }
4652
4653         return 0;
4654 }
4655
4656 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4657                                     struct hnae3_ring_chain_node *ring_chain)
4658 {
4659         struct hclge_vport *vport = hclge_get_vport(handle);
4660         struct hclge_dev *hdev = vport->back;
4661         int vector_id;
4662
4663         vector_id = hclge_get_vector_index(hdev, vector);
4664         if (vector_id < 0) {
4665                 dev_err(&hdev->pdev->dev,
4666                         "failed to get vector index. vector=%d\n", vector);
4667                 return vector_id;
4668         }
4669
4670         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4671 }
4672
4673 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4674                                        struct hnae3_ring_chain_node *ring_chain)
4675 {
4676         struct hclge_vport *vport = hclge_get_vport(handle);
4677         struct hclge_dev *hdev = vport->back;
4678         int vector_id, ret;
4679
4680         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4681                 return 0;
4682
4683         vector_id = hclge_get_vector_index(hdev, vector);
4684         if (vector_id < 0) {
4685                 dev_err(&handle->pdev->dev,
4686                         "Get vector index fail. ret = %d\n", vector_id);
4687                 return vector_id;
4688         }
4689
4690         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4691         if (ret)
4692                 dev_err(&handle->pdev->dev,
4693                         "Unmap ring from vector fail. vector_id=%d, ret = %d\n",
4694                         vector_id, ret);
4695
4696         return ret;
4697 }
4698
4699 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4700                                       struct hclge_promisc_param *param)
4701 {
4702         struct hclge_promisc_cfg_cmd *req;
4703         struct hclge_desc desc;
4704         int ret;
4705
4706         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4707
4708         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4709         req->vf_id = param->vf_id;
4710
4711         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
4712          * on pdev revision 0x20; newer revisions support them. Setting
4713          * these two fields does not cause an error when the driver sends
4714          * the command to firmware on revision 0x20.
4715          */
4716         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4717                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4718
4719         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4720         if (ret)
4721                 dev_err(&hdev->pdev->dev,
4722                         "Set promisc mode fail, status is %d.\n", ret);
4723
4724         return ret;
4725 }
4726
4727 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4728                                      bool en_uc, bool en_mc, bool en_bc,
4729                                      int vport_id)
4730 {
4731         if (!param)
4732                 return;
4733
4734         memset(param, 0, sizeof(struct hclge_promisc_param));
4735         if (en_uc)
4736                 param->enable = HCLGE_PROMISC_EN_UC;
4737         if (en_mc)
4738                 param->enable |= HCLGE_PROMISC_EN_MC;
4739         if (en_bc)
4740                 param->enable |= HCLGE_PROMISC_EN_BC;
4741         param->vf_id = vport_id;
4742 }
4743
4744 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4745                                  bool en_mc_pmc, bool en_bc_pmc)
4746 {
4747         struct hclge_dev *hdev = vport->back;
4748         struct hclge_promisc_param param;
4749
4750         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4751                                  vport->vport_id);
4752         return hclge_cmd_set_promisc_mode(hdev, &param);
4753 }
4754
4755 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4756                                   bool en_mc_pmc)
4757 {
4758         struct hclge_vport *vport = hclge_get_vport(handle);
4759         bool en_bc_pmc = true;
4760
4761         /* For revision 0x20, if broadcast promisc is enabled, the vlan
4762          * filter is always bypassed. So broadcast promisc should be
4763          * disabled until the user enables promisc mode.
4764          */
4765         if (handle->pdev->revision == 0x20)
4766                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4767
4768         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4769                                             en_bc_pmc);
4770 }
4771
4772 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4773 {
4774         struct hclge_get_fd_mode_cmd *req;
4775         struct hclge_desc desc;
4776         int ret;
4777
4778         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4779
4780         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4781
4782         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4783         if (ret) {
4784                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4785                 return ret;
4786         }
4787
4788         *fd_mode = req->mode;
4789
4790         return ret;
4791 }
4792
4793 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4794                                    u32 *stage1_entry_num,
4795                                    u32 *stage2_entry_num,
4796                                    u16 *stage1_counter_num,
4797                                    u16 *stage2_counter_num)
4798 {
4799         struct hclge_get_fd_allocation_cmd *req;
4800         struct hclge_desc desc;
4801         int ret;
4802
4803         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4804
4805         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4806
4807         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4808         if (ret) {
4809                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4810                         ret);
4811                 return ret;
4812         }
4813
4814         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4815         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4816         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4817         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4818
4819         return ret;
4820 }
4821
4822 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4823 {
4824         struct hclge_set_fd_key_config_cmd *req;
4825         struct hclge_fd_key_cfg *stage;
4826         struct hclge_desc desc;
4827         int ret;
4828
4829         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4830
4831         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4832         stage = &hdev->fd_cfg.key_cfg[stage_num];
4833         req->stage = stage_num;
4834         req->key_select = stage->key_sel;
4835         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4836         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4837         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4838         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4839         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4840         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4841
4842         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4843         if (ret)
4844                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4845
4846         return ret;
4847 }
4848
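     /* Query the flow director mode and the entry/counter allocation from
      * firmware, then program the stage 1 key layout: which tuple and meta
      * data fields take part in the lookup key.
      */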
4849 static int hclge_init_fd_config(struct hclge_dev *hdev)
4850 {
4851 #define LOW_2_WORDS             0x03
4852         struct hclge_fd_key_cfg *key_cfg;
4853         int ret;
4854
4855         if (!hnae3_dev_fd_supported(hdev))
4856                 return 0;
4857
4858         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4859         if (ret)
4860                 return ret;
4861
4862         switch (hdev->fd_cfg.fd_mode) {
4863         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4864                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4865                 break;
4866         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4867                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4868                 break;
4869         default:
4870                 dev_err(&hdev->pdev->dev,
4871                         "Unsupported flow director mode %u\n",
4872                         hdev->fd_cfg.fd_mode);
4873                 return -EOPNOTSUPP;
4874         }
4875
4876         hdev->fd_cfg.proto_support =
4877                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4878                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4879         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4880         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4881         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4882         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4883         key_cfg->outer_sipv6_word_en = 0;
4884         key_cfg->outer_dipv6_word_en = 0;
4885
4886         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4887                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4888                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4889                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4890
4891         /* If using the max 400 bit key, we can support tuples for ether type */
4892         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4893                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4894                 key_cfg->tuple_active |=
4895                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4896         }
4897
4898         /* roce_type is used to filter roce frames
4899          * dst_vport is used to specify the rule
4900          */
4901         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4902
4903         ret = hclge_get_fd_allocation(hdev,
4904                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4905                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4906                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4907                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4908         if (ret)
4909                 return ret;
4910
4911         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4912 }
4913
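     /* Write one flow director TCAM entry. The key is split across the
      * tcam_data fields of three descriptors chained with HCLGE_CMD_FLAG_NEXT;
      * sel_x selects whether the x or the y part of the x/y key is written.
      */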
4914 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4915                                 int loc, u8 *key, bool is_add)
4916 {
4917         struct hclge_fd_tcam_config_1_cmd *req1;
4918         struct hclge_fd_tcam_config_2_cmd *req2;
4919         struct hclge_fd_tcam_config_3_cmd *req3;
4920         struct hclge_desc desc[3];
4921         int ret;
4922
4923         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4924         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4925         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4926         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4927         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4928
4929         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4930         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4931         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4932
4933         req1->stage = stage;
4934         req1->xy_sel = sel_x ? 1 : 0;
4935         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4936         req1->index = cpu_to_le32(loc);
4937         req1->entry_vld = sel_x ? is_add : 0;
4938
4939         if (key) {
4940                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4941                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4942                        sizeof(req2->tcam_data));
4943                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4944                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4945         }
4946
4947         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4948         if (ret)
4949                 dev_err(&hdev->pdev->dev,
4950                         "config tcam key fail, ret=%d\n",
4951                         ret);
4952
4953         return ret;
4954 }
4955
4956 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4957                               struct hclge_fd_ad_data *action)
4958 {
4959         struct hclge_fd_ad_config_cmd *req;
4960         struct hclge_desc desc;
4961         u64 ad_data = 0;
4962         int ret;
4963
4964         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4965
4966         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4967         req->index = cpu_to_le32(loc);
4968         req->stage = stage;
4969
4970         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4971                       action->write_rule_id_to_bd);
4972         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4973                         action->rule_id);
4974         ad_data <<= 32;
4975         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4976         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4977                       action->forward_to_direct_queue);
4978         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4979                         action->queue_id);
4980         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4981         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4982                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4983         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4984         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4985                         action->counter_id);
4986
4987         req->ad_data = cpu_to_le64(ad_data);
4988         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4989         if (ret)
4990                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4991
4992         return ret;
4993 }
4994
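     /* Convert one tuple of the rule into its x/y representation at key_x and
      * key_y. Returns true when the tuple occupies space in the key (the
      * caller then advances its key pointers), false when the tuple is not
      * active in the key layout.
      */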
4995 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4996                                    struct hclge_fd_rule *rule)
4997 {
4998         u16 tmp_x_s, tmp_y_s;
4999         u32 tmp_x_l, tmp_y_l;
5000         int i;
5001
5002         if (rule->unused_tuple & tuple_bit)
5003                 return true;
5004
5005         switch (tuple_bit) {
5006         case 0:
5007                 return false;
5008         case BIT(INNER_DST_MAC):
5009                 for (i = 0; i < ETH_ALEN; i++) {
5010                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5011                                rule->tuples_mask.dst_mac[i]);
5012                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5013                                rule->tuples_mask.dst_mac[i]);
5014                 }
5015
5016                 return true;
5017         case BIT(INNER_SRC_MAC):
5018                 for (i = 0; i < ETH_ALEN; i++) {
5019                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5020                                rule->tuples_mask.src_mac[i]);
5021                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5022                                rule->tuples_mask.src_mac[i]);
5023                 }
5024
5025                 return true;
5026         case BIT(INNER_VLAN_TAG_FST):
5027                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5028                        rule->tuples_mask.vlan_tag1);
5029                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5030                        rule->tuples_mask.vlan_tag1);
5031                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5032                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5033
5034                 return true;
5035         case BIT(INNER_ETH_TYPE):
5036                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5037                        rule->tuples_mask.ether_proto);
5038                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5039                        rule->tuples_mask.ether_proto);
5040                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5041                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5042
5043                 return true;
5044         case BIT(INNER_IP_TOS):
5045                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5046                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5047
5048                 return true;
5049         case BIT(INNER_IP_PROTO):
5050                 calc_x(*key_x, rule->tuples.ip_proto,
5051                        rule->tuples_mask.ip_proto);
5052                 calc_y(*key_y, rule->tuples.ip_proto,
5053                        rule->tuples_mask.ip_proto);
5054
5055                 return true;
5056         case BIT(INNER_SRC_IP):
5057                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5058                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5059                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5060                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5061                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5062                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5063
5064                 return true;
5065         case BIT(INNER_DST_IP):
5066                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5067                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5068                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5069                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5070                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5071                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5072
5073                 return true;
5074         case BIT(INNER_SRC_PORT):
5075                 calc_x(tmp_x_s, rule->tuples.src_port,
5076                        rule->tuples_mask.src_port);
5077                 calc_y(tmp_y_s, rule->tuples.src_port,
5078                        rule->tuples_mask.src_port);
5079                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5080                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5081
5082                 return true;
5083         case BIT(INNER_DST_PORT):
5084                 calc_x(tmp_x_s, rule->tuples.dst_port,
5085                        rule->tuples_mask.dst_port);
5086                 calc_y(tmp_y_s, rule->tuples.dst_port,
5087                        rule->tuples_mask.dst_port);
5088                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5089                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5090
5091                 return true;
5092         default:
5093                 return false;
5094         }
5095 }
5096
5097 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5098                                  u8 vf_id, u8 network_port_id)
5099 {
5100         u32 port_number = 0;
5101
5102         if (port_type == HOST_PORT) {
5103                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5104                                 pf_id);
5105                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5106                                 vf_id);
5107                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5108         } else {
5109                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5110                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5111                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5112         }
5113
5114         return port_number;
5115 }
5116
5117 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5118                                        __le32 *key_x, __le32 *key_y,
5119                                        struct hclge_fd_rule *rule)
5120 {
5121         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5122         u8 cur_pos = 0, tuple_size, shift_bits;
5123         unsigned int i;
5124
5125         for (i = 0; i < MAX_META_DATA; i++) {
5126                 tuple_size = meta_data_key_info[i].key_length;
5127                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5128
5129                 switch (tuple_bit) {
5130                 case BIT(ROCE_TYPE):
5131                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5132                         cur_pos += tuple_size;
5133                         break;
5134                 case BIT(DST_VPORT):
5135                         port_number = hclge_get_port_number(HOST_PORT, 0,
5136                                                             rule->vf_id, 0);
5137                         hnae3_set_field(meta_data,
5138                                         GENMASK(cur_pos + tuple_size, cur_pos),
5139                                         cur_pos, port_number);
5140                         cur_pos += tuple_size;
5141                         break;
5142                 default:
5143                         break;
5144                 }
5145         }
5146
5147         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5148         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5149         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5150
5151         *key_x = cpu_to_le32(tmp_x << shift_bits);
5152         *key_y = cpu_to_le32(tmp_y << shift_bits);
5153 }
5154
5155 /* A complete key is a combination of the meta data key and the tuple key.
5156  * The meta data key is stored at the MSB region, the tuple key at the LSB
5157  * region, and unused bits are filled with 0.
5158  */
5159 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5160                             struct hclge_fd_rule *rule)
5161 {
5162         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5163         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5164         u8 *cur_key_x, *cur_key_y;
5165         unsigned int i;
5166         int ret, tuple_size;
5167         u8 meta_data_region;
5168
5169         memset(key_x, 0, sizeof(key_x));
5170         memset(key_y, 0, sizeof(key_y));
5171         cur_key_x = key_x;
5172         cur_key_y = key_y;
5173
5174         for (i = 0; i < MAX_TUPLE; i++) {
5175                 bool tuple_valid;
5176                 u32 check_tuple;
5177
5178                 tuple_size = tuple_key_info[i].key_length / 8;
5179                 check_tuple = key_cfg->tuple_active & BIT(i);
5180
5181                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5182                                                      cur_key_y, rule);
5183                 if (tuple_valid) {
5184                         cur_key_x += tuple_size;
5185                         cur_key_y += tuple_size;
5186                 }
5187         }
5188
5189         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5190                         MAX_META_DATA_LENGTH / 8;
5191
5192         hclge_fd_convert_meta_data(key_cfg,
5193                                    (__le32 *)(key_x + meta_data_region),
5194                                    (__le32 *)(key_y + meta_data_region),
5195                                    rule);
5196
5197         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5198                                    true);
5199         if (ret) {
5200                 dev_err(&hdev->pdev->dev,
5201                         "fd key_y config fail, loc=%u, ret=%d\n",
5202                         rule->location, ret);
5203                 return ret;
5204         }
5205
5206         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5207                                    true);
5208         if (ret)
5209                 dev_err(&hdev->pdev->dev,
5210                         "fd key_x config fail, loc=%u, ret=%d\n",
5211                         rule->location, ret);
5212         return ret;
5213 }
5214
5215 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5216                                struct hclge_fd_rule *rule)
5217 {
5218         struct hclge_fd_ad_data ad_data;
5219
5220         ad_data.ad_id = rule->location;
5221
5222         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5223                 ad_data.drop_packet = true;
5224                 ad_data.forward_to_direct_queue = false;
5225                 ad_data.queue_id = 0;
5226         } else {
5227                 ad_data.drop_packet = false;
5228                 ad_data.forward_to_direct_queue = true;
5229                 ad_data.queue_id = rule->queue_id;
5230         }
5231
5232         ad_data.use_counter = false;
5233         ad_data.counter_id = 0;
5234
5235         ad_data.use_next_stage = false;
5236         ad_data.next_input_key = 0;
5237
5238         ad_data.write_rule_id_to_bd = true;
5239         ad_data.rule_id = rule->location;
5240
5241         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5242 }
5243
5244 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5245                                struct ethtool_rx_flow_spec *fs, u32 *unused)
5246 {
5247         struct ethtool_tcpip4_spec *tcp_ip4_spec;
5248         struct ethtool_usrip4_spec *usr_ip4_spec;
5249         struct ethtool_tcpip6_spec *tcp_ip6_spec;
5250         struct ethtool_usrip6_spec *usr_ip6_spec;
5251         struct ethhdr *ether_spec;
5252
5253         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5254                 return -EINVAL;
5255
5256         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5257                 return -EOPNOTSUPP;
5258
5259         if ((fs->flow_type & FLOW_EXT) &&
5260             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5261                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5262                 return -EOPNOTSUPP;
5263         }
5264
5265         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5266         case SCTP_V4_FLOW:
5267         case TCP_V4_FLOW:
5268         case UDP_V4_FLOW:
5269                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5270                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5271
5272                 if (!tcp_ip4_spec->ip4src)
5273                         *unused |= BIT(INNER_SRC_IP);
5274
5275                 if (!tcp_ip4_spec->ip4dst)
5276                         *unused |= BIT(INNER_DST_IP);
5277
5278                 if (!tcp_ip4_spec->psrc)
5279                         *unused |= BIT(INNER_SRC_PORT);
5280
5281                 if (!tcp_ip4_spec->pdst)
5282                         *unused |= BIT(INNER_DST_PORT);
5283
5284                 if (!tcp_ip4_spec->tos)
5285                         *unused |= BIT(INNER_IP_TOS);
5286
5287                 break;
5288         case IP_USER_FLOW:
5289                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5290                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5291                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5292
5293                 if (!usr_ip4_spec->ip4src)
5294                         *unused |= BIT(INNER_SRC_IP);
5295
5296                 if (!usr_ip4_spec->ip4dst)
5297                         *unused |= BIT(INNER_DST_IP);
5298
5299                 if (!usr_ip4_spec->tos)
5300                         *unused |= BIT(INNER_IP_TOS);
5301
5302                 if (!usr_ip4_spec->proto)
5303                         *unused |= BIT(INNER_IP_PROTO);
5304
5305                 if (usr_ip4_spec->l4_4_bytes)
5306                         return -EOPNOTSUPP;
5307
5308                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5309                         return -EOPNOTSUPP;
5310
5311                 break;
5312         case SCTP_V6_FLOW:
5313         case TCP_V6_FLOW:
5314         case UDP_V6_FLOW:
5315                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5316                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5317                         BIT(INNER_IP_TOS);
5318
5319                 /* check whether src/dst ip address used */
5320                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5321                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5322                         *unused |= BIT(INNER_SRC_IP);
5323
5324                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5325                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5326                         *unused |= BIT(INNER_DST_IP);
5327
5328                 if (!tcp_ip6_spec->psrc)
5329                         *unused |= BIT(INNER_SRC_PORT);
5330
5331                 if (!tcp_ip6_spec->pdst)
5332                         *unused |= BIT(INNER_DST_PORT);
5333
5334                 if (tcp_ip6_spec->tclass)
5335                         return -EOPNOTSUPP;
5336
5337                 break;
5338         case IPV6_USER_FLOW:
5339                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5340                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5341                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5342                         BIT(INNER_DST_PORT);
5343
5344                 /* check whether src/dst ip address used */
5345                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5346                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5347                         *unused |= BIT(INNER_SRC_IP);
5348
5349                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5350                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5351                         *unused |= BIT(INNER_DST_IP);
5352
5353                 if (!usr_ip6_spec->l4_proto)
5354                         *unused |= BIT(INNER_IP_PROTO);
5355
5356                 if (usr_ip6_spec->tclass)
5357                         return -EOPNOTSUPP;
5358
5359                 if (usr_ip6_spec->l4_4_bytes)
5360                         return -EOPNOTSUPP;
5361
5362                 break;
5363         case ETHER_FLOW:
5364                 ether_spec = &fs->h_u.ether_spec;
5365                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5366                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5367                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5368
5369                 if (is_zero_ether_addr(ether_spec->h_source))
5370                         *unused |= BIT(INNER_SRC_MAC);
5371
5372                 if (is_zero_ether_addr(ether_spec->h_dest))
5373                         *unused |= BIT(INNER_DST_MAC);
5374
5375                 if (!ether_spec->h_proto)
5376                         *unused |= BIT(INNER_ETH_TYPE);
5377
5378                 break;
5379         default:
5380                 return -EOPNOTSUPP;
5381         }
5382
5383         if ((fs->flow_type & FLOW_EXT)) {
5384                 if (fs->h_ext.vlan_etype)
5385                         return -EOPNOTSUPP;
5386                 if (!fs->h_ext.vlan_tci)
5387                         *unused |= BIT(INNER_VLAN_TAG_FST);
5388
5389                 if (fs->m_ext.vlan_tci) {
5390                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5391                                 return -EINVAL;
5392                 }
5393         } else {
5394                 *unused |= BIT(INNER_VLAN_TAG_FST);
5395         }
5396
5397         if (fs->flow_type & FLOW_MAC_EXT) {
5398                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5399                         return -EOPNOTSUPP;
5400
5401                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5402                         *unused |= BIT(INNER_DST_MAC);
5403                 else
5404                         *unused &= ~(BIT(INNER_DST_MAC));
5405         }
5406
5407         return 0;
5408 }
5409
5410 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5411 {
5412         struct hclge_fd_rule *rule = NULL;
5413         struct hlist_node *node2;
5414
5415         spin_lock_bh(&hdev->fd_rule_lock);
5416         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5417                 if (rule->location >= location)
5418                         break;
5419         }
5420
5421         spin_unlock_bh(&hdev->fd_rule_lock);
5422
5423         return rule && rule->location == location;
5424 }
5425
5426 /* the caller must hold fd_rule_lock before calling this function */
5427 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5428                                      struct hclge_fd_rule *new_rule,
5429                                      u16 location,
5430                                      bool is_add)
5431 {
5432         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5433         struct hlist_node *node2;
5434
5435         if (is_add && !new_rule)
5436                 return -EINVAL;
5437
5438         hlist_for_each_entry_safe(rule, node2,
5439                                   &hdev->fd_rule_list, rule_node) {
5440                 if (rule->location >= location)
5441                         break;
5442                 parent = rule;
5443         }
5444
5445         if (rule && rule->location == location) {
5446                 hlist_del(&rule->rule_node);
5447                 kfree(rule);
5448                 hdev->hclge_fd_rule_num--;
5449
5450                 if (!is_add) {
5451                         if (!hdev->hclge_fd_rule_num)
5452                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5453                         clear_bit(location, hdev->fd_bmap);
5454
5455                         return 0;
5456                 }
5457         } else if (!is_add) {
5458                 dev_err(&hdev->pdev->dev,
5459                         "delete fail, rule %u does not exist\n",
5460                         location);
5461                 return -EINVAL;
5462         }
5463
5464         INIT_HLIST_NODE(&new_rule->rule_node);
5465
5466         if (parent)
5467                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5468         else
5469                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5470
5471         set_bit(location, hdev->fd_bmap);
5472         hdev->hclge_fd_rule_num++;
5473         hdev->fd_active_type = new_rule->rule_type;
5474
5475         return 0;
5476 }
5477
5478 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5479                               struct ethtool_rx_flow_spec *fs,
5480                               struct hclge_fd_rule *rule)
5481 {
5482         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5483
5484         switch (flow_type) {
5485         case SCTP_V4_FLOW:
5486         case TCP_V4_FLOW:
5487         case UDP_V4_FLOW:
5488                 rule->tuples.src_ip[IPV4_INDEX] =
5489                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5490                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5491                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5492
5493                 rule->tuples.dst_ip[IPV4_INDEX] =
5494                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5495                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5496                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5497
5498                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5499                 rule->tuples_mask.src_port =
5500                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5501
5502                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5503                 rule->tuples_mask.dst_port =
5504                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5505
5506                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5507                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5508
5509                 rule->tuples.ether_proto = ETH_P_IP;
5510                 rule->tuples_mask.ether_proto = 0xFFFF;
5511
5512                 break;
5513         case IP_USER_FLOW:
5514                 rule->tuples.src_ip[IPV4_INDEX] =
5515                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5516                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5517                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5518
5519                 rule->tuples.dst_ip[IPV4_INDEX] =
5520                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5521                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5522                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5523
5524                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5525                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5526
5527                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5528                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5529
5530                 rule->tuples.ether_proto = ETH_P_IP;
5531                 rule->tuples_mask.ether_proto = 0xFFFF;
5532
5533                 break;
5534         case SCTP_V6_FLOW:
5535         case TCP_V6_FLOW:
5536         case UDP_V6_FLOW:
5537                 be32_to_cpu_array(rule->tuples.src_ip,
5538                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5539                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5540                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5541
5542                 be32_to_cpu_array(rule->tuples.dst_ip,
5543                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5544                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5545                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5546
5547                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5548                 rule->tuples_mask.src_port =
5549                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5550
5551                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5552                 rule->tuples_mask.dst_port =
5553                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5554
5555                 rule->tuples.ether_proto = ETH_P_IPV6;
5556                 rule->tuples_mask.ether_proto = 0xFFFF;
5557
5558                 break;
5559         case IPV6_USER_FLOW:
5560                 be32_to_cpu_array(rule->tuples.src_ip,
5561                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5562                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5563                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5564
5565                 be32_to_cpu_array(rule->tuples.dst_ip,
5566                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5567                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5568                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5569
5570                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5571                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5572
5573                 rule->tuples.ether_proto = ETH_P_IPV6;
5574                 rule->tuples_mask.ether_proto = 0xFFFF;
5575
5576                 break;
5577         case ETHER_FLOW:
5578                 ether_addr_copy(rule->tuples.src_mac,
5579                                 fs->h_u.ether_spec.h_source);
5580                 ether_addr_copy(rule->tuples_mask.src_mac,
5581                                 fs->m_u.ether_spec.h_source);
5582
5583                 ether_addr_copy(rule->tuples.dst_mac,
5584                                 fs->h_u.ether_spec.h_dest);
5585                 ether_addr_copy(rule->tuples_mask.dst_mac,
5586                                 fs->m_u.ether_spec.h_dest);
5587
5588                 rule->tuples.ether_proto =
5589                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5590                 rule->tuples_mask.ether_proto =
5591                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5592
5593                 break;
5594         default:
5595                 return -EOPNOTSUPP;
5596         }
5597
5598         switch (flow_type) {
5599         case SCTP_V4_FLOW:
5600         case SCTP_V6_FLOW:
5601                 rule->tuples.ip_proto = IPPROTO_SCTP;
5602                 rule->tuples_mask.ip_proto = 0xFF;
5603                 break;
5604         case TCP_V4_FLOW:
5605         case TCP_V6_FLOW:
5606                 rule->tuples.ip_proto = IPPROTO_TCP;
5607                 rule->tuples_mask.ip_proto = 0xFF;
5608                 break;
5609         case UDP_V4_FLOW:
5610         case UDP_V6_FLOW:
5611                 rule->tuples.ip_proto = IPPROTO_UDP;
5612                 rule->tuples_mask.ip_proto = 0xFF;
5613                 break;
5614         default:
5615                 break;
5616         }
5617
5618         if ((fs->flow_type & FLOW_EXT)) {
5619                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5620                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5621         }
5622
5623         if (fs->flow_type & FLOW_MAC_EXT) {
5624                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5625                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5626         }
5627
5628         return 0;
5629 }
5630
5631 /* the caller must hold fd_rule_lock before calling this function */
5632 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5633                                 struct hclge_fd_rule *rule)
5634 {
5635         int ret;
5636
5637         if (!rule) {
5638                 dev_err(&hdev->pdev->dev,
5639                         "The flow director rule is NULL\n");
5640                 return -EINVAL;
5641         }
5642
5643         /* it never fails here, so there is no need to check the return value */
5644         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5645
5646         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5647         if (ret)
5648                 goto clear_rule;
5649
5650         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5651         if (ret)
5652                 goto clear_rule;
5653
5654         return 0;
5655
5656 clear_rule:
5657         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5658         return ret;
5659 }
5660
5661 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5662                               struct ethtool_rxnfc *cmd)
5663 {
5664         struct hclge_vport *vport = hclge_get_vport(handle);
5665         struct hclge_dev *hdev = vport->back;
5666         u16 dst_vport_id = 0, q_index = 0;
5667         struct ethtool_rx_flow_spec *fs;
5668         struct hclge_fd_rule *rule;
5669         u32 unused = 0;
5670         u8 action;
5671         int ret;
5672
5673         if (!hnae3_dev_fd_supported(hdev))
5674                 return -EOPNOTSUPP;
5675
5676         if (!hdev->fd_en) {
5677                 dev_warn(&hdev->pdev->dev,
5678                          "Please enable flow director first\n");
5679                 return -EOPNOTSUPP;
5680         }
5681
5682         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5683
5684         ret = hclge_fd_check_spec(hdev, fs, &unused);
5685         if (ret) {
5686                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5687                 return ret;
5688         }
5689
5690         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5691                 action = HCLGE_FD_ACTION_DROP_PACKET;
5692         } else {
5693                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5694                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5695                 u16 tqps;
5696
5697                 if (vf > hdev->num_req_vfs) {
5698                         dev_err(&hdev->pdev->dev,
5699                                 "Error: vf id (%u) > max vf num (%u)\n",
5700                                 vf, hdev->num_req_vfs);
5701                         return -EINVAL;
5702                 }
5703
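        /* vf index 0 targets the calling (PF) vport; otherwise use the
         * requested VF's vport id and tqp count for the checks below.
         */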
5704                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5705                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5706
5707                 if (ring >= tqps) {
5708                         dev_err(&hdev->pdev->dev,
5709                                 "Error: queue id (%u) > max tqp num (%u)\n",
5710                                 ring, tqps - 1);
5711                         return -EINVAL;
5712                 }
5713
5714                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5715                 q_index = ring;
5716         }
5717
5718         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5719         if (!rule)
5720                 return -ENOMEM;
5721
5722         ret = hclge_fd_get_tuple(hdev, fs, rule);
5723         if (ret) {
5724                 kfree(rule);
5725                 return ret;
5726         }
5727
5728         rule->flow_type = fs->flow_type;
5729
5730         rule->location = fs->location;
5731         rule->unused_tuple = unused;
5732         rule->vf_id = dst_vport_id;
5733         rule->queue_id = q_index;
5734         rule->action = action;
5735         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5736
5737         /* to avoid rule conflicts, clear all existing arfs rules when the
5738          * user configures a rule via ethtool
5739          */
5740         hclge_clear_arfs_rules(handle);
5741
5742         spin_lock_bh(&hdev->fd_rule_lock);
5743         ret = hclge_fd_config_rule(hdev, rule);
5744
5745         spin_unlock_bh(&hdev->fd_rule_lock);
5746
5747         return ret;
5748 }
5749
5750 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5751                               struct ethtool_rxnfc *cmd)
5752 {
5753         struct hclge_vport *vport = hclge_get_vport(handle);
5754         struct hclge_dev *hdev = vport->back;
5755         struct ethtool_rx_flow_spec *fs;
5756         int ret;
5757
5758         if (!hnae3_dev_fd_supported(hdev))
5759                 return -EOPNOTSUPP;
5760
5761         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5762
5763         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5764                 return -EINVAL;
5765
5766         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5767                 dev_err(&hdev->pdev->dev,
5768                         "Delete fail, rule %u does not exist\n", fs->location);
5769                 return -ENOENT;
5770         }
5771
5772         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5773                                    NULL, false);
5774         if (ret)
5775                 return ret;
5776
5777         spin_lock_bh(&hdev->fd_rule_lock);
5778         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5779
5780         spin_unlock_bh(&hdev->fd_rule_lock);
5781
5782         return ret;
5783 }
5784
5785 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5786                                      bool clear_list)
5787 {
5788         struct hclge_vport *vport = hclge_get_vport(handle);
5789         struct hclge_dev *hdev = vport->back;
5790         struct hclge_fd_rule *rule;
5791         struct hlist_node *node;
5792         u16 location;
5793
5794         if (!hnae3_dev_fd_supported(hdev))
5795                 return;
5796
5797         spin_lock_bh(&hdev->fd_rule_lock);
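        /* remove every configured rule from the hardware TCAM; the software
         * rule list and counters are only cleared when clear_list is set.
         */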
5798         for_each_set_bit(location, hdev->fd_bmap,
5799                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5800                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5801                                      NULL, false);
5802
5803         if (clear_list) {
5804                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5805                                           rule_node) {
5806                         hlist_del(&rule->rule_node);
5807                         kfree(rule);
5808                 }
5809                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5810                 hdev->hclge_fd_rule_num = 0;
5811                 bitmap_zero(hdev->fd_bmap,
5812                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5813         }
5814
5815         spin_unlock_bh(&hdev->fd_rule_lock);
5816 }
5817
5818 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5819 {
5820         struct hclge_vport *vport = hclge_get_vport(handle);
5821         struct hclge_dev *hdev = vport->back;
5822         struct hclge_fd_rule *rule;
5823         struct hlist_node *node;
5824         int ret;
5825
5826         /* Return ok here, because the reset error handling checks this
5827          * return value. If an error is returned here, the reset process
5828          * will fail.
5829          */
5830         if (!hnae3_dev_fd_supported(hdev))
5831                 return 0;
5832
5833         /* if fd is disabled, the rules should not be restored during reset */
5834         if (!hdev->fd_en)
5835                 return 0;
5836
5837         spin_lock_bh(&hdev->fd_rule_lock);
5838         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5839                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5840                 if (!ret)
5841                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5842
5843                 if (ret) {
5844                         dev_warn(&hdev->pdev->dev,
5845                                  "Restore rule %u failed, remove it\n",
5846                                  rule->location);
5847                         clear_bit(rule->location, hdev->fd_bmap);
5848                         hlist_del(&rule->rule_node);
5849                         kfree(rule);
5850                         hdev->hclge_fd_rule_num--;
5851                 }
5852         }
5853
5854         if (hdev->hclge_fd_rule_num)
5855                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5856
5857         spin_unlock_bh(&hdev->fd_rule_lock);
5858
5859         return 0;
5860 }
5861
5862 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5863                                  struct ethtool_rxnfc *cmd)
5864 {
5865         struct hclge_vport *vport = hclge_get_vport(handle);
5866         struct hclge_dev *hdev = vport->back;
5867
5868         if (!hnae3_dev_fd_supported(hdev))
5869                 return -EOPNOTSUPP;
5870
5871         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5872         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5873
5874         return 0;
5875 }
5876
5877 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5878                                   struct ethtool_rxnfc *cmd)
5879 {
5880         struct hclge_vport *vport = hclge_get_vport(handle);
5881         struct hclge_fd_rule *rule = NULL;
5882         struct hclge_dev *hdev = vport->back;
5883         struct ethtool_rx_flow_spec *fs;
5884         struct hlist_node *node2;
5885
5886         if (!hnae3_dev_fd_supported(hdev))
5887                 return -EOPNOTSUPP;
5888
5889         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5890
5891         spin_lock_bh(&hdev->fd_rule_lock);
5892
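        /* the rule list is kept sorted by location, so stop at the first
         * rule whose location is not lower than the requested one.
         */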
5893         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5894                 if (rule->location >= fs->location)
5895                         break;
5896         }
5897
5898         if (!rule || fs->location != rule->location) {
5899                 spin_unlock_bh(&hdev->fd_rule_lock);
5900
5901                 return -ENOENT;
5902         }
5903
5904         fs->flow_type = rule->flow_type;
5905         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5906         case SCTP_V4_FLOW:
5907         case TCP_V4_FLOW:
5908         case UDP_V4_FLOW:
5909                 fs->h_u.tcp_ip4_spec.ip4src =
5910                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5911                 fs->m_u.tcp_ip4_spec.ip4src =
5912                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5913                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5914
5915                 fs->h_u.tcp_ip4_spec.ip4dst =
5916                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5917                 fs->m_u.tcp_ip4_spec.ip4dst =
5918                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5919                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5920
5921                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5922                 fs->m_u.tcp_ip4_spec.psrc =
5923                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5924                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5925
5926                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5927                 fs->m_u.tcp_ip4_spec.pdst =
5928                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5929                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5930
5931                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5932                 fs->m_u.tcp_ip4_spec.tos =
5933                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5934                                 0 : rule->tuples_mask.ip_tos;
5935
5936                 break;
5937         case IP_USER_FLOW:
5938                 fs->h_u.usr_ip4_spec.ip4src =
5939                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5940                 fs->m_u.usr_ip4_spec.ip4src =
5941                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5942                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5943
5944                 fs->h_u.usr_ip4_spec.ip4dst =
5945                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5946                 fs->m_u.usr_ip4_spec.ip4dst =
5947                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5948                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5949
5950                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5951                 fs->m_u.usr_ip4_spec.tos =
5952                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5953                                 0 : rule->tuples_mask.ip_tos;
5954
5955                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5956                 fs->m_u.usr_ip4_spec.proto =
5957                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5958                                 0 : rule->tuples_mask.ip_proto;
5959
5960                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5961
5962                 break;
5963         case SCTP_V6_FLOW:
5964         case TCP_V6_FLOW:
5965         case UDP_V6_FLOW:
5966                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5967                                   rule->tuples.src_ip, IPV6_SIZE);
5968                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5969                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5970                                sizeof(int) * IPV6_SIZE);
5971                 else
5972                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5973                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5974
5975                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5976                                   rule->tuples.dst_ip, IPV6_SIZE);
5977                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5978                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5979                                sizeof(int) * IPV6_SIZE);
5980                 else
5981                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5982                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5983
5984                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5985                 fs->m_u.tcp_ip6_spec.psrc =
5986                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5987                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5988
5989                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5990                 fs->m_u.tcp_ip6_spec.pdst =
5991                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5992                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5993
5994                 break;
5995         case IPV6_USER_FLOW:
5996                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5997                                   rule->tuples.src_ip, IPV6_SIZE);
5998                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5999                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6000                                sizeof(int) * IPV6_SIZE);
6001                 else
6002                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6003                                           rule->tuples_mask.src_ip, IPV6_SIZE);
6004
6005                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6006                                   rule->tuples.dst_ip, IPV6_SIZE);
6007                 if (rule->unused_tuple & BIT(INNER_DST_IP))
6008                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6009                                sizeof(int) * IPV6_SIZE);
6010                 else
6011                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6012                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
6013
6014                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6015                 fs->m_u.usr_ip6_spec.l4_proto =
6016                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6017                                 0 : rule->tuples_mask.ip_proto;
6018
6019                 break;
6020         case ETHER_FLOW:
6021                 ether_addr_copy(fs->h_u.ether_spec.h_source,
6022                                 rule->tuples.src_mac);
6023                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6024                         eth_zero_addr(fs->m_u.ether_spec.h_source);
6025                 else
6026                         ether_addr_copy(fs->m_u.ether_spec.h_source,
6027                                         rule->tuples_mask.src_mac);
6028
6029                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6030                                 rule->tuples.dst_mac);
6031                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6032                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6033                 else
6034                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6035                                         rule->tuples_mask.dst_mac);
6036
6037                 fs->h_u.ether_spec.h_proto =
6038                                 cpu_to_be16(rule->tuples.ether_proto);
6039                 fs->m_u.ether_spec.h_proto =
6040                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6041                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6042
6043                 break;
6044         default:
6045                 spin_unlock_bh(&hdev->fd_rule_lock);
6046                 return -EOPNOTSUPP;
6047         }
6048
6049         if (fs->flow_type & FLOW_EXT) {
6050                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6051                 fs->m_ext.vlan_tci =
6052                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6053                                 cpu_to_be16(VLAN_VID_MASK) :
6054                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6055         }
6056
6057         if (fs->flow_type & FLOW_MAC_EXT) {
6058                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6059                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6060                         eth_zero_addr(fs->m_ext.h_dest);
6061                 else
6062                         ether_addr_copy(fs->m_ext.h_dest,
6063                                         rule->tuples_mask.dst_mac);
6064         }
6065
6066         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6067                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6068         } else {
6069                 u64 vf_id;
6070
6071                 fs->ring_cookie = rule->queue_id;
6072                 vf_id = rule->vf_id;
6073                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6074                 fs->ring_cookie |= vf_id;
6075         }
6076
6077         spin_unlock_bh(&hdev->fd_rule_lock);
6078
6079         return 0;
6080 }
6081
6082 static int hclge_get_all_rules(struct hnae3_handle *handle,
6083                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6084 {
6085         struct hclge_vport *vport = hclge_get_vport(handle);
6086         struct hclge_dev *hdev = vport->back;
6087         struct hclge_fd_rule *rule;
6088         struct hlist_node *node2;
6089         int cnt = 0;
6090
6091         if (!hnae3_dev_fd_supported(hdev))
6092                 return -EOPNOTSUPP;
6093
6094         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6095
6096         spin_lock_bh(&hdev->fd_rule_lock);
6097         hlist_for_each_entry_safe(rule, node2,
6098                                   &hdev->fd_rule_list, rule_node) {
6099                 if (cnt == cmd->rule_cnt) {
6100                         spin_unlock_bh(&hdev->fd_rule_lock);
6101                         return -EMSGSIZE;
6102                 }
6103
6104                 rule_locs[cnt] = rule->location;
6105                 cnt++;
6106         }
6107
6108         spin_unlock_bh(&hdev->fd_rule_lock);
6109
6110         cmd->rule_cnt = cnt;
6111
6112         return 0;
6113 }
6114
6115 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6116                                      struct hclge_fd_rule_tuples *tuples)
6117 {
6118 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6119 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6120
6121         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6122         tuples->ip_proto = fkeys->basic.ip_proto;
6123         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6124
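        /* an IPv4 address only occupies the last u32 of the src/dst ip
         * arrays (index 3, i.e. IPV4_INDEX); IPv6 fills all IPV6_SIZE words.
         */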
6125         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6126                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6127                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6128         } else {
6129                 int i;
6130
6131                 for (i = 0; i < IPV6_SIZE; i++) {
6132                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6133                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6134                 }
6135         }
6136 }
6137
6138 /* traverse all rules and check whether an existing rule has the same tuples */
6139 static struct hclge_fd_rule *
6140 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6141                           const struct hclge_fd_rule_tuples *tuples)
6142 {
6143         struct hclge_fd_rule *rule = NULL;
6144         struct hlist_node *node;
6145
6146         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6147                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6148                         return rule;
6149         }
6150
6151         return NULL;
6152 }
6153
6154 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6155                                      struct hclge_fd_rule *rule)
6156 {
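        /* arfs rules match on ether proto, ip proto, src/dst ip and dst port
         * only; mac, vlan, tos and src port are marked as unused tuples.
         */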
6157         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6158                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6159                              BIT(INNER_SRC_PORT);
6160         rule->action = 0;
6161         rule->vf_id = 0;
6162         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6163         if (tuples->ether_proto == ETH_P_IP) {
6164                 if (tuples->ip_proto == IPPROTO_TCP)
6165                         rule->flow_type = TCP_V4_FLOW;
6166                 else
6167                         rule->flow_type = UDP_V4_FLOW;
6168         } else {
6169                 if (tuples->ip_proto == IPPROTO_TCP)
6170                         rule->flow_type = TCP_V6_FLOW;
6171                 else
6172                         rule->flow_type = UDP_V6_FLOW;
6173         }
6174         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6175         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6176 }
6177
6178 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6179                                       u16 flow_id, struct flow_keys *fkeys)
6180 {
6181         struct hclge_vport *vport = hclge_get_vport(handle);
6182         struct hclge_fd_rule_tuples new_tuples;
6183         struct hclge_dev *hdev = vport->back;
6184         struct hclge_fd_rule *rule;
6185         u16 tmp_queue_id;
6186         u16 bit_id;
6187         int ret;
6188
6189         if (!hnae3_dev_fd_supported(hdev))
6190                 return -EOPNOTSUPP;
6191
6192         memset(&new_tuples, 0, sizeof(new_tuples));
6193         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6194
6195         spin_lock_bh(&hdev->fd_rule_lock);
6196
6197         /* when an fd rule added by the user already exists,
6198          * arfs should not work
6199          */
6200         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6201                 spin_unlock_bh(&hdev->fd_rule_lock);
6202
6203                 return -EOPNOTSUPP;
6204         }
6205
6206         /* check whether a flow director filter already exists for this flow:
6207          * if not, create a new filter for it;
6208          * if a filter exists with a different queue id, modify the filter;
6209          * if a filter exists with the same queue id, do nothing
6210          */
6211         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6212         if (!rule) {
6213                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6214                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6215                         spin_unlock_bh(&hdev->fd_rule_lock);
6216
6217                         return -ENOSPC;
6218                 }
6219
6220                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6221                 if (!rule) {
6222                         spin_unlock_bh(&hdev->fd_rule_lock);
6223
6224                         return -ENOMEM;
6225                 }
6226
6227                 set_bit(bit_id, hdev->fd_bmap);
6228                 rule->location = bit_id;
6229                 rule->flow_id = flow_id;
6230                 rule->queue_id = queue_id;
6231                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6232                 ret = hclge_fd_config_rule(hdev, rule);
6233
6234                 spin_unlock_bh(&hdev->fd_rule_lock);
6235
6236                 if (ret)
6237                         return ret;
6238
6239                 return rule->location;
6240         }
6241
6242         spin_unlock_bh(&hdev->fd_rule_lock);
6243
6244         if (rule->queue_id == queue_id)
6245                 return rule->location;
6246
6247         tmp_queue_id = rule->queue_id;
6248         rule->queue_id = queue_id;
6249         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6250         if (ret) {
6251                 rule->queue_id = tmp_queue_id;
6252                 return ret;
6253         }
6254
6255         return rule->location;
6256 }
6257
6258 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6259 {
6260 #ifdef CONFIG_RFS_ACCEL
6261         struct hnae3_handle *handle = &hdev->vport[0].nic;
6262         struct hclge_fd_rule *rule;
6263         struct hlist_node *node;
6264         HLIST_HEAD(del_list);
6265
6266         spin_lock_bh(&hdev->fd_rule_lock);
6267         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6268                 spin_unlock_bh(&hdev->fd_rule_lock);
6269                 return;
6270         }
6271         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6272                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6273                                         rule->flow_id, rule->location)) {
6274                         hlist_del_init(&rule->rule_node);
6275                         hlist_add_head(&rule->rule_node, &del_list);
6276                         hdev->hclge_fd_rule_num--;
6277                         clear_bit(rule->location, hdev->fd_bmap);
6278                 }
6279         }
6280         spin_unlock_bh(&hdev->fd_rule_lock);
6281
6282         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6283                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6284                                      rule->location, NULL, false);
6285                 kfree(rule);
6286         }
6287 #endif
6288 }
6289
6290 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6291 {
6292 #ifdef CONFIG_RFS_ACCEL
6293         struct hclge_vport *vport = hclge_get_vport(handle);
6294         struct hclge_dev *hdev = vport->back;
6295
6296         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6297                 hclge_del_all_fd_entries(handle, true);
6298 #endif
6299 }
6300
6301 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6302 {
6303         struct hclge_vport *vport = hclge_get_vport(handle);
6304         struct hclge_dev *hdev = vport->back;
6305
6306         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6307                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6308 }
6309
6310 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6311 {
6312         struct hclge_vport *vport = hclge_get_vport(handle);
6313         struct hclge_dev *hdev = vport->back;
6314
6315         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6316 }
6317
6318 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6319 {
6320         struct hclge_vport *vport = hclge_get_vport(handle);
6321         struct hclge_dev *hdev = vport->back;
6322
6323         return hdev->rst_stats.hw_reset_done_cnt;
6324 }
6325
6326 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6327 {
6328         struct hclge_vport *vport = hclge_get_vport(handle);
6329         struct hclge_dev *hdev = vport->back;
6330         bool clear;
6331
6332         hdev->fd_en = enable;
6333         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6334         if (!enable)
6335                 hclge_del_all_fd_entries(handle, clear);
6336         else
6337                 hclge_restore_fd_entries(handle);
6338 }
6339
6340 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6341 {
6342         struct hclge_desc desc;
6343         struct hclge_config_mac_mode_cmd *req =
6344                 (struct hclge_config_mac_mode_cmd *)desc.data;
6345         u32 loop_en = 0;
6346         int ret;
6347
6348         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6349
6350         if (enable) {
6351                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6352                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6353                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6354                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6355                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6356                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6357                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6358                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6359                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6360                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6361         }
6362
6363         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6364
6365         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6366         if (ret)
6367                 dev_err(&hdev->pdev->dev,
6368                         "mac enable fail, ret =%d.\n", ret);
6369 }
6370
6371 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6372                                      u8 switch_param, u8 param_mask)
6373 {
6374         struct hclge_mac_vlan_switch_cmd *req;
6375         struct hclge_desc desc;
6376         u32 func_id;
6377         int ret;
6378
6379         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6380         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6381
6382         /* read current config parameter */
6383         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6384                                    true);
6385         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6386         req->func_id = cpu_to_le32(func_id);
6387
6388         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6389         if (ret) {
6390                 dev_err(&hdev->pdev->dev,
6391                         "read mac vlan switch parameter fail, ret = %d\n", ret);
6392                 return ret;
6393         }
6394
6395         /* modify and write new config parameter */
6396         hclge_cmd_reuse_desc(&desc, false);
6397         req->switch_param = (req->switch_param & param_mask) | switch_param;
6398         req->param_mask = param_mask;
6399
6400         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6401         if (ret)
6402                 dev_err(&hdev->pdev->dev,
6403                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6404         return ret;
6405 }
6406
6407 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6408                                        int link_ret)
6409 {
6410 #define HCLGE_PHY_LINK_STATUS_NUM  200
6411
6412         struct phy_device *phydev = hdev->hw.mac.phydev;
6413         int i = 0;
6414         int ret;
6415
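        /* poll the phy link status every HCLGE_LINK_STATUS_MS (10 ms), up to
         * 200 times, i.e. roughly two seconds.
         */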
6416         do {
6417                 ret = phy_read_status(phydev);
6418                 if (ret) {
6419                         dev_err(&hdev->pdev->dev,
6420                                 "phy update link status fail, ret = %d\n", ret);
6421                         return;
6422                 }
6423
6424                 if (phydev->link == link_ret)
6425                         break;
6426
6427                 msleep(HCLGE_LINK_STATUS_MS);
6428         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6429 }
6430
6431 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6432 {
6433 #define HCLGE_MAC_LINK_STATUS_NUM  100
6434
6435         int i = 0;
6436         int ret;
6437
6438         do {
6439                 ret = hclge_get_mac_link_status(hdev);
6440                 if (ret < 0)
6441                         return ret;
6442                 else if (ret == link_ret)
6443                         return 0;
6444
6445                 msleep(HCLGE_LINK_STATUS_MS);
6446         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6447         return -EBUSY;
6448 }
6449
6450 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6451                                           bool is_phy)
6452 {
6453 #define HCLGE_LINK_STATUS_DOWN 0
6454 #define HCLGE_LINK_STATUS_UP   1
6455
6456         int link_ret;
6457
6458         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6459
6460         if (is_phy)
6461                 hclge_phy_link_status_wait(hdev, link_ret);
6462
6463         return hclge_mac_link_status_wait(hdev, link_ret);
6464 }
6465
6466 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6467 {
6468         struct hclge_config_mac_mode_cmd *req;
6469         struct hclge_desc desc;
6470         u32 loop_en;
6471         int ret;
6472
6473         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6474         /* 1 Read out the MAC mode config at first */
6475         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6476         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6477         if (ret) {
6478                 dev_err(&hdev->pdev->dev,
6479                         "mac loopback get fail, ret =%d.\n", ret);
6480                 return ret;
6481         }
6482
6483         /* 2 Then setup the loopback flag */
6484         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6485         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6486         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6487         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6488
6489         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6490
6491         /* 3 Config the mac work mode with the loopback flag
6492          * and its original configuration parameters
6493          */
6494         hclge_cmd_reuse_desc(&desc, false);
6495         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6496         if (ret)
6497                 dev_err(&hdev->pdev->dev,
6498                         "mac loopback set fail, ret =%d.\n", ret);
6499         return ret;
6500 }
6501
6502 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6503                                      enum hnae3_loop loop_mode)
6504 {
6505 #define HCLGE_SERDES_RETRY_MS   10
6506 #define HCLGE_SERDES_RETRY_NUM  100
6507
6508         struct hclge_serdes_lb_cmd *req;
6509         struct hclge_desc desc;
6510         int ret, i = 0;
6511         u8 loop_mode_b;
6512
6513         req = (struct hclge_serdes_lb_cmd *)desc.data;
6514         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6515
6516         switch (loop_mode) {
6517         case HNAE3_LOOP_SERIAL_SERDES:
6518                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6519                 break;
6520         case HNAE3_LOOP_PARALLEL_SERDES:
6521                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6522                 break;
6523         default:
6524                 dev_err(&hdev->pdev->dev,
6525                         "unsupported serdes loopback mode %d\n", loop_mode);
6526                 return -ENOTSUPP;
6527         }
6528
6529         if (en) {
6530                 req->enable = loop_mode_b;
6531                 req->mask = loop_mode_b;
6532         } else {
6533                 req->mask = loop_mode_b;
6534         }
6535
6536         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6537         if (ret) {
6538                 dev_err(&hdev->pdev->dev,
6539                         "serdes loopback set fail, ret = %d\n", ret);
6540                 return ret;
6541         }
6542
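        /* poll until the firmware reports the serdes loopback config as done,
         * or the retry limit (100 polls of 10 ms) is reached.
         */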
6543         do {
6544                 msleep(HCLGE_SERDES_RETRY_MS);
6545                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6546                                            true);
6547                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6548                 if (ret) {
6549                         dev_err(&hdev->pdev->dev,
6550                                 "serdes loopback get fail, ret = %d\n", ret);
6551                         return ret;
6552                 }
6553         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6554                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6555
6556         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6557                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6558                 return -EBUSY;
6559         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6560                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6561                 return -EIO;
6562         }
6563         return ret;
6564 }
6565
6566 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6567                                      enum hnae3_loop loop_mode)
6568 {
6569         int ret;
6570
6571         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6572         if (ret)
6573                 return ret;
6574
6575         hclge_cfg_mac_mode(hdev, en);
6576
6577         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6578         if (ret)
6579                 dev_err(&hdev->pdev->dev,
6580                         "serdes loopback config mac mode timeout\n");
6581
6582         return ret;
6583 }
6584
6585 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6586                                      struct phy_device *phydev)
6587 {
6588         int ret;
6589
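        /* cycle the phy through suspend and resume so it is powered up and in
         * a known state before loopback is enabled.
         */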
6590         if (!phydev->suspended) {
6591                 ret = phy_suspend(phydev);
6592                 if (ret)
6593                         return ret;
6594         }
6595
6596         ret = phy_resume(phydev);
6597         if (ret)
6598                 return ret;
6599
6600         return phy_loopback(phydev, true);
6601 }
6602
6603 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6604                                       struct phy_device *phydev)
6605 {
6606         int ret;
6607
6608         ret = phy_loopback(phydev, false);
6609         if (ret)
6610                 return ret;
6611
6612         return phy_suspend(phydev);
6613 }
6614
6615 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6616 {
6617         struct phy_device *phydev = hdev->hw.mac.phydev;
6618         int ret;
6619
6620         if (!phydev)
6621                 return -ENOTSUPP;
6622
6623         if (en)
6624                 ret = hclge_enable_phy_loopback(hdev, phydev);
6625         else
6626                 ret = hclge_disable_phy_loopback(hdev, phydev);
6627         if (ret) {
6628                 dev_err(&hdev->pdev->dev,
6629                         "set phy loopback fail, ret = %d\n", ret);
6630                 return ret;
6631         }
6632
6633         hclge_cfg_mac_mode(hdev, en);
6634
6635         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6636         if (ret)
6637                 dev_err(&hdev->pdev->dev,
6638                         "phy loopback config mac mode timeout\n");
6639
6640         return ret;
6641 }
6642
6643 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6644                             int stream_id, bool enable)
6645 {
6646         struct hclge_desc desc;
6647         struct hclge_cfg_com_tqp_queue_cmd *req =
6648                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6649         int ret;
6650
6651         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6652         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6653         req->stream_id = cpu_to_le16(stream_id);
6654         if (enable)
6655                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6656
6657         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6658         if (ret)
6659                 dev_err(&hdev->pdev->dev,
6660                         "Tqp enable fail, status =%d.\n", ret);
6661         return ret;
6662 }
6663
6664 static int hclge_set_loopback(struct hnae3_handle *handle,
6665                               enum hnae3_loop loop_mode, bool en)
6666 {
6667         struct hclge_vport *vport = hclge_get_vport(handle);
6668         struct hnae3_knic_private_info *kinfo;
6669         struct hclge_dev *hdev = vport->back;
6670         int i, ret;
6671
6672         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6673          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6674          * the same, the packets are looped back in the SSU. If SSU loopback
6675          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6676          */
6677         if (hdev->pdev->revision >= 0x21) {
6678                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6679
6680                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6681                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6682                 if (ret)
6683                         return ret;
6684         }
6685
6686         switch (loop_mode) {
6687         case HNAE3_LOOP_APP:
6688                 ret = hclge_set_app_loopback(hdev, en);
6689                 break;
6690         case HNAE3_LOOP_SERIAL_SERDES:
6691         case HNAE3_LOOP_PARALLEL_SERDES:
6692                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6693                 break;
6694         case HNAE3_LOOP_PHY:
6695                 ret = hclge_set_phy_loopback(hdev, en);
6696                 break;
6697         default:
6698                 ret = -ENOTSUPP;
6699                 dev_err(&hdev->pdev->dev,
6700                         "loop_mode %d is not supported\n", loop_mode);
6701                 break;
6702         }
6703
6704         if (ret)
6705                 return ret;
6706
6707         kinfo = &vport->nic.kinfo;
6708         for (i = 0; i < kinfo->num_tqps; i++) {
6709                 ret = hclge_tqp_enable(hdev, i, 0, en);
6710                 if (ret)
6711                         return ret;
6712         }
6713
6714         return 0;
6715 }
6716
6717 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6718 {
6719         int ret;
6720
6721         ret = hclge_set_app_loopback(hdev, false);
6722         if (ret)
6723                 return ret;
6724
6725         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6726         if (ret)
6727                 return ret;
6728
6729         return hclge_cfg_serdes_loopback(hdev, false,
6730                                          HNAE3_LOOP_PARALLEL_SERDES);
6731 }
6732
6733 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6734 {
6735         struct hclge_vport *vport = hclge_get_vport(handle);
6736         struct hnae3_knic_private_info *kinfo;
6737         struct hnae3_queue *queue;
6738         struct hclge_tqp *tqp;
6739         int i;
6740
6741         kinfo = &vport->nic.kinfo;
6742         for (i = 0; i < kinfo->num_tqps; i++) {
6743                 queue = handle->kinfo.tqp[i];
6744                 tqp = container_of(queue, struct hclge_tqp, q);
6745                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6746         }
6747 }
6748
6749 static void hclge_flush_link_update(struct hclge_dev *hdev)
6750 {
6751 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
6752
6753         unsigned long last = hdev->serv_processed_cnt;
6754         int i = 0;
6755
6756         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6757                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6758                last == hdev->serv_processed_cnt)
6759                 usleep_range(1, 1);
6760 }
6761
6762 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6763 {
6764         struct hclge_vport *vport = hclge_get_vport(handle);
6765         struct hclge_dev *hdev = vport->back;
6766
6767         if (enable) {
6768                 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6769         } else {
6770                 /* Set the DOWN flag here to disable link updating */
6771                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6772
6773                 /* flush memory to make sure DOWN is seen by service task */
6774                 smp_mb__before_atomic();
6775                 hclge_flush_link_update(hdev);
6776         }
6777 }
6778
6779 static int hclge_ae_start(struct hnae3_handle *handle)
6780 {
6781         struct hclge_vport *vport = hclge_get_vport(handle);
6782         struct hclge_dev *hdev = vport->back;
6783
6784         /* mac enable */
6785         hclge_cfg_mac_mode(hdev, true);
6786         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6787         hdev->hw.mac.link = 0;
6788
6789         /* reset tqp stats */
6790         hclge_reset_tqp_stats(handle);
6791
6792         hclge_mac_start_phy(hdev);
6793
6794         return 0;
6795 }
6796
6797 static void hclge_ae_stop(struct hnae3_handle *handle)
6798 {
6799         struct hclge_vport *vport = hclge_get_vport(handle);
6800         struct hclge_dev *hdev = vport->back;
6801         int i;
6802
6803         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6804
6805         hclge_clear_arfs_rules(handle);
6806
6807         /* If it is not a PF reset, the firmware will disable the MAC,
6808          * so it only needs to stop the phy here.
6809          */
6810         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6811             hdev->reset_type != HNAE3_FUNC_RESET) {
6812                 hclge_mac_stop_phy(hdev);
6813                 hclge_update_link_status(hdev);
6814                 return;
6815         }
6816
6817         for (i = 0; i < handle->kinfo.num_tqps; i++)
6818                 hclge_reset_tqp(handle, i);
6819
6820         hclge_config_mac_tnl_int(hdev, false);
6821
6822         /* Mac disable */
6823         hclge_cfg_mac_mode(hdev, false);
6824
6825         hclge_mac_stop_phy(hdev);
6826
6827         /* reset tqp stats */
6828         hclge_reset_tqp_stats(handle);
6829         hclge_update_link_status(hdev);
6830 }
6831
6832 int hclge_vport_start(struct hclge_vport *vport)
6833 {
6834         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6835         vport->last_active_jiffies = jiffies;
6836         return 0;
6837 }
6838
6839 void hclge_vport_stop(struct hclge_vport *vport)
6840 {
6841         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6842 }
6843
6844 static int hclge_client_start(struct hnae3_handle *handle)
6845 {
6846         struct hclge_vport *vport = hclge_get_vport(handle);
6847
6848         return hclge_vport_start(vport);
6849 }
6850
6851 static void hclge_client_stop(struct hnae3_handle *handle)
6852 {
6853         struct hclge_vport *vport = hclge_get_vport(handle);
6854
6855         hclge_vport_stop(vport);
6856 }
6857
6858 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6859                                          u16 cmdq_resp, u8  resp_code,
6860                                          enum hclge_mac_vlan_tbl_opcode op)
6861 {
6862         struct hclge_dev *hdev = vport->back;
6863
6864         if (cmdq_resp) {
6865                 dev_err(&hdev->pdev->dev,
6866                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6867                         cmdq_resp);
6868                 return -EIO;
6869         }
6870
6871         if (op == HCLGE_MAC_VLAN_ADD) {
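                /* for the add op, resp_code 0 and 1 are both treated as
                 * success; overflow codes map to -ENOSPC below.
                 */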
6872                 if ((!resp_code) || (resp_code == 1)) {
6873                         return 0;
6874                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6875                         dev_err(&hdev->pdev->dev,
6876                                 "add mac addr failed for uc_overflow.\n");
6877                         return -ENOSPC;
6878                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6879                         dev_err(&hdev->pdev->dev,
6880                                 "add mac addr failed for mc_overflow.\n");
6881                         return -ENOSPC;
6882                 }
6883
6884                 dev_err(&hdev->pdev->dev,
6885                         "add mac addr failed for undefined, code=%u.\n",
6886                         resp_code);
6887                 return -EIO;
6888         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6889                 if (!resp_code) {
6890                         return 0;
6891                 } else if (resp_code == 1) {
6892                         dev_dbg(&hdev->pdev->dev,
6893                                 "remove mac addr failed for miss.\n");
6894                         return -ENOENT;
6895                 }
6896
6897                 dev_err(&hdev->pdev->dev,
6898                         "remove mac addr failed for undefined, code=%u.\n",
6899                         resp_code);
6900                 return -EIO;
6901         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6902                 if (!resp_code) {
6903                         return 0;
6904                 } else if (resp_code == 1) {
6905                         dev_dbg(&hdev->pdev->dev,
6906                                 "lookup mac addr failed for miss.\n");
6907                         return -ENOENT;
6908                 }
6909
6910                 dev_err(&hdev->pdev->dev,
6911                         "lookup mac addr failed for undefined, code=%u.\n",
6912                         resp_code);
6913                 return -EIO;
6914         }
6915
6916         dev_err(&hdev->pdev->dev,
6917                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6918
6919         return -EINVAL;
6920 }
6921
6922 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6923 {
6924 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6925
6926         unsigned int word_num;
6927         unsigned int bit_num;
6928
6929         if (vfid > 255 || vfid < 0)
6930                 return -EIO;
6931
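        /* the function-id bitmap spans two descriptors: ids 0-191 live in
         * desc[1].data and ids 192-255 in desc[2].data, 32 ids per data word.
         */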
6932         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6933                 word_num = vfid / 32;
6934                 bit_num  = vfid % 32;
6935                 if (clr)
6936                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6937                 else
6938                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6939         } else {
6940                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6941                 bit_num  = vfid % 32;
6942                 if (clr)
6943                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6944                 else
6945                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6946         }
6947
6948         return 0;
6949 }
6950
6951 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6952 {
6953 #define HCLGE_DESC_NUMBER 3
6954 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6955         int i, j;
6956
6957         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6958                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6959                         if (desc[i].data[j])
6960                                 return false;
6961
6962         return true;
6963 }
6964
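/* Pack a 6-byte MAC address into the hi32/lo16 layout used by the MAC_VLAN
 * table entry: bytes 0-3 go into mac_addr_hi32 and bytes 4-5 into
 * mac_addr_lo16.
 */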
6965 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6966                                    const u8 *addr, bool is_mc)
6967 {
6968         const unsigned char *mac_addr = addr;
6969         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6970                        (mac_addr[0]) | (mac_addr[1] << 8);
6971         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6972
6973         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6974         if (is_mc) {
6975                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6976                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6977         }
6978
6979         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6980         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6981 }
6982
6983 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6984                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6985 {
6986         struct hclge_dev *hdev = vport->back;
6987         struct hclge_desc desc;
6988         u8 resp_code;
6989         u16 retval;
6990         int ret;
6991
6992         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6993
6994         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6995
6996         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6997         if (ret) {
6998                 dev_err(&hdev->pdev->dev,
6999                         "del mac addr failed for cmd_send, ret =%d.\n",
7000                         ret);
7001                 return ret;
7002         }
7003         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7004         retval = le16_to_cpu(desc.retval);
7005
7006         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7007                                              HCLGE_MAC_VLAN_REMOVE);
7008 }
7009
7010 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7011                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7012                                      struct hclge_desc *desc,
7013                                      bool is_mc)
7014 {
7015         struct hclge_dev *hdev = vport->back;
7016         u8 resp_code;
7017         u16 retval;
7018         int ret;
7019
7020         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7021         if (is_mc) {
7022                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7023                 memcpy(desc[0].data,
7024                        req,
7025                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7026                 hclge_cmd_setup_basic_desc(&desc[1],
7027                                            HCLGE_OPC_MAC_VLAN_ADD,
7028                                            true);
7029                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7030                 hclge_cmd_setup_basic_desc(&desc[2],
7031                                            HCLGE_OPC_MAC_VLAN_ADD,
7032                                            true);
7033                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7034         } else {
7035                 memcpy(desc[0].data,
7036                        req,
7037                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7038                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7039         }
7040         if (ret) {
7041                 dev_err(&hdev->pdev->dev,
7042                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7043                         ret);
7044                 return ret;
7045         }
7046         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7047         retval = le16_to_cpu(desc[0].retval);
7048
7049         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7050                                              HCLGE_MAC_VLAN_LKUP);
7051 }
7052
7053 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7054                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7055                                   struct hclge_desc *mc_desc)
7056 {
7057         struct hclge_dev *hdev = vport->back;
7058         int cfg_status;
7059         u8 resp_code;
7060         u16 retval;
7061         int ret;
7062
7063         if (!mc_desc) {
7064                 struct hclge_desc desc;
7065
7066                 hclge_cmd_setup_basic_desc(&desc,
7067                                            HCLGE_OPC_MAC_VLAN_ADD,
7068                                            false);
7069                 memcpy(desc.data, req,
7070                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7071                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7072                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7073                 retval = le16_to_cpu(desc.retval);
7074
7075                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7076                                                            resp_code,
7077                                                            HCLGE_MAC_VLAN_ADD);
7078         } else {
7079                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7080                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7081                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7082                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7083                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7084                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7085                 memcpy(mc_desc[0].data, req,
7086                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7087                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7088                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7089                 retval = le16_to_cpu(mc_desc[0].retval);
7090
7091                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7092                                                            resp_code,
7093                                                            HCLGE_MAC_VLAN_ADD);
7094         }
7095
7096         if (ret) {
7097                 dev_err(&hdev->pdev->dev,
7098                         "add mac addr failed for cmd_send, ret =%d.\n",
7099                         ret);
7100                 return ret;
7101         }
7102
7103         return cfg_status;
7104 }
7105
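/* Partition the unicast MAC vlan (UMV) space granted by firmware: the total
 * is divided into (num_req_vfs + 2) equal private quotas (one per VF, one for
 * the PF), and the leftover quota plus the division remainder forms the pool
 * shared by all vports. As a purely illustrative example, 256 entries with
 * 6 requested VFs would give each vport a private quota of 32 and a shared
 * pool of 32.
 */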
7106 static int hclge_init_umv_space(struct hclge_dev *hdev)
7107 {
7108         u16 allocated_size = 0;
7109         int ret;
7110
7111         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7112                                   true);
7113         if (ret)
7114                 return ret;
7115
7116         if (allocated_size < hdev->wanted_umv_size)
7117                 dev_warn(&hdev->pdev->dev,
7118                          "Alloc umv space failed, want %u, get %u\n",
7119                          hdev->wanted_umv_size, allocated_size);
7120
7121         mutex_init(&hdev->umv_mutex);
7122         hdev->max_umv_size = allocated_size;
7123         /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7124          * reserve some unicast mac vlan table entries shared by the pf
7125          * and its vfs.
7126          */
7127         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7128         hdev->share_umv_size = hdev->priv_umv_size +
7129                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7130
7131         return 0;
7132 }
7133
7134 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7135 {
7136         int ret;
7137
7138         if (hdev->max_umv_size > 0) {
7139                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7140                                           false);
7141                 if (ret)
7142                         return ret;
7143                 hdev->max_umv_size = 0;
7144         }
7145         mutex_destroy(&hdev->umv_mutex);
7146
7147         return 0;
7148 }
7149
7150 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7151                                u16 *allocated_size, bool is_alloc)
7152 {
7153         struct hclge_umv_spc_alc_cmd *req;
7154         struct hclge_desc desc;
7155         int ret;
7156
7157         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7158         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7159         if (!is_alloc)
7160                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7161
7162         req->space_size = cpu_to_le32(space_size);
7163
7164         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7165         if (ret) {
7166                 dev_err(&hdev->pdev->dev,
7167                         "%s umv space failed for cmd_send, ret =%d\n",
7168                         is_alloc ? "allocate" : "free", ret);
7169                 return ret;
7170         }
7171
7172         if (is_alloc && allocated_size)
7173                 *allocated_size = le32_to_cpu(desc.data[1]);
7174
7175         return 0;
7176 }
7177
7178 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7179 {
7180         struct hclge_vport *vport;
7181         int i;
7182
7183         for (i = 0; i < hdev->num_alloc_vport; i++) {
7184                 vport = &hdev->vport[i];
7185                 vport->used_umv_num = 0;
7186         }
7187
7188         mutex_lock(&hdev->umv_mutex);
7189         hdev->share_umv_size = hdev->priv_umv_size +
7190                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7191         mutex_unlock(&hdev->umv_mutex);
7192 }
7193
7194 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7195 {
7196         struct hclge_dev *hdev = vport->back;
7197         bool is_full;
7198
7199         mutex_lock(&hdev->umv_mutex);
7200         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7201                    hdev->share_umv_size == 0);
7202         mutex_unlock(&hdev->umv_mutex);
7203
7204         return is_full;
7205 }
7206
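/* Account for one UMV entry being added (is_free == false) or removed
 * (is_free == true) by a vport: entries within the vport's private quota only
 * affect used_umv_num, while entries beyond the quota consume from or return
 * to the shared pool.
 */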
7207 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7208 {
7209         struct hclge_dev *hdev = vport->back;
7210
7211         mutex_lock(&hdev->umv_mutex);
7212         if (is_free) {
7213                 if (vport->used_umv_num > hdev->priv_umv_size)
7214                         hdev->share_umv_size++;
7215
7216                 if (vport->used_umv_num > 0)
7217                         vport->used_umv_num--;
7218         } else {
7219                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7220                     hdev->share_umv_size > 0)
7221                         hdev->share_umv_size--;
7222                 vport->used_umv_num++;
7223         }
7224         mutex_unlock(&hdev->umv_mutex);
7225 }
7226
7227 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7228                              const unsigned char *addr)
7229 {
7230         struct hclge_vport *vport = hclge_get_vport(handle);
7231
7232         return hclge_add_uc_addr_common(vport, addr);
7233 }
7234
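/* Add a unicast MAC address for a vport. The address is first looked up in
 * the MAC/VLAN table: -ENOENT means it is new and is added if UMV space
 * remains, 0 means the address already exists and is treated as a harmless
 * duplicate, and any other status is reported as an error.
 */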
7235 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7236                              const unsigned char *addr)
7237 {
7238         struct hclge_dev *hdev = vport->back;
7239         struct hclge_mac_vlan_tbl_entry_cmd req;
7240         struct hclge_desc desc;
7241         u16 egress_port = 0;
7242         int ret;
7243
7244         /* mac addr check */
7245         if (is_zero_ether_addr(addr) ||
7246             is_broadcast_ether_addr(addr) ||
7247             is_multicast_ether_addr(addr)) {
7248                 dev_err(&hdev->pdev->dev,
7249                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7250                          addr, is_zero_ether_addr(addr),
7251                          is_broadcast_ether_addr(addr),
7252                          is_multicast_ether_addr(addr));
7253                 return -EINVAL;
7254         }
7255
7256         memset(&req, 0, sizeof(req));
7257
7258         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7259                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7260
7261         req.egress_port = cpu_to_le16(egress_port);
7262
7263         hclge_prepare_mac_addr(&req, addr, false);
7264
7265         /* Look up the mac address in the mac_vlan table, and add
7266          * it if the entry does not exist. Duplicate unicast entries
7267          * are not allowed in the mac_vlan table.
7268          */
7269         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7270         if (ret == -ENOENT) {
7271                 if (!hclge_is_umv_space_full(vport)) {
7272                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7273                         if (!ret)
7274                                 hclge_update_umv_space(vport, false);
7275                         return ret;
7276                 }
7277
7278                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7279                         hdev->priv_umv_size);
7280
7281                 return -ENOSPC;
7282         }
7283
7284         /* check if we just hit the duplicate */
7285         if (!ret) {
7286                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7287                          vport->vport_id, addr);
7288                 return 0;
7289         }
7290
7291         dev_err(&hdev->pdev->dev,
7292                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7293                 addr);
7294
7295         return ret;
7296 }
7297
7298 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7299                             const unsigned char *addr)
7300 {
7301         struct hclge_vport *vport = hclge_get_vport(handle);
7302
7303         return hclge_rm_uc_addr_common(vport, addr);
7304 }
7305
7306 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7307                             const unsigned char *addr)
7308 {
7309         struct hclge_dev *hdev = vport->back;
7310         struct hclge_mac_vlan_tbl_entry_cmd req;
7311         int ret;
7312
7313         /* mac addr check */
7314         if (is_zero_ether_addr(addr) ||
7315             is_broadcast_ether_addr(addr) ||
7316             is_multicast_ether_addr(addr)) {
7317                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7318                         addr);
7319                 return -EINVAL;
7320         }
7321
7322         memset(&req, 0, sizeof(req));
7323         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7324         hclge_prepare_mac_addr(&req, addr, false);
7325         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7326         if (!ret)
7327                 hclge_update_umv_space(vport, true);
7328
7329         return ret;
7330 }
7331
7332 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7333                              const unsigned char *addr)
7334 {
7335         struct hclge_vport *vport = hclge_get_vport(handle);
7336
7337         return hclge_add_mc_addr_common(vport, addr);
7338 }
7339
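/* Add a multicast MAC address for a vport. The three-descriptor lookup
 * returns the current per-function bitmap for the address (or fails if the
 * entry is new, in which case the bitmap is cleared); this vport's bit is
 * then set and the entry is written back.
 */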
7340 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7341                              const unsigned char *addr)
7342 {
7343         struct hclge_dev *hdev = vport->back;
7344         struct hclge_mac_vlan_tbl_entry_cmd req;
7345         struct hclge_desc desc[3];
7346         int status;
7347
7348         /* mac addr check */
7349         if (!is_multicast_ether_addr(addr)) {
7350                 dev_err(&hdev->pdev->dev,
7351                         "Add mc mac err! invalid mac:%pM.\n",
7352                          addr);
7353                 return -EINVAL;
7354         }
7355         memset(&req, 0, sizeof(req));
7356         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7357         hclge_prepare_mac_addr(&req, addr, true);
7358         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7359         if (status) {
7360                 /* This mac addr does not exist, add a new entry for it */
7361                 memset(desc[0].data, 0, sizeof(desc[0].data));
7362                 memset(desc[1].data, 0, sizeof(desc[0].data));
7363                 memset(desc[2].data, 0, sizeof(desc[0].data));
7364         }
7365         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7366         if (status)
7367                 return status;
7368         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7369
7370         if (status == -ENOSPC)
7371                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7372
7373         return status;
7374 }
7375
7376 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7377                             const unsigned char *addr)
7378 {
7379         struct hclge_vport *vport = hclge_get_vport(handle);
7380
7381         return hclge_rm_mc_addr_common(vport, addr);
7382 }
7383
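/* Remove a multicast MAC address for a vport by clearing its bit in the
 * entry's per-function bitmap. The hardware entry itself is deleted only
 * when no function references it any more; otherwise the updated bitmap is
 * written back.
 */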
7384 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7385                             const unsigned char *addr)
7386 {
7387         struct hclge_dev *hdev = vport->back;
7388         struct hclge_mac_vlan_tbl_entry_cmd req;
7389         enum hclge_cmd_status status;
7390         struct hclge_desc desc[3];
7391
7392         /* mac addr check */
7393         if (!is_multicast_ether_addr(addr)) {
7394                 dev_dbg(&hdev->pdev->dev,
7395                         "Remove mc mac err! invalid mac:%pM.\n",
7396                          addr);
7397                 return -EINVAL;
7398         }
7399
7400         memset(&req, 0, sizeof(req));
7401         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7402         hclge_prepare_mac_addr(&req, addr, true);
7403         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7404         if (!status) {
7405                 /* This mac addr exists, remove this handle's VFID for it */
7406                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7407                 if (status)
7408                         return status;
7409
7410                 if (hclge_is_all_function_id_zero(desc))
7411                         /* All the vfids are zero, so this entry needs to be deleted */
7412                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7413                 else
7414                         /* Not all the vfids are zero, just update the vfid bitmap */
7415                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7416
7417         } else {
7418                 /* This mac address may be in the mta table, but it cannot be
7419                  * deleted here because an mta entry represents an address
7420                  * range rather than a specific address. The delete action for
7421                  * all entries will take effect in update_mta_status, called by
7422                  * hns3_nic_set_rx_mode.
7423                  */
7424                 status = 0;
7425         }
7426
7427         return status;
7428 }
7429
7430 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7431                                enum HCLGE_MAC_ADDR_TYPE mac_type)
7432 {
7433         struct hclge_vport_mac_addr_cfg *mac_cfg;
7434         struct list_head *list;
7435
7436         if (!vport->vport_id)
7437                 return;
7438
7439         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7440         if (!mac_cfg)
7441                 return;
7442
7443         mac_cfg->hd_tbl_status = true;
7444         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7445
7446         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7447                &vport->uc_mac_list : &vport->mc_mac_list;
7448
7449         list_add_tail(&mac_cfg->node, list);
7450 }
7451
7452 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7453                               bool is_write_tbl,
7454                               enum HCLGE_MAC_ADDR_TYPE mac_type)
7455 {
7456         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7457         struct list_head *list;
7458         bool uc_flag, mc_flag;
7459
7460         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7461                &vport->uc_mac_list : &vport->mc_mac_list;
7462
7463         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7464         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7465
7466         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7467                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7468                         if (uc_flag && mac_cfg->hd_tbl_status)
7469                                 hclge_rm_uc_addr_common(vport, mac_addr);
7470
7471                         if (mc_flag && mac_cfg->hd_tbl_status)
7472                                 hclge_rm_mc_addr_common(vport, mac_addr);
7473
7474                         list_del(&mac_cfg->node);
7475                         kfree(mac_cfg);
7476                         break;
7477                 }
7478         }
7479 }
7480
7481 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7482                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7483 {
7484         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7485         struct list_head *list;
7486
7487         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7488                &vport->uc_mac_list : &vport->mc_mac_list;
7489
7490         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7491                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7492                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7493
7494                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7495                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7496
7497                 mac_cfg->hd_tbl_status = false;
7498                 if (is_del_list) {
7499                         list_del(&mac_cfg->node);
7500                         kfree(mac_cfg);
7501                 }
7502         }
7503 }
7504
7505 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7506 {
7507         struct hclge_vport_mac_addr_cfg *mac, *tmp;
7508         struct hclge_vport *vport;
7509         int i;
7510
7511         for (i = 0; i < hdev->num_alloc_vport; i++) {
7512                 vport = &hdev->vport[i];
7513                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7514                         list_del(&mac->node);
7515                         kfree(mac);
7516                 }
7517
7518                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7519                         list_del(&mac->node);
7520                         kfree(mac);
7521                 }
7522         }
7523 }
7524
7525 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7526                                               u16 cmdq_resp, u8 resp_code)
7527 {
7528 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7529 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7530 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7531 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7532
7533         int return_status;
7534
7535         if (cmdq_resp) {
7536                 dev_err(&hdev->pdev->dev,
7537                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7538                         cmdq_resp);
7539                 return -EIO;
7540         }
7541
7542         switch (resp_code) {
7543         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7544         case HCLGE_ETHERTYPE_ALREADY_ADD:
7545                 return_status = 0;
7546                 break;
7547         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7548                 dev_err(&hdev->pdev->dev,
7549                         "add mac ethertype failed for manager table overflow.\n");
7550                 return_status = -EIO;
7551                 break;
7552         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7553                 dev_err(&hdev->pdev->dev,
7554                         "add mac ethertype failed for key conflict.\n");
7555                 return_status = -EIO;
7556                 break;
7557         default:
7558                 dev_err(&hdev->pdev->dev,
7559                         "add mac ethertype failed for undefined, code=%u.\n",
7560                         resp_code);
7561                 return_status = -EIO;
7562         }
7563
7564         return return_status;
7565 }
7566
7567 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7568                                      u8 *mac_addr)
7569 {
7570         struct hclge_mac_vlan_tbl_entry_cmd req;
7571         struct hclge_dev *hdev = vport->back;
7572         struct hclge_desc desc;
7573         u16 egress_port = 0;
7574         int i;
7575
7576         if (is_zero_ether_addr(mac_addr))
7577                 return false;
7578
7579         memset(&req, 0, sizeof(req));
7580         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7581                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7582         req.egress_port = cpu_to_le16(egress_port);
7583         hclge_prepare_mac_addr(&req, mac_addr, false);
7584
7585         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7586                 return true;
7587
7588         vf_idx += HCLGE_VF_VPORT_START_NUM;
7589         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7590                 if (i != vf_idx &&
7591                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7592                         return true;
7593
7594         return false;
7595 }
7596
7597 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7598                             u8 *mac_addr)
7599 {
7600         struct hclge_vport *vport = hclge_get_vport(handle);
7601         struct hclge_dev *hdev = vport->back;
7602
7603         vport = hclge_get_vf_vport(hdev, vf);
7604         if (!vport)
7605                 return -EINVAL;
7606
7607         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7608                 dev_info(&hdev->pdev->dev,
7609                          "Specified MAC(=%pM) is same as before, no change committed!\n",
7610                          mac_addr);
7611                 return 0;
7612         }
7613
7614         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7615                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7616                         mac_addr);
7617                 return -EEXIST;
7618         }
7619
7620         ether_addr_copy(vport->vf_info.mac, mac_addr);
7621         dev_info(&hdev->pdev->dev,
7622                  "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7623                  vf, mac_addr);
7624
7625         return hclge_inform_reset_assert_to_vf(vport);
7626 }
7627
7628 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7629                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
7630 {
7631         struct hclge_desc desc;
7632         u8 resp_code;
7633         u16 retval;
7634         int ret;
7635
7636         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7637         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7638
7639         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7640         if (ret) {
7641                 dev_err(&hdev->pdev->dev,
7642                         "add mac ethertype failed for cmd_send, ret =%d.\n",
7643                         ret);
7644                 return ret;
7645         }
7646
7647         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7648         retval = le16_to_cpu(desc.retval);
7649
7650         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7651 }
7652
7653 static int init_mgr_tbl(struct hclge_dev *hdev)
7654 {
7655         int ret;
7656         int i;
7657
7658         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7659                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7660                 if (ret) {
7661                         dev_err(&hdev->pdev->dev,
7662                                 "add mac ethertype failed, ret =%d.\n",
7663                                 ret);
7664                         return ret;
7665                 }
7666         }
7667
7668         return 0;
7669 }
7670
7671 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7672 {
7673         struct hclge_vport *vport = hclge_get_vport(handle);
7674         struct hclge_dev *hdev = vport->back;
7675
7676         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7677 }
7678
7679 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7680                               bool is_first)
7681 {
7682         const unsigned char *new_addr = (const unsigned char *)p;
7683         struct hclge_vport *vport = hclge_get_vport(handle);
7684         struct hclge_dev *hdev = vport->back;
7685         int ret;
7686
7687         /* mac addr check */
7688         if (is_zero_ether_addr(new_addr) ||
7689             is_broadcast_ether_addr(new_addr) ||
7690             is_multicast_ether_addr(new_addr)) {
7691                 dev_err(&hdev->pdev->dev,
7692                         "Change uc mac err! invalid mac:%pM.\n",
7693                          new_addr);
7694                 return -EINVAL;
7695         }
7696
7697         if ((!is_first || is_kdump_kernel()) &&
7698             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7699                 dev_warn(&hdev->pdev->dev,
7700                          "remove old uc mac address fail.\n");
7701
7702         ret = hclge_add_uc_addr(handle, new_addr);
7703         if (ret) {
7704                 dev_err(&hdev->pdev->dev,
7705                         "add uc mac address fail, ret =%d.\n",
7706                         ret);
7707
7708                 if (!is_first &&
7709                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7710                         dev_err(&hdev->pdev->dev,
7711                                 "restore uc mac address fail.\n");
7712
7713                 return -EIO;
7714         }
7715
7716         ret = hclge_pause_addr_cfg(hdev, new_addr);
7717         if (ret) {
7718                 dev_err(&hdev->pdev->dev,
7719                         "configure mac pause address fail, ret =%d.\n",
7720                         ret);
7721                 return -EIO;
7722         }
7723
7724         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7725
7726         return 0;
7727 }
7728
7729 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7730                           int cmd)
7731 {
7732         struct hclge_vport *vport = hclge_get_vport(handle);
7733         struct hclge_dev *hdev = vport->back;
7734
7735         if (!hdev->hw.mac.phydev)
7736                 return -EOPNOTSUPP;
7737
7738         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7739 }
7740
7741 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7742                                       u8 fe_type, bool filter_en, u8 vf_id)
7743 {
7744         struct hclge_vlan_filter_ctrl_cmd *req;
7745         struct hclge_desc desc;
7746         int ret;
7747
7748         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7749
7750         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7751         req->vlan_type = vlan_type;
7752         req->vlan_fe = filter_en ? fe_type : 0;
7753         req->vf_id = vf_id;
7754
7755         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7756         if (ret)
7757                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7758                         ret);
7759
7760         return ret;
7761 }
7762
7763 #define HCLGE_FILTER_TYPE_VF            0
7764 #define HCLGE_FILTER_TYPE_PORT          1
7765 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7766 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7767 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7768 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7769 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7770 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7771                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7772 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7773                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7774
7775 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7776 {
7777         struct hclge_vport *vport = hclge_get_vport(handle);
7778         struct hclge_dev *hdev = vport->back;
7779
7780         if (hdev->pdev->revision >= 0x21) {
7781                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7782                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7783                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7784                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7785         } else {
7786                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7787                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7788                                            0);
7789         }
7790         if (enable)
7791                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7792         else
7793                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7794 }
7795
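/* Program one vlan id into (or out of) a single function's vlan filter. The
 * target function is selected by a one-hot byte in a bitmap that spans two
 * descriptors: vf_byte_off = vfid / 8 picks the byte and (vfid % 8) the bit,
 * so e.g. vfid 10 sets byte 1 to 0x04; bytes past HCLGE_MAX_VF_BYTES spill
 * into the second descriptor.
 */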
7796 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7797                                     bool is_kill, u16 vlan,
7798                                     __be16 proto)
7799 {
7800         struct hclge_vport *vport = &hdev->vport[vfid];
7801         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7802         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7803         struct hclge_desc desc[2];
7804         u8 vf_byte_val;
7805         u8 vf_byte_off;
7806         int ret;
7807
7808         /* If the vf vlan table is full, firmware closes the vf vlan filter,
7809          * so adding a new vlan id is neither possible nor necessary.
7810          * If spoof check is enabled and the vf vlan table is full, new vlans
7811          * must not be added: tx packets with these vlan ids would be dropped.
7812          */
7813         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7814                 if (vport->vf_info.spoofchk && vlan) {
7815                         dev_err(&hdev->pdev->dev,
7816                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7817                         return -EPERM;
7818                 }
7819                 return 0;
7820         }
7821
7822         hclge_cmd_setup_basic_desc(&desc[0],
7823                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7824         hclge_cmd_setup_basic_desc(&desc[1],
7825                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7826
7827         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7828
7829         vf_byte_off = vfid / 8;
7830         vf_byte_val = 1 << (vfid % 8);
7831
7832         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7833         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7834
7835         req0->vlan_id  = cpu_to_le16(vlan);
7836         req0->vlan_cfg = is_kill;
7837
7838         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7839                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7840         else
7841                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7842
7843         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7844         if (ret) {
7845                 dev_err(&hdev->pdev->dev,
7846                         "Send vf vlan command fail, ret =%d.\n",
7847                         ret);
7848                 return ret;
7849         }
7850
7851         if (!is_kill) {
7852 #define HCLGE_VF_VLAN_NO_ENTRY  2
7853                 if (!req0->resp_code || req0->resp_code == 1)
7854                         return 0;
7855
7856                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7857                         set_bit(vfid, hdev->vf_vlan_full);
7858                         dev_warn(&hdev->pdev->dev,
7859                                  "vf vlan table is full, vf vlan filter is disabled\n");
7860                         return 0;
7861                 }
7862
7863                 dev_err(&hdev->pdev->dev,
7864                         "Add vf vlan filter fail, ret =%u.\n",
7865                         req0->resp_code);
7866         } else {
7867 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7868                 if (!req0->resp_code)
7869                         return 0;
7870
7871                 /* The vf vlan filter is disabled when the vf vlan table is
7872                  * full, so new vlan ids are not added to the table. Just
7873                  * return 0 without a warning, to avoid massive verbose
7874                  * logs on unload.
7875                  */
7876                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7877                         return 0;
7878
7879                 dev_err(&hdev->pdev->dev,
7880                         "Kill vf vlan filter fail, ret =%u.\n",
7881                         req0->resp_code);
7882         }
7883
7884         return -EIO;
7885 }
7886
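/* Program one vlan id into (or out of) the port-level vlan filter. The 4096
 * vlan ids are addressed as group / byte / bit: vlan_offset selects a group
 * of HCLGE_VLAN_ID_OFFSET_STEP ids and a single bit in vlan_offset_bitmap
 * selects the id within the group. Assuming the usual step of 160 and byte
 * size of 8, vlan 1000 maps to group 6, byte 5, bit 0.
 */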
7887 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7888                                       u16 vlan_id, bool is_kill)
7889 {
7890         struct hclge_vlan_filter_pf_cfg_cmd *req;
7891         struct hclge_desc desc;
7892         u8 vlan_offset_byte_val;
7893         u8 vlan_offset_byte;
7894         u8 vlan_offset_160;
7895         int ret;
7896
7897         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7898
7899         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7900         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7901                            HCLGE_VLAN_BYTE_SIZE;
7902         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7903
7904         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7905         req->vlan_offset = vlan_offset_160;
7906         req->vlan_cfg = is_kill;
7907         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7908
7909         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7910         if (ret)
7911                 dev_err(&hdev->pdev->dev,
7912                         "port vlan command, send fail, ret =%d.\n", ret);
7913         return ret;
7914 }
7915
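/* Apply a vlan filter change for one vport. The per-function (VF) filter is
 * always updated, while the port-level filter is only touched on the edges:
 * when the first vport joins a vlan or the last vport leaves it, as tracked
 * by the hdev->vlan_table bitmaps.
 */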
7916 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7917                                     u16 vport_id, u16 vlan_id,
7918                                     bool is_kill)
7919 {
7920         u16 vport_idx, vport_num = 0;
7921         int ret;
7922
7923         if (is_kill && !vlan_id)
7924                 return 0;
7925
7926         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7927                                        proto);
7928         if (ret) {
7929                 dev_err(&hdev->pdev->dev,
7930                         "Set %u vport vlan filter config fail, ret =%d.\n",
7931                         vport_id, ret);
7932                 return ret;
7933         }
7934
7935         /* vlan 0 may be added twice when 8021q module is enabled */
7936         if (!is_kill && !vlan_id &&
7937             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7938                 return 0;
7939
7940         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7941                 dev_err(&hdev->pdev->dev,
7942                         "Add port vlan failed, vport %u is already in vlan %u\n",
7943                         vport_id, vlan_id);
7944                 return -EINVAL;
7945         }
7946
7947         if (is_kill &&
7948             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7949                 dev_err(&hdev->pdev->dev,
7950                         "Delete port vlan failed, vport %u is not in vlan %u\n",
7951                         vport_id, vlan_id);
7952                 return -EINVAL;
7953         }
7954
7955         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7956                 vport_num++;
7957
7958         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7959                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7960                                                  is_kill);
7961
7962         return ret;
7963 }
7964
7965 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7966 {
7967         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7968         struct hclge_vport_vtag_tx_cfg_cmd *req;
7969         struct hclge_dev *hdev = vport->back;
7970         struct hclge_desc desc;
7971         u16 bmap_index;
7972         int status;
7973
7974         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7975
7976         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7977         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7978         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7979         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7980                       vcfg->accept_tag1 ? 1 : 0);
7981         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7982                       vcfg->accept_untag1 ? 1 : 0);
7983         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7984                       vcfg->accept_tag2 ? 1 : 0);
7985         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7986                       vcfg->accept_untag2 ? 1 : 0);
7987         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7988                       vcfg->insert_tag1_en ? 1 : 0);
7989         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7990                       vcfg->insert_tag2_en ? 1 : 0);
7991         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7992
7993         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7994         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7995                         HCLGE_VF_NUM_PER_BYTE;
7996         req->vf_bitmap[bmap_index] =
7997                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7998
7999         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8000         if (status)
8001                 dev_err(&hdev->pdev->dev,
8002                         "Send port txvlan cfg command fail, ret =%d\n",
8003                         status);
8004
8005         return status;
8006 }
8007
8008 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8009 {
8010         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8011         struct hclge_vport_vtag_rx_cfg_cmd *req;
8012         struct hclge_dev *hdev = vport->back;
8013         struct hclge_desc desc;
8014         u16 bmap_index;
8015         int status;
8016
8017         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8018
8019         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8020         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8021                       vcfg->strip_tag1_en ? 1 : 0);
8022         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8023                       vcfg->strip_tag2_en ? 1 : 0);
8024         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8025                       vcfg->vlan1_vlan_prionly ? 1 : 0);
8026         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8027                       vcfg->vlan2_vlan_prionly ? 1 : 0);
8028
8029         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8030         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8031                         HCLGE_VF_NUM_PER_BYTE;
8032         req->vf_bitmap[bmap_index] =
8033                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8034
8035         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8036         if (status)
8037                 dev_err(&hdev->pdev->dev,
8038                         "Send port rxvlan cfg command fail, ret =%d\n",
8039                         status);
8040
8041         return status;
8042 }
8043
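/* Configure tx/rx vlan tag handling for a vport according to its port based
 * vlan state. With port based vlan disabled, tag1 from the stack is accepted
 * as-is and only tag2 stripping follows the rx offload setting; with it
 * enabled, the hardware inserts vlan_tag as tag1 on tx and strips tag2
 * unconditionally on rx, while tag1 stripping follows the offload setting.
 */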
8044 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8045                                   u16 port_base_vlan_state,
8046                                   u16 vlan_tag)
8047 {
8048         int ret;
8049
8050         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8051                 vport->txvlan_cfg.accept_tag1 = true;
8052                 vport->txvlan_cfg.insert_tag1_en = false;
8053                 vport->txvlan_cfg.default_tag1 = 0;
8054         } else {
8055                 vport->txvlan_cfg.accept_tag1 = false;
8056                 vport->txvlan_cfg.insert_tag1_en = true;
8057                 vport->txvlan_cfg.default_tag1 = vlan_tag;
8058         }
8059
8060         vport->txvlan_cfg.accept_untag1 = true;
8061
8062         /* accept_tag2 and accept_untag2 are not supported on
8063          * pdev revision 0x20; newer revisions support them, but
8064          * these two fields cannot be configured by the user.
8065          */
8066         vport->txvlan_cfg.accept_tag2 = true;
8067         vport->txvlan_cfg.accept_untag2 = true;
8068         vport->txvlan_cfg.insert_tag2_en = false;
8069         vport->txvlan_cfg.default_tag2 = 0;
8070
8071         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8072                 vport->rxvlan_cfg.strip_tag1_en = false;
8073                 vport->rxvlan_cfg.strip_tag2_en =
8074                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8075         } else {
8076                 vport->rxvlan_cfg.strip_tag1_en =
8077                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8078                 vport->rxvlan_cfg.strip_tag2_en = true;
8079         }
8080         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8081         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8082
8083         ret = hclge_set_vlan_tx_offload_cfg(vport);
8084         if (ret)
8085                 return ret;
8086
8087         return hclge_set_vlan_rx_offload_cfg(vport);
8088 }
8089
8090 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8091 {
8092         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8093         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8094         struct hclge_desc desc;
8095         int status;
8096
8097         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8098         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8099         rx_req->ot_fst_vlan_type =
8100                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8101         rx_req->ot_sec_vlan_type =
8102                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8103         rx_req->in_fst_vlan_type =
8104                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8105         rx_req->in_sec_vlan_type =
8106                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8107
8108         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8109         if (status) {
8110                 dev_err(&hdev->pdev->dev,
8111                         "Send rxvlan protocol type command fail, ret =%d\n",
8112                         status);
8113                 return status;
8114         }
8115
8116         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8117
8118         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8119         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8120         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8121
8122         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8123         if (status)
8124                 dev_err(&hdev->pdev->dev,
8125                         "Send txvlan protocol type command fail, ret =%d\n",
8126                         status);
8127
8128         return status;
8129 }
8130
8131 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8132 {
8133 #define HCLGE_DEF_VLAN_TYPE             0x8100
8134
8135         struct hnae3_handle *handle = &hdev->vport[0].nic;
8136         struct hclge_vport *vport;
8137         int ret;
8138         int i;
8139
8140         if (hdev->pdev->revision >= 0x21) {
8141                 /* for revision 0x21, vf vlan filter is per function */
8142                 for (i = 0; i < hdev->num_alloc_vport; i++) {
8143                         vport = &hdev->vport[i];
8144                         ret = hclge_set_vlan_filter_ctrl(hdev,
8145                                                          HCLGE_FILTER_TYPE_VF,
8146                                                          HCLGE_FILTER_FE_EGRESS,
8147                                                          true,
8148                                                          vport->vport_id);
8149                         if (ret)
8150                                 return ret;
8151                 }
8152
8153                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8154                                                  HCLGE_FILTER_FE_INGRESS, true,
8155                                                  0);
8156                 if (ret)
8157                         return ret;
8158         } else {
8159                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8160                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
8161                                                  true, 0);
8162                 if (ret)
8163                         return ret;
8164         }
8165
8166         handle->netdev_flags |= HNAE3_VLAN_FLTR;
8167
8168         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8169         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8170         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8171         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8172         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8173         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8174
8175         ret = hclge_set_vlan_protocol_type(hdev);
8176         if (ret)
8177                 return ret;
8178
8179         for (i = 0; i < hdev->num_alloc_vport; i++) {
8180                 u16 vlan_tag;
8181
8182                 vport = &hdev->vport[i];
8183                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8184
8185                 ret = hclge_vlan_offload_cfg(vport,
8186                                              vport->port_base_vlan_cfg.state,
8187                                              vlan_tag);
8188                 if (ret)
8189                         return ret;
8190         }
8191
8192         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8193 }
8194
8195 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8196                                        bool writen_to_tbl)
8197 {
8198         struct hclge_vport_vlan_cfg *vlan;
8199
8200         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8201         if (!vlan)
8202                 return;
8203
8204         vlan->hd_tbl_status = writen_to_tbl;
8205         vlan->vlan_id = vlan_id;
8206
8207         list_add_tail(&vlan->node, &vport->vlan_list);
8208 }
8209
8210 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8211 {
8212         struct hclge_vport_vlan_cfg *vlan, *tmp;
8213         struct hclge_dev *hdev = vport->back;
8214         int ret;
8215
8216         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8217                 if (!vlan->hd_tbl_status) {
8218                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8219                                                        vport->vport_id,
8220                                                        vlan->vlan_id, false);
8221                         if (ret) {
8222                                 dev_err(&hdev->pdev->dev,
8223                                         "restore vport vlan list failed, ret=%d\n",
8224                                         ret);
8225                                 return ret;
8226                         }
8227                 }
8228                 vlan->hd_tbl_status = true;
8229         }
8230
8231         return 0;
8232 }
8233
8234 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8235                                       bool is_write_tbl)
8236 {
8237         struct hclge_vport_vlan_cfg *vlan, *tmp;
8238         struct hclge_dev *hdev = vport->back;
8239
8240         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8241                 if (vlan->vlan_id == vlan_id) {
8242                         if (is_write_tbl && vlan->hd_tbl_status)
8243                                 hclge_set_vlan_filter_hw(hdev,
8244                                                          htons(ETH_P_8021Q),
8245                                                          vport->vport_id,
8246                                                          vlan_id,
8247                                                          true);
8248
8249                         list_del(&vlan->node);
8250                         kfree(vlan);
8251                         break;
8252                 }
8253         }
8254 }
8255
8256 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8257 {
8258         struct hclge_vport_vlan_cfg *vlan, *tmp;
8259         struct hclge_dev *hdev = vport->back;
8260
8261         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8262                 if (vlan->hd_tbl_status)
8263                         hclge_set_vlan_filter_hw(hdev,
8264                                                  htons(ETH_P_8021Q),
8265                                                  vport->vport_id,
8266                                                  vlan->vlan_id,
8267                                                  true);
8268
8269                 vlan->hd_tbl_status = false;
8270                 if (is_del_list) {
8271                         list_del(&vlan->node);
8272                         kfree(vlan);
8273                 }
8274         }
8275         clear_bit(vport->vport_id, hdev->vf_vlan_full);
8276 }
8277
8278 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8279 {
8280         struct hclge_vport_vlan_cfg *vlan, *tmp;
8281         struct hclge_vport *vport;
8282         int i;
8283
8284         for (i = 0; i < hdev->num_alloc_vport; i++) {
8285                 vport = &hdev->vport[i];
8286                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8287                         list_del(&vlan->node);
8288                         kfree(vlan);
8289                 }
8290         }
8291 }
8292
8293 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8294 {
8295         struct hclge_vport *vport = hclge_get_vport(handle);
8296         struct hclge_vport_vlan_cfg *vlan, *tmp;
8297         struct hclge_dev *hdev = vport->back;
8298         u16 vlan_proto;
8299         u16 state, vlan_id;
8300         int i;
8301
8302         for (i = 0; i < hdev->num_alloc_vport; i++) {
8303                 vport = &hdev->vport[i];
8304                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8305                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8306                 state = vport->port_base_vlan_cfg.state;
8307
8308                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8309                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8310                                                  vport->vport_id, vlan_id,
8311                                                  false);
8312                         continue;
8313                 }
8314
8315                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8316                         int ret;
8317
8318                         if (!vlan->hd_tbl_status)
8319                                 continue;
8320                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8321                                                        vport->vport_id,
8322                                                        vlan->vlan_id, false);
8323                         if (ret)
8324                                 break;
8325                 }
8326         }
8327 }
8328
8329 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8330 {
8331         struct hclge_vport *vport = hclge_get_vport(handle);
8332
8333         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8334                 vport->rxvlan_cfg.strip_tag1_en = false;
8335                 vport->rxvlan_cfg.strip_tag2_en = enable;
8336         } else {
8337                 vport->rxvlan_cfg.strip_tag1_en = enable;
8338                 vport->rxvlan_cfg.strip_tag2_en = true;
8339         }
8340         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8341         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8342         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8343
8344         return hclge_set_vlan_rx_offload_cfg(vport);
8345 }
8346
8347 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8348                                             u16 port_base_vlan_state,
8349                                             struct hclge_vlan_info *new_info,
8350                                             struct hclge_vlan_info *old_info)
8351 {
8352         struct hclge_dev *hdev = vport->back;
8353         int ret;
8354
8355         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8356                 hclge_rm_vport_all_vlan_table(vport, false);
8357                 return hclge_set_vlan_filter_hw(hdev,
8358                                                  htons(new_info->vlan_proto),
8359                                                  vport->vport_id,
8360                                                  new_info->vlan_tag,
8361                                                  false);
8362         }
8363
8364         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8365                                        vport->vport_id, old_info->vlan_tag,
8366                                        true);
8367         if (ret)
8368                 return ret;
8369
8370         return hclge_add_vport_all_vlan_table(vport);
8371 }
8372
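/* Update a vport's port based vlan configuration. HNAE3_PORT_BASE_VLAN_MODIFY
 * only swaps the old hardware vlan entry for the new one, whereas enabling or
 * disabling port based vlan also migrates between the port based entry and
 * the vport's own vlan list and updates the recorded state.
 */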
8373 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8374                                     struct hclge_vlan_info *vlan_info)
8375 {
8376         struct hnae3_handle *nic = &vport->nic;
8377         struct hclge_vlan_info *old_vlan_info;
8378         struct hclge_dev *hdev = vport->back;
8379         int ret;
8380
8381         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8382
8383         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8384         if (ret)
8385                 return ret;
8386
8387         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8388                 /* add new VLAN tag */
8389                 ret = hclge_set_vlan_filter_hw(hdev,
8390                                                htons(vlan_info->vlan_proto),
8391                                                vport->vport_id,
8392                                                vlan_info->vlan_tag,
8393                                                false);
8394                 if (ret)
8395                         return ret;
8396
8397                 /* remove old VLAN tag */
8398                 ret = hclge_set_vlan_filter_hw(hdev,
8399                                                htons(old_vlan_info->vlan_proto),
8400                                                vport->vport_id,
8401                                                old_vlan_info->vlan_tag,
8402                                                true);
8403                 if (ret)
8404                         return ret;
8405
8406                 goto update;
8407         }
8408
8409         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8410                                                old_vlan_info);
8411         if (ret)
8412                 return ret;
8413
8414         /* update state only when disabling/enabling port-based VLAN */
8415         vport->port_base_vlan_cfg.state = state;
8416         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8417                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8418         else
8419                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8420
8421 update:
8422         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8423         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8424         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8425
8426         return 0;
8427 }
8428
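/* Map a VF VLAN request onto a port-based VLAN action: a non-zero VLAN
 * enables or modifies the port-based VLAN, VLAN 0 disables it, and a request
 * matching the current configuration is reported as no change.
 */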
8429 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8430                                           enum hnae3_port_base_vlan_state state,
8431                                           u16 vlan)
8432 {
8433         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8434                 if (!vlan)
8435                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8436                 else
8437                         return HNAE3_PORT_BASE_VLAN_ENABLE;
8438         } else {
8439                 if (!vlan)
8440                         return HNAE3_PORT_BASE_VLAN_DISABLE;
8441                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8442                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8443                 else
8444                         return HNAE3_PORT_BASE_VLAN_MODIFY;
8445         }
8446 }
8447
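/* PF-side handler for assigning a port-based VLAN to a VF (the
 * ndo_set_vf_vlan path): validate the request, then either apply it directly
 * when the VF is not alive yet, or push the new config to the VF via the
 * mailbox.
 */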
8448 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8449                                     u16 vlan, u8 qos, __be16 proto)
8450 {
8451         struct hclge_vport *vport = hclge_get_vport(handle);
8452         struct hclge_dev *hdev = vport->back;
8453         struct hclge_vlan_info vlan_info;
8454         u16 state;
8455         int ret;
8456
8457         if (hdev->pdev->revision == 0x20)
8458                 return -EOPNOTSUPP;
8459
8460         vport = hclge_get_vf_vport(hdev, vfid);
8461         if (!vport)
8462                 return -EINVAL;
8463
8464         /* qos is a 3-bit value, so it cannot be greater than 7 */
8465         if (vlan > VLAN_N_VID - 1 || qos > 7)
8466                 return -EINVAL;
8467         if (proto != htons(ETH_P_8021Q))
8468                 return -EPROTONOSUPPORT;
8469
8470         state = hclge_get_port_base_vlan_state(vport,
8471                                                vport->port_base_vlan_cfg.state,
8472                                                vlan);
8473         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8474                 return 0;
8475
8476         vlan_info.vlan_tag = vlan;
8477         vlan_info.qos = qos;
8478         vlan_info.vlan_proto = ntohs(proto);
8479
8480         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8481                 return hclge_update_port_base_vlan_cfg(vport, state,
8482                                                        &vlan_info);
8483         } else {
8484                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8485                                                         vport->vport_id, state,
8486                                                         vlan, qos,
8487                                                         ntohs(proto));
8488                 return ret;
8489         }
8490 }
8491
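/* Add or remove a VLAN filter entry for the vport. Hardware is only touched
 * while port-based VLAN is disabled; otherwise the change is just tracked in
 * the vport VLAN list and written out once port-based VLAN is turned off.
 */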
8492 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8493                           u16 vlan_id, bool is_kill)
8494 {
8495         struct hclge_vport *vport = hclge_get_vport(handle);
8496         struct hclge_dev *hdev = vport->back;
8497         bool writen_to_tbl = false;
8498         int ret = 0;
8499
8500         /* When the device is resetting, the firmware is unable to handle
8501          * the mailbox. Just record the vlan id, and remove it after the
8502          * reset has finished.
8503          */
8504         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8505                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8506                 return -EBUSY;
8507         }
8508
8509         /* When port-based vlan is enabled, the port-based vlan is used as
8510          * the vlan filter entry. In this case, the vlan filter table is not
8511          * updated when the user adds or removes a vlan; only the vport vlan
8512          * list is updated. The vlan ids in the list are written to the vlan
8513          * filter table once port-based vlan is disabled.
8514          */
8515         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8516                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8517                                                vlan_id, is_kill);
8518                 writen_to_tbl = true;
8519         }
8520
8521         if (!ret) {
8522                 if (is_kill)
8523                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8524                 else
8525                         hclge_add_vport_vlan_table(vport, vlan_id,
8526                                                    writen_to_tbl);
8527         } else if (is_kill) {
8528                 /* when removing the hw vlan filter failed, record the vlan
8529                  * id, and try to remove it from hw later, to stay consistent
8530                  * with the stack
8531                  */
8532                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8533         }
8534         return ret;
8535 }
8536
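/* Retry VLAN deletions that failed while the device was resetting. At most
 * HCLGE_MAX_SYNC_COUNT entries are handled per call.
 */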
8537 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8538 {
8539 #define HCLGE_MAX_SYNC_COUNT    60
8540
8541         int i, ret, sync_cnt = 0;
8542         u16 vlan_id;
8543
8544         /* walk every vport, including vport 0 (the PF itself) */
8545         for (i = 0; i < hdev->num_alloc_vport; i++) {
8546                 struct hclge_vport *vport = &hdev->vport[i];
8547
8548                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8549                                          VLAN_N_VID);
8550                 while (vlan_id != VLAN_N_VID) {
8551                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8552                                                        vport->vport_id, vlan_id,
8553                                                        true);
8554                         if (ret && ret != -EINVAL)
8555                                 return;
8556
8557                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8558                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8559
8560                         sync_cnt++;
8561                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8562                                 return;
8563
8564                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8565                                                  VLAN_N_VID);
8566                 }
8567         }
8568 }
8569
8570 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8571 {
8572         struct hclge_config_max_frm_size_cmd *req;
8573         struct hclge_desc desc;
8574
8575         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8576
8577         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8578         req->max_frm_size = cpu_to_le16(new_mps);
8579         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8580
8581         return hclge_cmd_send(&hdev->hw, &desc, 1);
8582 }
8583
8584 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8585 {
8586         struct hclge_vport *vport = hclge_get_vport(handle);
8587
8588         return hclge_set_vport_mtu(vport, new_mtu);
8589 }
8590
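/* Set the MTU for a vport. A VF only records its mps (which must fit within
 * the PF's); the PF additionally reprograms the MAC frame size and
 * reallocates packet buffers, with the client paused around the change.
 */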
8591 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8592 {
8593         struct hclge_dev *hdev = vport->back;
8594         int i, max_frm_size, ret;
8595
8596         /* HW supports two layers of VLAN tags */
8597         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8598         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8599             max_frm_size > HCLGE_MAC_MAX_FRAME)
8600                 return -EINVAL;
8601
8602         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8603         mutex_lock(&hdev->vport_lock);
8604         /* VF's mps must fit within hdev->mps */
8605         if (vport->vport_id && max_frm_size > hdev->mps) {
8606                 mutex_unlock(&hdev->vport_lock);
8607                 return -EINVAL;
8608         } else if (vport->vport_id) {
8609                 vport->mps = max_frm_size;
8610                 mutex_unlock(&hdev->vport_lock);
8611                 return 0;
8612         }
8613
8614         /* PF's mps must not be less than any VF's mps */
8615         for (i = 1; i < hdev->num_alloc_vport; i++)
8616                 if (max_frm_size < hdev->vport[i].mps) {
8617                         mutex_unlock(&hdev->vport_lock);
8618                         return -EINVAL;
8619                 }
8620
8621         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8622
8623         ret = hclge_set_mac_mtu(hdev, max_frm_size);
8624         if (ret) {
8625                 dev_err(&hdev->pdev->dev,
8626                         "Change mtu fail, ret =%d\n", ret);
8627                 goto out;
8628         }
8629
8630         hdev->mps = max_frm_size;
8631         vport->mps = max_frm_size;
8632
8633         ret = hclge_buffer_alloc(hdev);
8634         if (ret)
8635                 dev_err(&hdev->pdev->dev,
8636                         "Allocate buffer fail, ret =%d\n", ret);
8637
8638 out:
8639         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8640         mutex_unlock(&hdev->vport_lock);
8641         return ret;
8642 }
8643
8644 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8645                                     bool enable)
8646 {
8647         struct hclge_reset_tqp_queue_cmd *req;
8648         struct hclge_desc desc;
8649         int ret;
8650
8651         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8652
8653         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8654         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8655         if (enable)
8656                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8657
8658         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8659         if (ret) {
8660                 dev_err(&hdev->pdev->dev,
8661                         "Send tqp reset cmd error, status =%d\n", ret);
8662                 return ret;
8663         }
8664
8665         return 0;
8666 }
8667
8668 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8669 {
8670         struct hclge_reset_tqp_queue_cmd *req;
8671         struct hclge_desc desc;
8672         int ret;
8673
8674         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8675
8676         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8677         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8678
8679         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8680         if (ret) {
8681                 dev_err(&hdev->pdev->dev,
8682                         "Get reset status error, status =%d\n", ret);
8683                 return ret;
8684         }
8685
8686         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8687 }
8688
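/* Translate a handle-local queue id into the global TQP index used by
 * queue-level commands.
 */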
8689 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8690 {
8691         struct hnae3_queue *queue;
8692         struct hclge_tqp *tqp;
8693
8694         queue = handle->kinfo.tqp[queue_id];
8695         tqp = container_of(queue, struct hclge_tqp, q);
8696
8697         return tqp->index;
8698 }
8699
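/* Reset a single TQP: disable the queue, assert the per-queue soft reset,
 * poll for the hardware ready flag up to HCLGE_TQP_RESET_TRY_TIMES, then
 * deassert the reset.
 */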
8700 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8701 {
8702         struct hclge_vport *vport = hclge_get_vport(handle);
8703         struct hclge_dev *hdev = vport->back;
8704         int reset_try_times = 0;
8705         int reset_status;
8706         u16 queue_gid;
8707         int ret;
8708
8709         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8710
8711         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8712         if (ret) {
8713                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8714                 return ret;
8715         }
8716
8717         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8718         if (ret) {
8719                 dev_err(&hdev->pdev->dev,
8720                         "Send reset tqp cmd fail, ret = %d\n", ret);
8721                 return ret;
8722         }
8723
8724         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8725                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8726                 if (reset_status)
8727                         break;
8728
8729                 /* Wait for tqp hw reset */
8730                 usleep_range(1000, 1200);
8731         }
8732
8733         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8734                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8735                 return -ETIME;
8736         }
8737
8738         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8739         if (ret)
8740                 dev_err(&hdev->pdev->dev,
8741                         "Deassert the soft reset fail, ret = %d\n", ret);
8742
8743         return ret;
8744 }
8745
8746 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8747 {
8748         struct hclge_dev *hdev = vport->back;
8749         int reset_try_times = 0;
8750         int reset_status;
8751         u16 queue_gid;
8752         int ret;
8753
8754         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8755
8756         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8757         if (ret) {
8758                 dev_warn(&hdev->pdev->dev,
8759                          "Send reset tqp cmd fail, ret = %d\n", ret);
8760                 return;
8761         }
8762
8763         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8764                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8765                 if (reset_status)
8766                         break;
8767
8768                 /* Wait for tqp hw reset */
8769                 usleep_range(1000, 1200);
8770         }
8771
8772         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8773                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8774                 return;
8775         }
8776
8777         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8778         if (ret)
8779                 dev_warn(&hdev->pdev->dev,
8780                          "Deassert the soft reset fail, ret = %d\n", ret);
8781 }
8782
8783 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8784 {
8785         struct hclge_vport *vport = hclge_get_vport(handle);
8786         struct hclge_dev *hdev = vport->back;
8787
8788         return hdev->fw_version;
8789 }
8790
8791 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8792 {
8793         struct phy_device *phydev = hdev->hw.mac.phydev;
8794
8795         if (!phydev)
8796                 return;
8797
8798         phy_set_asym_pause(phydev, rx_en, tx_en);
8799 }
8800
8801 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8802 {
8803         int ret;
8804
8805         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8806                 return 0;
8807
8808         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8809         if (ret)
8810                 dev_err(&hdev->pdev->dev,
8811                         "configure pauseparam error, ret = %d.\n", ret);
8812
8813         return ret;
8814 }
8815
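/* Resolve flow control from the PHY autoneg result (local vs. link partner
 * pause advertisement) and program MAC pause accordingly; pause is forced
 * off for half duplex.
 */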
8816 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8817 {
8818         struct phy_device *phydev = hdev->hw.mac.phydev;
8819         u16 remote_advertising = 0;
8820         u16 local_advertising;
8821         u32 rx_pause, tx_pause;
8822         u8 flowctl;
8823
8824         if (!phydev->link || !phydev->autoneg)
8825                 return 0;
8826
8827         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8828
8829         if (phydev->pause)
8830                 remote_advertising = LPA_PAUSE_CAP;
8831
8832         if (phydev->asym_pause)
8833                 remote_advertising |= LPA_PAUSE_ASYM;
8834
8835         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8836                                            remote_advertising);
8837         tx_pause = flowctl & FLOW_CTRL_TX;
8838         rx_pause = flowctl & FLOW_CTRL_RX;
8839
8840         if (phydev->duplex == HCLGE_MAC_HALF) {
8841                 tx_pause = 0;
8842                 rx_pause = 0;
8843         }
8844
8845         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8846 }
8847
8848 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8849                                  u32 *rx_en, u32 *tx_en)
8850 {
8851         struct hclge_vport *vport = hclge_get_vport(handle);
8852         struct hclge_dev *hdev = vport->back;
8853         struct phy_device *phydev = hdev->hw.mac.phydev;
8854
8855         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8856
8857         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8858                 *rx_en = 0;
8859                 *tx_en = 0;
8860                 return;
8861         }
8862
8863         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8864                 *rx_en = 1;
8865                 *tx_en = 0;
8866         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8867                 *tx_en = 1;
8868                 *rx_en = 0;
8869         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8870                 *rx_en = 1;
8871                 *tx_en = 1;
8872         } else {
8873                 *rx_en = 0;
8874                 *tx_en = 0;
8875         }
8876 }
8877
8878 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8879                                          u32 rx_en, u32 tx_en)
8880 {
8881         if (rx_en && tx_en)
8882                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8883         else if (rx_en && !tx_en)
8884                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8885         else if (!rx_en && tx_en)
8886                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8887         else
8888                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8889
8890         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8891 }
8892
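/* Set pause parameters (the ethtool -A path): reject autoneg changes here
 * and any change while PFC is active, record the requested mode, then either
 * program MAC pause directly or restart PHY autonegotiation.
 */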
8893 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8894                                 u32 rx_en, u32 tx_en)
8895 {
8896         struct hclge_vport *vport = hclge_get_vport(handle);
8897         struct hclge_dev *hdev = vport->back;
8898         struct phy_device *phydev = hdev->hw.mac.phydev;
8899         u32 fc_autoneg;
8900
8901         if (phydev) {
8902                 fc_autoneg = hclge_get_autoneg(handle);
8903                 if (auto_neg != fc_autoneg) {
8904                         dev_info(&hdev->pdev->dev,
8905                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8906                         return -EOPNOTSUPP;
8907                 }
8908         }
8909
8910         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8911                 dev_info(&hdev->pdev->dev,
8912                          "Priority flow control enabled. Cannot set link flow control.\n");
8913                 return -EOPNOTSUPP;
8914         }
8915
8916         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8917
8918         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8919
8920         if (!auto_neg)
8921                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8922
8923         if (phydev)
8924                 return phy_start_aneg(phydev);
8925
8926         return -EOPNOTSUPP;
8927 }
8928
8929 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8930                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8931 {
8932         struct hclge_vport *vport = hclge_get_vport(handle);
8933         struct hclge_dev *hdev = vport->back;
8934
8935         if (speed)
8936                 *speed = hdev->hw.mac.speed;
8937         if (duplex)
8938                 *duplex = hdev->hw.mac.duplex;
8939         if (auto_neg)
8940                 *auto_neg = hdev->hw.mac.autoneg;
8941 }
8942
8943 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8944                                  u8 *module_type)
8945 {
8946         struct hclge_vport *vport = hclge_get_vport(handle);
8947         struct hclge_dev *hdev = vport->back;
8948
8949         if (media_type)
8950                 *media_type = hdev->hw.mac.media_type;
8951
8952         if (module_type)
8953                 *module_type = hdev->hw.mac.module_type;
8954 }
8955
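/* Read MDI/MDI-X control and status from the PHY's MDIX page and map them to
 * the ethtool ETH_TP_MDI* values; both report invalid when no PHY is
 * attached, and the status is invalid while speed/duplex is unresolved.
 */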
8956 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8957                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8958 {
8959         struct hclge_vport *vport = hclge_get_vport(handle);
8960         struct hclge_dev *hdev = vport->back;
8961         struct phy_device *phydev = hdev->hw.mac.phydev;
8962         int mdix_ctrl, mdix, is_resolved;
8963         unsigned int retval;
8964
8965         if (!phydev) {
8966                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8967                 *tp_mdix = ETH_TP_MDI_INVALID;
8968                 return;
8969         }
8970
8971         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8972
8973         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8974         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8975                                     HCLGE_PHY_MDIX_CTRL_S);
8976
8977         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8978         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8979         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8980
8981         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8982
8983         switch (mdix_ctrl) {
8984         case 0x0:
8985                 *tp_mdix_ctrl = ETH_TP_MDI;
8986                 break;
8987         case 0x1:
8988                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8989                 break;
8990         case 0x3:
8991                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8992                 break;
8993         default:
8994                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8995                 break;
8996         }
8997
8998         if (!is_resolved)
8999                 *tp_mdix = ETH_TP_MDI_INVALID;
9000         else if (mdix)
9001                 *tp_mdix = ETH_TP_MDI_X;
9002         else
9003                 *tp_mdix = ETH_TP_MDI;
9004 }
9005
9006 static void hclge_info_show(struct hclge_dev *hdev)
9007 {
9008         struct device *dev = &hdev->pdev->dev;
9009
9010         dev_info(dev, "PF info begin:\n");
9011
9012         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9013         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9014         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9015         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9016         dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9017         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9018         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9019         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9020         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9021         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9022         dev_info(dev, "This is %s PF\n",
9023                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9024         dev_info(dev, "DCB %s\n",
9025                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9026         dev_info(dev, "MQPRIO %s\n",
9027                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9028
9029         dev_info(dev, "PF info end.\n");
9030 }
9031
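/* Bring up the NIC client on a vport: init the client instance, back out
 * with -EBUSY if a reset started in the meantime, then enable NIC hw error
 * interrupts.
 */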
9032 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9033                                           struct hclge_vport *vport)
9034 {
9035         struct hnae3_client *client = vport->nic.client;
9036         struct hclge_dev *hdev = ae_dev->priv;
9037         int rst_cnt = hdev->rst_stats.reset_cnt;
9038         int ret;
9039
9040         ret = client->ops->init_instance(&vport->nic);
9041         if (ret)
9042                 return ret;
9043
9044         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9045         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9046             rst_cnt != hdev->rst_stats.reset_cnt) {
9047                 ret = -EBUSY;
9048                 goto init_nic_err;
9049         }
9050
9051         /* Enable nic hw error interrupts */
9052         ret = hclge_config_nic_hw_error(hdev, true);
9053         if (ret) {
9054                 dev_err(&ae_dev->pdev->dev,
9055                         "fail(%d) to enable hw error interrupts\n", ret);
9056                 goto init_nic_err;
9057         }
9058
9059         hnae3_set_client_init_flag(client, ae_dev, 1);
9060
9061         if (netif_msg_drv(&hdev->vport->nic))
9062                 hclge_info_show(hdev);
9063
9064         return ret;
9065
9066 init_nic_err:
9067         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9068         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9069                 msleep(HCLGE_WAIT_RESET_DONE);
9070
9071         client->ops->uninit_instance(&vport->nic, 0);
9072
9073         return ret;
9074 }
9075
9076 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9077                                            struct hclge_vport *vport)
9078 {
9079         struct hnae3_client *client = vport->roce.client;
9080         struct hclge_dev *hdev = ae_dev->priv;
9081         int rst_cnt;
9082         int ret;
9083
9084         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9085             !hdev->nic_client)
9086                 return 0;
9087
9088         client = hdev->roce_client;
9089         ret = hclge_init_roce_base_info(vport);
9090         if (ret)
9091                 return ret;
9092
9093         rst_cnt = hdev->rst_stats.reset_cnt;
9094         ret = client->ops->init_instance(&vport->roce);
9095         if (ret)
9096                 return ret;
9097
9098         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9099         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9100             rst_cnt != hdev->rst_stats.reset_cnt) {
9101                 ret = -EBUSY;
9102                 goto init_roce_err;
9103         }
9104
9105         /* Enable roce ras interrupts */
9106         ret = hclge_config_rocee_ras_interrupt(hdev, true);
9107         if (ret) {
9108                 dev_err(&ae_dev->pdev->dev,
9109                         "fail(%d) to enable roce ras interrupts\n", ret);
9110                 goto init_roce_err;
9111         }
9112
9113         hnae3_set_client_init_flag(client, ae_dev, 1);
9114
9115         return 0;
9116
9117 init_roce_err:
9118         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9119         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9120                 msleep(HCLGE_WAIT_RESET_DONE);
9121
9122         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9123
9124         return ret;
9125 }
9126
9127 static int hclge_init_client_instance(struct hnae3_client *client,
9128                                       struct hnae3_ae_dev *ae_dev)
9129 {
9130         struct hclge_dev *hdev = ae_dev->priv;
9131         struct hclge_vport *vport;
9132         int i, ret;
9133
9134         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9135                 vport = &hdev->vport[i];
9136
9137                 switch (client->type) {
9138                 case HNAE3_CLIENT_KNIC:
9139                         hdev->nic_client = client;
9140                         vport->nic.client = client;
9141                         ret = hclge_init_nic_client_instance(ae_dev, vport);
9142                         if (ret)
9143                                 goto clear_nic;
9144
9145                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9146                         if (ret)
9147                                 goto clear_roce;
9148
9149                         break;
9150                 case HNAE3_CLIENT_ROCE:
9151                         if (hnae3_dev_roce_supported(hdev)) {
9152                                 hdev->roce_client = client;
9153                                 vport->roce.client = client;
9154                         }
9155
9156                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9157                         if (ret)
9158                                 goto clear_roce;
9159
9160                         break;
9161                 default:
9162                         return -EINVAL;
9163                 }
9164         }
9165
9166         return 0;
9167
9168 clear_nic:
9169         hdev->nic_client = NULL;
9170         vport->nic.client = NULL;
9171         return ret;
9172 clear_roce:
9173         hdev->roce_client = NULL;
9174         vport->roce.client = NULL;
9175         return ret;
9176 }
9177
9178 static void hclge_uninit_client_instance(struct hnae3_client *client,
9179                                          struct hnae3_ae_dev *ae_dev)
9180 {
9181         struct hclge_dev *hdev = ae_dev->priv;
9182         struct hclge_vport *vport;
9183         int i;
9184
9185         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9186                 vport = &hdev->vport[i];
9187                 if (hdev->roce_client) {
9188                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9189                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9190                                 msleep(HCLGE_WAIT_RESET_DONE);
9191
9192                         hdev->roce_client->ops->uninit_instance(&vport->roce,
9193                                                                 0);
9194                         hdev->roce_client = NULL;
9195                         vport->roce.client = NULL;
9196                 }
9197                 if (client->type == HNAE3_CLIENT_ROCE)
9198                         return;
9199                 if (hdev->nic_client && client->ops->uninit_instance) {
9200                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9201                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9202                                 msleep(HCLGE_WAIT_RESET_DONE);
9203
9204                         client->ops->uninit_instance(&vport->nic, 0);
9205                         hdev->nic_client = NULL;
9206                         vport->nic.client = NULL;
9207                 }
9208         }
9209 }
9210
9211 static int hclge_pci_init(struct hclge_dev *hdev)
9212 {
9213         struct pci_dev *pdev = hdev->pdev;
9214         struct hclge_hw *hw;
9215         int ret;
9216
9217         ret = pci_enable_device(pdev);
9218         if (ret) {
9219                 dev_err(&pdev->dev, "failed to enable PCI device\n");
9220                 return ret;
9221         }
9222
9223         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9224         if (ret) {
9225                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9226                 if (ret) {
9227                         dev_err(&pdev->dev,
9228                                 "can't set consistent PCI DMA\n");
9229                         goto err_disable_device;
9230                 }
9231                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9232         }
9233
9234         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9235         if (ret) {
9236                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9237                 goto err_disable_device;
9238         }
9239
9240         pci_set_master(pdev);
9241         hw = &hdev->hw;
9242         hw->io_base = pcim_iomap(pdev, 2, 0);
9243         if (!hw->io_base) {
9244                 dev_err(&pdev->dev, "Can't map configuration register space\n");
9245                 ret = -ENOMEM;
9246                 goto err_clr_master;
9247         }
9248
9249         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9250
9251         return 0;
9252 err_clr_master:
9253         pci_clear_master(pdev);
9254         pci_release_regions(pdev);
9255 err_disable_device:
9256         pci_disable_device(pdev);
9257
9258         return ret;
9259 }
9260
9261 static void hclge_pci_uninit(struct hclge_dev *hdev)
9262 {
9263         struct pci_dev *pdev = hdev->pdev;
9264
9265         pcim_iounmap(pdev, hdev->hw.io_base);
9266         pci_free_irq_vectors(pdev);
9267         pci_clear_master(pdev);
9268         pci_release_mem_regions(pdev);
9269         pci_disable_device(pdev);
9270 }
9271
9272 static void hclge_state_init(struct hclge_dev *hdev)
9273 {
9274         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9275         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9276         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9277         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9278         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9279         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9280         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9281 }
9282
9283 static void hclge_state_uninit(struct hclge_dev *hdev)
9284 {
9285         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9286         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9287
9288         if (hdev->reset_timer.function)
9289                 del_timer_sync(&hdev->reset_timer);
9290         if (hdev->service_task.work.func)
9291                 cancel_delayed_work_sync(&hdev->service_task);
9292 }
9293
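/* Prepare the device for an FLR: take the reset semaphore, run the reset
 * prepare sequence (retrying a few times on failure), then disable the misc
 * vector and mark the command queue disabled until the FLR completes.
 */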
9294 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9295 {
9296 #define HCLGE_FLR_RETRY_WAIT_MS 500
9297 #define HCLGE_FLR_RETRY_CNT     5
9298
9299         struct hclge_dev *hdev = ae_dev->priv;
9300         int retry_cnt = 0;
9301         int ret;
9302
9303 retry:
9304         down(&hdev->reset_sem);
9305         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9306         hdev->reset_type = HNAE3_FLR_RESET;
9307         ret = hclge_reset_prepare(hdev);
9308         if (ret) {
9309                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9310                         ret);
9311                 if (hdev->reset_pending ||
9312                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9313                         dev_err(&hdev->pdev->dev,
9314                                 "reset_pending:0x%lx, retry_cnt:%d\n",
9315                                 hdev->reset_pending, retry_cnt);
9316                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9317                         up(&hdev->reset_sem);
9318                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
9319                         goto retry;
9320                 }
9321         }
9322
9323         /* disable misc vector before FLR done */
9324         hclge_enable_vector(&hdev->misc_vector, false);
9325         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9326         hdev->rst_stats.flr_rst_cnt++;
9327 }
9328
9329 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9330 {
9331         struct hclge_dev *hdev = ae_dev->priv;
9332         int ret;
9333
9334         hclge_enable_vector(&hdev->misc_vector, true);
9335
9336         ret = hclge_reset_rebuild(hdev);
9337         if (ret)
9338                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9339
9340         hdev->reset_type = HNAE3_NONE_RESET;
9341         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9342         up(&hdev->reset_sem);
9343 }
9344
9345 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9346 {
9347         u16 i;
9348
9349         for (i = 0; i < hdev->num_alloc_vport; i++) {
9350                 struct hclge_vport *vport = &hdev->vport[i];
9351                 int ret;
9352
9353                  /* Send cmd to clear VF's FUNC_RST_ING */
9354                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9355                 if (ret)
9356                         dev_warn(&hdev->pdev->dev,
9357                                  "clear vf(%u) rst failed %d!\n",
9358                                  vport->vport_id, ret);
9359         }
9360 }
9361
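/* Main PF initialization: bring up PCI, the command queue, MSI-X and vports,
 * then MAC, VLAN, TM, RSS and flow director, and finally enable the misc
 * vector and kick off the service task.
 */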
9362 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9363 {
9364         struct pci_dev *pdev = ae_dev->pdev;
9365         struct hclge_dev *hdev;
9366         int ret;
9367
9368         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9369         if (!hdev) {
9370                 ret = -ENOMEM;
9371                 goto out;
9372         }
9373
9374         hdev->pdev = pdev;
9375         hdev->ae_dev = ae_dev;
9376         hdev->reset_type = HNAE3_NONE_RESET;
9377         hdev->reset_level = HNAE3_FUNC_RESET;
9378         ae_dev->priv = hdev;
9379
9380         /* HW supports two layers of VLAN tags */
9381         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9382
9383         mutex_init(&hdev->vport_lock);
9384         spin_lock_init(&hdev->fd_rule_lock);
9385         sema_init(&hdev->reset_sem, 1);
9386
9387         ret = hclge_pci_init(hdev);
9388         if (ret)
9389                 goto out;
9390
9391         /* Firmware command queue initialize */
9392         ret = hclge_cmd_queue_init(hdev);
9393         if (ret)
9394                 goto err_pci_uninit;
9395
9396         /* Firmware command initialize */
9397         ret = hclge_cmd_init(hdev);
9398         if (ret)
9399                 goto err_cmd_uninit;
9400
9401         ret = hclge_get_cap(hdev);
9402         if (ret)
9403                 goto err_cmd_uninit;
9404
9405         ret = hclge_configure(hdev);
9406         if (ret) {
9407                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9408                 goto err_cmd_uninit;
9409         }
9410
9411         ret = hclge_init_msi(hdev);
9412         if (ret) {
9413                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9414                 goto err_cmd_uninit;
9415         }
9416
9417         ret = hclge_misc_irq_init(hdev);
9418         if (ret)
9419                 goto err_msi_uninit;
9420
9421         ret = hclge_alloc_tqps(hdev);
9422         if (ret) {
9423                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9424                 goto err_msi_irq_uninit;
9425         }
9426
9427         ret = hclge_alloc_vport(hdev);
9428         if (ret)
9429                 goto err_msi_irq_uninit;
9430
9431         ret = hclge_map_tqp(hdev);
9432         if (ret)
9433                 goto err_msi_irq_uninit;
9434
9435         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9436                 ret = hclge_mac_mdio_config(hdev);
9437                 if (ret)
9438                         goto err_msi_irq_uninit;
9439         }
9440
9441         ret = hclge_init_umv_space(hdev);
9442         if (ret)
9443                 goto err_mdiobus_unreg;
9444
9445         ret = hclge_mac_init(hdev);
9446         if (ret) {
9447                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9448                 goto err_mdiobus_unreg;
9449         }
9450
9451         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9452         if (ret) {
9453                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9454                 goto err_mdiobus_unreg;
9455         }
9456
9457         ret = hclge_config_gro(hdev, true);
9458         if (ret)
9459                 goto err_mdiobus_unreg;
9460
9461         ret = hclge_init_vlan_config(hdev);
9462         if (ret) {
9463                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9464                 goto err_mdiobus_unreg;
9465         }
9466
9467         ret = hclge_tm_schd_init(hdev);
9468         if (ret) {
9469                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9470                 goto err_mdiobus_unreg;
9471         }
9472
9473         hclge_rss_init_cfg(hdev);
9474         ret = hclge_rss_init_hw(hdev);
9475         if (ret) {
9476                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9477                 goto err_mdiobus_unreg;
9478         }
9479
9480         ret = init_mgr_tbl(hdev);
9481         if (ret) {
9482                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9483                 goto err_mdiobus_unreg;
9484         }
9485
9486         ret = hclge_init_fd_config(hdev);
9487         if (ret) {
9488                 dev_err(&pdev->dev,
9489                         "fd table init fail, ret=%d\n", ret);
9490                 goto err_mdiobus_unreg;
9491         }
9492
9493         INIT_KFIFO(hdev->mac_tnl_log);
9494
9495         hclge_dcb_ops_set(hdev);
9496
9497         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9498         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9499
9500         /* Set up affinity after the service timer setup because add_timer_on
9501          * is called in the affinity notify callback.
9502          */
9503         hclge_misc_affinity_setup(hdev);
9504
9505         hclge_clear_all_event_cause(hdev);
9506         hclge_clear_resetting_state(hdev);
9507
9508         /* Log and clear the hw errors that have already occurred */
9509         hclge_handle_all_hns_hw_errors(ae_dev);
9510
9511         /* request a delayed reset for error recovery, since an immediate
9512          * global reset on a PF would disturb other PFs' pending initialization
9513          */
9514         if (ae_dev->hw_err_reset_req) {
9515                 enum hnae3_reset_type reset_level;
9516
9517                 reset_level = hclge_get_reset_level(ae_dev,
9518                                                     &ae_dev->hw_err_reset_req);
9519                 hclge_set_def_reset_request(ae_dev, reset_level);
9520                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9521         }
9522
9523         /* Enable MISC vector(vector0) */
9524         hclge_enable_vector(&hdev->misc_vector, true);
9525
9526         hclge_state_init(hdev);
9527         hdev->last_reset_time = jiffies;
9528
9529         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9530                  HCLGE_DRIVER_NAME);
9531
9532         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9533
9534         return 0;
9535
9536 err_mdiobus_unreg:
9537         if (hdev->hw.mac.phydev)
9538                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9539 err_msi_irq_uninit:
9540         hclge_misc_irq_uninit(hdev);
9541 err_msi_uninit:
9542         pci_free_irq_vectors(pdev);
9543 err_cmd_uninit:
9544         hclge_cmd_uninit(hdev);
9545 err_pci_uninit:
9546         pcim_iounmap(pdev, hdev->hw.io_base);
9547         pci_clear_master(pdev);
9548         pci_release_regions(pdev);
9549         pci_disable_device(pdev);
9550 out:
9551         return ret;
9552 }
9553
9554 static void hclge_stats_clear(struct hclge_dev *hdev)
9555 {
9556         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9557 }
9558
9559 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9560 {
9561         return hclge_config_switch_param(hdev, vf, enable,
9562                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
9563 }
9564
9565 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9566 {
9567         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9568                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
9569                                           enable, vf);
9570 }
9571
9572 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9573 {
9574         int ret;
9575
9576         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9577         if (ret) {
9578                 dev_err(&hdev->pdev->dev,
9579                         "Set vf %d mac spoof check %s failed, ret=%d\n",
9580                         vf, enable ? "on" : "off", ret);
9581                 return ret;
9582         }
9583
9584         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9585         if (ret)
9586                 dev_err(&hdev->pdev->dev,
9587                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
9588                         vf, enable ? "on" : "off", ret);
9589
9590         return ret;
9591 }
9592
9593 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9594                                  bool enable)
9595 {
9596         struct hclge_vport *vport = hclge_get_vport(handle);
9597         struct hclge_dev *hdev = vport->back;
9598         u32 new_spoofchk = enable ? 1 : 0;
9599         int ret;
9600
9601         if (hdev->pdev->revision == 0x20)
9602                 return -EOPNOTSUPP;
9603
9604         vport = hclge_get_vf_vport(hdev, vf);
9605         if (!vport)
9606                 return -EINVAL;
9607
9608         if (vport->vf_info.spoofchk == new_spoofchk)
9609                 return 0;
9610
9611         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9612                 dev_warn(&hdev->pdev->dev,
9613                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9614                          vf);
9615         else if (enable && hclge_is_umv_space_full(vport))
9616                 dev_warn(&hdev->pdev->dev,
9617                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9618                          vf);
9619
9620         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9621         if (ret)
9622                 return ret;
9623
9624         vport->vf_info.spoofchk = new_spoofchk;
9625         return 0;
9626 }
9627
9628 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9629 {
9630         struct hclge_vport *vport = hdev->vport;
9631         int ret;
9632         int i;
9633
9634         if (hdev->pdev->revision == 0x20)
9635                 return 0;
9636
9637         /* resume the vf spoof check state after reset */
9638         for (i = 0; i < hdev->num_alloc_vport; i++) {
9639                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9640                                                vport->vf_info.spoofchk);
9641                 if (ret)
9642                         return ret;
9643
9644                 vport++;
9645         }
9646
9647         return 0;
9648 }
9649
9650 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9651 {
9652         struct hclge_vport *vport = hclge_get_vport(handle);
9653         struct hclge_dev *hdev = vport->back;
9654         u32 new_trusted = enable ? 1 : 0;
9655         bool en_bc_pmc;
9656         int ret;
9657
9658         vport = hclge_get_vf_vport(hdev, vf);
9659         if (!vport)
9660                 return -EINVAL;
9661
9662         if (vport->vf_info.trusted == new_trusted)
9663                 return 0;
9664
9665         /* Disable promisc mode for VF if it is not trusted any more. */
9666         if (!enable && vport->vf_info.promisc_enable) {
9667                 en_bc_pmc = hdev->pdev->revision != 0x20;
9668                 ret = hclge_set_vport_promisc_mode(vport, false, false,
9669                                                    en_bc_pmc);
9670                 if (ret)
9671                         return ret;
9672                 vport->vf_info.promisc_enable = 0;
9673                 hclge_inform_vf_promisc_info(vport);
9674         }
9675
9676         vport->vf_info.trusted = new_trusted;
9677
9678         return 0;
9679 }
9680
9681 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9682 {
9683         int ret;
9684         int vf;
9685
9686         /* reset vf rate to default value */
9687         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9688                 struct hclge_vport *vport = &hdev->vport[vf];
9689
9690                 vport->vf_info.max_tx_rate = 0;
9691                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9692                 if (ret)
9693                         dev_err(&hdev->pdev->dev,
9694                                 "vf%d failed to reset to default, ret=%d\n",
9695                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9696         }
9697 }
9698
9699 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9700                                      int min_tx_rate, int max_tx_rate)
9701 {
9702         if (min_tx_rate != 0 ||
9703             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9704                 dev_err(&hdev->pdev->dev,
9705                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9706                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9707                 return -EINVAL;
9708         }
9709
9710         return 0;
9711 }
9712
9713 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9714                              int min_tx_rate, int max_tx_rate, bool force)
9715 {
9716         struct hclge_vport *vport = hclge_get_vport(handle);
9717         struct hclge_dev *hdev = vport->back;
9718         int ret;
9719
9720         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9721         if (ret)
9722                 return ret;
9723
9724         vport = hclge_get_vf_vport(hdev, vf);
9725         if (!vport)
9726                 return -EINVAL;
9727
9728         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9729                 return 0;
9730
9731         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9732         if (ret)
9733                 return ret;
9734
9735         vport->vf_info.max_tx_rate = max_tx_rate;
9736
9737         return 0;
9738 }
9739
9740 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9741 {
9742         struct hnae3_handle *handle = &hdev->vport->nic;
9743         struct hclge_vport *vport;
9744         int ret;
9745         int vf;
9746
9747         /* resume the vf max_tx_rate after reset */
9748         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9749                 vport = hclge_get_vf_vport(hdev, vf);
9750                 if (!vport)
9751                         return -EINVAL;
9752
9753                 /* zero means max rate; after reset, the firmware has already
9754                  * set it to max rate, so just continue.
9755                  */
9756                 if (!vport->vf_info.max_tx_rate)
9757                         continue;
9758
9759                 ret = hclge_set_vf_rate(handle, vf, 0,
9760                                         vport->vf_info.max_tx_rate, true);
9761                 if (ret) {
9762                         dev_err(&hdev->pdev->dev,
9763                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9764                                 vf, vport->vf_info.max_tx_rate, ret);
9765                         return ret;
9766                 }
9767         }
9768
9769         return 0;
9770 }
9771
9772 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9773 {
9774         struct hclge_vport *vport = hdev->vport;
9775         int i;
9776
9777         for (i = 0; i < hdev->num_alloc_vport; i++) {
9778                 hclge_vport_stop(vport);
9779                 vport++;
9780         }
9781 }
9782
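/* Re-initialize hardware state after a reset: clear stats and VLAN tables,
 * then reprogram MAC, VLAN, TM, RSS, FD and error interrupts, and restore
 * per-VF spoof check and rate settings.
 */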
9783 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9784 {
9785         struct hclge_dev *hdev = ae_dev->priv;
9786         struct pci_dev *pdev = ae_dev->pdev;
9787         int ret;
9788
9789         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9790
9791         hclge_stats_clear(hdev);
9792         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9793         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9794
9795         ret = hclge_cmd_init(hdev);
9796         if (ret) {
9797                 dev_err(&pdev->dev, "Cmd queue init failed\n");
9798                 return ret;
9799         }
9800
9801         ret = hclge_map_tqp(hdev);
9802         if (ret) {
9803                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9804                 return ret;
9805         }
9806
9807         hclge_reset_umv_space(hdev);
9808
9809         ret = hclge_mac_init(hdev);
9810         if (ret) {
9811                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9812                 return ret;
9813         }
9814
9815         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9816         if (ret) {
9817                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9818                 return ret;
9819         }
9820
9821         ret = hclge_config_gro(hdev, true);
9822         if (ret)
9823                 return ret;
9824
9825         ret = hclge_init_vlan_config(hdev);
9826         if (ret) {
9827                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9828                 return ret;
9829         }
9830
9831         ret = hclge_tm_init_hw(hdev, true);
9832         if (ret) {
9833                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9834                 return ret;
9835         }
9836
9837         ret = hclge_rss_init_hw(hdev);
9838         if (ret) {
9839                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9840                 return ret;
9841         }
9842
9843         ret = init_mgr_tbl(hdev);
9844         if (ret) {
9845                 dev_err(&pdev->dev,
9846                         "failed to reinit manager table, ret = %d\n", ret);
9847                 return ret;
9848         }
9849
9850         ret = hclge_init_fd_config(hdev);
9851         if (ret) {
9852                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9853                 return ret;
9854         }
9855
9856         /* Log and clear the hw errors that have already occurred */
9857         hclge_handle_all_hns_hw_errors(ae_dev);
9858
9859         /* Re-enable the hw error interrupts because
9860          * the interrupts get disabled on global reset.
9861          */
9862         ret = hclge_config_nic_hw_error(hdev, true);
9863         if (ret) {
9864                 dev_err(&pdev->dev,
9865                         "fail(%d) to re-enable NIC hw error interrupts\n",
9866                         ret);
9867                 return ret;
9868         }
9869
9870         if (hdev->roce_client) {
9871                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9872                 if (ret) {
9873                         dev_err(&pdev->dev,
9874                                 "fail(%d) to re-enable roce ras interrupts\n",
9875                                 ret);
9876                         return ret;
9877                 }
9878         }
9879
9880         hclge_reset_vport_state(hdev);
9881         ret = hclge_reset_vport_spoofchk(hdev);
9882         if (ret)
9883                 return ret;
9884
9885         ret = hclge_resume_vf_rate(hdev);
9886         if (ret)
9887                 return ret;
9888
9889         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9890                  HCLGE_DRIVER_NAME);
9891
9892         return 0;
9893 }
9894
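/* Tear-down of the AE device: reset VF rates, drop misc-vector affinity and
 * driver state, unregister the MDIO bus when a PHY is attached, release UMV
 * space, mask the misc vector (vector0) and the HW error interrupts, then
 * uninit the cmd queue, misc IRQ and PCI resources and free the vport
 * MAC/VLAN tables.
 */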
9895 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9896 {
9897         struct hclge_dev *hdev = ae_dev->priv;
9898         struct hclge_mac *mac = &hdev->hw.mac;
9899
9900         hclge_reset_vf_rate(hdev);
9901         hclge_misc_affinity_teardown(hdev);
9902         hclge_state_uninit(hdev);
9903
9904         if (mac->phydev)
9905                 mdiobus_unregister(mac->mdio_bus);
9906
9907         hclge_uninit_umv_space(hdev);
9908
9909         /* Disable MISC vector(vector0) */
9910         hclge_enable_vector(&hdev->misc_vector, false);
9911         synchronize_irq(hdev->misc_vector.vector_irq);
9912
9913         /* Disable all hw interrupts */
9914         hclge_config_mac_tnl_int(hdev, false);
9915         hclge_config_nic_hw_error(hdev, false);
9916         hclge_config_rocee_ras_interrupt(hdev, false);
9917
9918         hclge_cmd_uninit(hdev);
9919         hclge_misc_irq_uninit(hdev);
9920         hclge_pci_uninit(hdev);
9921         mutex_destroy(&hdev->vport_lock);
9922         hclge_uninit_vport_mac_table(hdev);
9923         hclge_uninit_vport_vlan_table(hdev);
9924         ae_dev->priv = NULL;
9925 }
9926
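/* The largest usable "combined" channel count is bounded both by the RSS
 * capability of the hardware and by the TQPs this vport owns divided evenly
 * across its TCs.
 */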
9927 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9928 {
9929         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9930         struct hclge_vport *vport = hclge_get_vport(handle);
9931         struct hclge_dev *hdev = vport->back;
9932
9933         return min_t(u32, hdev->rss_size_max,
9934                      vport->alloc_tqps / kinfo->num_tc);
9935 }
9936
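/* Fill the ethtool -l view: the combined channel count comes from the current
 * RSS size, its maximum from hclge_get_max_channels(), plus one fixed "other"
 * channel.
 */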
9937 static void hclge_get_channels(struct hnae3_handle *handle,
9938                                struct ethtool_channels *ch)
9939 {
9940         ch->max_combined = hclge_get_max_channels(handle);
9941         ch->other_count = 1;
9942         ch->max_other = 1;
9943         ch->combined_count = handle->kinfo.rss_size;
9944 }
9945
9946 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9947                                         u16 *alloc_tqps, u16 *max_rss_size)
9948 {
9949         struct hclge_vport *vport = hclge_get_vport(handle);
9950         struct hclge_dev *hdev = vport->back;
9951
9952         *alloc_tqps = vport->alloc_tqps;
9953         *max_rss_size = hdev->rss_size_max;
9954 }
9955
9956 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9957                               bool rxfh_configured)
9958 {
9959         struct hclge_vport *vport = hclge_get_vport(handle);
9960         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9961         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9962         struct hclge_dev *hdev = vport->back;
9963         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9964         u16 cur_rss_size = kinfo->rss_size;
9965         u16 cur_tqps = kinfo->num_tqps;
9966         u16 tc_valid[HCLGE_MAX_TC_NUM];
9967         u16 roundup_size;
9968         u32 *rss_indir;
9969         unsigned int i;
9970         int ret;
9971
9972         kinfo->req_rss_size = new_tqps_num;
9973
9974         ret = hclge_tm_vport_map_update(hdev);
9975         if (ret) {
9976                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9977                 return ret;
9978         }
9979
9980         roundup_size = roundup_pow_of_two(kinfo->rss_size);
9981         roundup_size = ilog2(roundup_size);
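        /* Worked example: a kinfo->rss_size of 6 rounds up to 8, so each
         * active TC below is programmed with tc_size = ilog2(8) = 3 and
         * tc_offset = 6 * i.
         */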
9982         /* Set the RSS TC mode according to the new RSS size */
9983         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9984                 tc_valid[i] = 0;
9985
9986                 if (!(hdev->hw_tc_map & BIT(i)))
9987                         continue;
9988
9989                 tc_valid[i] = 1;
9990                 tc_size[i] = roundup_size;
9991                 tc_offset[i] = kinfo->rss_size * i;
9992         }
9993         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9994         if (ret)
9995                 return ret;
9996
9997         /* RSS indirection table has been configured by user */
9998         if (rxfh_configured)
9999                 goto out;
10000
10001         /* Reinitialize the RSS indirection table according to the new RSS size */
10002         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10003         if (!rss_indir)
10004                 return -ENOMEM;
10005
10006         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10007                 rss_indir[i] = i % kinfo->rss_size;
10008
10009         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10010         if (ret)
10011                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10012                         ret);
10013
10014         kfree(rss_indir);
10015
10016 out:
10017         if (!ret)
10018                 dev_info(&hdev->pdev->dev,
10019                          "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10020                          cur_rss_size, kinfo->rss_size,
10021                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
10022
10023         return ret;
10024 }
10025
10026 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10027                               u32 *regs_num_64_bit)
10028 {
10029         struct hclge_desc desc;
10030         u32 total_num;
10031         int ret;
10032
10033         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10034         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10035         if (ret) {
10036                 dev_err(&hdev->pdev->dev,
10037                         "Query register number cmd failed, ret = %d.\n", ret);
10038                 return ret;
10039         }
10040
10041         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10042         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10043
10044         total_num = *regs_num_32_bit + *regs_num_64_bit;
10045         if (!total_num)
10046                 return -EINVAL;
10047
10048         return 0;
10049 }
10050
10051 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10052                                  void *data)
10053 {
10054 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10055 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10056
10057         struct hclge_desc *desc;
10058         u32 *reg_val = data;
10059         __le32 *desc_data;
10060         int nodata_num;
10061         int cmd_num;
10062         int i, k, n;
10063         int ret;
10064
10065         if (regs_num == 0)
10066                 return 0;
10067
10068         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10069         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10070                                HCLGE_32_BIT_REG_RTN_DATANUM);
10071         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10072         if (!desc)
10073                 return -ENOMEM;
10074
10075         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10076         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10077         if (ret) {
10078                 dev_err(&hdev->pdev->dev,
10079                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
10080                 kfree(desc);
10081                 return ret;
10082         }
10083
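        /* The first BD contributes only its 6-word data area
         * (HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num values); every later BD
         * is read in full, header words included, as 8 register values.
         */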
10084         for (i = 0; i < cmd_num; i++) {
10085                 if (i == 0) {
10086                         desc_data = (__le32 *)(&desc[i].data[0]);
10087                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10088                 } else {
10089                         desc_data = (__le32 *)(&desc[i]);
10090                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
10091                 }
10092                 for (k = 0; k < n; k++) {
10093                         *reg_val++ = le32_to_cpu(*desc_data++);
10094
10095                         regs_num--;
10096                         if (!regs_num)
10097                                 break;
10098                 }
10099         }
10100
10101         kfree(desc);
10102         return 0;
10103 }
10104
10105 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10106                                  void *data)
10107 {
10108 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10109 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10110
10111         struct hclge_desc *desc;
10112         u64 *reg_val = data;
10113         __le64 *desc_data;
10114         int nodata_len;
10115         int cmd_num;
10116         int i, k, n;
10117         int ret;
10118
10119         if (regs_num == 0)
10120                 return 0;
10121
10122         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10123         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10124                                HCLGE_64_BIT_REG_RTN_DATANUM);
10125         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10126         if (!desc)
10127                 return -ENOMEM;
10128
10129         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10130         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10131         if (ret) {
10132                 dev_err(&hdev->pdev->dev,
10133                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
10134                 kfree(desc);
10135                 return ret;
10136         }
10137
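        /* Same walk as the 32 bit query: the first BD loses one 8-byte slot
         * to its command header, later BDs are read whole as
         * HCLGE_64_BIT_REG_RTN_DATANUM values each.
         */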
10138         for (i = 0; i < cmd_num; i++) {
10139                 if (i == 0) {
10140                         desc_data = (__le64 *)(&desc[i].data[0]);
10141                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10142                 } else {
10143                         desc_data = (__le64 *)(&desc[i]);
10144                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
10145                 }
10146                 for (k = 0; k < n; k++) {
10147                         *reg_val++ = le64_to_cpu(*desc_data++);
10148
10149                         regs_num--;
10150                         if (!regs_num)
10151                                 break;
10152                 }
10153         }
10154
10155         kfree(desc);
10156         return 0;
10157 }
10158
10159 #define MAX_SEPARATE_NUM        4
10160 #define SEPARATOR_VALUE         0xFDFCFBFA
10161 #define REG_NUM_PER_LINE        4
10162 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
10163 #define REG_SEPARATOR_LINE      1
10164 #define REG_NUM_REMAIN_MASK     3
10165 #define BD_LIST_MAX_NUM         30
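/* The register dump is written in lines of REG_NUM_PER_LINE u32 words. After
 * each register block, SEPARATOR_VALUE words are appended up to the next line
 * boundary (a full separator line when the block is already aligned), so the
 * blocks stay delimited in the binary dump.
 */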
10166
10167 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10168 {
10169         /* prepare 4 commands to query DFX BD number */
10170         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10171         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10172         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10173         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10174         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10175         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10176         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10177
10178         return hclge_cmd_send(&hdev->hw, desc, 4);
10179 }
10180
10181 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10182                                     int *bd_num_list,
10183                                     u32 type_num)
10184 {
10185         u32 entries_per_desc, desc_index, index, offset, i;
10186         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10187         int ret;
10188
10189         ret = hclge_query_bd_num_cmd_send(hdev, desc);
10190         if (ret) {
10191                 dev_err(&hdev->pdev->dev,
10192                         "Get dfx bd num fail, status is %d.\n", ret);
10193                 return ret;
10194         }
10195
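        /* Each returned BD carries ARRAY_SIZE(desc[0].data) BD counts; map
         * the per-type DFX offset to a (descriptor, data word) pair to pick
         * the BD count for that register block.
         */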
10196         entries_per_desc = ARRAY_SIZE(desc[0].data);
10197         for (i = 0; i < type_num; i++) {
10198                 offset = hclge_dfx_bd_offset_list[i];
10199                 index = offset % entries_per_desc;
10200                 desc_index = offset / entries_per_desc;
10201                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10202         }
10203
10204         return ret;
10205 }
10206
10207 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10208                                   struct hclge_desc *desc_src, int bd_num,
10209                                   enum hclge_opcode_type cmd)
10210 {
10211         struct hclge_desc *desc = desc_src;
10212         int i, ret;
10213
10214         hclge_cmd_setup_basic_desc(desc, cmd, true);
10215         for (i = 0; i < bd_num - 1; i++) {
10216                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10217                 desc++;
10218                 hclge_cmd_setup_basic_desc(desc, cmd, true);
10219         }
10220
10221         desc = desc_src;
10222         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10223         if (ret)
10224                 dev_err(&hdev->pdev->dev,
10225                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10226                         cmd, ret);
10227
10228         return ret;
10229 }
10230
10231 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10232                                     void *data)
10233 {
10234         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10235         struct hclge_desc *desc = desc_src;
10236         u32 *reg = data;
10237
10238         entries_per_desc = ARRAY_SIZE(desc->data);
10239         reg_num = entries_per_desc * bd_num;
10240         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10241         for (i = 0; i < reg_num; i++) {
10242                 index = i % entries_per_desc;
10243                 desc_index = i / entries_per_desc;
10244                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10245         }
10246         for (i = 0; i < separator_num; i++)
10247                 *reg++ = SEPARATOR_VALUE;
10248
10249         return reg_num + separator_num;
10250 }
10251
10252 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10253 {
10254         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10255         int data_len_per_desc, data_len, bd_num, i;
10256         int bd_num_list[BD_LIST_MAX_NUM];
10257         int ret;
10258
10259         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10260         if (ret) {
10261                 dev_err(&hdev->pdev->dev,
10262                         "Get dfx reg bd num fail, status is %d.\n", ret);
10263                 return ret;
10264         }
10265
10266         data_len_per_desc = sizeof_field(struct hclge_desc, data);
10267         *len = 0;
10268         for (i = 0; i < dfx_reg_type_num; i++) {
10269                 bd_num = bd_num_list[i];
10270                 data_len = data_len_per_desc * bd_num;
10271                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10272         }
10273
10274         return ret;
10275 }
10276
10277 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10278 {
10279         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10280         int bd_num, bd_num_max, buf_len, i;
10281         int bd_num_list[BD_LIST_MAX_NUM];
10282         struct hclge_desc *desc_src;
10283         u32 *reg = data;
10284         int ret;
10285
10286         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10287         if (ret) {
10288                 dev_err(&hdev->pdev->dev,
10289                         "Get dfx reg bd num fail, status is %d.\n", ret);
10290                 return ret;
10291         }
10292
10293         bd_num_max = bd_num_list[0];
10294         for (i = 1; i < dfx_reg_type_num; i++)
10295                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10296
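        /* A single descriptor buffer sized for the largest block is reused
         * for every DFX register type.
         */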
10297         buf_len = sizeof(*desc_src) * bd_num_max;
10298         desc_src = kzalloc(buf_len, GFP_KERNEL);
10299         if (!desc_src)
10300                 return -ENOMEM;
10301
10302         for (i = 0; i < dfx_reg_type_num; i++) {
10303                 bd_num = bd_num_list[i];
10304                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10305                                              hclge_dfx_reg_opcode_list[i]);
10306                 if (ret) {
10307                         dev_err(&hdev->pdev->dev,
10308                                 "Get dfx reg fail, status is %d.\n", ret);
10309                         break;
10310                 }
10311
10312                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10313         }
10314
10315         kfree(desc_src);
10316         return ret;
10317 }
10318
10319 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10320                               struct hnae3_knic_private_info *kinfo)
10321 {
10322 #define HCLGE_RING_REG_OFFSET           0x200
10323 #define HCLGE_RING_INT_REG_OFFSET       0x4
10324
10325         int i, j, reg_num, separator_num;
10326         int data_num_sum;
10327         u32 *reg = data;
10328
10329         /* fetch per-PF register values from the PF PCIe register space */
10330         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10331         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10332         for (i = 0; i < reg_num; i++)
10333                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10334         for (i = 0; i < separator_num; i++)
10335                 *reg++ = SEPARATOR_VALUE;
10336         data_num_sum = reg_num + separator_num;
10337
10338         reg_num = ARRAY_SIZE(common_reg_addr_list);
10339         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10340         for (i = 0; i < reg_num; i++)
10341                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10342         for (i = 0; i < separator_num; i++)
10343                 *reg++ = SEPARATOR_VALUE;
10344         data_num_sum += reg_num + separator_num;
10345
10346         reg_num = ARRAY_SIZE(ring_reg_addr_list);
10347         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10348         for (j = 0; j < kinfo->num_tqps; j++) {
10349                 for (i = 0; i < reg_num; i++)
10350                         *reg++ = hclge_read_dev(&hdev->hw,
10351                                                 ring_reg_addr_list[i] +
10352                                                 HCLGE_RING_REG_OFFSET * j);
10353                 for (i = 0; i < separator_num; i++)
10354                         *reg++ = SEPARATOR_VALUE;
10355         }
10356         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10357
10358         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10359         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10360         for (j = 0; j < hdev->num_msi_used - 1; j++) {
10361                 for (i = 0; i < reg_num; i++)
10362                         *reg++ = hclge_read_dev(&hdev->hw,
10363                                                 tqp_intr_reg_addr_list[i] +
10364                                                 HCLGE_RING_INT_REG_OFFSET * j);
10365                 for (i = 0; i < separator_num; i++)
10366                         *reg++ = SEPARATOR_VALUE;
10367         }
10368         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10369
10370         return data_num_sum;
10371 }
10372
10373 static int hclge_get_regs_len(struct hnae3_handle *handle)
10374 {
10375         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10376         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10377         struct hclge_vport *vport = hclge_get_vport(handle);
10378         struct hclge_dev *hdev = vport->back;
10379         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10380         int regs_lines_32_bit, regs_lines_64_bit;
10381         int ret;
10382
10383         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10384         if (ret) {
10385                 dev_err(&hdev->pdev->dev,
10386                         "Get register number failed, ret = %d.\n", ret);
10387                 return ret;
10388         }
10389
10390         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10391         if (ret) {
10392                 dev_err(&hdev->pdev->dev,
10393                         "Get dfx reg len failed, ret = %d.\n", ret);
10394                 return ret;
10395         }
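        /* Each block contributes its full data lines plus REG_SEPARATOR_LINE;
         * the separator words appended at fetch time pad any partial line, so
         * these sizes match what hclge_get_regs() actually writes.
         */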
10396
10397         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10398                 REG_SEPARATOR_LINE;
10399         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10400                 REG_SEPARATOR_LINE;
10401         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10402                 REG_SEPARATOR_LINE;
10403         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10404                 REG_SEPARATOR_LINE;
10405         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10406                 REG_SEPARATOR_LINE;
10407         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10408                 REG_SEPARATOR_LINE;
10409
10410         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10411                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10412                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10413 }
10414
10415 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10416                            void *data)
10417 {
10418         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10419         struct hclge_vport *vport = hclge_get_vport(handle);
10420         struct hclge_dev *hdev = vport->back;
10421         u32 regs_num_32_bit, regs_num_64_bit;
10422         int i, reg_num, separator_num, ret;
10423         u32 *reg = data;
10424
10425         *version = hdev->fw_version;
10426
10427         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10428         if (ret) {
10429                 dev_err(&hdev->pdev->dev,
10430                         "Get register number failed, ret = %d.\n", ret);
10431                 return;
10432         }
10433
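        /* Dump order: per-PF PCIe registers first, then the 32 bit and 64 bit
         * command-queried registers, then the DFX blocks; every group is
         * followed by separator padding.
         */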
10434         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10435
10436         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10437         if (ret) {
10438                 dev_err(&hdev->pdev->dev,
10439                         "Get 32 bit register failed, ret = %d.\n", ret);
10440                 return;
10441         }
10442         reg_num = regs_num_32_bit;
10443         reg += reg_num;
10444         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10445         for (i = 0; i < separator_num; i++)
10446                 *reg++ = SEPARATOR_VALUE;
10447
10448         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10449         if (ret) {
10450                 dev_err(&hdev->pdev->dev,
10451                         "Get 64 bit register failed, ret = %d.\n", ret);
10452                 return;
10453         }
10454         reg_num = regs_num_64_bit * 2;
10455         reg += reg_num;
10456         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10457         for (i = 0; i < separator_num; i++)
10458                 *reg++ = SEPARATOR_VALUE;
10459
10460         ret = hclge_get_dfx_reg(hdev, reg);
10461         if (ret)
10462                 dev_err(&hdev->pdev->dev,
10463                         "Get dfx register failed, ret = %d.\n", ret);
10464 }
10465
10466 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10467 {
10468         struct hclge_set_led_state_cmd *req;
10469         struct hclge_desc desc;
10470         int ret;
10471
10472         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10473
10474         req = (struct hclge_set_led_state_cmd *)desc.data;
10475         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10476                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10477
10478         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10479         if (ret)
10480                 dev_err(&hdev->pdev->dev,
10481                         "Send set led state cmd error, ret =%d\n", ret);
10482
10483         return ret;
10484 }
10485
10486 enum hclge_led_status {
10487         HCLGE_LED_OFF,
10488         HCLGE_LED_ON,
10489         HCLGE_LED_NO_CHANGE = 0xFF,
10490 };
10491
10492 static int hclge_set_led_id(struct hnae3_handle *handle,
10493                             enum ethtool_phys_id_state status)
10494 {
10495         struct hclge_vport *vport = hclge_get_vport(handle);
10496         struct hclge_dev *hdev = vport->back;
10497
10498         switch (status) {
10499         case ETHTOOL_ID_ACTIVE:
10500                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10501         case ETHTOOL_ID_INACTIVE:
10502                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10503         default:
10504                 return -EINVAL;
10505         }
10506 }
10507
10508 static void hclge_get_link_mode(struct hnae3_handle *handle,
10509                                 unsigned long *supported,
10510                                 unsigned long *advertising)
10511 {
10512         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10513         struct hclge_vport *vport = hclge_get_vport(handle);
10514         struct hclge_dev *hdev = vport->back;
10515         unsigned int idx = 0;
10516
10517         for (; idx < size; idx++) {
10518                 supported[idx] = hdev->hw.mac.supported[idx];
10519                 advertising[idx] = hdev->hw.mac.advertising[idx];
10520         }
10521 }
10522
10523 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10524 {
10525         struct hclge_vport *vport = hclge_get_vport(handle);
10526         struct hclge_dev *hdev = vport->back;
10527
10528         return hclge_config_gro(hdev, enable);
10529 }
10530
10531 static const struct hnae3_ae_ops hclge_ops = {
10532         .init_ae_dev = hclge_init_ae_dev,
10533         .uninit_ae_dev = hclge_uninit_ae_dev,
10534         .flr_prepare = hclge_flr_prepare,
10535         .flr_done = hclge_flr_done,
10536         .init_client_instance = hclge_init_client_instance,
10537         .uninit_client_instance = hclge_uninit_client_instance,
10538         .map_ring_to_vector = hclge_map_ring_to_vector,
10539         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10540         .get_vector = hclge_get_vector,
10541         .put_vector = hclge_put_vector,
10542         .set_promisc_mode = hclge_set_promisc_mode,
10543         .set_loopback = hclge_set_loopback,
10544         .start = hclge_ae_start,
10545         .stop = hclge_ae_stop,
10546         .client_start = hclge_client_start,
10547         .client_stop = hclge_client_stop,
10548         .get_status = hclge_get_status,
10549         .get_ksettings_an_result = hclge_get_ksettings_an_result,
10550         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10551         .get_media_type = hclge_get_media_type,
10552         .check_port_speed = hclge_check_port_speed,
10553         .get_fec = hclge_get_fec,
10554         .set_fec = hclge_set_fec,
10555         .get_rss_key_size = hclge_get_rss_key_size,
10556         .get_rss_indir_size = hclge_get_rss_indir_size,
10557         .get_rss = hclge_get_rss,
10558         .set_rss = hclge_set_rss,
10559         .set_rss_tuple = hclge_set_rss_tuple,
10560         .get_rss_tuple = hclge_get_rss_tuple,
10561         .get_tc_size = hclge_get_tc_size,
10562         .get_mac_addr = hclge_get_mac_addr,
10563         .set_mac_addr = hclge_set_mac_addr,
10564         .do_ioctl = hclge_do_ioctl,
10565         .add_uc_addr = hclge_add_uc_addr,
10566         .rm_uc_addr = hclge_rm_uc_addr,
10567         .add_mc_addr = hclge_add_mc_addr,
10568         .rm_mc_addr = hclge_rm_mc_addr,
10569         .set_autoneg = hclge_set_autoneg,
10570         .get_autoneg = hclge_get_autoneg,
10571         .restart_autoneg = hclge_restart_autoneg,
10572         .halt_autoneg = hclge_halt_autoneg,
10573         .get_pauseparam = hclge_get_pauseparam,
10574         .set_pauseparam = hclge_set_pauseparam,
10575         .set_mtu = hclge_set_mtu,
10576         .reset_queue = hclge_reset_tqp,
10577         .get_stats = hclge_get_stats,
10578         .get_mac_stats = hclge_get_mac_stat,
10579         .update_stats = hclge_update_stats,
10580         .get_strings = hclge_get_strings,
10581         .get_sset_count = hclge_get_sset_count,
10582         .get_fw_version = hclge_get_fw_version,
10583         .get_mdix_mode = hclge_get_mdix_mode,
10584         .enable_vlan_filter = hclge_enable_vlan_filter,
10585         .set_vlan_filter = hclge_set_vlan_filter,
10586         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10587         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10588         .reset_event = hclge_reset_event,
10589         .get_reset_level = hclge_get_reset_level,
10590         .set_default_reset_request = hclge_set_def_reset_request,
10591         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10592         .set_channels = hclge_set_channels,
10593         .get_channels = hclge_get_channels,
10594         .get_regs_len = hclge_get_regs_len,
10595         .get_regs = hclge_get_regs,
10596         .set_led_id = hclge_set_led_id,
10597         .get_link_mode = hclge_get_link_mode,
10598         .add_fd_entry = hclge_add_fd_entry,
10599         .del_fd_entry = hclge_del_fd_entry,
10600         .del_all_fd_entries = hclge_del_all_fd_entries,
10601         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10602         .get_fd_rule_info = hclge_get_fd_rule_info,
10603         .get_fd_all_rules = hclge_get_all_rules,
10604         .restore_fd_rules = hclge_restore_fd_entries,
10605         .enable_fd = hclge_enable_fd,
10606         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10607         .dbg_run_cmd = hclge_dbg_run_cmd,
10608         .handle_hw_ras_error = hclge_handle_hw_ras_error,
10609         .get_hw_reset_stat = hclge_get_hw_reset_stat,
10610         .ae_dev_resetting = hclge_ae_dev_resetting,
10611         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10612         .set_gro_en = hclge_gro_en,
10613         .get_global_queue_id = hclge_covert_handle_qid_global,
10614         .set_timer_task = hclge_set_timer_task,
10615         .mac_connect_phy = hclge_mac_connect_phy,
10616         .mac_disconnect_phy = hclge_mac_disconnect_phy,
10617         .restore_vlan_table = hclge_restore_vlan_table,
10618         .get_vf_config = hclge_get_vf_config,
10619         .set_vf_link_state = hclge_set_vf_link_state,
10620         .set_vf_spoofchk = hclge_set_vf_spoofchk,
10621         .set_vf_trust = hclge_set_vf_trust,
10622         .set_vf_rate = hclge_set_vf_rate,
10623         .set_vf_mac = hclge_set_vf_mac,
10624 };
10625
10626 static struct hnae3_ae_algo ae_algo = {
10627         .ops = &hclge_ops,
10628         .pdev_id_table = ae_algo_pci_tbl,
10629 };
10630
10631 static int hclge_init(void)
10632 {
10633         pr_info("%s is initializing\n", HCLGE_NAME);
10634
10635         hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10636         if (!hclge_wq) {
10637                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10638                 return -ENOMEM;
10639         }
10640
10641         hnae3_register_ae_algo(&ae_algo);
10642
10643         return 0;
10644 }
10645
10646 static void hclge_exit(void)
10647 {
10648         hnae3_unregister_ae_algo(&ae_algo);
10649         destroy_workqueue(hclge_wq);
10650 }
10651 module_init(hclge_init);
10652 module_exit(hclge_exit);
10653
10654 MODULE_LICENSE("GPL");
10655 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10656 MODULE_DESCRIPTION("HCLGE Driver");
10657 MODULE_VERSION(HCLGE_MOD_VERSION);