net: hns3: disable sriov before unload hclge layer
uclinux-h8/linux.git: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"

#define HCLGE_NAME                      "hclge"
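/* Helpers for the MAC statistics tables below: read a u64 counter at a byte
 * offset inside a stats structure, and compute a field's byte offset within
 * struct hclge_mac_stats.
 */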
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT     256U
#define HCLGE_BUF_MUL_BY        2
#define HCLGE_BUF_DIV_BY        2
#define NEED_RESERVE_TC_NUM     2
#define BUF_MAX_PERCENT         100
#define BUF_RESERVE_PERCENT     90

#define HCLGE_RESET_MAX_FAIL_CNT        5
#define HCLGE_RESET_SYNC_TIME           100
#define HCLGE_PF_RESET_SYNC_TIME        20
#define HCLGE_PF_RESET_SYNC_CNT         1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET        1
#define HCLGE_DFX_SSU_0_BD_OFFSET       2
#define HCLGE_DFX_SSU_1_BD_OFFSET       3
#define HCLGE_DFX_IGU_BD_OFFSET         4
#define HCLGE_DFX_RPU_0_BD_OFFSET       5
#define HCLGE_DFX_RPU_1_BD_OFFSET       6
#define HCLGE_DFX_NCSI_BD_OFFSET        7
#define HCLGE_DFX_RTC_BD_OFFSET         8
#define HCLGE_DFX_PPP_BD_OFFSET         9
#define HCLGE_DFX_RCB_BD_OFFSET         10
#define HCLGE_DFX_TQP_BD_OFFSET         11
#define HCLGE_DFX_SSU_2_BD_OFFSET       12

#define HCLGE_LINK_STATUS_MS    10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
                                                   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

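/* The register address lists below are presumably the sets of CMDQ, common,
 * ring and TQP interrupt registers gathered for register dumps
 * (e.g. ethtool -d).
 */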
static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
                                         HCLGE_NIC_CSQ_BASEADDR_H_REG,
                                         HCLGE_NIC_CSQ_DEPTH_REG,
                                         HCLGE_NIC_CSQ_TAIL_REG,
                                         HCLGE_NIC_CSQ_HEAD_REG,
                                         HCLGE_NIC_CRQ_BASEADDR_L_REG,
                                         HCLGE_NIC_CRQ_BASEADDR_H_REG,
                                         HCLGE_NIC_CRQ_DEPTH_REG,
                                         HCLGE_NIC_CRQ_TAIL_REG,
                                         HCLGE_NIC_CRQ_HEAD_REG,
                                         HCLGE_VECTOR0_CMDQ_SRC_REG,
                                         HCLGE_CMDQ_INTR_STS_REG,
                                         HCLGE_CMDQ_INTR_EN_REG,
                                         HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
                                           HCLGE_PF_OTHER_INT_REG,
                                           HCLGE_MISC_RESET_STS_REG,
                                           HCLGE_MISC_VECTOR_INT_STS,
                                           HCLGE_GLOBAL_RESET_REG,
                                           HCLGE_FUN_RST_ING,
                                           HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
                                         HCLGE_RING_RX_ADDR_H_REG,
                                         HCLGE_RING_RX_BD_NUM_REG,
                                         HCLGE_RING_RX_BD_LENGTH_REG,
                                         HCLGE_RING_RX_MERGE_EN_REG,
                                         HCLGE_RING_RX_TAIL_REG,
                                         HCLGE_RING_RX_HEAD_REG,
                                         HCLGE_RING_RX_FBD_NUM_REG,
                                         HCLGE_RING_RX_OFFSET_REG,
                                         HCLGE_RING_RX_FBD_OFFSET_REG,
                                         HCLGE_RING_RX_STASH_REG,
                                         HCLGE_RING_RX_BD_ERR_REG,
                                         HCLGE_RING_TX_ADDR_L_REG,
                                         HCLGE_RING_TX_ADDR_H_REG,
                                         HCLGE_RING_TX_BD_NUM_REG,
                                         HCLGE_RING_TX_PRIORITY_REG,
                                         HCLGE_RING_TX_TC_REG,
                                         HCLGE_RING_TX_MERGE_EN_REG,
                                         HCLGE_RING_TX_TAIL_REG,
                                         HCLGE_RING_TX_HEAD_REG,
                                         HCLGE_RING_TX_FBD_NUM_REG,
                                         HCLGE_RING_TX_OFFSET_REG,
                                         HCLGE_RING_TX_EBD_NUM_REG,
                                         HCLGE_RING_TX_EBD_OFFSET_REG,
                                         HCLGE_RING_TX_BD_ERR_REG,
                                         HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
                                             HCLGE_TQP_INTR_GL0_REG,
                                             HCLGE_TQP_INTR_GL1_REG,
                                             HCLGE_TQP_INTR_GL2_REG,
                                             HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
        "App    Loopback test",
        "Serdes serial Loopback test",
        "Serdes parallel Loopback test",
        "Phy    Loopback test"
};

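/* Mapping from ethtool stat name to the counter's offset within
 * struct hclge_mac_stats.
 */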
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
        {"mac_tx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
        {"mac_rx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
        {"mac_tx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
        {"mac_rx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
        {"mac_tx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
        {"mac_tx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
        {"mac_tx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
        {"mac_tx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
        {"mac_tx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
        {"mac_tx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
        {"mac_tx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
        {"mac_tx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
        {"mac_tx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
        {"mac_rx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
        {"mac_rx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
        {"mac_rx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
        {"mac_rx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
        {"mac_rx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
        {"mac_rx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
        {"mac_rx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
        {"mac_rx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
        {"mac_rx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
        {"mac_tx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
        {"mac_tx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
        {"mac_tx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
        {"mac_tx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
        {"mac_tx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
        {"mac_tx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
        {"mac_tx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
        {"mac_tx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
        {"mac_tx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
        {"mac_tx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
        {"mac_tx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
        {"mac_tx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
        {"mac_tx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
        {"mac_tx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
        {"mac_tx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
        {"mac_tx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
        {"mac_tx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
        {"mac_tx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
        {"mac_tx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
        {"mac_tx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
        {"mac_tx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
        {"mac_tx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
        {"mac_tx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
        {"mac_tx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
        {"mac_tx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
        {"mac_rx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
        {"mac_rx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
        {"mac_rx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
        {"mac_rx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
        {"mac_rx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
        {"mac_rx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
        {"mac_rx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
        {"mac_rx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
        {"mac_rx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
        {"mac_rx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
        {"mac_rx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
        {"mac_rx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
        {"mac_rx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
        {"mac_rx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
        {"mac_rx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
        {"mac_rx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
        {"mac_rx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
        {"mac_rx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
        {"mac_rx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
        {"mac_rx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
        {"mac_rx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
        {"mac_rx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
        {"mac_rx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
        {"mac_rx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
        {"mac_rx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

        {"mac_tx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
        {"mac_tx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
        {"mac_tx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
        {"mac_tx_err_all_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
        {"mac_tx_from_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
        {"mac_tx_from_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
        {"mac_rx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
        {"mac_rx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
        {"mac_rx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
        {"mac_rx_fcs_err_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
        {"mac_rx_send_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
        {"mac_rx_send_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

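/* Static MAC manager table: a single entry matching the LLDP multicast
 * address (01:80:c2:00:00:0e) and the LLDP ethertype.
 */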
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
        {
                .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
                .ethter_type = cpu_to_le16(ETH_P_LLDP),
                .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
                .i_port_bitmap = 0x1,
        },
};

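/* Default RSS hash key, presumably programmed into hardware when no
 * user-supplied key has been configured.
 */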
static const u8 hclge_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
        HCLGE_DFX_BIOS_BD_OFFSET,
        HCLGE_DFX_SSU_0_BD_OFFSET,
        HCLGE_DFX_SSU_1_BD_OFFSET,
        HCLGE_DFX_IGU_BD_OFFSET,
        HCLGE_DFX_RPU_0_BD_OFFSET,
        HCLGE_DFX_RPU_1_BD_OFFSET,
        HCLGE_DFX_NCSI_BD_OFFSET,
        HCLGE_DFX_RTC_BD_OFFSET,
        HCLGE_DFX_PPP_BD_OFFSET,
        HCLGE_DFX_RCB_BD_OFFSET,
        HCLGE_DFX_TQP_BD_OFFSET,
        HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
        HCLGE_OPC_DFX_BIOS_COMMON_REG,
        HCLGE_OPC_DFX_SSU_REG_0,
        HCLGE_OPC_DFX_SSU_REG_1,
        HCLGE_OPC_DFX_IGU_EGU_REG,
        HCLGE_OPC_DFX_RPU_REG_0,
        HCLGE_OPC_DFX_RPU_REG_1,
        HCLGE_OPC_DFX_NCSI_REG,
        HCLGE_OPC_DFX_RTC_REG,
        HCLGE_OPC_DFX_PPP_REG,
        HCLGE_OPC_DFX_RCB_REG,
        HCLGE_OPC_DFX_TQP_REG,
        HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
        { PACKET_TYPE_ID, 6 },
        { IP_FRAGEMENT, 1 },
        { ROCE_TYPE, 1 },
        { NEXT_KEY, 5 },
        { VLAN_NUMBER, 2 },
        { SRC_VPORT, 12 },
        { DST_VPORT, 12 },
        { TUNNEL_PACKET, 1 },
};

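/* Flow director tuple layout: each entry gives the tuple id, its width in
 * bits, the key option, and, for the inner tuples, the offsets of the value
 * and mask fields inside struct hclge_fd_rule (-1 where no such field exists).
 */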
static const struct key_info tuple_key_info[] = {
        { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
        { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
        { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
        { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
        { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
        { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
        { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
        { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
        { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
        { INNER_DST_MAC, 48, KEY_OPT_MAC,
          offsetof(struct hclge_fd_rule, tuples.dst_mac),
          offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
        { INNER_SRC_MAC, 48, KEY_OPT_MAC,
          offsetof(struct hclge_fd_rule, tuples.src_mac),
          offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
        { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
          offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
          offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
        { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
        { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
          offsetof(struct hclge_fd_rule, tuples.ether_proto),
          offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
        { INNER_L2_RSV, 16, KEY_OPT_LE16,
          offsetof(struct hclge_fd_rule, tuples.l2_user_def),
          offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
        { INNER_IP_TOS, 8, KEY_OPT_U8,
          offsetof(struct hclge_fd_rule, tuples.ip_tos),
          offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
        { INNER_IP_PROTO, 8, KEY_OPT_U8,
          offsetof(struct hclge_fd_rule, tuples.ip_proto),
          offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
        { INNER_SRC_IP, 32, KEY_OPT_IP,
          offsetof(struct hclge_fd_rule, tuples.src_ip),
          offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
        { INNER_DST_IP, 32, KEY_OPT_IP,
          offsetof(struct hclge_fd_rule, tuples.dst_ip),
          offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
        { INNER_L3_RSV, 16, KEY_OPT_LE16,
          offsetof(struct hclge_fd_rule, tuples.l3_user_def),
          offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
        { INNER_SRC_PORT, 16, KEY_OPT_LE16,
          offsetof(struct hclge_fd_rule, tuples.src_port),
          offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
        { INNER_DST_PORT, 16, KEY_OPT_LE16,
          offsetof(struct hclge_fd_rule, tuples.dst_port),
          offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
        { INNER_L4_RSV, 32, KEY_OPT_LE32,
          offsetof(struct hclge_fd_rule, tuples.l4_user_def),
          offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

        u64 *data = (u64 *)(&hdev->mac_stats);
        struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
        __le64 *desc_data;
        int i, k, n;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get MAC pkt stats fail, status = %d.\n", ret);

                return ret;
        }

        for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
                /* for special opcode 0032, only the first desc has the head */
                if (unlikely(i == 0)) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
        u64 *data = (u64 *)(&hdev->mac_stats);
        struct hclge_desc *desc;
        __le64 *desc_data;
        u16 i, k, n;
        int ret;

        /* This may be called inside atomic sections,
         * so GFP_ATOMIC is more suitable here
         */
        desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
        if (!desc)
                return -ENOMEM;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
        ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
        if (ret) {
                kfree(desc);
                return ret;
        }

        for (i = 0; i < desc_num; i++) {
                /* for special opcode 0034, only the first desc has the head */
                if (i == 0) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        kfree(desc);

        return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
        struct hclge_desc desc;
        __le32 *desc_data;
        u32 reg_num;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        desc_data = (__le32 *)(&desc.data[0]);
        reg_num = le32_to_cpu(*desc_data);

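        /* desc_num = 1 + roundup((reg_num - 3) / 4): the first descriptor
         * covers three stats registers, each following descriptor four more.
         */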
        *desc_num = 1 + ((reg_num - 3) >> 2) +
                    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

        return 0;
}

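/* Query how many MAC stats registers the firmware exposes; if the query
 * succeeds, use the "complete" read path sized accordingly, and fall back to
 * the fixed-size "defective" path when the opcode is not supported.
 */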
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
        u32 desc_num;
        int ret;

        ret = hclge_mac_query_reg_num(hdev, &desc_num);
        /* The firmware supports the new statistics acquisition method */
        if (!ret)
                ret = hclge_mac_update_stats_complete(hdev, desc_num);
        else if (ret == -EOPNOTSUPP)
                ret = hclge_mac_update_stats_defective(hdev);
        else
                dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

        return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hnae3_queue *queue;
        struct hclge_desc desc[1];
        struct hclge_tqp *tqp;
        int ret, i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_RX_STATS */
                hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
                                           true);

                desc[0].data[0] = cpu_to_le32(tqp->index);
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d,queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_TX_STATS */
                hclge_cmd_setup_basic_desc(&desc[0],
                                           HCLGE_OPC_QUERY_TX_STATS,
                                           true);

                desc[0].data[0] = cpu_to_le32(tqp->index);
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d,queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        /* each tqp has both a TX and an RX queue */
        return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        u8 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
                                 const struct hclge_comm_stats_str strs[],
                                 int size, u64 *data)
{
        u64 *buf = data;
        u32 i;

        for (i = 0; i < size; i++)
                buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

        return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
                                  const struct hclge_comm_stats_str strs[],
                                  int size, u8 *data)
{
        char *buff = (char *)data;
        u32 i;

        if (stringset != ETH_SS_STATS)
                return buff;

        for (i = 0; i < size; i++) {
                snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
                buff = buff + ETH_GSTRING_LEN;
        }

        return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
        struct hnae3_handle *handle;
        int status;

        handle = &hdev->vport[0].nic;
        if (handle->client) {
                status = hclge_tqps_update_stats(handle);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Update TQPS stats fail, status = %d.\n",
                                status);
                }
        }

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
                               struct net_device_stats *net_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int status;

        if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
                return;

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n",
                        status);

        status = hclge_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update TQPS stats fail, status = %d.\n",
                        status);

        clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
                HNAE3_SUPPORT_PHY_LOOPBACK | \
                HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
                HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int count = 0;

        /* Loopback test support rules:
         * mac: only supported in GE mode
         * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
         * phy: only supported when a phy device exists on the board
         */
        if (stringset == ETH_SS_TEST) {
                /* clear the loopback bit flags first */
                handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
                if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
                }

                count += 2;
                handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
                handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

                if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
                     hdev->hw.mac.phydev->drv->set_loopback) ||
                    hnae3_dev_phy_imp_supported(hdev)) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
                }
        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
                        hclge_tqps_get_sset_count(handle, stringset);
        }

        return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
                              u8 *data)
{
        u8 *p = (char *)data;
        int size;

        if (stringset == ETH_SS_STATS) {
                size = ARRAY_SIZE(g_mac_stats_string);
                p = hclge_comm_get_strings(stringset, g_mac_stats_string,
                                           size, p);
                p = hclge_tqps_get_strings(handle, p);
        } else if (stringset == ETH_SS_TEST) {
                if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
        }
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u64 *p;

        p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
                                 ARRAY_SIZE(g_mac_stats_string), data);
        p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
                               struct hns3_mac_stats *mac_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        hclge_update_stats(handle, NULL);

        mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
        mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
                                   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK       0xF

        if (!(status->pf_state & HCLGE_PF_STATE_DONE))
                return -EINVAL;

        /* Set the pf to main pf */
        if (status->pf_state & HCLGE_PF_STATE_MAIN)
                hdev->flag |= HCLGE_FLAG_MAIN;
        else
                hdev->flag &= ~HCLGE_FLAG_MAIN;

        hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
        return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT     5

        struct hclge_func_status_cmd *req;
        struct hclge_desc desc;
        int timeout = 0;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
        req = (struct hclge_func_status_cmd *)desc.data;

        do {
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "query function status failed %d.\n", ret);
                        return ret;
                }

                /* Check pf reset is done */
                if (req->pf_state)
                        break;
                usleep_range(1000, 2000);
        } while (timeout++ < HCLGE_QUERY_MAX_CNT);

        return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
        struct hclge_pf_res_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query pf resource failed %d.\n", ret);
                return ret;
        }

        req = (struct hclge_pf_res_cmd *)desc.data;
        hdev->num_tqps = le16_to_cpu(req->tqp_num) +
                         le16_to_cpu(req->ext_tqp_num);
        hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

        if (req->tx_buf_size)
                hdev->tx_buf_size =
                        le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

        hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

        if (req->dv_buf_size)
                hdev->dv_buf_size =
                        le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->dv_buf_size = HCLGE_DEFAULT_DV;

        hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

        hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
        if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
                dev_err(&hdev->pdev->dev,
                        "only %u msi resources available, not enough for pf(min:2).\n",
                        hdev->num_nic_msi);
                return -EINVAL;
        }

        if (hnae3_dev_roce_supported(hdev)) {
                hdev->num_roce_msi =
                        le16_to_cpu(req->pf_intr_vector_number_roce);

                /* PF should have NIC vectors and Roce vectors,
                 * NIC vectors are queued before Roce vectors.
                 */
                hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
        } else {
                hdev->num_msi = hdev->num_nic_msi;
        }

        return 0;
}

static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
        switch (speed_cmd) {
        case HCLGE_FW_MAC_SPEED_10M:
                *speed = HCLGE_MAC_SPEED_10M;
                break;
        case HCLGE_FW_MAC_SPEED_100M:
                *speed = HCLGE_MAC_SPEED_100M;
                break;
        case HCLGE_FW_MAC_SPEED_1G:
                *speed = HCLGE_MAC_SPEED_1G;
                break;
        case HCLGE_FW_MAC_SPEED_10G:
                *speed = HCLGE_MAC_SPEED_10G;
                break;
        case HCLGE_FW_MAC_SPEED_25G:
                *speed = HCLGE_MAC_SPEED_25G;
                break;
        case HCLGE_FW_MAC_SPEED_40G:
                *speed = HCLGE_MAC_SPEED_40G;
                break;
        case HCLGE_FW_MAC_SPEED_50G:
                *speed = HCLGE_MAC_SPEED_50G;
                break;
        case HCLGE_FW_MAC_SPEED_100G:
                *speed = HCLGE_MAC_SPEED_100G;
                break;
        case HCLGE_FW_MAC_SPEED_200G:
                *speed = HCLGE_MAC_SPEED_200G;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static const struct hclge_speed_bit_map speed_bit_map[] = {
        {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
        {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
        {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
        {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
        {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
        {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
        {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
        {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
        {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
};

static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
        u16 i;

        for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
                if (speed == speed_bit_map[i].speed) {
                        *speed_bit = speed_bit_map[i].speed_bit;
                        return 0;
                }
        }

        return -EINVAL;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 speed_ability = hdev->hw.mac.speed_ability;
        u32 speed_bit = 0;
        int ret;

        ret = hclge_get_speed_bit(speed, &speed_bit);
        if (ret)
                return ret;

        if (speed_bit & speed_ability)
                return 0;

        return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(
                        ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
                        mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

        switch (mac->speed) {
        case HCLGE_MAC_SPEED_10G:
        case HCLGE_MAC_SPEED_40G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_25G:
        case HCLGE_MAC_SPEED_50G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
                        BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_100G:
        case HCLGE_MAC_SPEED_200G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
                mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
                break;
        default:
                mac->fec_ability = 0;
                break;
        }
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
                                        u16 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
                                 mac->supported);

        hclge_convert_setting_sr(mac, speed_ability);
        hclge_convert_setting_lr(mac, speed_ability);
        hclge_convert_setting_cr(mac, speed_ability);
        if (hnae3_dev_fec_supported(hdev))
                hclge_convert_setting_fec(mac);

        if (hnae3_dev_pause_supported(hdev))
                linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

        linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
                                            u16 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        hclge_convert_setting_kr(mac, speed_ability);
        if (hnae3_dev_fec_supported(hdev))
                hclge_convert_setting_fec(mac);

        if (hnae3_dev_pause_supported(hdev))
                linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

        linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
                                         u16 speed_ability)
{
        unsigned long *supported = hdev->hw.mac.supported;

        /* default to supporting all speeds for a GE port */
        if (!speed_ability)
                speed_ability = HCLGE_SUPPORT_GE;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                                 supported);

        if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                                 supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
                                 supported);
        }

        if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
        }

        if (hnae3_dev_pause_supported(hdev)) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
        }

        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
        u8 media_type = hdev->hw.mac.media_type;

        if (media_type == HNAE3_MEDIA_TYPE_FIBER)
                hclge_parse_fiber_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
                hclge_parse_copper_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
                hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                return HCLGE_MAC_SPEED_200G;

        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                return HCLGE_MAC_SPEED_100G;

        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                return HCLGE_MAC_SPEED_50G;

        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                return HCLGE_MAC_SPEED_40G;

        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                return HCLGE_MAC_SPEED_25G;

        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                return HCLGE_MAC_SPEED_10G;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                return HCLGE_MAC_SPEED_1G;

        if (speed_ability & HCLGE_SUPPORT_100M_BIT)
                return HCLGE_MAC_SPEED_100M;

        if (speed_ability & HCLGE_SUPPORT_10M_BIT)
                return HCLGE_MAC_SPEED_10M;

        return HCLGE_MAC_SPEED_1G;
}

1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define HCLGE_TX_SPARE_SIZE_UNIT                4096
1283 #define SPEED_ABILITY_EXT_SHIFT                 8
1284
1285         struct hclge_cfg_param_cmd *req;
1286         u64 mac_addr_tmp_high;
1287         u16 speed_ability_ext;
1288         u64 mac_addr_tmp;
1289         unsigned int i;
1290
1291         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1292
1293         /* get the configuration */
1294         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1295                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1296         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1297                                             HCLGE_CFG_TQP_DESC_N_M,
1298                                             HCLGE_CFG_TQP_DESC_N_S);
1299
1300         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1301                                         HCLGE_CFG_PHY_ADDR_M,
1302                                         HCLGE_CFG_PHY_ADDR_S);
1303         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1304                                           HCLGE_CFG_MEDIA_TP_M,
1305                                           HCLGE_CFG_MEDIA_TP_S);
1306         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307                                           HCLGE_CFG_RX_BUF_LEN_M,
1308                                           HCLGE_CFG_RX_BUF_LEN_S);
1309         /* get mac_address */
1310         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1311         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1312                                             HCLGE_CFG_MAC_ADDR_H_M,
1313                                             HCLGE_CFG_MAC_ADDR_H_S);
1314
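             /* the remaining high bits of the MAC address live at bit offset
              * 32; the two-step shift below is equivalent to "<< 32"
              */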
1315         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1316
1317         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1318                                              HCLGE_CFG_DEFAULT_SPEED_M,
1319                                              HCLGE_CFG_DEFAULT_SPEED_S);
1320         cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1321                                                HCLGE_CFG_RSS_SIZE_M,
1322                                                HCLGE_CFG_RSS_SIZE_S);
1323
1324         for (i = 0; i < ETH_ALEN; i++)
1325                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1326
1327         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1328         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1329
1330         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1331                                              HCLGE_CFG_SPEED_ABILITY_M,
1332                                              HCLGE_CFG_SPEED_ABILITY_S);
1333         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1334                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1335                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1336         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1337
1338         cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1339                                                HCLGE_CFG_VLAN_FLTR_CAP_M,
1340                                                HCLGE_CFG_VLAN_FLTR_CAP_S);
1341
1342         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1343                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1344                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1345         if (!cfg->umv_space)
1346                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1347
1348         cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1349                                                HCLGE_CFG_PF_RSS_SIZE_M,
1350                                                HCLGE_CFG_PF_RSS_SIZE_S);
1351
1352         /* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max RSS size as a
1353          * power-of-2 exponent, instead of the size itself. This leaves
1354          * room for future changes and expansions.
1355          * A PF field of 0 is not meaningful on its own; in that case the
1356          * PF falls back to the VF max RSS size field (HCLGE_CFG_RSS_SIZE_S),
1357          * so PF and VF share the same maximum RSS size.
1358          */
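             /* for example, a field value of 6 (a purely hypothetical sample)
              * decodes to a PF max RSS size of 1U << 6 = 64
              */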
1359         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1360                                1U << cfg->pf_rss_size_max :
1361                                cfg->vf_rss_size_max;
1362
1363         /* The unit of the tx spare buffer size queried from configuration
1364          * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1365          * needed here.
1366          */
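             /* for example, a raw value of 2 (a purely hypothetical sample)
              * corresponds to 2 * 4096 = 8192 bytes of tx spare buffer
              */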
1367         cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1368                                                  HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1369                                                  HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1370         cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1371 }
1372
1373 /* hclge_get_cfg: query the static parameters from flash
1374  * @hdev: pointer to struct hclge_dev
1375  * @hcfg: the config structure to be filled in
1376  */
1377 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1378 {
1379         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1380         struct hclge_cfg_param_cmd *req;
1381         unsigned int i;
1382         int ret;
1383
1384         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1385                 u32 offset = 0;
1386
1387                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1388                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1389                                            true);
1390                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1391                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1392                 /* len is passed to hardware in units of 4 bytes, hence the division below */
1393                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1394                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1395                 req->offset = cpu_to_le32(offset);
1396         }
1397
1398         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1399         if (ret) {
1400                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1401                 return ret;
1402         }
1403
1404         hclge_parse_cfg(hcfg, desc);
1405
1406         return 0;
1407 }
1408
1409 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1410 {
1411 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1412
1413         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1414
1415         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1416         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1417         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1418         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1419         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1420         ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1421         ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1422 }
1423
1424 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1425                                   struct hclge_desc *desc)
1426 {
1427         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1428         struct hclge_dev_specs_0_cmd *req0;
1429         struct hclge_dev_specs_1_cmd *req1;
1430
1431         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1432         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1433
1434         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1435         ae_dev->dev_specs.rss_ind_tbl_size =
1436                 le16_to_cpu(req0->rss_ind_tbl_size);
1437         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1438         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1439         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1440         ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1441         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1442         ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1443 }
1444
1445 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1446 {
1447         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1448
1449         if (!dev_specs->max_non_tso_bd_num)
1450                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1451         if (!dev_specs->rss_ind_tbl_size)
1452                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1453         if (!dev_specs->rss_key_size)
1454                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1455         if (!dev_specs->max_tm_rate)
1456                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1457         if (!dev_specs->max_qset_num)
1458                 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1459         if (!dev_specs->max_int_gl)
1460                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1461         if (!dev_specs->max_frm_size)
1462                 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1463 }
1464
1465 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1466 {
1467         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1468         int ret;
1469         int i;
1470
1471         /* set default specifications as devices lower than version V3 do not
1472          * support querying specifications from firmware.
1473          */
1474         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1475                 hclge_set_default_dev_specs(hdev);
1476                 return 0;
1477         }
1478
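             /* chain the query descriptors: every descriptor except the last
              * one carries the NEXT flag
              */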
1479         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1480                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1481                                            true);
1482                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1483         }
1484         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1485
1486         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1487         if (ret)
1488                 return ret;
1489
1490         hclge_parse_dev_specs(hdev, desc);
1491         hclge_check_dev_specs(hdev);
1492
1493         return 0;
1494 }
1495
1496 static int hclge_get_cap(struct hclge_dev *hdev)
1497 {
1498         int ret;
1499
1500         ret = hclge_query_function_status(hdev);
1501         if (ret) {
1502                 dev_err(&hdev->pdev->dev,
1503                         "query function status error %d.\n", ret);
1504                 return ret;
1505         }
1506
1507         /* get pf resource */
1508         return hclge_query_pf_resource(hdev);
1509 }
1510
1511 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1512 {
1513 #define HCLGE_MIN_TX_DESC       64
1514 #define HCLGE_MIN_RX_DESC       64
1515
1516         if (!is_kdump_kernel())
1517                 return;
1518
1519         dev_info(&hdev->pdev->dev,
1520                  "Running kdump kernel. Using minimal resources\n");
1521
1522         /* the minimal number of queue pairs equals the number of vports (PF + VFs) */
1523         hdev->num_tqps = hdev->num_req_vfs + 1;
1524         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1525         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1526 }
1527
1528 static int hclge_configure(struct hclge_dev *hdev)
1529 {
1530         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1531         const struct cpumask *cpumask = cpu_online_mask;
1532         struct hclge_cfg cfg;
1533         unsigned int i;
1534         int node, ret;
1535
1536         ret = hclge_get_cfg(hdev, &cfg);
1537         if (ret)
1538                 return ret;
1539
1540         hdev->base_tqp_pid = 0;
1541         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1542         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1543         hdev->rx_buf_len = cfg.rx_buf_len;
1544         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1545         hdev->hw.mac.media_type = cfg.media_type;
1546         hdev->hw.mac.phy_addr = cfg.phy_addr;
1547         hdev->num_tx_desc = cfg.tqp_desc_num;
1548         hdev->num_rx_desc = cfg.tqp_desc_num;
1549         hdev->tm_info.num_pg = 1;
1550         hdev->tc_max = cfg.tc_num;
1551         hdev->tm_info.hw_pfc_map = 0;
1552         hdev->wanted_umv_size = cfg.umv_space;
1553         hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1554         hdev->gro_en = true;
1555         if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1556                 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1557
1558         if (hnae3_dev_fd_supported(hdev)) {
1559                 hdev->fd_en = true;
1560                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1561         }
1562
1563         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1564         if (ret) {
1565                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1566                         cfg.default_speed, ret);
1567                 return ret;
1568         }
1569
1570         hclge_parse_link_mode(hdev, cfg.speed_ability);
1571
1572         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1573
1574         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1575             (hdev->tc_max < 1)) {
1576                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1577                          hdev->tc_max);
1578                 hdev->tc_max = 1;
1579         }
1580
1581         /* Dev does not support DCB */
1582         if (!hnae3_dev_dcb_supported(hdev)) {
1583                 hdev->tc_max = 1;
1584                 hdev->pfc_max = 0;
1585         } else {
1586                 hdev->pfc_max = hdev->tc_max;
1587         }
1588
1589         hdev->tm_info.num_tc = 1;
1590
1591         /* Non-contiguous TCs are currently not supported */
1592         for (i = 0; i < hdev->tm_info.num_tc; i++)
1593                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1594
1595         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1596
1597         hclge_init_kdump_kernel_config(hdev);
1598
1599         /* Set the affinity based on numa node */
1600         node = dev_to_node(&hdev->pdev->dev);
1601         if (node != NUMA_NO_NODE)
1602                 cpumask = cpumask_of_node(node);
1603
1604         cpumask_copy(&hdev->affinity_mask, cpumask);
1605
1606         return ret;
1607 }
1608
1609 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1610                             u16 tso_mss_max)
1611 {
1612         struct hclge_cfg_tso_status_cmd *req;
1613         struct hclge_desc desc;
1614
1615         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1616
1617         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1618         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1619         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1620
1621         return hclge_cmd_send(&hdev->hw, &desc, 1);
1622 }
1623
1624 static int hclge_config_gro(struct hclge_dev *hdev)
1625 {
1626         struct hclge_cfg_gro_status_cmd *req;
1627         struct hclge_desc desc;
1628         int ret;
1629
1630         if (!hnae3_dev_gro_supported(hdev))
1631                 return 0;
1632
1633         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1634         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1635
1636         req->gro_en = hdev->gro_en ? 1 : 0;
1637
1638         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1639         if (ret)
1640                 dev_err(&hdev->pdev->dev,
1641                         "GRO hardware config cmd failed, ret = %d\n", ret);
1642
1643         return ret;
1644 }
1645
1646 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1647 {
1648         struct hclge_tqp *tqp;
1649         int i;
1650
1651         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1652                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1653         if (!hdev->htqp)
1654                 return -ENOMEM;
1655
1656         tqp = hdev->htqp;
1657
1658         for (i = 0; i < hdev->num_tqps; i++) {
1659                 tqp->dev = &hdev->pdev->dev;
1660                 tqp->index = i;
1661
1662                 tqp->q.ae_algo = &ae_algo;
1663                 tqp->q.buf_size = hdev->rx_buf_len;
1664                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1665                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1666
1667                 /* need an extended offset to configure queues >=
1668                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1669                  */
1670                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1671                         tqp->q.io_base = hdev->hw.io_base +
1672                                          HCLGE_TQP_REG_OFFSET +
1673                                          i * HCLGE_TQP_REG_SIZE;
1674                 else
1675                         tqp->q.io_base = hdev->hw.io_base +
1676                                          HCLGE_TQP_REG_OFFSET +
1677                                          HCLGE_TQP_EXT_REG_OFFSET +
1678                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1679                                          HCLGE_TQP_REG_SIZE;
1680
1681                 tqp++;
1682         }
1683
1684         return 0;
1685 }
1686
1687 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1688                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1689 {
1690         struct hclge_tqp_map_cmd *req;
1691         struct hclge_desc desc;
1692         int ret;
1693
1694         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1695
1696         req = (struct hclge_tqp_map_cmd *)desc.data;
1697         req->tqp_id = cpu_to_le16(tqp_pid);
1698         req->tqp_vf = func_id;
1699         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1700         if (!is_pf)
1701                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1702         req->tqp_vid = cpu_to_le16(tqp_vid);
1703
1704         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1705         if (ret)
1706                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1707
1708         return ret;
1709 }
1710
1711 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1712 {
1713         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1714         struct hclge_dev *hdev = vport->back;
1715         int i, alloced;
1716
1717         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1718              alloced < num_tqps; i++) {
1719                 if (!hdev->htqp[i].alloced) {
1720                         hdev->htqp[i].q.handle = &vport->nic;
1721                         hdev->htqp[i].q.tqp_index = alloced;
1722                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1723                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1724                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1725                         hdev->htqp[i].alloced = true;
1726                         alloced++;
1727                 }
1728         }
1729         vport->alloc_tqps = alloced;
1730         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1731                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1732
1733         /* ensure a one-to-one mapping between irq and queue by default */
1734         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1735                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1736
1737         return 0;
1738 }
1739
1740 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1741                             u16 num_tx_desc, u16 num_rx_desc)
1742
1743 {
1744         struct hnae3_handle *nic = &vport->nic;
1745         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1746         struct hclge_dev *hdev = vport->back;
1747         int ret;
1748
1749         kinfo->num_tx_desc = num_tx_desc;
1750         kinfo->num_rx_desc = num_rx_desc;
1751
1752         kinfo->rx_buf_len = hdev->rx_buf_len;
1753         kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1754
1755         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1756                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1757         if (!kinfo->tqp)
1758                 return -ENOMEM;
1759
1760         ret = hclge_assign_tqp(vport, num_tqps);
1761         if (ret)
1762                 dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1763
1764         return ret;
1765 }
1766
1767 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1768                                   struct hclge_vport *vport)
1769 {
1770         struct hnae3_handle *nic = &vport->nic;
1771         struct hnae3_knic_private_info *kinfo;
1772         u16 i;
1773
1774         kinfo = &nic->kinfo;
1775         for (i = 0; i < vport->alloc_tqps; i++) {
1776                 struct hclge_tqp *q =
1777                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1778                 bool is_pf;
1779                 int ret;
1780
1781                 is_pf = !(vport->vport_id);
1782                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1783                                              i, is_pf);
1784                 if (ret)
1785                         return ret;
1786         }
1787
1788         return 0;
1789 }
1790
1791 static int hclge_map_tqp(struct hclge_dev *hdev)
1792 {
1793         struct hclge_vport *vport = hdev->vport;
1794         u16 i, num_vport;
1795
1796         num_vport = hdev->num_req_vfs + 1;
1797         for (i = 0; i < num_vport; i++) {
1798                 int ret;
1799
1800                 ret = hclge_map_tqp_to_vport(hdev, vport);
1801                 if (ret)
1802                         return ret;
1803
1804                 vport++;
1805         }
1806
1807         return 0;
1808 }
1809
1810 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1811 {
1812         struct hnae3_handle *nic = &vport->nic;
1813         struct hclge_dev *hdev = vport->back;
1814         int ret;
1815
1816         nic->pdev = hdev->pdev;
1817         nic->ae_algo = &ae_algo;
1818         nic->numa_node_mask = hdev->numa_node_mask;
1819         nic->kinfo.io_base = hdev->hw.io_base;
1820
1821         ret = hclge_knic_setup(vport, num_tqps,
1822                                hdev->num_tx_desc, hdev->num_rx_desc);
1823         if (ret)
1824                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1825
1826         return ret;
1827 }
1828
1829 static int hclge_alloc_vport(struct hclge_dev *hdev)
1830 {
1831         struct pci_dev *pdev = hdev->pdev;
1832         struct hclge_vport *vport;
1833         u32 tqp_main_vport;
1834         u32 tqp_per_vport;
1835         int num_vport, i;
1836         int ret;
1837
1838         /* We need to alloc a vport for the main NIC of the PF */
1839         num_vport = hdev->num_req_vfs + 1;
1840
1841         if (hdev->num_tqps < num_vport) {
1842                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1843                         hdev->num_tqps, num_vport);
1844                 return -EINVAL;
1845         }
1846
1847         /* Alloc the same number of TQPs for every vport; the main vport takes the remainder as well */
1848         tqp_per_vport = hdev->num_tqps / num_vport;
1849         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1850
1851         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1852                              GFP_KERNEL);
1853         if (!vport)
1854                 return -ENOMEM;
1855
1856         hdev->vport = vport;
1857         hdev->num_alloc_vport = num_vport;
1858
1859         if (IS_ENABLED(CONFIG_PCI_IOV))
1860                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1861
1862         for (i = 0; i < num_vport; i++) {
1863                 vport->back = hdev;
1864                 vport->vport_id = i;
1865                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1866                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1867                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1868                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1869                 vport->req_vlan_fltr_en = true;
1870                 INIT_LIST_HEAD(&vport->vlan_list);
1871                 INIT_LIST_HEAD(&vport->uc_mac_list);
1872                 INIT_LIST_HEAD(&vport->mc_mac_list);
1873                 spin_lock_init(&vport->mac_list_lock);
1874
1875                 if (i == 0)
1876                         ret = hclge_vport_setup(vport, tqp_main_vport);
1877                 else
1878                         ret = hclge_vport_setup(vport, tqp_per_vport);
1879                 if (ret) {
1880                         dev_err(&pdev->dev,
1881                                 "vport setup failed for vport %d, %d\n",
1882                                 i, ret);
1883                         return ret;
1884                 }
1885
1886                 vport++;
1887         }
1888
1889         return 0;
1890 }
1891
1892 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1893                                     struct hclge_pkt_buf_alloc *buf_alloc)
1894 {
1895 /* TX buffer size is in units of 128 bytes */
1896 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1897 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1898         struct hclge_tx_buff_alloc_cmd *req;
1899         struct hclge_desc desc;
1900         int ret;
1901         u8 i;
1902
1903         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1904
1905         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1906         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1907                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1908
1909                 req->tx_pkt_buff[i] =
1910                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1911                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1912         }
1913
1914         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1915         if (ret)
1916                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1917                         ret);
1918
1919         return ret;
1920 }
1921
1922 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1923                                  struct hclge_pkt_buf_alloc *buf_alloc)
1924 {
1925         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1926
1927         if (ret)
1928                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1929
1930         return ret;
1931 }
1932
1933 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1934 {
1935         unsigned int i;
1936         u32 cnt = 0;
1937
1938         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1939                 if (hdev->hw_tc_map & BIT(i))
1940                         cnt++;
1941         return cnt;
1942 }
1943
1944 /* Get the number of PFC-enabled TCs that have a private buffer */
1945 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1946                                   struct hclge_pkt_buf_alloc *buf_alloc)
1947 {
1948         struct hclge_priv_buf *priv;
1949         unsigned int i;
1950         int cnt = 0;
1951
1952         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1953                 priv = &buf_alloc->priv_buf[i];
1954                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1955                     priv->enable)
1956                         cnt++;
1957         }
1958
1959         return cnt;
1960 }
1961
1962 /* Get the number of PFC-disabled TCs that have a private buffer */
1963 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1964                                      struct hclge_pkt_buf_alloc *buf_alloc)
1965 {
1966         struct hclge_priv_buf *priv;
1967         unsigned int i;
1968         int cnt = 0;
1969
1970         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1971                 priv = &buf_alloc->priv_buf[i];
1972                 if (hdev->hw_tc_map & BIT(i) &&
1973                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1974                     priv->enable)
1975                         cnt++;
1976         }
1977
1978         return cnt;
1979 }
1980
1981 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1982 {
1983         struct hclge_priv_buf *priv;
1984         u32 rx_priv = 0;
1985         int i;
1986
1987         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1988                 priv = &buf_alloc->priv_buf[i];
1989                 if (priv->enable)
1990                         rx_priv += priv->buf_size;
1991         }
1992         return rx_priv;
1993 }
1994
1995 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1996 {
1997         u32 i, total_tx_size = 0;
1998
1999         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2000                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2001
2002         return total_tx_size;
2003 }
2004
2005 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2006                                 struct hclge_pkt_buf_alloc *buf_alloc,
2007                                 u32 rx_all)
2008 {
2009         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2010         u32 tc_num = hclge_get_tc_num(hdev);
2011         u32 shared_buf, aligned_mps;
2012         u32 rx_priv;
2013         int i;
2014
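             /* check that the space left after the private buffers (rx_all -
              * rx_priv) can hold the required shared buffer, then size the
              * shared buffer and its per-TC thresholds accordingly
              */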
2015         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2016
2017         if (hnae3_dev_dcb_supported(hdev))
2018                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2019                                         hdev->dv_buf_size;
2020         else
2021                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2022                                         + hdev->dv_buf_size;
2023
2024         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2025         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2026                              HCLGE_BUF_SIZE_UNIT);
2027
2028         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2029         if (rx_all < rx_priv + shared_std)
2030                 return false;
2031
2032         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2033         buf_alloc->s_buf.buf_size = shared_buf;
2034         if (hnae3_dev_dcb_supported(hdev)) {
2035                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2036                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2037                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2038                                   HCLGE_BUF_SIZE_UNIT);
2039         } else {
2040                 buf_alloc->s_buf.self.high = aligned_mps +
2041                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
2042                 buf_alloc->s_buf.self.low = aligned_mps;
2043         }
2044
2045         if (hnae3_dev_dcb_supported(hdev)) {
2046                 hi_thrd = shared_buf - hdev->dv_buf_size;
2047
2048                 if (tc_num <= NEED_RESERVE_TC_NUM)
2049                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2050                                         / BUF_MAX_PERCENT;
2051
2052                 if (tc_num)
2053                         hi_thrd = hi_thrd / tc_num;
2054
2055                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2056                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2057                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2058         } else {
2059                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2060                 lo_thrd = aligned_mps;
2061         }
2062
2063         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2064                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2065                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2066         }
2067
2068         return true;
2069 }
2070
2071 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2072                                 struct hclge_pkt_buf_alloc *buf_alloc)
2073 {
2074         u32 i, total_size;
2075
2076         total_size = hdev->pkt_buf_size;
2077
2078         /* alloc tx buffer for all enabled tc */
2079         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2080                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2081
2082                 if (hdev->hw_tc_map & BIT(i)) {
2083                         if (total_size < hdev->tx_buf_size)
2084                                 return -ENOMEM;
2085
2086                         priv->tx_buf_size = hdev->tx_buf_size;
2087                 } else {
2088                         priv->tx_buf_size = 0;
2089                 }
2090
2091                 total_size -= priv->tx_buf_size;
2092         }
2093
2094         return 0;
2095 }
2096
2097 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2098                                   struct hclge_pkt_buf_alloc *buf_alloc)
2099 {
2100         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2101         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2102         unsigned int i;
2103
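             /* give each enabled TC a private buffer and watermarks, using
              * either the larger ("max") or the smaller scheme, then verify
              * the leftover space still fits the shared buffer
              */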
2104         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2105                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2106
2107                 priv->enable = 0;
2108                 priv->wl.low = 0;
2109                 priv->wl.high = 0;
2110                 priv->buf_size = 0;
2111
2112                 if (!(hdev->hw_tc_map & BIT(i)))
2113                         continue;
2114
2115                 priv->enable = 1;
2116
2117                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2118                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2119                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2120                                                 HCLGE_BUF_SIZE_UNIT);
2121                 } else {
2122                         priv->wl.low = 0;
2123                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2124                                         aligned_mps;
2125                 }
2126
2127                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2128         }
2129
2130         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2131 }
2132
2133 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2134                                           struct hclge_pkt_buf_alloc *buf_alloc)
2135 {
2136         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2137         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2138         int i;
2139
2140         /* clear from the highest-numbered TC first */
2141         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2142                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2143                 unsigned int mask = BIT((unsigned int)i);
2144
2145                 if (hdev->hw_tc_map & mask &&
2146                     !(hdev->tm_info.hw_pfc_map & mask)) {
2147                         /* Clear this non-PFC TC's private buffer */
2148                         priv->wl.low = 0;
2149                         priv->wl.high = 0;
2150                         priv->buf_size = 0;
2151                         priv->enable = 0;
2152                         no_pfc_priv_num--;
2153                 }
2154
2155                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2156                     no_pfc_priv_num == 0)
2157                         break;
2158         }
2159
2160         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2161 }
2162
2163 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2164                                         struct hclge_pkt_buf_alloc *buf_alloc)
2165 {
2166         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2167         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2168         int i;
2169
2170         /* clear from the highest-numbered TC first */
2171         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2172                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2173                 unsigned int mask = BIT((unsigned int)i);
2174
2175                 if (hdev->hw_tc_map & mask &&
2176                     hdev->tm_info.hw_pfc_map & mask) {
2177                         /* Clear this PFC TC's private buffer to reduce the count */
2178                         priv->wl.low = 0;
2179                         priv->enable = 0;
2180                         priv->wl.high = 0;
2181                         priv->buf_size = 0;
2182                         pfc_priv_num--;
2183                 }
2184
2185                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2186                     pfc_priv_num == 0)
2187                         break;
2188         }
2189
2190         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2191 }
2192
2193 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2194                                       struct hclge_pkt_buf_alloc *buf_alloc)
2195 {
2196 #define COMPENSATE_BUFFER       0x3C00
2197 #define COMPENSATE_HALF_MPS_NUM 5
2198 #define PRIV_WL_GAP             0x1800
2199
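             /* split the whole rx buffer evenly into per-TC private buffers
              * and leave no shared buffer; give up if the per-TC share would
              * fall below the minimum required size
              */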
2200         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2201         u32 tc_num = hclge_get_tc_num(hdev);
2202         u32 half_mps = hdev->mps >> 1;
2203         u32 min_rx_priv;
2204         unsigned int i;
2205
2206         if (tc_num)
2207                 rx_priv = rx_priv / tc_num;
2208
2209         if (tc_num <= NEED_RESERVE_TC_NUM)
2210                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2211
2212         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2213                         COMPENSATE_HALF_MPS_NUM * half_mps;
2214         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2215         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2216         if (rx_priv < min_rx_priv)
2217                 return false;
2218
2219         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2220                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2221
2222                 priv->enable = 0;
2223                 priv->wl.low = 0;
2224                 priv->wl.high = 0;
2225                 priv->buf_size = 0;
2226
2227                 if (!(hdev->hw_tc_map & BIT(i)))
2228                         continue;
2229
2230                 priv->enable = 1;
2231                 priv->buf_size = rx_priv;
2232                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2233                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2234         }
2235
2236         buf_alloc->s_buf.buf_size = 0;
2237
2238         return true;
2239 }
2240
2241 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2242  * @hdev: pointer to struct hclge_dev
2243  * @buf_alloc: pointer to buffer calculation data
2244  * @return: 0 if the calculation succeeds, negative errno otherwise
2245  */
2246 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2247                                 struct hclge_pkt_buf_alloc *buf_alloc)
2248 {
2249         /* When DCB is not supported, rx private buffer is not allocated. */
2250         if (!hnae3_dev_dcb_supported(hdev)) {
2251                 u32 rx_all = hdev->pkt_buf_size;
2252
2253                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2254                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2255                         return -ENOMEM;
2256
2257                 return 0;
2258         }
2259
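             /* try the allocation strategies from the most to the least
              * generous: private buffers only, all TCs with large watermarks,
              * all TCs with small watermarks, then dropping private buffers
              * for non-PFC TCs and finally for PFC TCs
              */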
2260         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2261                 return 0;
2262
2263         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2264                 return 0;
2265
2266         /* try to decrease the buffer size */
2267         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2268                 return 0;
2269
2270         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2271                 return 0;
2272
2273         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2274                 return 0;
2275
2276         return -ENOMEM;
2277 }
2278
2279 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2280                                    struct hclge_pkt_buf_alloc *buf_alloc)
2281 {
2282         struct hclge_rx_priv_buff_cmd *req;
2283         struct hclge_desc desc;
2284         int ret;
2285         int i;
2286
2287         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2288         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2289
2290         /* Alloc private buffer TCs */
2291         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2292                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2293
2294                 req->buf_num[i] =
2295                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2296                 req->buf_num[i] |=
2297                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2298         }
2299
2300         req->shared_buf =
2301                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2302                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2303
2304         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2305         if (ret)
2306                 dev_err(&hdev->pdev->dev,
2307                         "rx private buffer alloc cmd failed %d\n", ret);
2308
2309         return ret;
2310 }
2311
2312 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2313                                    struct hclge_pkt_buf_alloc *buf_alloc)
2314 {
2315         struct hclge_rx_priv_wl_buf *req;
2316         struct hclge_priv_buf *priv;
2317         struct hclge_desc desc[2];
2318         int i, j;
2319         int ret;
2320
2321         for (i = 0; i < 2; i++) {
2322                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2323                                            false);
2324                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2325
2326                 /* The first descriptor sets the NEXT bit to 1 */
2327                 if (i == 0)
2328                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2329                 else
2330                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2331
2332                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2333                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2334
2335                         priv = &buf_alloc->priv_buf[idx];
2336                         req->tc_wl[j].high =
2337                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2338                         req->tc_wl[j].high |=
2339                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2340                         req->tc_wl[j].low =
2341                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2342                         req->tc_wl[j].low |=
2343                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2344                 }
2345         }
2346
2347         /* Send 2 descriptors at one time */
2348         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2349         if (ret)
2350                 dev_err(&hdev->pdev->dev,
2351                         "rx private waterline config cmd failed %d\n",
2352                         ret);
2353         return ret;
2354 }
2355
2356 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2357                                     struct hclge_pkt_buf_alloc *buf_alloc)
2358 {
2359         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2360         struct hclge_rx_com_thrd *req;
2361         struct hclge_desc desc[2];
2362         struct hclge_tc_thrd *tc;
2363         int i, j;
2364         int ret;
2365
2366         for (i = 0; i < 2; i++) {
2367                 hclge_cmd_setup_basic_desc(&desc[i],
2368                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2369                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2370
2371                 /* The first descriptor sets the NEXT bit to 1 */
2372                 if (i == 0)
2373                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2374                 else
2375                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2376
2377                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2378                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2379
2380                         req->com_thrd[j].high =
2381                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2382                         req->com_thrd[j].high |=
2383                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2384                         req->com_thrd[j].low =
2385                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2386                         req->com_thrd[j].low |=
2387                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2388                 }
2389         }
2390
2391         /* Send 2 descriptors at one time */
2392         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2393         if (ret)
2394                 dev_err(&hdev->pdev->dev,
2395                         "common threshold config cmd failed %d\n", ret);
2396         return ret;
2397 }
2398
2399 static int hclge_common_wl_config(struct hclge_dev *hdev,
2400                                   struct hclge_pkt_buf_alloc *buf_alloc)
2401 {
2402         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2403         struct hclge_rx_com_wl *req;
2404         struct hclge_desc desc;
2405         int ret;
2406
2407         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2408
2409         req = (struct hclge_rx_com_wl *)desc.data;
2410         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2411         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2412
2413         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2414         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2415
2416         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2417         if (ret)
2418                 dev_err(&hdev->pdev->dev,
2419                         "common waterline config cmd failed %d\n", ret);
2420
2421         return ret;
2422 }
2423
2424 int hclge_buffer_alloc(struct hclge_dev *hdev)
2425 {
2426         struct hclge_pkt_buf_alloc *pkt_buf;
2427         int ret;
2428
2429         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2430         if (!pkt_buf)
2431                 return -ENOMEM;
2432
2433         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2434         if (ret) {
2435                 dev_err(&hdev->pdev->dev,
2436                         "could not calc tx buffer size for all TCs %d\n", ret);
2437                 goto out;
2438         }
2439
2440         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2441         if (ret) {
2442                 dev_err(&hdev->pdev->dev,
2443                         "could not alloc tx buffers %d\n", ret);
2444                 goto out;
2445         }
2446
2447         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2448         if (ret) {
2449                 dev_err(&hdev->pdev->dev,
2450                         "could not calc rx priv buffer size for all TCs %d\n",
2451                         ret);
2452                 goto out;
2453         }
2454
2455         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2456         if (ret) {
2457                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2458                         ret);
2459                 goto out;
2460         }
2461
2462         if (hnae3_dev_dcb_supported(hdev)) {
2463                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2464                 if (ret) {
2465                         dev_err(&hdev->pdev->dev,
2466                                 "could not configure rx private waterline %d\n",
2467                                 ret);
2468                         goto out;
2469                 }
2470
2471                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2472                 if (ret) {
2473                         dev_err(&hdev->pdev->dev,
2474                                 "could not configure common threshold %d\n",
2475                                 ret);
2476                         goto out;
2477                 }
2478         }
2479
2480         ret = hclge_common_wl_config(hdev, pkt_buf);
2481         if (ret)
2482                 dev_err(&hdev->pdev->dev,
2483                         "could not configure common waterline %d\n", ret);
2484
2485 out:
2486         kfree(pkt_buf);
2487         return ret;
2488 }
2489
2490 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2491 {
2492         struct hnae3_handle *roce = &vport->roce;
2493         struct hnae3_handle *nic = &vport->nic;
2494         struct hclge_dev *hdev = vport->back;
2495
2496         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2497
2498         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2499                 return -EINVAL;
2500
2501         roce->rinfo.base_vector = hdev->roce_base_vector;
2502
2503         roce->rinfo.netdev = nic->kinfo.netdev;
2504         roce->rinfo.roce_io_base = hdev->hw.io_base;
2505         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2506
2507         roce->pdev = nic->pdev;
2508         roce->ae_algo = nic->ae_algo;
2509         roce->numa_node_mask = nic->numa_node_mask;
2510
2511         return 0;
2512 }
2513
2514 static int hclge_init_msi(struct hclge_dev *hdev)
2515 {
2516         struct pci_dev *pdev = hdev->pdev;
2517         int vectors;
2518         int i;
2519
2520         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2521                                         hdev->num_msi,
2522                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2523         if (vectors < 0) {
2524                 dev_err(&pdev->dev,
2525                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2526                         vectors);
2527                 return vectors;
2528         }
2529         if (vectors < hdev->num_msi)
2530                 dev_warn(&hdev->pdev->dev,
2531                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2532                          hdev->num_msi, vectors);
2533
2534         hdev->num_msi = vectors;
2535         hdev->num_msi_left = vectors;
2536
2537         hdev->base_msi_vector = pdev->irq;
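             /* the RoCE vectors are laid out right after the NIC vectors in
              * the allocated MSI/MSI-X range
              */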
2538         hdev->roce_base_vector = hdev->base_msi_vector +
2539                                 hdev->num_nic_msi;
2540
2541         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2542                                            sizeof(u16), GFP_KERNEL);
2543         if (!hdev->vector_status) {
2544                 pci_free_irq_vectors(pdev);
2545                 return -ENOMEM;
2546         }
2547
2548         for (i = 0; i < hdev->num_msi; i++)
2549                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2550
2551         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2552                                         sizeof(int), GFP_KERNEL);
2553         if (!hdev->vector_irq) {
2554                 pci_free_irq_vectors(pdev);
2555                 return -ENOMEM;
2556         }
2557
2558         return 0;
2559 }
2560
2561 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2562 {
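             /* half duplex is only meaningful at 10M/100M; force full duplex
              * for every other speed
              */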
2563         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2564                 duplex = HCLGE_MAC_FULL;
2565
2566         return duplex;
2567 }
2568
2569 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2570                                       u8 duplex)
2571 {
2572         struct hclge_config_mac_speed_dup_cmd *req;
2573         struct hclge_desc desc;
2574         int ret;
2575
2576         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2577
2578         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2579
2580         if (duplex)
2581                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2582
2583         switch (speed) {
2584         case HCLGE_MAC_SPEED_10M:
2585                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2587                 break;
2588         case HCLGE_MAC_SPEED_100M:
2589                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2591                 break;
2592         case HCLGE_MAC_SPEED_1G:
2593                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2595                 break;
2596         case HCLGE_MAC_SPEED_10G:
2597                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2598                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2599                 break;
2600         case HCLGE_MAC_SPEED_25G:
2601                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2602                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2603                 break;
2604         case HCLGE_MAC_SPEED_40G:
2605                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2606                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2607                 break;
2608         case HCLGE_MAC_SPEED_50G:
2609                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2610                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2611                 break;
2612         case HCLGE_MAC_SPEED_100G:
2613                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2614                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2615                 break;
2616         case HCLGE_MAC_SPEED_200G:
2617                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2618                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2619                 break;
2620         default:
2621                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2622                 return -EINVAL;
2623         }
2624
2625         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2626                       1);
2627
2628         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2629         if (ret) {
2630                 dev_err(&hdev->pdev->dev,
2631                         "mac speed/duplex config cmd failed %d.\n", ret);
2632                 return ret;
2633         }
2634
2635         return 0;
2636 }
2637
2638 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2639 {
2640         struct hclge_mac *mac = &hdev->hw.mac;
2641         int ret;
2642
2643         duplex = hclge_check_speed_dup(duplex, speed);
2644         if (!mac->support_autoneg && mac->speed == speed &&
2645             mac->duplex == duplex)
2646                 return 0;
2647
2648         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2649         if (ret)
2650                 return ret;
2651
2652         hdev->hw.mac.speed = speed;
2653         hdev->hw.mac.duplex = duplex;
2654
2655         return 0;
2656 }
2657
2658 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2659                                      u8 duplex)
2660 {
2661         struct hclge_vport *vport = hclge_get_vport(handle);
2662         struct hclge_dev *hdev = vport->back;
2663
2664         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2665 }
2666
2667 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2668 {
2669         struct hclge_config_auto_neg_cmd *req;
2670         struct hclge_desc desc;
2671         u32 flag = 0;
2672         int ret;
2673
2674         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2675
2676         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2677         if (enable)
2678                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2679         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2680
2681         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2682         if (ret)
2683                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2684                         ret);
2685
2686         return ret;
2687 }
2688
2689 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2690 {
2691         struct hclge_vport *vport = hclge_get_vport(handle);
2692         struct hclge_dev *hdev = vport->back;
2693
2694         if (!hdev->hw.mac.support_autoneg) {
2695                 if (enable) {
2696                         dev_err(&hdev->pdev->dev,
2697                                 "autoneg is not supported by current port\n");
2698                         return -EOPNOTSUPP;
2699                 } else {
2700                         return 0;
2701                 }
2702         }
2703
2704         return hclge_set_autoneg_en(hdev, enable);
2705 }
2706
2707 static int hclge_get_autoneg(struct hnae3_handle *handle)
2708 {
2709         struct hclge_vport *vport = hclge_get_vport(handle);
2710         struct hclge_dev *hdev = vport->back;
2711         struct phy_device *phydev = hdev->hw.mac.phydev;
2712
2713         if (phydev)
2714                 return phydev->autoneg;
2715
2716         return hdev->hw.mac.autoneg;
2717 }
2718
2719 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2720 {
2721         struct hclge_vport *vport = hclge_get_vport(handle);
2722         struct hclge_dev *hdev = vport->back;
2723         int ret;
2724
2725         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2726
2727         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2728         if (ret)
2729                 return ret;
2730         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2731 }
2732
2733 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2734 {
2735         struct hclge_vport *vport = hclge_get_vport(handle);
2736         struct hclge_dev *hdev = vport->back;
2737
2738         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2739                 return hclge_set_autoneg_en(hdev, !halt);
2740
2741         return 0;
2742 }
2743
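/* Program the requested FEC mode into hardware: BIT(HNAE3_FEC_AUTO) enables
 * automatic FEC selection, while BIT(HNAE3_FEC_RS) or BIT(HNAE3_FEC_BASER)
 * selects the explicit mode written to the command's mode field.
 * Hypothetical usage, for illustration only:
 *
 *	ret = hclge_set_fec_hw(hdev, BIT(HNAE3_FEC_RS));
 */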
2744 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2745 {
2746         struct hclge_config_fec_cmd *req;
2747         struct hclge_desc desc;
2748         int ret;
2749
2750         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2751
2752         req = (struct hclge_config_fec_cmd *)desc.data;
2753         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2754                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2755         if (fec_mode & BIT(HNAE3_FEC_RS))
2756                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2757                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2758         if (fec_mode & BIT(HNAE3_FEC_BASER))
2759                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2760                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2761
2762         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2763         if (ret)
2764                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2765
2766         return ret;
2767 }
2768
2769 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2770 {
2771         struct hclge_vport *vport = hclge_get_vport(handle);
2772         struct hclge_dev *hdev = vport->back;
2773         struct hclge_mac *mac = &hdev->hw.mac;
2774         int ret;
2775
2776         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2777                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2778                 return -EINVAL;
2779         }
2780
2781         ret = hclge_set_fec_hw(hdev, fec_mode);
2782         if (ret)
2783                 return ret;
2784
2785         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2786         return 0;
2787 }
2788
2789 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2790                           u8 *fec_mode)
2791 {
2792         struct hclge_vport *vport = hclge_get_vport(handle);
2793         struct hclge_dev *hdev = vport->back;
2794         struct hclge_mac *mac = &hdev->hw.mac;
2795
2796         if (fec_ability)
2797                 *fec_ability = mac->fec_ability;
2798         if (fec_mode)
2799                 *fec_mode = mac->fec_mode;
2800 }
2801
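/* MAC bring-up used at (re)initialization time: configure speed/duplex,
 * restore autoneg and any user-defined FEC setting, set the MTU, apply the
 * default loopback configuration and allocate packet buffers.
 */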
2802 static int hclge_mac_init(struct hclge_dev *hdev)
2803 {
2804         struct hclge_mac *mac = &hdev->hw.mac;
2805         int ret;
2806
2807         hdev->support_sfp_query = true;
2808         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2809         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2810                                          hdev->hw.mac.duplex);
2811         if (ret)
2812                 return ret;
2813
2814         if (hdev->hw.mac.support_autoneg) {
2815                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2816                 if (ret)
2817                         return ret;
2818         }
2819
2820         mac->link = 0;
2821
2822         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2823                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2824                 if (ret)
2825                         return ret;
2826         }
2827
2828         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2829         if (ret) {
2830                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2831                 return ret;
2832         }
2833
2834         ret = hclge_set_default_loopback(hdev);
2835         if (ret)
2836                 return ret;
2837
2838         ret = hclge_buffer_alloc(hdev);
2839         if (ret)
2840                 dev_err(&hdev->pdev->dev,
2841                         "allocate buffer fail, ret=%d\n", ret);
2842
2843         return ret;
2844 }
2845
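/* The mailbox, reset, error-handling and periodic services all share one
 * delayed work item (hdev->service_task) on the hclge workqueue.  The
 * scheduling helpers below set the relevant state bit (where one exists) and
 * queue the work on the first CPU of hdev->affinity_mask.  Hypothetical
 * usage, for illustration only:
 *
 *	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
 */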
2846 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2847 {
2848         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2849             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2850                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2851                                     hclge_wq, &hdev->service_task, 0);
2852 }
2853
2854 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2855 {
2856         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2857             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2858                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2859                                     hclge_wq, &hdev->service_task, 0);
2860 }
2861
2862 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2863 {
2864         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2865             !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2866                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2867                                     hclge_wq, &hdev->service_task, 0);
2868 }
2869
2870 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2871 {
2872         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2873             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2874                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2875                                     hclge_wq, &hdev->service_task,
2876                                     delay_time);
2877 }
2878
2879 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2880 {
2881         struct hclge_link_status_cmd *req;
2882         struct hclge_desc desc;
2883         int ret;
2884
2885         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2886         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2887         if (ret) {
2888                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2889                         ret);
2890                 return ret;
2891         }
2892
2893         req = (struct hclge_link_status_cmd *)desc.data;
2894         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2895                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2896
2897         return 0;
2898 }
2899
2900 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2901 {
2902         struct phy_device *phydev = hdev->hw.mac.phydev;
2903
2904         *link_status = HCLGE_LINK_STATUS_DOWN;
2905
2906         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2907                 return 0;
2908
2909         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2910                 return 0;
2911
2912         return hclge_get_mac_link_status(hdev, link_status);
2913 }
2914
2915 static void hclge_push_link_status(struct hclge_dev *hdev)
2916 {
2917         struct hclge_vport *vport;
2918         int ret;
2919         u16 i;
2920
2921         for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2922                 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2923
2924                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2925                     vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2926                         continue;
2927
2928                 ret = hclge_push_vf_link_status(vport);
2929                 if (ret) {
2930                         dev_err(&hdev->pdev->dev,
2931                                 "failed to push link status to vf%u, ret = %d\n",
2932                                 i, ret);
2933                 }
2934         }
2935 }
2936
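/* Re-read the MAC/PHY link state and, when it changed, propagate the new
 * state to the NIC client, to the RoCE client (if registered) and to the
 * VFs.  The HCLGE_STATE_LINK_UPDATING bit keeps concurrent callers from
 * racing here.
 */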
2937 static void hclge_update_link_status(struct hclge_dev *hdev)
2938 {
2939         struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2940         struct hnae3_handle *handle = &hdev->vport[0].nic;
2941         struct hnae3_client *rclient = hdev->roce_client;
2942         struct hnae3_client *client = hdev->nic_client;
2943         int state;
2944         int ret;
2945
2946         if (!client)
2947                 return;
2948
2949         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2950                 return;
2951
2952         ret = hclge_get_mac_phy_link(hdev, &state);
2953         if (ret) {
2954                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2955                 return;
2956         }
2957
2958         if (state != hdev->hw.mac.link) {
2959                 hdev->hw.mac.link = state;
2960                 client->ops->link_status_change(handle, state);
2961                 hclge_config_mac_tnl_int(hdev, state);
2962                 if (rclient && rclient->ops->link_status_change)
2963                         rclient->ops->link_status_change(rhandle, state);
2964
2965                 hclge_push_link_status(hdev);
2966         }
2967
2968         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2969 }
2970
2971 static void hclge_update_port_capability(struct hclge_dev *hdev,
2972                                          struct hclge_mac *mac)
2973 {
2974         if (hnae3_dev_fec_supported(hdev))
2975                 /* update fec ability by speed */
2976                 hclge_convert_setting_fec(mac);
2977
2978         /* firmware cannot identify backplane type, the media type
2979          * read from configuration can help deal with it
2980          */
2981         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2982             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2983                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2984         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2985                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2986
2987         if (mac->support_autoneg) {
2988                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2989                 linkmode_copy(mac->advertising, mac->supported);
2990         } else {
2991                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2992                                    mac->supported);
2993                 linkmode_zero(mac->advertising);
2994         }
2995 }
2996
2997 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2998 {
2999         struct hclge_sfp_info_cmd *resp;
3000         struct hclge_desc desc;
3001         int ret;
3002
3003         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3004         resp = (struct hclge_sfp_info_cmd *)desc.data;
3005         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3006         if (ret == -EOPNOTSUPP) {
3007                 dev_warn(&hdev->pdev->dev,
3008                          "IMP does not support getting SFP speed %d\n", ret);
3009                 return ret;
3010         } else if (ret) {
3011                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3012                 return ret;
3013         }
3014
3015         *speed = le32_to_cpu(resp->speed);
3016
3017         return 0;
3018 }
3019
3020 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3021 {
3022         struct hclge_sfp_info_cmd *resp;
3023         struct hclge_desc desc;
3024         int ret;
3025
3026         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3027         resp = (struct hclge_sfp_info_cmd *)desc.data;
3028
3029         resp->query_type = QUERY_ACTIVE_SPEED;
3030
3031         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3032         if (ret == -EOPNOTSUPP) {
3033                 dev_warn(&hdev->pdev->dev,
3034                          "IMP does not support getting SFP info %d\n", ret);
3035                 return ret;
3036         } else if (ret) {
3037                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3038                 return ret;
3039         }
3040
3041         /* In some cases, the MAC speed got from IMP may be 0; it shouldn't
3042          * be assigned to mac->speed.
3043          */
3044         if (!le32_to_cpu(resp->speed))
3045                 return 0;
3046
3047         mac->speed = le32_to_cpu(resp->speed);
3048         /* if resp->speed_ability is 0, the firmware is an old version,
3049          * so do not update these params
3050          */
3051         if (resp->speed_ability) {
3052                 mac->module_type = le32_to_cpu(resp->module_type);
3053                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3054                 mac->autoneg = resp->autoneg;
3055                 mac->support_autoneg = resp->autoneg_ability;
3056                 mac->speed_type = QUERY_ACTIVE_SPEED;
3057                 if (!resp->active_fec)
3058                         mac->fec_mode = 0;
3059                 else
3060                         mac->fec_mode = BIT(resp->active_fec);
3061         } else {
3062                 mac->speed_type = QUERY_SFP_SPEED;
3063         }
3064
3065         return 0;
3066 }
3067
3068 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3069                                         struct ethtool_link_ksettings *cmd)
3070 {
3071         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3072         struct hclge_vport *vport = hclge_get_vport(handle);
3073         struct hclge_phy_link_ksetting_0_cmd *req0;
3074         struct hclge_phy_link_ksetting_1_cmd *req1;
3075         u32 supported, advertising, lp_advertising;
3076         struct hclge_dev *hdev = vport->back;
3077         int ret;
3078
3079         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3080                                    true);
3081         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3082         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3083                                    true);
3084
3085         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3086         if (ret) {
3087                 dev_err(&hdev->pdev->dev,
3088                         "failed to get phy link ksetting, ret = %d.\n", ret);
3089                 return ret;
3090         }
3091
3092         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3093         cmd->base.autoneg = req0->autoneg;
3094         cmd->base.speed = le32_to_cpu(req0->speed);
3095         cmd->base.duplex = req0->duplex;
3096         cmd->base.port = req0->port;
3097         cmd->base.transceiver = req0->transceiver;
3098         cmd->base.phy_address = req0->phy_address;
3099         cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3100         cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3101         supported = le32_to_cpu(req0->supported);
3102         advertising = le32_to_cpu(req0->advertising);
3103         lp_advertising = le32_to_cpu(req0->lp_advertising);
3104         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3105                                                 supported);
3106         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3107                                                 advertising);
3108         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3109                                                 lp_advertising);
3110
3111         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3112         cmd->base.master_slave_cfg = req1->master_slave_cfg;
3113         cmd->base.master_slave_state = req1->master_slave_state;
3114
3115         return 0;
3116 }
3117
3118 static int
3119 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3120                              const struct ethtool_link_ksettings *cmd)
3121 {
3122         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3123         struct hclge_vport *vport = hclge_get_vport(handle);
3124         struct hclge_phy_link_ksetting_0_cmd *req0;
3125         struct hclge_phy_link_ksetting_1_cmd *req1;
3126         struct hclge_dev *hdev = vport->back;
3127         u32 advertising;
3128         int ret;
3129
3130         if (cmd->base.autoneg == AUTONEG_DISABLE &&
3131             ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3132              (cmd->base.duplex != DUPLEX_HALF &&
3133               cmd->base.duplex != DUPLEX_FULL)))
3134                 return -EINVAL;
3135
3136         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3137                                    false);
3138         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3139         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3140                                    false);
3141
3142         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3143         req0->autoneg = cmd->base.autoneg;
3144         req0->speed = cpu_to_le32(cmd->base.speed);
3145         req0->duplex = cmd->base.duplex;
3146         ethtool_convert_link_mode_to_legacy_u32(&advertising,
3147                                                 cmd->link_modes.advertising);
3148         req0->advertising = cpu_to_le32(advertising);
3149         req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3150
3151         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3152         req1->master_slave_cfg = cmd->base.master_slave_cfg;
3153
3154         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3155         if (ret) {
3156                 dev_err(&hdev->pdev->dev,
3157                         "failed to set phy link ksettings, ret = %d.\n", ret);
3158                 return ret;
3159         }
3160
3161         hdev->hw.mac.autoneg = cmd->base.autoneg;
3162         hdev->hw.mac.speed = cmd->base.speed;
3163         hdev->hw.mac.duplex = cmd->base.duplex;
3164         linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3165
3166         return 0;
3167 }
3168
3169 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3170 {
3171         struct ethtool_link_ksettings cmd;
3172         int ret;
3173
3174         if (!hnae3_dev_phy_imp_supported(hdev))
3175                 return 0;
3176
3177         ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3178         if (ret)
3179                 return ret;
3180
3181         hdev->hw.mac.autoneg = cmd.base.autoneg;
3182         hdev->hw.mac.speed = cmd.base.speed;
3183         hdev->hw.mac.duplex = cmd.base.duplex;
3184
3185         return 0;
3186 }
3187
3188 static int hclge_tp_port_init(struct hclge_dev *hdev)
3189 {
3190         struct ethtool_link_ksettings cmd;
3191
3192         if (!hnae3_dev_phy_imp_supported(hdev))
3193                 return 0;
3194
3195         cmd.base.autoneg = hdev->hw.mac.autoneg;
3196         cmd.base.speed = hdev->hw.mac.speed;
3197         cmd.base.duplex = hdev->hw.mac.duplex;
3198         linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3199
3200         return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3201 }
3202
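/* Refresh port information: copper ports are refreshed via
 * hclge_update_tp_port_info(), other media through the SFP commands.  If the
 * firmware returns -EOPNOTSUPP, further SFP queries are disabled and the
 * cached MAC configuration is left unchanged.
 */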
3203 static int hclge_update_port_info(struct hclge_dev *hdev)
3204 {
3205         struct hclge_mac *mac = &hdev->hw.mac;
3206         int speed = HCLGE_MAC_SPEED_UNKNOWN;
3207         int ret;
3208
3209         /* get the port info from SFP cmd if not copper port */
3210         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3211                 return hclge_update_tp_port_info(hdev);
3212
3213         /* if IMP does not support getting SFP/qSFP info, return directly */
3214         if (!hdev->support_sfp_query)
3215                 return 0;
3216
3217         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3218                 ret = hclge_get_sfp_info(hdev, mac);
3219         else
3220                 ret = hclge_get_sfp_speed(hdev, &speed);
3221
3222         if (ret == -EOPNOTSUPP) {
3223                 hdev->support_sfp_query = false;
3224                 return ret;
3225         } else if (ret) {
3226                 return ret;
3227         }
3228
3229         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3230                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3231                         hclge_update_port_capability(hdev, mac);
3232                         return 0;
3233                 }
3234                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3235                                                HCLGE_MAC_FULL);
3236         } else {
3237                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3238                         return 0; /* do nothing if no SFP */
3239
3240                 /* must configure full duplex for SFP */
3241                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3242         }
3243 }
3244
3245 static int hclge_get_status(struct hnae3_handle *handle)
3246 {
3247         struct hclge_vport *vport = hclge_get_vport(handle);
3248         struct hclge_dev *hdev = vport->back;
3249
3250         hclge_update_link_status(hdev);
3251
3252         return hdev->hw.mac.link;
3253 }
3254
3255 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3256 {
3257         if (!pci_num_vf(hdev->pdev)) {
3258                 dev_err(&hdev->pdev->dev,
3259                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3260                 return NULL;
3261         }
3262
3263         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3264                 dev_err(&hdev->pdev->dev,
3265                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3266                         vf, pci_num_vf(hdev->pdev));
3267                 return NULL;
3268         }
3269
3270         /* VFs start from 1 in the vport array */
3271         vf += HCLGE_VF_VPORT_START_NUM;
3272         return &hdev->vport[vf];
3273 }
3274
3275 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3276                                struct ifla_vf_info *ivf)
3277 {
3278         struct hclge_vport *vport = hclge_get_vport(handle);
3279         struct hclge_dev *hdev = vport->back;
3280
3281         vport = hclge_get_vf_vport(hdev, vf);
3282         if (!vport)
3283                 return -EINVAL;
3284
3285         ivf->vf = vf;
3286         ivf->linkstate = vport->vf_info.link_state;
3287         ivf->spoofchk = vport->vf_info.spoofchk;
3288         ivf->trusted = vport->vf_info.trusted;
3289         ivf->min_tx_rate = 0;
3290         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3291         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3292         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3293         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3294         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3295
3296         return 0;
3297 }
3298
3299 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3300                                    int link_state)
3301 {
3302         struct hclge_vport *vport = hclge_get_vport(handle);
3303         struct hclge_dev *hdev = vport->back;
3304         int link_state_old;
3305         int ret;
3306
3307         vport = hclge_get_vf_vport(hdev, vf);
3308         if (!vport)
3309                 return -EINVAL;
3310
3311         link_state_old = vport->vf_info.link_state;
3312         vport->vf_info.link_state = link_state;
3313
3314         ret = hclge_push_vf_link_status(vport);
3315         if (ret) {
3316                 vport->vf_info.link_state = link_state_old;
3317                 dev_err(&hdev->pdev->dev,
3318                         "failed to push vf%d link status, ret = %d\n", vf, ret);
3319         }
3320
3321         return ret;
3322 }
3323
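/* Decode the vector0 interrupt source registers into a single event type.
 * The order of the checks below also defines the priority: IMP/global reset
 * first, then MSI-X and RAS hardware errors, then PTP, and finally mailbox
 * (CMDQ RX) events.
 */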
3324 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3325 {
3326         u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3327
3328         /* fetch the events from their corresponding regs */
3329         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3330         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3331         hw_err_src_reg = hclge_read_dev(&hdev->hw,
3332                                         HCLGE_RAS_PF_OTHER_INT_STS_REG);
3333
3334         /* Assumption: if reset and mailbox events happen to be reported
3335          * together, we only process the reset event in this pass and defer
3336          * the mailbox events. Since the RX CMDQ event has not been cleared
3337          * this time, we will receive another interrupt from the hardware
3338          * just for the mailbox.
3339          *
3340          * check for vector0 reset event sources
3341          */
3342         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3343                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3344                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3345                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3346                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3347                 hdev->rst_stats.imp_rst_cnt++;
3348                 return HCLGE_VECTOR0_EVENT_RST;
3349         }
3350
3351         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3352                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3353                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3354                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3355                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3356                 hdev->rst_stats.global_rst_cnt++;
3357                 return HCLGE_VECTOR0_EVENT_RST;
3358         }
3359
3360         /* check for vector0 msix event and hardware error event source */
3361         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3362             hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3363                 return HCLGE_VECTOR0_EVENT_ERR;
3364
3365         /* check for vector0 ptp event source */
3366         if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3367                 *clearval = msix_src_reg;
3368                 return HCLGE_VECTOR0_EVENT_PTP;
3369         }
3370
3371         /* check for vector0 mailbox(=CMDQ RX) event source */
3372         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3373                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3374                 *clearval = cmdq_src_reg;
3375                 return HCLGE_VECTOR0_EVENT_MBX;
3376         }
3377
3378         /* print other vector0 event source */
3379         dev_info(&hdev->pdev->dev,
3380                  "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3381                  cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3382
3383         return HCLGE_VECTOR0_EVENT_OTHER;
3384 }
3385
3386 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3387                                     u32 regclr)
3388 {
3389         switch (event_type) {
3390         case HCLGE_VECTOR0_EVENT_PTP:
3391         case HCLGE_VECTOR0_EVENT_RST:
3392                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3393                 break;
3394         case HCLGE_VECTOR0_EVENT_MBX:
3395                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3396                 break;
3397         default:
3398                 break;
3399         }
3400 }
3401
3402 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3403 {
3404         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3405                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3406                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3407                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3408         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3409 }
3410
3411 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3412 {
3413         writel(enable ? 1 : 0, vector->addr);
3414 }
3415
3416 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3417 {
3418         struct hclge_dev *hdev = data;
3419         unsigned long flags;
3420         u32 clearval = 0;
3421         u32 event_cause;
3422
3423         hclge_enable_vector(&hdev->misc_vector, false);
3424         event_cause = hclge_check_event_cause(hdev, &clearval);
3425
3426         /* vector 0 interrupt is shared with reset and mailbox source events. */
3427         switch (event_cause) {
3428         case HCLGE_VECTOR0_EVENT_ERR:
3429                 hclge_errhand_task_schedule(hdev);
3430                 break;
3431         case HCLGE_VECTOR0_EVENT_RST:
3432                 hclge_reset_task_schedule(hdev);
3433                 break;
3434         case HCLGE_VECTOR0_EVENT_PTP:
3435                 spin_lock_irqsave(&hdev->ptp->lock, flags);
3436                 hclge_ptp_clean_tx_hwts(hdev);
3437                 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3438                 break;
3439         case HCLGE_VECTOR0_EVENT_MBX:
3440                 /* If we are here then,
3441                  * 1. Either we are not handling any mbx task and none is
3442                  *    scheduled either,
3443                  *                        OR
3444                  * 2. We could be handling an mbx task but nothing more is
3445                  *    scheduled.
3446                  * In both cases, we should schedule the mbx task as there
3447                  * are more mbx messages reported by this interrupt.
3448                  */
3449                 hclge_mbx_task_schedule(hdev);
3450                 break;
3451         default:
3452                 dev_warn(&hdev->pdev->dev,
3453                          "received unknown or unhandled event of vector0\n");
3454                 break;
3455         }
3456
3457         hclge_clear_event_cause(hdev, event_cause, clearval);
3458
3459         /* Enable interrupt if it is not caused by reset event or error event */
3460         if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3461             event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3462             event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3463                 hclge_enable_vector(&hdev->misc_vector, true);
3464
3465         return IRQ_HANDLED;
3466 }
3467
3468 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3469 {
3470         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3471                 dev_warn(&hdev->pdev->dev,
3472                          "vector(vector_id %d) has been freed.\n", vector_id);
3473                 return;
3474         }
3475
3476         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3477         hdev->num_msi_left += 1;
3478         hdev->num_msi_used -= 1;
3479 }
3480
3481 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3482 {
3483         struct hclge_misc_vector *vector = &hdev->misc_vector;
3484
3485         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3486
3487         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3488         hdev->vector_status[0] = 0;
3489
3490         hdev->num_msi_left -= 1;
3491         hdev->num_msi_used += 1;
3492 }
3493
3494 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3495                                       const cpumask_t *mask)
3496 {
3497         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3498                                               affinity_notify);
3499
3500         cpumask_copy(&hdev->affinity_mask, mask);
3501 }
3502
3503 static void hclge_irq_affinity_release(struct kref *ref)
3504 {
3505 }
3506
3507 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3508 {
3509         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3510                               &hdev->affinity_mask);
3511
3512         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3513         hdev->affinity_notify.release = hclge_irq_affinity_release;
3514         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3515                                   &hdev->affinity_notify);
3516 }
3517
3518 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3519 {
3520         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3521         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3522 }
3523
3524 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3525 {
3526         int ret;
3527
3528         hclge_get_misc_vector(hdev);
3529
3530         /* this would be explicitly freed in the end */
3531         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3532                  HCLGE_NAME, pci_name(hdev->pdev));
3533         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3534                           0, hdev->misc_vector.name, hdev);
3535         if (ret) {
3536                 hclge_free_vector(hdev, 0);
3537                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3538                         hdev->misc_vector.vector_irq);
3539         }
3540
3541         return ret;
3542 }
3543
3544 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3545 {
3546         free_irq(hdev->misc_vector.vector_irq, hdev);
3547         hclge_free_vector(hdev, 0);
3548 }
3549
3550 int hclge_notify_client(struct hclge_dev *hdev,
3551                         enum hnae3_reset_notify_type type)
3552 {
3553         struct hnae3_handle *handle = &hdev->vport[0].nic;
3554         struct hnae3_client *client = hdev->nic_client;
3555         int ret;
3556
3557         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3558                 return 0;
3559
3560         if (!client->ops->reset_notify)
3561                 return -EOPNOTSUPP;
3562
3563         ret = client->ops->reset_notify(handle, type);
3564         if (ret)
3565                 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3566                         type, ret);
3567
3568         return ret;
3569 }
3570
3571 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3572                                     enum hnae3_reset_notify_type type)
3573 {
3574         struct hnae3_handle *handle = &hdev->vport[0].roce;
3575         struct hnae3_client *client = hdev->roce_client;
3576         int ret;
3577
3578         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3579                 return 0;
3580
3581         if (!client->ops->reset_notify)
3582                 return -EOPNOTSUPP;
3583
3584         ret = client->ops->reset_notify(handle, type);
3585         if (ret)
3586                 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3587                         type, ret);
3588
3589         return ret;
3590 }
3591
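/* Poll the reset status register matching the current reset type until the
 * corresponding bit clears.  With HCLGE_RESET_WATI_MS = 100 and
 * HCLGE_RESET_WAIT_CNT = 350 the wait gives up after roughly 35 seconds.
 */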
3592 static int hclge_reset_wait(struct hclge_dev *hdev)
3593 {
3594 #define HCLGE_RESET_WATI_MS     100
3595 #define HCLGE_RESET_WAIT_CNT    350
3596
3597         u32 val, reg, reg_bit;
3598         u32 cnt = 0;
3599
3600         switch (hdev->reset_type) {
3601         case HNAE3_IMP_RESET:
3602                 reg = HCLGE_GLOBAL_RESET_REG;
3603                 reg_bit = HCLGE_IMP_RESET_BIT;
3604                 break;
3605         case HNAE3_GLOBAL_RESET:
3606                 reg = HCLGE_GLOBAL_RESET_REG;
3607                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3608                 break;
3609         case HNAE3_FUNC_RESET:
3610                 reg = HCLGE_FUN_RST_ING;
3611                 reg_bit = HCLGE_FUN_RST_ING_B;
3612                 break;
3613         default:
3614                 dev_err(&hdev->pdev->dev,
3615                         "Wait for unsupported reset type: %d\n",
3616                         hdev->reset_type);
3617                 return -EINVAL;
3618         }
3619
3620         val = hclge_read_dev(&hdev->hw, reg);
3621         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3622                 msleep(HCLGE_RESET_WATI_MS);
3623                 val = hclge_read_dev(&hdev->hw, reg);
3624                 cnt++;
3625         }
3626
3627         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3628                 dev_warn(&hdev->pdev->dev,
3629                          "Wait for reset timeout: %d\n", hdev->reset_type);
3630                 return -EBUSY;
3631         }
3632
3633         return 0;
3634 }
3635
3636 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3637 {
3638         struct hclge_vf_rst_cmd *req;
3639         struct hclge_desc desc;
3640
3641         req = (struct hclge_vf_rst_cmd *)desc.data;
3642         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3643         req->dest_vfid = func_id;
3644
3645         if (reset)
3646                 req->vf_rst = 0x1;
3647
3648         return hclge_cmd_send(&hdev->hw, &desc, 1);
3649 }
3650
3651 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3652 {
3653         int i;
3654
3655         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3656                 struct hclge_vport *vport = &hdev->vport[i];
3657                 int ret;
3658
3659                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3660                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3661                 if (ret) {
3662                         dev_err(&hdev->pdev->dev,
3663                                 "set vf(%u) rst failed %d!\n",
3664                                 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3665                                 ret);
3666                         return ret;
3667                 }
3668
3669                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3670                         continue;
3671
3672                 /* Inform VF to process the reset.
3673                  * hclge_inform_reset_assert_to_vf may fail if VF
3674                  * driver is not loaded.
3675                  */
3676                 ret = hclge_inform_reset_assert_to_vf(vport);
3677                 if (ret)
3678                         dev_warn(&hdev->pdev->dev,
3679                                  "inform reset to vf(%u) failed %d!\n",
3680                                  vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3681                                  ret);
3682         }
3683
3684         return 0;
3685 }
3686
3687 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3688 {
3689         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3690             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3691             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3692                 return;
3693
3694         hclge_mbx_handler(hdev);
3695
3696         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3697 }
3698
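/* Before asserting a PF or FLR reset, poll the firmware until every VF
 * reports that it has stopped IO.  Mailbox requests are still serviced
 * inside the loop because the VFs bring their netdevs down via mailbox
 * during this window.
 */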
3699 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3700 {
3701         struct hclge_pf_rst_sync_cmd *req;
3702         struct hclge_desc desc;
3703         int cnt = 0;
3704         int ret;
3705
3706         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3707         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3708
3709         do {
3710                 /* VFs need to bring down their netdevs by mbx during PF or FLR reset */
3711                 hclge_mailbox_service_task(hdev);
3712
3713                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3714                 /* for compatibility with old firmware, wait
3715                  * 100 ms for the VF to stop IO
3716                  */
3717                 if (ret == -EOPNOTSUPP) {
3718                         msleep(HCLGE_RESET_SYNC_TIME);
3719                         return;
3720                 } else if (ret) {
3721                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3722                                  ret);
3723                         return;
3724                 } else if (req->all_vf_ready) {
3725                         return;
3726                 }
3727                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3728                 hclge_cmd_reuse_desc(&desc, true);
3729         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3730
3731         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3732 }
3733
3734 void hclge_report_hw_error(struct hclge_dev *hdev,
3735                            enum hnae3_hw_error_type type)
3736 {
3737         struct hnae3_client *client = hdev->nic_client;
3738
3739         if (!client || !client->ops->process_hw_error ||
3740             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3741                 return;
3742
3743         client->ops->process_hw_error(&hdev->vport[0].nic, type);
3744 }
3745
3746 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3747 {
3748         u32 reg_val;
3749
3750         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3751         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3752                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3753                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3754                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3755         }
3756
3757         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3758                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3759                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3760                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3761         }
3762 }
3763
3764 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3765 {
3766         struct hclge_desc desc;
3767         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3768         int ret;
3769
3770         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3771         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3772         req->fun_reset_vfid = func_id;
3773
3774         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3775         if (ret)
3776                 dev_err(&hdev->pdev->dev,
3777                         "send function reset cmd fail, status =%d\n", ret);
3778
3779         return ret;
3780 }
3781
3782 static void hclge_do_reset(struct hclge_dev *hdev)
3783 {
3784         struct hnae3_handle *handle = &hdev->vport[0].nic;
3785         struct pci_dev *pdev = hdev->pdev;
3786         u32 val;
3787
3788         if (hclge_get_hw_reset_stat(handle)) {
3789                 dev_info(&pdev->dev, "hardware reset not finished\n");
3790                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3791                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3792                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3793                 return;
3794         }
3795
3796         switch (hdev->reset_type) {
3797         case HNAE3_IMP_RESET:
3798                 dev_info(&pdev->dev, "IMP reset requested\n");
3799                 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3800                 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3801                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3802                 break;
3803         case HNAE3_GLOBAL_RESET:
3804                 dev_info(&pdev->dev, "global reset requested\n");
3805                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3806                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3807                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3808                 break;
3809         case HNAE3_FUNC_RESET:
3810                 dev_info(&pdev->dev, "PF reset requested\n");
3811                 /* schedule again to check later */
3812                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3813                 hclge_reset_task_schedule(hdev);
3814                 break;
3815         default:
3816                 dev_warn(&pdev->dev,
3817                          "unsupported reset type: %d\n", hdev->reset_type);
3818                 break;
3819         }
3820 }
3821
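/* Pick the highest-priority reset level pending in @addr and clear the bits
 * it consumes or supersedes; the priority order is IMP > global > func >
 * FLR.  If a higher-level reset is already being handled (hdev->reset_type),
 * the lower-level request is dropped and HNAE3_NONE_RESET is returned.
 */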
3822 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3823                                                    unsigned long *addr)
3824 {
3825         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3826         struct hclge_dev *hdev = ae_dev->priv;
3827
3828         /* return the highest priority reset level amongst all */
3829         if (test_bit(HNAE3_IMP_RESET, addr)) {
3830                 rst_level = HNAE3_IMP_RESET;
3831                 clear_bit(HNAE3_IMP_RESET, addr);
3832                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3833                 clear_bit(HNAE3_FUNC_RESET, addr);
3834         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3835                 rst_level = HNAE3_GLOBAL_RESET;
3836                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3837                 clear_bit(HNAE3_FUNC_RESET, addr);
3838         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3839                 rst_level = HNAE3_FUNC_RESET;
3840                 clear_bit(HNAE3_FUNC_RESET, addr);
3841         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3842                 rst_level = HNAE3_FLR_RESET;
3843                 clear_bit(HNAE3_FLR_RESET, addr);
3844         }
3845
3846         if (hdev->reset_type != HNAE3_NONE_RESET &&
3847             rst_level < hdev->reset_type)
3848                 return HNAE3_NONE_RESET;
3849
3850         return rst_level;
3851 }
3852
3853 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3854 {
3855         u32 clearval = 0;
3856
3857         switch (hdev->reset_type) {
3858         case HNAE3_IMP_RESET:
3859                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3860                 break;
3861         case HNAE3_GLOBAL_RESET:
3862                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3863                 break;
3864         default:
3865                 break;
3866         }
3867
3868         if (!clearval)
3869                 return;
3870
3871         /* For revision 0x20, the reset interrupt source
3872          * can only be cleared after the hardware reset is done
3873          */
3874         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3875                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3876                                 clearval);
3877
3878         hclge_enable_vector(&hdev->misc_vector, true);
3879 }
3880
3881 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3882 {
3883         u32 reg_val;
3884
3885         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3886         if (enable)
3887                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3888         else
3889                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3890
3891         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3892 }
3893
3894 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3895 {
3896         int ret;
3897
3898         ret = hclge_set_all_vf_rst(hdev, true);
3899         if (ret)
3900                 return ret;
3901
3902         hclge_func_reset_sync_vf(hdev);
3903
3904         return 0;
3905 }
3906
3907 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3908 {
3909         u32 reg_val;
3910         int ret = 0;
3911
3912         switch (hdev->reset_type) {
3913         case HNAE3_FUNC_RESET:
3914                 ret = hclge_func_reset_notify_vf(hdev);
3915                 if (ret)
3916                         return ret;
3917
3918                 ret = hclge_func_reset_cmd(hdev, 0);
3919                 if (ret) {
3920                         dev_err(&hdev->pdev->dev,
3921                                 "asserting function reset fail %d!\n", ret);
3922                         return ret;
3923                 }
3924
3925                 /* After performing PF reset, it is not necessary to do the
3926                  * mailbox handling or send any command to firmware, because
3927                  * any mailbox handling or command to firmware is only valid
3928                  * after hclge_cmd_init is called.
3929                  */
3930                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3931                 hdev->rst_stats.pf_rst_cnt++;
3932                 break;
3933         case HNAE3_FLR_RESET:
3934                 ret = hclge_func_reset_notify_vf(hdev);
3935                 if (ret)
3936                         return ret;
3937                 break;
3938         case HNAE3_IMP_RESET:
3939                 hclge_handle_imp_error(hdev);
3940                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3941                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3942                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3943                 break;
3944         default:
3945                 break;
3946         }
3947
3948         /* inform hardware that preparatory work is done */
3949         msleep(HCLGE_RESET_SYNC_TIME);
3950         hclge_reset_handshake(hdev, true);
3951         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3952
3953         return ret;
3954 }
3955
3956 static void hclge_show_rst_info(struct hclge_dev *hdev)
3957 {
3958         char *buf;
3959
3960         buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3961         if (!buf)
3962                 return;
3963
3964         hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3965
3966         dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3967
3968         kfree(buf);
3969 }
3970
3971 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3972 {
3973 #define MAX_RESET_FAIL_CNT 5
3974
3975         if (hdev->reset_pending) {
3976                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3977                          hdev->reset_pending);
3978                 return true;
3979         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3980                    HCLGE_RESET_INT_M) {
3981                 dev_info(&hdev->pdev->dev,
3982                          "reset failed because of new reset interrupt\n");
3983                 hclge_clear_reset_cause(hdev);
3984                 return false;
3985         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3986                 hdev->rst_stats.reset_fail_cnt++;
3987                 set_bit(hdev->reset_type, &hdev->reset_pending);
3988                 dev_info(&hdev->pdev->dev,
3989                          "re-schedule reset task(%u)\n",
3990                          hdev->rst_stats.reset_fail_cnt);
3991                 return true;
3992         }
3993
3994         hclge_clear_reset_cause(hdev);
3995
3996         /* recover the handshake status when reset fails */
3997         hclge_reset_handshake(hdev, true);
3998
3999         dev_err(&hdev->pdev->dev, "Reset fail!\n");
4000
4001         hclge_show_rst_info(hdev);
4002
4003         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4004
4005         return false;
4006 }
4007
4008 static void hclge_update_reset_level(struct hclge_dev *hdev)
4009 {
4010         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4011         enum hnae3_reset_type reset_level;
4012
4013         /* the reset request will not be set during reset, so clear the
4014          * pending reset request to avoid an unnecessary reset being
4015          * caused by the same reason.
4016          */
4017         hclge_get_reset_level(ae_dev, &hdev->reset_request);
4018
4019         /* if default_reset_request has a higher level reset request,
4020          * it should be handled as soon as possible, since some errors
4021          * need this kind of reset to be fixed.
4022          */
4023         reset_level = hclge_get_reset_level(ae_dev,
4024                                             &hdev->default_reset_request);
4025         if (reset_level != HNAE3_NONE_RESET)
4026                 set_bit(reset_level, &hdev->reset_request);
4027 }
4028
4029 static int hclge_set_rst_done(struct hclge_dev *hdev)
4030 {
4031         struct hclge_pf_rst_done_cmd *req;
4032         struct hclge_desc desc;
4033         int ret;
4034
4035         req = (struct hclge_pf_rst_done_cmd *)desc.data;
4036         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4037         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4038
4039         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4040         /* To be compatible with the old firmware, which does not support
4041          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4042          * return success
4043          */
4044         if (ret == -EOPNOTSUPP) {
4045                 dev_warn(&hdev->pdev->dev,
4046                          "current firmware does not support command(0x%x)!\n",
4047                          HCLGE_OPC_PF_RST_DONE);
4048                 return 0;
4049         } else if (ret) {
4050                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4051                         ret);
4052         }
4053
4054         return ret;
4055 }
4056
4057 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4058 {
4059         int ret = 0;
4060
4061         switch (hdev->reset_type) {
4062         case HNAE3_FUNC_RESET:
4063         case HNAE3_FLR_RESET:
4064                 ret = hclge_set_all_vf_rst(hdev, false);
4065                 break;
4066         case HNAE3_GLOBAL_RESET:
4067         case HNAE3_IMP_RESET:
4068                 ret = hclge_set_rst_done(hdev);
4069                 break;
4070         default:
4071                 break;
4072         }
4073
4074         /* clear up the handshake status after re-initialize done */
4075         hclge_reset_handshake(hdev, false);
4076
4077         return ret;
4078 }
4079
4080 static int hclge_reset_stack(struct hclge_dev *hdev)
4081 {
4082         int ret;
4083
4084         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4085         if (ret)
4086                 return ret;
4087
4088         ret = hclge_reset_ae_dev(hdev->ae_dev);
4089         if (ret)
4090                 return ret;
4091
4092         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4093 }
4094
4095 static int hclge_reset_prepare(struct hclge_dev *hdev)
4096 {
4097         int ret;
4098
4099         hdev->rst_stats.reset_cnt++;
4100         /* perform reset of the stack & ae device for a client */
4101         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4102         if (ret)
4103                 return ret;
4104
4105         rtnl_lock();
4106         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4107         rtnl_unlock();
4108         if (ret)
4109                 return ret;
4110
4111         return hclge_reset_prepare_wait(hdev);
4112 }
4113
4114 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4115 {
4116         int ret;
4117
4118         hdev->rst_stats.hw_reset_done_cnt++;
4119
4120         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4121         if (ret)
4122                 return ret;
4123
4124         rtnl_lock();
4125         ret = hclge_reset_stack(hdev);
4126         rtnl_unlock();
4127         if (ret)
4128                 return ret;
4129
4130         hclge_clear_reset_cause(hdev);
4131
4132         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4133         /* ignore the RoCE notify error if reset has already failed
4134          * HCLGE_RESET_MAX_FAIL_CNT - 1 times
4135          */
4136         if (ret &&
4137             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4138                 return ret;
4139
4140         ret = hclge_reset_prepare_up(hdev);
4141         if (ret)
4142                 return ret;
4143
4144         rtnl_lock();
4145         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4146         rtnl_unlock();
4147         if (ret)
4148                 return ret;
4149
4150         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4151         if (ret)
4152                 return ret;
4153
4154         hdev->last_reset_time = jiffies;
4155         hdev->rst_stats.reset_fail_cnt = 0;
4156         hdev->rst_stats.reset_done_cnt++;
4157         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4158
4159         hclge_update_reset_level(hdev);
4160
4161         return 0;
4162 }
4163
4164 static void hclge_reset(struct hclge_dev *hdev)
4165 {
4166         if (hclge_reset_prepare(hdev))
4167                 goto err_reset;
4168
4169         if (hclge_reset_wait(hdev))
4170                 goto err_reset;
4171
4172         if (hclge_reset_rebuild(hdev))
4173                 goto err_reset;
4174
4175         return;
4176
4177 err_reset:
4178         if (hclge_reset_err_handle(hdev))
4179                 hclge_reset_task_schedule(hdev);
4180 }
4181
4182 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4183 {
4184         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4185         struct hclge_dev *hdev = ae_dev->priv;
4186
4187         /* We might end up getting called broadly because of the 2 cases below:
4188          * 1. A recoverable error was conveyed through APEI and the only way to
4189          *    restore normal operation is to reset.
4190          * 2. A new reset request from the stack due to timeout
4191          *
4192          * Check if this is a new reset request and we are not here just because
4193          * the last reset attempt did not succeed and the watchdog hit us again.
4194          * We will know this if the last reset request did not occur very
4195          * recently (watchdog timer = 5*HZ, so check after a sufficiently large
4196          * time, say 4*5*HZ). In case of a new request we reset the "reset level"
4197          * to PF reset. And if it is a repeat of the most recent reset request,
4198          * we want to make sure we throttle it, so it is not allowed again before
4199          * HCLGE_RESET_INTERVAL has elapsed.
4200          */
4201
4202         if (time_before(jiffies, (hdev->last_reset_time +
4203                                   HCLGE_RESET_INTERVAL))) {
4204                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4205                 return;
4206         }
4207
4208         if (hdev->default_reset_request) {
4209                 hdev->reset_level =
4210                         hclge_get_reset_level(ae_dev,
4211                                               &hdev->default_reset_request);
4212         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4213                 hdev->reset_level = HNAE3_FUNC_RESET;
4214         }
4215
4216         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4217                  hdev->reset_level);
4218
4219         /* request reset & schedule reset task */
4220         set_bit(hdev->reset_level, &hdev->reset_request);
4221         hclge_reset_task_schedule(hdev);
4222
4223         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4224                 hdev->reset_level++;
4225 }
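
/* Example of the request handling above: a stack-triggered timeout arriving
 * more than 4 * 5 * HZ after the previous reset starts over at
 * HNAE3_FUNC_RESET, while a repeat request within HCLGE_RESET_INTERVAL only
 * re-arms hdev->reset_timer. Each request that is actually scheduled bumps
 * reset_level (up to HNAE3_GLOBAL_RESET), so repeated attempts escalate the
 * next reset to a deeper level.
 */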
4226
4227 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4228                                         enum hnae3_reset_type rst_type)
4229 {
4230         struct hclge_dev *hdev = ae_dev->priv;
4231
4232         set_bit(rst_type, &hdev->default_reset_request);
4233 }
4234
4235 static void hclge_reset_timer(struct timer_list *t)
4236 {
4237         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4238
4239         /* if default_reset_request has no value, it means that this reset
4240          * request has already been handled, so just return here
4241          */
4242         if (!hdev->default_reset_request)
4243                 return;
4244
4245         dev_info(&hdev->pdev->dev,
4246                  "triggering reset in reset timer\n");
4247         hclge_reset_event(hdev->pdev, NULL);
4248 }
4249
4250 static void hclge_reset_subtask(struct hclge_dev *hdev)
4251 {
4252         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4253
4254         /* Check if there is any ongoing reset in the hardware. This status can
4255          * be checked from reset_pending. If there is, we need to wait for the
4256          * hardware to complete the reset.
4257          *    a. If we are able to figure out in a reasonable time that the
4258          *       hardware has fully reset, we can proceed with the driver and
4259          *       client reset.
4260          *    b. Otherwise, we can come back later to check this status, so
4261          *       re-schedule now.
4262          */
4263         hdev->last_reset_time = jiffies;
4264         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4265         if (hdev->reset_type != HNAE3_NONE_RESET)
4266                 hclge_reset(hdev);
4267
4268         /* check if we got any *new* reset requests to be honored */
4269         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4270         if (hdev->reset_type != HNAE3_NONE_RESET)
4271                 hclge_do_reset(hdev);
4272
4273         hdev->reset_type = HNAE3_NONE_RESET;
4274 }
4275
4276 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4277 {
4278         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4279         enum hnae3_reset_type reset_type;
4280
4281         if (ae_dev->hw_err_reset_req) {
4282                 reset_type = hclge_get_reset_level(ae_dev,
4283                                                    &ae_dev->hw_err_reset_req);
4284                 hclge_set_def_reset_request(ae_dev, reset_type);
4285         }
4286
4287         if (hdev->default_reset_request && ae_dev->ops->reset_event)
4288                 ae_dev->ops->reset_event(hdev->pdev, NULL);
4289
4290         /* enable interrupt after error handling complete */
4291         hclge_enable_vector(&hdev->misc_vector, true);
4292 }
4293
4294 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4295 {
4296         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4297
4298         ae_dev->hw_err_reset_req = 0;
4299
4300         if (hclge_find_error_source(hdev)) {
4301                 hclge_handle_error_info_log(ae_dev);
4302                 hclge_handle_mac_tnl(hdev);
4303         }
4304
4305         hclge_handle_err_reset_request(hdev);
4306 }
4307
4308 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4309 {
4310         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4311         struct device *dev = &hdev->pdev->dev;
4312         u32 msix_sts_reg;
4313
4314         msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4315         if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4316                 if (hclge_handle_hw_msix_error
4317                                 (hdev, &hdev->default_reset_request))
4318                         dev_info(dev, "received msix interrupt 0x%x\n",
4319                                  msix_sts_reg);
4320         }
4321
4322         hclge_handle_hw_ras_error(ae_dev);
4323
4324         hclge_handle_err_reset_request(hdev);
4325 }
4326
4327 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4328 {
4329         if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4330                 return;
4331
4332         if (hnae3_dev_ras_imp_supported(hdev))
4333                 hclge_handle_err_recovery(hdev);
4334         else
4335                 hclge_misc_err_recovery(hdev);
4336 }
4337
4338 static void hclge_reset_service_task(struct hclge_dev *hdev)
4339 {
4340         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4341                 return;
4342
4343         down(&hdev->reset_sem);
4344         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4345
4346         hclge_reset_subtask(hdev);
4347
4348         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4349         up(&hdev->reset_sem);
4350 }
4351
4352 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4353 {
4354         int i;
4355
4356         /* start from vport 1 because the PF (vport 0) is always alive */
4357         for (i = 1; i < hdev->num_alloc_vport; i++) {
4358                 struct hclge_vport *vport = &hdev->vport[i];
4359
4360                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4361                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4362
4363                 /* If the VF is not alive, reset its mps to the default value */
4364                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4365                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4366         }
4367 }
4368
4369 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4370 {
4371         unsigned long delta = round_jiffies_relative(HZ);
4372
4373         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4374                 return;
4375
4376         /* Always handle the link status update to make sure the link state is
4377          * refreshed when it is triggered by mbx.
4378          */
4379         hclge_update_link_status(hdev);
4380         hclge_sync_mac_table(hdev);
4381         hclge_sync_promisc_mode(hdev);
4382         hclge_sync_fd_table(hdev);
4383
4384         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4385                 delta = jiffies - hdev->last_serv_processed;
4386
4387                 if (delta < round_jiffies_relative(HZ)) {
4388                         delta = round_jiffies_relative(HZ) - delta;
4389                         goto out;
4390                 }
4391         }
4392
4393         hdev->serv_processed_cnt++;
4394         hclge_update_vport_alive(hdev);
4395
4396         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4397                 hdev->last_serv_processed = jiffies;
4398                 goto out;
4399         }
4400
4401         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4402                 hclge_update_stats_for_all(hdev);
4403
4404         hclge_update_port_info(hdev);
4405         hclge_sync_vlan_filter(hdev);
4406
4407         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4408                 hclge_rfs_filter_expire(hdev);
4409
4410         hdev->last_serv_processed = jiffies;
4411
4412 out:
4413         hclge_task_schedule(hdev, delta);
4414 }
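
/* Example of the delta handling above: if the previous pass completed less
 * than HZ ago (say 300 ms), delta becomes the remaining ~700 ms and the
 * function reschedules and returns early, so the work below that check runs
 * at most about once per second, while the link/mac/promisc/fd sync above it
 * still runs on every invocation.
 */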
4415
4416 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4417 {
4418         unsigned long flags;
4419
4420         if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4421             !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4422             !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4423                 return;
4424
4425         /* to prevent racing with the irq handler */
4426         spin_lock_irqsave(&hdev->ptp->lock, flags);
4427
4428         /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4429          * handler may handle it just before spin_lock_irqsave().
4430          */
4431         if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4432                 hclge_ptp_clean_tx_hwts(hdev);
4433
4434         spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4435 }
4436
4437 static void hclge_service_task(struct work_struct *work)
4438 {
4439         struct hclge_dev *hdev =
4440                 container_of(work, struct hclge_dev, service_task.work);
4441
4442         hclge_errhand_service_task(hdev);
4443         hclge_reset_service_task(hdev);
4444         hclge_ptp_service_task(hdev);
4445         hclge_mailbox_service_task(hdev);
4446         hclge_periodic_service_task(hdev);
4447
4448         /* Handle error recovery, reset and mbx again in case the periodic task
4449          * delays the handling by calling hclge_task_schedule() in
4450          * hclge_periodic_service_task().
4451          */
4452         hclge_errhand_service_task(hdev);
4453         hclge_reset_service_task(hdev);
4454         hclge_mailbox_service_task(hdev);
4455 }
4456
4457 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4458 {
4459         /* VF handle has no client */
4460         if (!handle->client)
4461                 return container_of(handle, struct hclge_vport, nic);
4462         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4463                 return container_of(handle, struct hclge_vport, roce);
4464         else
4465                 return container_of(handle, struct hclge_vport, nic);
4466 }
4467
4468 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4469                                   struct hnae3_vector_info *vector_info)
4470 {
4471 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4472
4473         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4474
4475         /* need an extended offset to configure vectors >= 64 */
4476         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4477                 vector_info->io_addr = hdev->hw.io_base +
4478                                 HCLGE_VECTOR_REG_BASE +
4479                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4480         else
4481                 vector_info->io_addr = hdev->hw.io_base +
4482                                 HCLGE_VECTOR_EXT_REG_BASE +
4483                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4484                                 HCLGE_VECTOR_REG_OFFSET_H +
4485                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4486                                 HCLGE_VECTOR_REG_OFFSET;
4487
4488         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4489         hdev->vector_irq[idx] = vector_info->vector;
4490 }
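
/* Example of the io_addr calculation above: for idx == 70, (idx - 1) == 69 is
 * not below HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 (64), so the extended register
 * space is used and io_addr = io_base + HCLGE_VECTOR_EXT_REG_BASE +
 * 1 * HCLGE_VECTOR_REG_OFFSET_H + 5 * HCLGE_VECTOR_REG_OFFSET, i.e. group 1,
 * slot 5; idx values 1..64 fall into the first branch at
 * io_base + HCLGE_VECTOR_REG_BASE + (idx - 1) * HCLGE_VECTOR_REG_OFFSET.
 */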
4491
4492 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4493                             struct hnae3_vector_info *vector_info)
4494 {
4495         struct hclge_vport *vport = hclge_get_vport(handle);
4496         struct hnae3_vector_info *vector = vector_info;
4497         struct hclge_dev *hdev = vport->back;
4498         int alloc = 0;
4499         u16 i = 0;
4500         u16 j;
4501
4502         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4503         vector_num = min(hdev->num_msi_left, vector_num);
4504
4505         for (j = 0; j < vector_num; j++) {
4506                 while (++i < hdev->num_nic_msi) {
4507                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4508                                 hclge_get_vector_info(hdev, i, vector);
4509                                 vector++;
4510                                 alloc++;
4511
4512                                 break;
4513                         }
4514                 }
4515         }
4516         hdev->num_msi_left -= alloc;
4517         hdev->num_msi_used += alloc;
4518
4519         return alloc;
4520 }
4521
4522 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4523 {
4524         int i;
4525
4526         for (i = 0; i < hdev->num_msi; i++)
4527                 if (vector == hdev->vector_irq[i])
4528                         return i;
4529
4530         return -EINVAL;
4531 }
4532
4533 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4534 {
4535         struct hclge_vport *vport = hclge_get_vport(handle);
4536         struct hclge_dev *hdev = vport->back;
4537         int vector_id;
4538
4539         vector_id = hclge_get_vector_index(hdev, vector);
4540         if (vector_id < 0) {
4541                 dev_err(&hdev->pdev->dev,
4542                         "Get vector index fail. vector = %d\n", vector);
4543                 return vector_id;
4544         }
4545
4546         hclge_free_vector(hdev, vector_id);
4547
4548         return 0;
4549 }
4550
4551 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4552 {
4553         return HCLGE_RSS_KEY_SIZE;
4554 }
4555
4556 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4557                                   const u8 hfunc, const u8 *key)
4558 {
4559         struct hclge_rss_config_cmd *req;
4560         unsigned int key_offset = 0;
4561         struct hclge_desc desc;
4562         int key_counts;
4563         int key_size;
4564         int ret;
4565
4566         key_counts = HCLGE_RSS_KEY_SIZE;
4567         req = (struct hclge_rss_config_cmd *)desc.data;
4568
4569         while (key_counts) {
4570                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4571                                            false);
4572
4573                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4574                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4575
4576                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4577                 memcpy(req->hash_key,
4578                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4579
4580                 key_counts -= key_size;
4581                 key_offset++;
4582                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4583                 if (ret) {
4584                         dev_err(&hdev->pdev->dev,
4585                                 "Configure RSS config fail, status = %d\n",
4586                                 ret);
4587                         return ret;
4588                 }
4589         }
4590         return 0;
4591 }
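
/* Sketch of how the key is split above: the hash key is sent
 * HCLGE_RSS_HASH_KEY_NUM bytes per descriptor, with key_offset recorded in
 * hash_config so the firmware knows which chunk it is receiving. If, for
 * instance, HCLGE_RSS_KEY_SIZE is 40 and HCLGE_RSS_HASH_KEY_NUM is 16 (see
 * their definitions for the actual values), three commands are sent carrying
 * 16, 16 and 8 bytes of the key respectively.
 */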
4592
4593 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4594 {
4595         struct hclge_rss_indirection_table_cmd *req;
4596         struct hclge_desc desc;
4597         int rss_cfg_tbl_num;
4598         u8 rss_msb_oft;
4599         u8 rss_msb_val;
4600         int ret;
4601         u16 qid;
4602         int i;
4603         u32 j;
4604
4605         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4606         rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4607                           HCLGE_RSS_CFG_TBL_SIZE;
4608
4609         for (i = 0; i < rss_cfg_tbl_num; i++) {
4610                 hclge_cmd_setup_basic_desc
4611                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4612
4613                 req->start_table_index =
4614                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4615                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4616                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4617                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4618                         req->rss_qid_l[j] = qid & 0xff;
4619                         rss_msb_oft =
4620                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4621                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4622                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4623                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4624                 }
4625                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4626                 if (ret) {
4627                         dev_err(&hdev->pdev->dev,
4628                                 "Configure rss indir table fail,status = %d\n",
4629                                 ret);
4630                         return ret;
4631                 }
4632         }
4633         return 0;
4634 }
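
/* Sketch of the qid packing above: each indirection entry stores its low
 * byte in rss_qid_l[j] and its remaining high bit(s) packed into rss_qid_h.
 * Assuming HCLGE_RSS_CFG_TBL_BW_L is 8 and HCLGE_RSS_CFG_TBL_BW_H is 1 (see
 * their definitions), a qid of 0x12C would store 0x2C in rss_qid_l[j] and
 * set bit (j % 8) of rss_qid_h[j / 8].
 */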
4635
4636 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4637                                  u16 *tc_size, u16 *tc_offset)
4638 {
4639         struct hclge_rss_tc_mode_cmd *req;
4640         struct hclge_desc desc;
4641         int ret;
4642         int i;
4643
4644         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4645         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4646
4647         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4648                 u16 mode = 0;
4649
4650                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4651                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4652                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4653                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4654                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4655                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4656                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4657
4658                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4659         }
4660
4661         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4662         if (ret)
4663                 dev_err(&hdev->pdev->dev,
4664                         "Configure rss tc mode fail, status = %d\n", ret);
4665
4666         return ret;
4667 }
4668
4669 static void hclge_get_rss_type(struct hclge_vport *vport)
4670 {
4671         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4672             vport->rss_tuple_sets.ipv4_udp_en ||
4673             vport->rss_tuple_sets.ipv4_sctp_en ||
4674             vport->rss_tuple_sets.ipv6_tcp_en ||
4675             vport->rss_tuple_sets.ipv6_udp_en ||
4676             vport->rss_tuple_sets.ipv6_sctp_en)
4677                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4678         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4679                  vport->rss_tuple_sets.ipv6_fragment_en)
4680                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4681         else
4682                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4683 }
4684
4685 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4686 {
4687         struct hclge_rss_input_tuple_cmd *req;
4688         struct hclge_desc desc;
4689         int ret;
4690
4691         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4692
4693         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4694
4695         /* Get the tuple cfg from pf */
4696         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4697         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4698         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4699         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4700         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4701         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4702         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4703         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4704         hclge_get_rss_type(&hdev->vport[0]);
4705         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4706         if (ret)
4707                 dev_err(&hdev->pdev->dev,
4708                         "Configure rss input fail, status = %d\n", ret);
4709         return ret;
4710 }
4711
4712 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4713                          u8 *key, u8 *hfunc)
4714 {
4715         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4716         struct hclge_vport *vport = hclge_get_vport(handle);
4717         int i;
4718
4719         /* Get hash algorithm */
4720         if (hfunc) {
4721                 switch (vport->rss_algo) {
4722                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4723                         *hfunc = ETH_RSS_HASH_TOP;
4724                         break;
4725                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4726                         *hfunc = ETH_RSS_HASH_XOR;
4727                         break;
4728                 default:
4729                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4730                         break;
4731                 }
4732         }
4733
4734         /* Get the RSS Key required by the user */
4735         if (key)
4736                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4737
4738         /* Get indirect table */
4739         if (indir)
4740                 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4741                         indir[i] =  vport->rss_indirection_tbl[i];
4742
4743         return 0;
4744 }
4745
4746 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4747                                  u8 *hash_algo)
4748 {
4749         switch (hfunc) {
4750         case ETH_RSS_HASH_TOP:
4751                 *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4752                 return 0;
4753         case ETH_RSS_HASH_XOR:
4754                 *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4755                 return 0;
4756         case ETH_RSS_HASH_NO_CHANGE:
4757                 *hash_algo = vport->rss_algo;
4758                 return 0;
4759         default:
4760                 return -EINVAL;
4761         }
4762 }
4763
4764 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4765                          const  u8 *key, const  u8 hfunc)
4766 {
4767         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4768         struct hclge_vport *vport = hclge_get_vport(handle);
4769         struct hclge_dev *hdev = vport->back;
4770         u8 hash_algo;
4771         int ret, i;
4772
4773         ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4774         if (ret) {
4775                 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4776                 return ret;
4777         }
4778
4779         /* Set the RSS Hash Key if specified by the user */
4780         if (key) {
4781                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4782                 if (ret)
4783                         return ret;
4784
4785                 /* Update the shadow RSS key with the user specified key */
4786                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4787         } else {
4788                 ret = hclge_set_rss_algo_key(hdev, hash_algo,
4789                                              vport->rss_hash_key);
4790                 if (ret)
4791                         return ret;
4792         }
4793         vport->rss_algo = hash_algo;
4794
4795         /* Update the shadow RSS table with user specified qids */
4796         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4797                 vport->rss_indirection_tbl[i] = indir[i];
4798
4799         /* Update the hardware */
4800         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4801 }
4802
4803 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4804 {
4805         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4806
4807         if (nfc->data & RXH_L4_B_2_3)
4808                 hash_sets |= HCLGE_D_PORT_BIT;
4809         else
4810                 hash_sets &= ~HCLGE_D_PORT_BIT;
4811
4812         if (nfc->data & RXH_IP_SRC)
4813                 hash_sets |= HCLGE_S_IP_BIT;
4814         else
4815                 hash_sets &= ~HCLGE_S_IP_BIT;
4816
4817         if (nfc->data & RXH_IP_DST)
4818                 hash_sets |= HCLGE_D_IP_BIT;
4819         else
4820                 hash_sets &= ~HCLGE_D_IP_BIT;
4821
4822         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4823                 hash_sets |= HCLGE_V_TAG_BIT;
4824
4825         return hash_sets;
4826 }
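
/* Example of the mapping above: an ethtool request with
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 yields
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT,
 * and SCTP flows additionally get HCLGE_V_TAG_BIT set.
 */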
4827
4828 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4829                                     struct ethtool_rxnfc *nfc,
4830                                     struct hclge_rss_input_tuple_cmd *req)
4831 {
4832         struct hclge_dev *hdev = vport->back;
4833         u8 tuple_sets;
4834
4835         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4836         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4837         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4838         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4839         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4840         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4841         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4842         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4843
4844         tuple_sets = hclge_get_rss_hash_bits(nfc);
4845         switch (nfc->flow_type) {
4846         case TCP_V4_FLOW:
4847                 req->ipv4_tcp_en = tuple_sets;
4848                 break;
4849         case TCP_V6_FLOW:
4850                 req->ipv6_tcp_en = tuple_sets;
4851                 break;
4852         case UDP_V4_FLOW:
4853                 req->ipv4_udp_en = tuple_sets;
4854                 break;
4855         case UDP_V6_FLOW:
4856                 req->ipv6_udp_en = tuple_sets;
4857                 break;
4858         case SCTP_V4_FLOW:
4859                 req->ipv4_sctp_en = tuple_sets;
4860                 break;
4861         case SCTP_V6_FLOW:
4862                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4863                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4864                         return -EINVAL;
4865
4866                 req->ipv6_sctp_en = tuple_sets;
4867                 break;
4868         case IPV4_FLOW:
4869                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4870                 break;
4871         case IPV6_FLOW:
4872                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4873                 break;
4874         default:
4875                 return -EINVAL;
4876         }
4877
4878         return 0;
4879 }
4880
4881 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4882                                struct ethtool_rxnfc *nfc)
4883 {
4884         struct hclge_vport *vport = hclge_get_vport(handle);
4885         struct hclge_dev *hdev = vport->back;
4886         struct hclge_rss_input_tuple_cmd *req;
4887         struct hclge_desc desc;
4888         int ret;
4889
4890         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4891                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4892                 return -EINVAL;
4893
4894         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4895         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4896
4897         ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4898         if (ret) {
4899                 dev_err(&hdev->pdev->dev,
4900                         "failed to init rss tuple cmd, ret = %d\n", ret);
4901                 return ret;
4902         }
4903
4904         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4905         if (ret) {
4906                 dev_err(&hdev->pdev->dev,
4907                         "Set rss tuple fail, status = %d\n", ret);
4908                 return ret;
4909         }
4910
4911         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4912         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4913         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4914         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4915         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4916         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4917         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4918         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4919         hclge_get_rss_type(vport);
4920         return 0;
4921 }
4922
4923 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4924                                      u8 *tuple_sets)
4925 {
4926         switch (flow_type) {
4927         case TCP_V4_FLOW:
4928                 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4929                 break;
4930         case UDP_V4_FLOW:
4931                 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4932                 break;
4933         case TCP_V6_FLOW:
4934                 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4935                 break;
4936         case UDP_V6_FLOW:
4937                 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4938                 break;
4939         case SCTP_V4_FLOW:
4940                 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4941                 break;
4942         case SCTP_V6_FLOW:
4943                 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4944                 break;
4945         case IPV4_FLOW:
4946         case IPV6_FLOW:
4947                 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4948                 break;
4949         default:
4950                 return -EINVAL;
4951         }
4952
4953         return 0;
4954 }
4955
4956 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4957 {
4958         u64 tuple_data = 0;
4959
4960         if (tuple_sets & HCLGE_D_PORT_BIT)
4961                 tuple_data |= RXH_L4_B_2_3;
4962         if (tuple_sets & HCLGE_S_PORT_BIT)
4963                 tuple_data |= RXH_L4_B_0_1;
4964         if (tuple_sets & HCLGE_D_IP_BIT)
4965                 tuple_data |= RXH_IP_DST;
4966         if (tuple_sets & HCLGE_S_IP_BIT)
4967                 tuple_data |= RXH_IP_SRC;
4968
4969         return tuple_data;
4970 }
4971
4972 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4973                                struct ethtool_rxnfc *nfc)
4974 {
4975         struct hclge_vport *vport = hclge_get_vport(handle);
4976         u8 tuple_sets;
4977         int ret;
4978
4979         nfc->data = 0;
4980
4981         ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4982         if (ret || !tuple_sets)
4983                 return ret;
4984
4985         nfc->data = hclge_convert_rss_tuple(tuple_sets);
4986
4987         return 0;
4988 }
4989
4990 static int hclge_get_tc_size(struct hnae3_handle *handle)
4991 {
4992         struct hclge_vport *vport = hclge_get_vport(handle);
4993         struct hclge_dev *hdev = vport->back;
4994
4995         return hdev->pf_rss_size_max;
4996 }
4997
4998 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4999 {
5000         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
5001         struct hclge_vport *vport = hdev->vport;
5002         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
5003         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
5004         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
5005         struct hnae3_tc_info *tc_info;
5006         u16 roundup_size;
5007         u16 rss_size;
5008         int i;
5009
5010         tc_info = &vport->nic.kinfo.tc_info;
5011         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5012                 rss_size = tc_info->tqp_count[i];
5013                 tc_valid[i] = 0;
5014
5015                 if (!(hdev->hw_tc_map & BIT(i)))
5016                         continue;
5017
5018                 /* tc_size set to hardware is the log2 of the roundup power of
5019                  * two of rss_size; the actual queue size is limited by the
5020                  * indirection table.
5021                  */
5022                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5023                     rss_size == 0) {
5024                         dev_err(&hdev->pdev->dev,
5025                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
5026                                 rss_size);
5027                         return -EINVAL;
5028                 }
5029
5030                 roundup_size = roundup_pow_of_two(rss_size);
5031                 roundup_size = ilog2(roundup_size);
5032
5033                 tc_valid[i] = 1;
5034                 tc_size[i] = roundup_size;
5035                 tc_offset[i] = tc_info->tqp_offset[i];
5036         }
5037
5038         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5039 }
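
/* Example of the tc_size calculation above: for a TC with rss_size == 6,
 * roundup_pow_of_two(6) == 8 and ilog2(8) == 3, so tc_size[i] is programmed
 * as 3 while the real queue spread stays limited to 6 by the indirection
 * table entries.
 */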
5040
5041 int hclge_rss_init_hw(struct hclge_dev *hdev)
5042 {
5043         struct hclge_vport *vport = hdev->vport;
5044         u16 *rss_indir = vport[0].rss_indirection_tbl;
5045         u8 *key = vport[0].rss_hash_key;
5046         u8 hfunc = vport[0].rss_algo;
5047         int ret;
5048
5049         ret = hclge_set_rss_indir_table(hdev, rss_indir);
5050         if (ret)
5051                 return ret;
5052
5053         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5054         if (ret)
5055                 return ret;
5056
5057         ret = hclge_set_rss_input_tuple(hdev);
5058         if (ret)
5059                 return ret;
5060
5061         return hclge_init_rss_tc_mode(hdev);
5062 }
5063
5064 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5065 {
5066         struct hclge_vport *vport = &hdev->vport[0];
5067         int i;
5068
5069         for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5070                 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5071 }
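
/* Example of the default layout above: with alloc_rss_size == 4 the
 * indirection table is filled 0, 1, 2, 3, 0, 1, 2, 3, ... so traffic is
 * spread round-robin over the first 4 queues until the user overrides the
 * table, e.g. through ethtool.
 */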
5072
5073 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5074 {
5075         u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5076         int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5077         struct hclge_vport *vport = &hdev->vport[0];
5078         u16 *rss_ind_tbl;
5079
5080         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5081                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5082
5083         vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5084         vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5085         vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5086         vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5087         vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5088         vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5089         vport->rss_tuple_sets.ipv6_sctp_en =
5090                 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5091                 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5092                 HCLGE_RSS_INPUT_TUPLE_SCTP;
5093         vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5094
5095         vport->rss_algo = rss_algo;
5096
5097         rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5098                                    sizeof(*rss_ind_tbl), GFP_KERNEL);
5099         if (!rss_ind_tbl)
5100                 return -ENOMEM;
5101
5102         vport->rss_indirection_tbl = rss_ind_tbl;
5103         memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5104
5105         hclge_rss_indir_init_cfg(hdev);
5106
5107         return 0;
5108 }
5109
5110 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5111                                 int vector_id, bool en,
5112                                 struct hnae3_ring_chain_node *ring_chain)
5113 {
5114         struct hclge_dev *hdev = vport->back;
5115         struct hnae3_ring_chain_node *node;
5116         struct hclge_desc desc;
5117         struct hclge_ctrl_vector_chain_cmd *req =
5118                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5119         enum hclge_cmd_status status;
5120         enum hclge_opcode_type op;
5121         u16 tqp_type_and_id;
5122         int i;
5123
5124         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5125         hclge_cmd_setup_basic_desc(&desc, op, false);
5126         req->int_vector_id_l = hnae3_get_field(vector_id,
5127                                                HCLGE_VECTOR_ID_L_M,
5128                                                HCLGE_VECTOR_ID_L_S);
5129         req->int_vector_id_h = hnae3_get_field(vector_id,
5130                                                HCLGE_VECTOR_ID_H_M,
5131                                                HCLGE_VECTOR_ID_H_S);
5132
5133         i = 0;
5134         for (node = ring_chain; node; node = node->next) {
5135                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5136                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5137                                 HCLGE_INT_TYPE_S,
5138                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5139                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5140                                 HCLGE_TQP_ID_S, node->tqp_index);
5141                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5142                                 HCLGE_INT_GL_IDX_S,
5143                                 hnae3_get_field(node->int_gl_idx,
5144                                                 HNAE3_RING_GL_IDX_M,
5145                                                 HNAE3_RING_GL_IDX_S));
5146                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5147                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5148                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5149                         req->vfid = vport->vport_id;
5150
5151                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
5152                         if (status) {
5153                                 dev_err(&hdev->pdev->dev,
5154                                         "Map TQP fail, status is %d.\n",
5155                                         status);
5156                                 return -EIO;
5157                         }
5158                         i = 0;
5159
5160                         hclge_cmd_setup_basic_desc(&desc,
5161                                                    op,
5162                                                    false);
5163                         req->int_vector_id_l =
5164                                 hnae3_get_field(vector_id,
5165                                                 HCLGE_VECTOR_ID_L_M,
5166                                                 HCLGE_VECTOR_ID_L_S);
5167                         req->int_vector_id_h =
5168                                 hnae3_get_field(vector_id,
5169                                                 HCLGE_VECTOR_ID_H_M,
5170                                                 HCLGE_VECTOR_ID_H_S);
5171                 }
5172         }
5173
5174         if (i > 0) {
5175                 req->int_cause_num = i;
5176                 req->vfid = vport->vport_id;
5177                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5178                 if (status) {
5179                         dev_err(&hdev->pdev->dev,
5180                                 "Map TQP fail, status is %d.\n", status);
5181                         return -EIO;
5182                 }
5183         }
5184
5185         return 0;
5186 }
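
/* Sketch of the batching above: ring-to-vector mappings are pushed in groups
 * of HCLGE_VECTOR_ELEMENTS_PER_CMD entries. A chain longer than one group is
 * sent as one full command per group (the descriptor is re-initialised after
 * each send), and any remainder is flushed by the trailing "i > 0" command.
 */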
5187
5188 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5189                                     struct hnae3_ring_chain_node *ring_chain)
5190 {
5191         struct hclge_vport *vport = hclge_get_vport(handle);
5192         struct hclge_dev *hdev = vport->back;
5193         int vector_id;
5194
5195         vector_id = hclge_get_vector_index(hdev, vector);
5196         if (vector_id < 0) {
5197                 dev_err(&hdev->pdev->dev,
5198                         "failed to get vector index. vector=%d\n", vector);
5199                 return vector_id;
5200         }
5201
5202         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5203 }
5204
5205 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5206                                        struct hnae3_ring_chain_node *ring_chain)
5207 {
5208         struct hclge_vport *vport = hclge_get_vport(handle);
5209         struct hclge_dev *hdev = vport->back;
5210         int vector_id, ret;
5211
5212         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5213                 return 0;
5214
5215         vector_id = hclge_get_vector_index(hdev, vector);
5216         if (vector_id < 0) {
5217                 dev_err(&handle->pdev->dev,
5218                         "Get vector index fail. ret =%d\n", vector_id);
5219                 return vector_id;
5220         }
5221
5222         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5223         if (ret)
5224                 dev_err(&handle->pdev->dev,
5225                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5226                         vector_id, ret);
5227
5228         return ret;
5229 }
5230
5231 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5232                                       bool en_uc, bool en_mc, bool en_bc)
5233 {
5234         struct hclge_vport *vport = &hdev->vport[vf_id];
5235         struct hnae3_handle *handle = &vport->nic;
5236         struct hclge_promisc_cfg_cmd *req;
5237         struct hclge_desc desc;
5238         bool uc_tx_en = en_uc;
5239         u8 promisc_cfg = 0;
5240         int ret;
5241
5242         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5243
5244         req = (struct hclge_promisc_cfg_cmd *)desc.data;
5245         req->vf_id = vf_id;
5246
5247         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5248                 uc_tx_en = false;
5249
5250         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5251         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5252         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5253         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5254         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5255         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5256         req->extend_promisc = promisc_cfg;
5257
5258         /* to be compatible with DEVICE_VERSION_V1/2 */
5259         promisc_cfg = 0;
5260         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5261         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5262         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5263         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5264         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5265         req->promisc = promisc_cfg;
5266
5267         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5268         if (ret)
5269                 dev_err(&hdev->pdev->dev,
5270                         "failed to set vport %u promisc mode, ret = %d.\n",
5271                         vf_id, ret);
5272
5273         return ret;
5274 }
5275
5276 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5277                                  bool en_mc_pmc, bool en_bc_pmc)
5278 {
5279         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5280                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
5281 }
5282
5283 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5284                                   bool en_mc_pmc)
5285 {
5286         struct hclge_vport *vport = hclge_get_vport(handle);
5287         struct hclge_dev *hdev = vport->back;
5288         bool en_bc_pmc = true;
5289
5290         /* For devices whose version is below V2, the vlan filter is always
5291          * bypassed when broadcast promisc is enabled. So broadcast promisc
5292          * should be disabled until the user enables promisc mode
5293          */
5294         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5295                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5296
5297         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5298                                             en_bc_pmc);
5299 }
5300
5301 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5302 {
5303         struct hclge_vport *vport = hclge_get_vport(handle);
5304
5305         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5306 }
5307
5308 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5309 {
5310         if (hlist_empty(&hdev->fd_rule_list))
5311                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5312 }
5313
5314 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5315 {
5316         if (!test_bit(location, hdev->fd_bmap)) {
5317                 set_bit(location, hdev->fd_bmap);
5318                 hdev->hclge_fd_rule_num++;
5319         }
5320 }
5321
5322 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5323 {
5324         if (test_bit(location, hdev->fd_bmap)) {
5325                 clear_bit(location, hdev->fd_bmap);
5326                 hdev->hclge_fd_rule_num--;
5327         }
5328 }
5329
5330 static void hclge_fd_free_node(struct hclge_dev *hdev,
5331                                struct hclge_fd_rule *rule)
5332 {
5333         hlist_del(&rule->rule_node);
5334         kfree(rule);
5335         hclge_sync_fd_state(hdev);
5336 }
5337
5338 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5339                                       struct hclge_fd_rule *old_rule,
5340                                       struct hclge_fd_rule *new_rule,
5341                                       enum HCLGE_FD_NODE_STATE state)
5342 {
5343         switch (state) {
5344         case HCLGE_FD_TO_ADD:
5345         case HCLGE_FD_ACTIVE:
5346                 /* 1) if the new state is TO_ADD, just replace the old rule
5347                  * with the same location, no matter its state, because the
5348                  * new rule will be configured to the hardware.
5349                  * 2) if the new state is ACTIVE, it means the new rule
5350                  * has been configured to the hardware, so just replace
5351                  * the old rule node with the same location.
5352                  * 3) since this does not add a new node to the list, it is
5353                  * unnecessary to update the rule number and fd_bmap.
5354                  */
5355                 new_rule->rule_node.next = old_rule->rule_node.next;
5356                 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5357                 memcpy(old_rule, new_rule, sizeof(*old_rule));
5358                 kfree(new_rule);
5359                 break;
5360         case HCLGE_FD_DELETED:
5361                 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5362                 hclge_fd_free_node(hdev, old_rule);
5363                 break;
5364         case HCLGE_FD_TO_DEL:
5365                 /* if the new request is TO_DEL and the old rule exists:
5366                  * 1) if the state of the old rule is TO_DEL, we need do nothing,
5367                  * because we delete the rule by location and the other rule
5368                  * content is unnecessary.
5369                  * 2) if the state of the old rule is ACTIVE, we need to change
5370                  * its state to TO_DEL, so the rule will be deleted when the
5371                  * periodic task is scheduled.
5372                  * 3) if the state of the old rule is TO_ADD, the rule hasn't
5373                  * been added to the hardware, so just delete the rule node
5374                  * from fd_rule_list directly.
5375                  */
5376                 if (old_rule->state == HCLGE_FD_TO_ADD) {
5377                         hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5378                         hclge_fd_free_node(hdev, old_rule);
5379                         return;
5380                 }
5381                 old_rule->state = HCLGE_FD_TO_DEL;
5382                 break;
5383         }
5384 }
5385
5386 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5387                                                 u16 location,
5388                                                 struct hclge_fd_rule **parent)
5389 {
5390         struct hclge_fd_rule *rule;
5391         struct hlist_node *node;
5392
5393         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5394                 if (rule->location == location)
5395                         return rule;
5396                 else if (rule->location > location)
5397                         return NULL;
5398                 /* record the parent node, used to keep the nodes in fd_rule_list
5399                  * in ascending order.
5400                  */
5401                 *parent = rule;
5402         }
5403
5404         return NULL;
5405 }
5406
5407 /* insert the fd rule node in ascending order according to rule->location */
5408 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5409                                       struct hclge_fd_rule *rule,
5410                                       struct hclge_fd_rule *parent)
5411 {
5412         INIT_HLIST_NODE(&rule->rule_node);
5413
5414         if (parent)
5415                 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5416         else
5417                 hlist_add_head(&rule->rule_node, hlist);
5418 }
5419
5420 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5421                                      struct hclge_fd_user_def_cfg *cfg)
5422 {
5423         struct hclge_fd_user_def_cfg_cmd *req;
5424         struct hclge_desc desc;
5425         u16 data = 0;
5426         int ret;
5427
5428         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5429
5430         req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5431
5432         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5433         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5434                         HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5435         req->ol2_cfg = cpu_to_le16(data);
5436
5437         data = 0;
5438         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5439         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5440                         HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5441         req->ol3_cfg = cpu_to_le16(data);
5442
5443         data = 0;
5444         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5445         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5446                         HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5447         req->ol4_cfg = cpu_to_le16(data);
5448
5449         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5450         if (ret)
5451                 dev_err(&hdev->pdev->dev,
5452                         "failed to set fd user def data, ret= %d\n", ret);
5453         return ret;
5454 }
5455
5456 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5457 {
5458         int ret;
5459
5460         if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5461                 return;
5462
5463         if (!locked)
5464                 spin_lock_bh(&hdev->fd_rule_lock);
5465
5466         ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5467         if (ret)
5468                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5469
5470         if (!locked)
5471                 spin_unlock_bh(&hdev->fd_rule_lock);
5472 }
5473
5474 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5475                                           struct hclge_fd_rule *rule)
5476 {
5477         struct hlist_head *hlist = &hdev->fd_rule_list;
5478         struct hclge_fd_rule *fd_rule, *parent = NULL;
5479         struct hclge_fd_user_def_info *info, *old_info;
5480         struct hclge_fd_user_def_cfg *cfg;
5481
5482         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5483             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5484                 return 0;
5485
5486         /* valid layers start from 1, so subtract 1 to index into the cfg array */
5487         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5488         info = &rule->ep.user_def;
5489
5490         if (!cfg->ref_cnt || cfg->offset == info->offset)
5491                 return 0;
5492
5493         if (cfg->ref_cnt > 1)
5494                 goto error;
5495
5496         fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5497         if (fd_rule) {
5498                 old_info = &fd_rule->ep.user_def;
5499                 if (info->layer == old_info->layer)
5500                         return 0;
5501         }
5502
5503 error:
5504         dev_err(&hdev->pdev->dev,
5505                 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5506                 info->layer + 1);
5507         return -ENOSPC;
5508 }
5509
5510 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5511                                          struct hclge_fd_rule *rule)
5512 {
5513         struct hclge_fd_user_def_cfg *cfg;
5514
5515         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5516             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5517                 return;
5518
5519         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5520         if (!cfg->ref_cnt) {
5521                 cfg->offset = rule->ep.user_def.offset;
5522                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5523         }
5524         cfg->ref_cnt++;
5525 }
5526
5527 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5528                                          struct hclge_fd_rule *rule)
5529 {
5530         struct hclge_fd_user_def_cfg *cfg;
5531
5532         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5533             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5534                 return;
5535
5536         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5537         if (!cfg->ref_cnt)
5538                 return;
5539
5540         cfg->ref_cnt--;
5541         if (!cfg->ref_cnt) {
5542                 cfg->offset = 0;
5543                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5544         }
5545 }
5546
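/* Update the sorted rule list for @location. For an existing node the
 * user-def refcount is moved from the old rule to the new one before the
 * node state is updated; for a new node the rule is inserted, the rule
 * counter is increased and, for HCLGE_FD_TO_ADD, the periodic task is
 * scheduled to write the rule to hardware. Caller must hold fd_rule_lock.
 */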
5547 static void hclge_update_fd_list(struct hclge_dev *hdev,
5548                                  enum HCLGE_FD_NODE_STATE state, u16 location,
5549                                  struct hclge_fd_rule *new_rule)
5550 {
5551         struct hlist_head *hlist = &hdev->fd_rule_list;
5552         struct hclge_fd_rule *fd_rule, *parent = NULL;
5553
5554         fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5555         if (fd_rule) {
5556                 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5557                 if (state == HCLGE_FD_ACTIVE)
5558                         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5559                 hclge_sync_fd_user_def_cfg(hdev, true);
5560
5561                 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5562                 return;
5563         }
5564
5565         /* it's unlikely to fail here, because we have checked that the
5566          * rule exists before.
5567          */
5568         if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5569                 dev_warn(&hdev->pdev->dev,
5570                          "failed to delete fd rule %u, it does not exist\n",
5571                          location);
5572                 return;
5573         }
5574
5575         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5576         hclge_sync_fd_user_def_cfg(hdev, true);
5577
5578         hclge_fd_insert_rule_node(hlist, new_rule, parent);
5579         hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5580
5581         if (state == HCLGE_FD_TO_ADD) {
5582                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5583                 hclge_task_schedule(hdev, 0);
5584         }
5585 }
5586
5587 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5588 {
5589         struct hclge_get_fd_mode_cmd *req;
5590         struct hclge_desc desc;
5591         int ret;
5592
5593         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5594
5595         req = (struct hclge_get_fd_mode_cmd *)desc.data;
5596
5597         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5598         if (ret) {
5599                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5600                 return ret;
5601         }
5602
5603         *fd_mode = req->mode;
5604
5605         return ret;
5606 }
5607
5608 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5609                                    u32 *stage1_entry_num,
5610                                    u32 *stage2_entry_num,
5611                                    u16 *stage1_counter_num,
5612                                    u16 *stage2_counter_num)
5613 {
5614         struct hclge_get_fd_allocation_cmd *req;
5615         struct hclge_desc desc;
5616         int ret;
5617
5618         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5619
5620         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5621
5622         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5623         if (ret) {
5624                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5625                         ret);
5626                 return ret;
5627         }
5628
5629         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5630         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5631         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5632         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5633
5634         return ret;
5635 }
5636
5637 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5638                                    enum HCLGE_FD_STAGE stage_num)
5639 {
5640         struct hclge_set_fd_key_config_cmd *req;
5641         struct hclge_fd_key_cfg *stage;
5642         struct hclge_desc desc;
5643         int ret;
5644
5645         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5646
5647         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5648         stage = &hdev->fd_cfg.key_cfg[stage_num];
5649         req->stage = stage_num;
5650         req->key_select = stage->key_sel;
5651         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5652         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5653         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5654         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5655         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5656         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5657
5658         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5659         if (ret)
5660                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5661
5662         return ret;
5663 }
5664
5665 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5666 {
5667         struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5668
5669         spin_lock_bh(&hdev->fd_rule_lock);
5670         memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5671         spin_unlock_bh(&hdev->fd_rule_lock);
5672
5673         hclge_fd_set_user_def_cmd(hdev, cfg);
5674 }
5675
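/* Query the flow director mode and the entry/counter allocation from
 * firmware, then select the stage 1 key: the low two words of the inner IPv6
 * addresses, the VLAN tag, ether type and 5-tuple fields and, when the wide
 * 400 bit key is available, the inner MAC addresses and (on V3 and later
 * devices) the user-def tuples.
 */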
5676 static int hclge_init_fd_config(struct hclge_dev *hdev)
5677 {
5678 #define LOW_2_WORDS             0x03
5679         struct hclge_fd_key_cfg *key_cfg;
5680         int ret;
5681
5682         if (!hnae3_dev_fd_supported(hdev))
5683                 return 0;
5684
5685         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5686         if (ret)
5687                 return ret;
5688
5689         switch (hdev->fd_cfg.fd_mode) {
5690         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5691                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5692                 break;
5693         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5694                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5695                 break;
5696         default:
5697                 dev_err(&hdev->pdev->dev,
5698                         "Unsupported flow director mode %u\n",
5699                         hdev->fd_cfg.fd_mode);
5700                 return -EOPNOTSUPP;
5701         }
5702
5703         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5704         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5705         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5706         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5707         key_cfg->outer_sipv6_word_en = 0;
5708         key_cfg->outer_dipv6_word_en = 0;
5709
5710         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5711                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5712                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5713                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5714
5715         /* If the max 400-bit key is used, we can support tuples for ether type */
5716         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5717                 key_cfg->tuple_active |=
5718                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5719                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5720                         key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5721         }
5722
5723         /* roce_type is used to filter roce frames
5724          * dst_vport is used to specify the rule
5725          */
5726         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5727
5728         ret = hclge_get_fd_allocation(hdev,
5729                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5730                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5731                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5732                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5733         if (ret)
5734                 return ret;
5735
5736         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5737 }
5738
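/* A TCAM entry is programmed with three chained descriptors carrying the key
 * bytes. @sel_x selects whether the x or the y half of the key is written,
 * and a NULL @key together with @is_add == false invalidates the entry at
 * @loc.
 */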
5739 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5740                                 int loc, u8 *key, bool is_add)
5741 {
5742         struct hclge_fd_tcam_config_1_cmd *req1;
5743         struct hclge_fd_tcam_config_2_cmd *req2;
5744         struct hclge_fd_tcam_config_3_cmd *req3;
5745         struct hclge_desc desc[3];
5746         int ret;
5747
5748         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5749         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5750         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5751         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5752         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5753
5754         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5755         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5756         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5757
5758         req1->stage = stage;
5759         req1->xy_sel = sel_x ? 1 : 0;
5760         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5761         req1->index = cpu_to_le32(loc);
5762         req1->entry_vld = sel_x ? is_add : 0;
5763
5764         if (key) {
5765                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5766                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5767                        sizeof(req2->tcam_data));
5768                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5769                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5770         }
5771
5772         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5773         if (ret)
5774                 dev_err(&hdev->pdev->dev,
5775                         "config tcam key fail, ret=%d\n",
5776                         ret);
5777
5778         return ret;
5779 }
5780
5781 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5782                               struct hclge_fd_ad_data *action)
5783 {
5784         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5785         struct hclge_fd_ad_config_cmd *req;
5786         struct hclge_desc desc;
5787         u64 ad_data = 0;
5788         int ret;
5789
5790         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5791
5792         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5793         req->index = cpu_to_le32(loc);
5794         req->stage = stage;
5795
5796         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5797                       action->write_rule_id_to_bd);
5798         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5799                         action->rule_id);
5800         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5801                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5802                               action->override_tc);
5803                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5804                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5805         }
5806         ad_data <<= 32;
5807         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5808         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5809                       action->forward_to_direct_queue);
5810         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5811                         action->queue_id);
5812         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5813         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5814                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5815         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5816         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5817                         action->next_input_key);
5818
5819         req->ad_data = cpu_to_le64(ad_data);
5820         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5821         if (ret)
5822                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5823
5824         return ret;
5825 }
5826
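/* Convert one tuple of @rule into TCAM x/y format. tuple_key_info[] supplies
 * the byte offsets of the value and its mask inside struct hclge_fd_rule,
 * and the calc_x()/calc_y() helpers do the actual value/mask to x/y
 * translation. Returns false for an unknown key option so the caller does
 * not advance the key pointers.
 */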
5827 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5828                                    struct hclge_fd_rule *rule)
5829 {
5830         int offset, moffset, ip_offset;
5831         enum HCLGE_FD_KEY_OPT key_opt;
5832         u16 tmp_x_s, tmp_y_s;
5833         u32 tmp_x_l, tmp_y_l;
5834         u8 *p = (u8 *)rule;
5835         int i;
5836
5837         if (rule->unused_tuple & BIT(tuple_bit))
5838                 return true;
5839
5840         key_opt = tuple_key_info[tuple_bit].key_opt;
5841         offset = tuple_key_info[tuple_bit].offset;
5842         moffset = tuple_key_info[tuple_bit].moffset;
5843
5844         switch (key_opt) {
5845         case KEY_OPT_U8:
5846                 calc_x(*key_x, p[offset], p[moffset]);
5847                 calc_y(*key_y, p[offset], p[moffset]);
5848
5849                 return true;
5850         case KEY_OPT_LE16:
5851                 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5852                 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5853                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5854                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5855
5856                 return true;
5857         case KEY_OPT_LE32:
5858                 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5859                 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5860                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5861                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5862
5863                 return true;
5864         case KEY_OPT_MAC:
5865                 for (i = 0; i < ETH_ALEN; i++) {
5866                         calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5867                                p[moffset + i]);
5868                         calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5869                                p[moffset + i]);
5870                 }
5871
5872                 return true;
5873         case KEY_OPT_IP:
5874                 ip_offset = IPV4_INDEX * sizeof(u32);
5875                 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5876                        *(u32 *)(&p[moffset + ip_offset]));
5877                 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5878                        *(u32 *)(&p[moffset + ip_offset]));
5879                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5880                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5881
5882                 return true;
5883         default:
5884                 return false;
5885         }
5886 }
5887
5888 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5889                                  u8 vf_id, u8 network_port_id)
5890 {
5891         u32 port_number = 0;
5892
5893         if (port_type == HOST_PORT) {
5894                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5895                                 pf_id);
5896                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5897                                 vf_id);
5898                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5899         } else {
5900                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5901                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5902                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5903         }
5904
5905         return port_number;
5906 }
5907
5908 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5909                                        __le32 *key_x, __le32 *key_y,
5910                                        struct hclge_fd_rule *rule)
5911 {
5912         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5913         u8 cur_pos = 0, tuple_size, shift_bits;
5914         unsigned int i;
5915
5916         for (i = 0; i < MAX_META_DATA; i++) {
5917                 tuple_size = meta_data_key_info[i].key_length;
5918                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5919
5920                 switch (tuple_bit) {
5921                 case BIT(ROCE_TYPE):
5922                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5923                         cur_pos += tuple_size;
5924                         break;
5925                 case BIT(DST_VPORT):
5926                         port_number = hclge_get_port_number(HOST_PORT, 0,
5927                                                             rule->vf_id, 0);
5928                         hnae3_set_field(meta_data,
5929                                         GENMASK(cur_pos + tuple_size, cur_pos),
5930                                         cur_pos, port_number);
5931                         cur_pos += tuple_size;
5932                         break;
5933                 default:
5934                         break;
5935                 }
5936         }
5937
5938         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5939         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5940         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5941
5942         *key_x = cpu_to_le32(tmp_x << shift_bits);
5943         *key_y = cpu_to_le32(tmp_y << shift_bits);
5944 }
5945
5946 /* A complete key consists of a meta data key and a tuple key.
5947  * The meta data key is stored in the MSB region, the tuple key is stored in
5948  * the LSB region, and unused bits are filled with 0.
5949  */
5950 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5951                             struct hclge_fd_rule *rule)
5952 {
5953         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5954         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5955         u8 *cur_key_x, *cur_key_y;
5956         u8 meta_data_region;
5957         u8 tuple_size;
5958         int ret;
5959         u32 i;
5960
5961         memset(key_x, 0, sizeof(key_x));
5962         memset(key_y, 0, sizeof(key_y));
5963         cur_key_x = key_x;
5964         cur_key_y = key_y;
5965
5966         for (i = 0; i < MAX_TUPLE; i++) {
5967                 bool tuple_valid;
5968
5969                 tuple_size = tuple_key_info[i].key_length / 8;
5970                 if (!(key_cfg->tuple_active & BIT(i)))
5971                         continue;
5972
5973                 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5974                                                      cur_key_y, rule);
5975                 if (tuple_valid) {
5976                         cur_key_x += tuple_size;
5977                         cur_key_y += tuple_size;
5978                 }
5979         }
5980
5981         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5982                         MAX_META_DATA_LENGTH / 8;
5983
5984         hclge_fd_convert_meta_data(key_cfg,
5985                                    (__le32 *)(key_x + meta_data_region),
5986                                    (__le32 *)(key_y + meta_data_region),
5987                                    rule);
5988
5989         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5990                                    true);
5991         if (ret) {
5992                 dev_err(&hdev->pdev->dev,
5993                         "fd key_y config fail, loc=%u, ret=%d\n",
5994                         rule->location, ret);
5995                 return ret;
5996         }
5997
5998         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5999                                    true);
6000         if (ret)
6001                 dev_err(&hdev->pdev->dev,
6002                         "fd key_x config fail, loc=%u, ret=%d\n",
6003                         rule->location, ret);
6004         return ret;
6005 }
6006
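/* Build the action data for stage 1: drop the packet, redirect to a TC (the
 * destination is described by the TC's tqp offset and log2 queue count), or
 * forward to a specific queue. When flow counters are available a per-VF
 * counter is attached, and the rule id is written back to the RX BD so the
 * stack can identify which rule matched.
 */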
6007 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
6008                                struct hclge_fd_rule *rule)
6009 {
6010         struct hclge_vport *vport = hdev->vport;
6011         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
6012         struct hclge_fd_ad_data ad_data;
6013
6014         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
6015         ad_data.ad_id = rule->location;
6016
6017         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6018                 ad_data.drop_packet = true;
6019         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6020                 ad_data.override_tc = true;
6021                 ad_data.queue_id =
6022                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6023                 ad_data.tc_size =
6024                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6025         } else {
6026                 ad_data.forward_to_direct_queue = true;
6027                 ad_data.queue_id = rule->queue_id;
6028         }
6029
6030         if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6031                 ad_data.use_counter = true;
6032                 ad_data.counter_id = rule->vf_id %
6033                                      hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6034         } else {
6035                 ad_data.use_counter = false;
6036                 ad_data.counter_id = 0;
6037         }
6038
6039         ad_data.use_next_stage = false;
6040         ad_data.next_input_key = 0;
6041
6042         ad_data.write_rule_id_to_bd = true;
6043         ad_data.rule_id = rule->location;
6044
6045         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6046 }
6047
6048 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6049                                        u32 *unused_tuple)
6050 {
6051         if (!spec || !unused_tuple)
6052                 return -EINVAL;
6053
6054         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6055
6056         if (!spec->ip4src)
6057                 *unused_tuple |= BIT(INNER_SRC_IP);
6058
6059         if (!spec->ip4dst)
6060                 *unused_tuple |= BIT(INNER_DST_IP);
6061
6062         if (!spec->psrc)
6063                 *unused_tuple |= BIT(INNER_SRC_PORT);
6064
6065         if (!spec->pdst)
6066                 *unused_tuple |= BIT(INNER_DST_PORT);
6067
6068         if (!spec->tos)
6069                 *unused_tuple |= BIT(INNER_IP_TOS);
6070
6071         return 0;
6072 }
6073
6074 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6075                                     u32 *unused_tuple)
6076 {
6077         if (!spec || !unused_tuple)
6078                 return -EINVAL;
6079
6080         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6081                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6082
6083         if (!spec->ip4src)
6084                 *unused_tuple |= BIT(INNER_SRC_IP);
6085
6086         if (!spec->ip4dst)
6087                 *unused_tuple |= BIT(INNER_DST_IP);
6088
6089         if (!spec->tos)
6090                 *unused_tuple |= BIT(INNER_IP_TOS);
6091
6092         if (!spec->proto)
6093                 *unused_tuple |= BIT(INNER_IP_PROTO);
6094
6095         if (spec->l4_4_bytes)
6096                 return -EOPNOTSUPP;
6097
6098         if (spec->ip_ver != ETH_RX_NFC_IP4)
6099                 return -EOPNOTSUPP;
6100
6101         return 0;
6102 }
6103
6104 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6105                                        u32 *unused_tuple)
6106 {
6107         if (!spec || !unused_tuple)
6108                 return -EINVAL;
6109
6110         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6111
6112         /* check whether src/dst ip address used */
6113         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6114                 *unused_tuple |= BIT(INNER_SRC_IP);
6115
6116         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6117                 *unused_tuple |= BIT(INNER_DST_IP);
6118
6119         if (!spec->psrc)
6120                 *unused_tuple |= BIT(INNER_SRC_PORT);
6121
6122         if (!spec->pdst)
6123                 *unused_tuple |= BIT(INNER_DST_PORT);
6124
6125         if (!spec->tclass)
6126                 *unused_tuple |= BIT(INNER_IP_TOS);
6127
6128         return 0;
6129 }
6130
6131 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6132                                     u32 *unused_tuple)
6133 {
6134         if (!spec || !unused_tuple)
6135                 return -EINVAL;
6136
6137         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6138                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6139
6140         /* check whether src/dst ip address used */
6141         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6142                 *unused_tuple |= BIT(INNER_SRC_IP);
6143
6144         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6145                 *unused_tuple |= BIT(INNER_DST_IP);
6146
6147         if (!spec->l4_proto)
6148                 *unused_tuple |= BIT(INNER_IP_PROTO);
6149
6150         if (!spec->tclass)
6151                 *unused_tuple |= BIT(INNER_IP_TOS);
6152
6153         if (spec->l4_4_bytes)
6154                 return -EOPNOTSUPP;
6155
6156         return 0;
6157 }
6158
6159 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6160 {
6161         if (!spec || !unused_tuple)
6162                 return -EINVAL;
6163
6164         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6165                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6166                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6167
6168         if (is_zero_ether_addr(spec->h_source))
6169                 *unused_tuple |= BIT(INNER_SRC_MAC);
6170
6171         if (is_zero_ether_addr(spec->h_dest))
6172                 *unused_tuple |= BIT(INNER_DST_MAC);
6173
6174         if (!spec->h_proto)
6175                 *unused_tuple |= BIT(INNER_ETH_TYPE);
6176
6177         return 0;
6178 }
6179
6180 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6181                                     struct ethtool_rx_flow_spec *fs,
6182                                     u32 *unused_tuple)
6183 {
6184         if (fs->flow_type & FLOW_EXT) {
6185                 if (fs->h_ext.vlan_etype) {
6186                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6187                         return -EOPNOTSUPP;
6188                 }
6189
6190                 if (!fs->h_ext.vlan_tci)
6191                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6192
6193                 if (fs->m_ext.vlan_tci &&
6194                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6195                         dev_err(&hdev->pdev->dev,
6196                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6197                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6198                         return -EINVAL;
6199                 }
6200         } else {
6201                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6202         }
6203
6204         if (fs->flow_type & FLOW_MAC_EXT) {
6205                 if (hdev->fd_cfg.fd_mode !=
6206                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6207                         dev_err(&hdev->pdev->dev,
6208                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6209                         return -EOPNOTSUPP;
6210                 }
6211
6212                 if (is_zero_ether_addr(fs->h_ext.h_dest))
6213                         *unused_tuple |= BIT(INNER_DST_MAC);
6214                 else
6215                         *unused_tuple &= ~BIT(INNER_DST_MAC);
6216         }
6217
6218         return 0;
6219 }
6220
6221 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6222                                        struct hclge_fd_user_def_info *info)
6223 {
6224         switch (flow_type) {
6225         case ETHER_FLOW:
6226                 info->layer = HCLGE_FD_USER_DEF_L2;
6227                 *unused_tuple &= ~BIT(INNER_L2_RSV);
6228                 break;
6229         case IP_USER_FLOW:
6230         case IPV6_USER_FLOW:
6231                 info->layer = HCLGE_FD_USER_DEF_L3;
6232                 *unused_tuple &= ~BIT(INNER_L3_RSV);
6233                 break;
6234         case TCP_V4_FLOW:
6235         case UDP_V4_FLOW:
6236         case TCP_V6_FLOW:
6237         case UDP_V6_FLOW:
6238                 info->layer = HCLGE_FD_USER_DEF_L4;
6239                 *unused_tuple &= ~BIT(INNER_L4_RSV);
6240                 break;
6241         default:
6242                 return -EOPNOTSUPP;
6243         }
6244
6245         return 0;
6246 }
6247
6248 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6249 {
6250         return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6251 }
6252
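/* Parse the ethtool user-def field. Illustrative example (exact ethtool
 * syntax may differ between versions): a 2 byte match of 0x1234 at offset 6
 * could be requested with something like
 *   ethtool -N eth0 flow-type tcp4 ... user-def 0x0000000600001234 action 1
 * where, as described below, bit0~15 of the 64 bit value carry the data and
 * bit32~47 carry the offset within the layer selected by the flow type.
 */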
6253 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6254                                          struct ethtool_rx_flow_spec *fs,
6255                                          u32 *unused_tuple,
6256                                          struct hclge_fd_user_def_info *info)
6257 {
6258         u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6259         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6260         u16 data, offset, data_mask, offset_mask;
6261         int ret;
6262
6263         info->layer = HCLGE_FD_USER_DEF_NONE;
6264         *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6265
6266         if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6267                 return 0;
6268
6269         /* user-def data from ethtool is a 64 bit value, bits 0~15 are used
6270          * for data, and bits 32~47 are used for the offset.
6271          */
6272         data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6273         data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6274         offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6275         offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6276
6277         if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6278                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6279                 return -EOPNOTSUPP;
6280         }
6281
6282         if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6283                 dev_err(&hdev->pdev->dev,
6284                         "user-def offset[%u] should be no more than %u\n",
6285                         offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6286                 return -EINVAL;
6287         }
6288
6289         if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6290                 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6291                 return -EINVAL;
6292         }
6293
6294         ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6295         if (ret) {
6296                 dev_err(&hdev->pdev->dev,
6297                         "unsupported flow type for user-def bytes, ret = %d\n",
6298                         ret);
6299                 return ret;
6300         }
6301
6302         info->data = data;
6303         info->data_mask = data_mask;
6304         info->offset = offset;
6305
6306         return 0;
6307 }
6308
6309 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6310                                struct ethtool_rx_flow_spec *fs,
6311                                u32 *unused_tuple,
6312                                struct hclge_fd_user_def_info *info)
6313 {
6314         u32 flow_type;
6315         int ret;
6316
6317         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6318                 dev_err(&hdev->pdev->dev,
6319                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
6320                         fs->location,
6321                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6322                 return -EINVAL;
6323         }
6324
6325         ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6326         if (ret)
6327                 return ret;
6328
6329         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6330         switch (flow_type) {
6331         case SCTP_V4_FLOW:
6332         case TCP_V4_FLOW:
6333         case UDP_V4_FLOW:
6334                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6335                                                   unused_tuple);
6336                 break;
6337         case IP_USER_FLOW:
6338                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6339                                                unused_tuple);
6340                 break;
6341         case SCTP_V6_FLOW:
6342         case TCP_V6_FLOW:
6343         case UDP_V6_FLOW:
6344                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6345                                                   unused_tuple);
6346                 break;
6347         case IPV6_USER_FLOW:
6348                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6349                                                unused_tuple);
6350                 break;
6351         case ETHER_FLOW:
6352                 if (hdev->fd_cfg.fd_mode !=
6353                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6354                         dev_err(&hdev->pdev->dev,
6355                                 "ETHER_FLOW is not supported in current fd mode!\n");
6356                         return -EOPNOTSUPP;
6357                 }
6358
6359                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6360                                                  unused_tuple);
6361                 break;
6362         default:
6363                 dev_err(&hdev->pdev->dev,
6364                         "unsupported protocol type, protocol type = %#x\n",
6365                         flow_type);
6366                 return -EOPNOTSUPP;
6367         }
6368
6369         if (ret) {
6370                 dev_err(&hdev->pdev->dev,
6371                         "failed to check flow union tuple, ret = %d\n",
6372                         ret);
6373                 return ret;
6374         }
6375
6376         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6377 }
6378
6379 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6380                                       struct ethtool_rx_flow_spec *fs,
6381                                       struct hclge_fd_rule *rule, u8 ip_proto)
6382 {
6383         rule->tuples.src_ip[IPV4_INDEX] =
6384                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6385         rule->tuples_mask.src_ip[IPV4_INDEX] =
6386                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6387
6388         rule->tuples.dst_ip[IPV4_INDEX] =
6389                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6390         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6391                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6392
6393         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6394         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6395
6396         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6397         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6398
6399         rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6400         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6401
6402         rule->tuples.ether_proto = ETH_P_IP;
6403         rule->tuples_mask.ether_proto = 0xFFFF;
6404
6405         rule->tuples.ip_proto = ip_proto;
6406         rule->tuples_mask.ip_proto = 0xFF;
6407 }
6408
6409 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6410                                    struct ethtool_rx_flow_spec *fs,
6411                                    struct hclge_fd_rule *rule)
6412 {
6413         rule->tuples.src_ip[IPV4_INDEX] =
6414                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6415         rule->tuples_mask.src_ip[IPV4_INDEX] =
6416                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6417
6418         rule->tuples.dst_ip[IPV4_INDEX] =
6419                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6420         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6421                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6422
6423         rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6424         rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6425
6426         rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6427         rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6428
6429         rule->tuples.ether_proto = ETH_P_IP;
6430         rule->tuples_mask.ether_proto = 0xFFFF;
6431 }
6432
6433 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6434                                       struct ethtool_rx_flow_spec *fs,
6435                                       struct hclge_fd_rule *rule, u8 ip_proto)
6436 {
6437         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6438                           IPV6_SIZE);
6439         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6440                           IPV6_SIZE);
6441
6442         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6443                           IPV6_SIZE);
6444         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6445                           IPV6_SIZE);
6446
6447         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6448         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6449
6450         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6451         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6452
6453         rule->tuples.ether_proto = ETH_P_IPV6;
6454         rule->tuples_mask.ether_proto = 0xFFFF;
6455
6456         rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6457         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6458
6459         rule->tuples.ip_proto = ip_proto;
6460         rule->tuples_mask.ip_proto = 0xFF;
6461 }
6462
6463 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6464                                    struct ethtool_rx_flow_spec *fs,
6465                                    struct hclge_fd_rule *rule)
6466 {
6467         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6468                           IPV6_SIZE);
6469         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6470                           IPV6_SIZE);
6471
6472         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6473                           IPV6_SIZE);
6474         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6475                           IPV6_SIZE);
6476
6477         rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6478         rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6479
6480         rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6481         rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6482
6483         rule->tuples.ether_proto = ETH_P_IPV6;
6484         rule->tuples_mask.ether_proto = 0xFFFF;
6485 }
6486
6487 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6488                                      struct ethtool_rx_flow_spec *fs,
6489                                      struct hclge_fd_rule *rule)
6490 {
6491         ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6492         ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6493
6494         ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6495         ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6496
6497         rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6498         rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6499 }
6500
6501 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6502                                         struct hclge_fd_rule *rule)
6503 {
6504         switch (info->layer) {
6505         case HCLGE_FD_USER_DEF_L2:
6506                 rule->tuples.l2_user_def = info->data;
6507                 rule->tuples_mask.l2_user_def = info->data_mask;
6508                 break;
6509         case HCLGE_FD_USER_DEF_L3:
6510                 rule->tuples.l3_user_def = info->data;
6511                 rule->tuples_mask.l3_user_def = info->data_mask;
6512                 break;
6513         case HCLGE_FD_USER_DEF_L4:
6514                 rule->tuples.l4_user_def = (u32)info->data << 16;
6515                 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6516                 break;
6517         default:
6518                 break;
6519         }
6520
6521         rule->ep.user_def = *info;
6522 }
6523
6524 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6525                               struct ethtool_rx_flow_spec *fs,
6526                               struct hclge_fd_rule *rule,
6527                               struct hclge_fd_user_def_info *info)
6528 {
6529         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6530
6531         switch (flow_type) {
6532         case SCTP_V4_FLOW:
6533                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6534                 break;
6535         case TCP_V4_FLOW:
6536                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6537                 break;
6538         case UDP_V4_FLOW:
6539                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6540                 break;
6541         case IP_USER_FLOW:
6542                 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6543                 break;
6544         case SCTP_V6_FLOW:
6545                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6546                 break;
6547         case TCP_V6_FLOW:
6548                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6549                 break;
6550         case UDP_V6_FLOW:
6551                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6552                 break;
6553         case IPV6_USER_FLOW:
6554                 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6555                 break;
6556         case ETHER_FLOW:
6557                 hclge_fd_get_ether_tuple(hdev, fs, rule);
6558                 break;
6559         default:
6560                 return -EOPNOTSUPP;
6561         }
6562
6563         if (fs->flow_type & FLOW_EXT) {
6564                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6565                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6566                 hclge_fd_get_user_def_tuple(info, rule);
6567         }
6568
6569         if (fs->flow_type & FLOW_MAC_EXT) {
6570                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6571                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6572         }
6573
6574         return 0;
6575 }
6576
6577 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6578                                 struct hclge_fd_rule *rule)
6579 {
6580         int ret;
6581
6582         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6583         if (ret)
6584                 return ret;
6585
6586         return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6587 }
6588
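/* Common path for inserting an ethtool or tc flower rule. The two rule types
 * are mutually exclusive with each other and with aRFS: a conflicting active
 * type is rejected, existing aRFS rules are cleared, and only after the rule
 * has been written to hardware is it added to the software list.
 */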
6589 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6590                                      struct hclge_fd_rule *rule)
6591 {
6592         int ret;
6593
6594         spin_lock_bh(&hdev->fd_rule_lock);
6595
6596         if (hdev->fd_active_type != rule->rule_type &&
6597             (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6598              hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6599                 dev_err(&hdev->pdev->dev,
6600                         "mode conflict (new type %d, active type %d), please delete existing rules first\n",
6601                         rule->rule_type, hdev->fd_active_type);
6602                 spin_unlock_bh(&hdev->fd_rule_lock);
6603                 return -EINVAL;
6604         }
6605
6606         ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6607         if (ret)
6608                 goto out;
6609
6610         ret = hclge_clear_arfs_rules(hdev);
6611         if (ret)
6612                 goto out;
6613
6614         ret = hclge_fd_config_rule(hdev, rule);
6615         if (ret)
6616                 goto out;
6617
6618         rule->state = HCLGE_FD_ACTIVE;
6619         hdev->fd_active_type = rule->rule_type;
6620         hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6621
6622 out:
6623         spin_unlock_bh(&hdev->fd_rule_lock);
6624         return ret;
6625 }
6626
6627 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6628 {
6629         struct hclge_vport *vport = hclge_get_vport(handle);
6630         struct hclge_dev *hdev = vport->back;
6631
6632         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6633 }
6634
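/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC means drop, otherwise the
 * cookie carries the target queue and an optional VF id (0 means the PF, a
 * non-zero value is the VF index plus one), extracted with
 * ethtool_get_flow_spec_ring() / ethtool_get_flow_spec_ring_vf().
 */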
6635 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6636                                       u16 *vport_id, u8 *action, u16 *queue_id)
6637 {
6638         struct hclge_vport *vport = hdev->vport;
6639
6640         if (ring_cookie == RX_CLS_FLOW_DISC) {
6641                 *action = HCLGE_FD_ACTION_DROP_PACKET;
6642         } else {
6643                 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6644                 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6645                 u16 tqps;
6646
6647                 /* To keep consistent with user's configuration, minus 1 when
6648                  * printing 'vf', because the vf id from ethtool is the vf id plus 1.
6649                  */
6650                 if (vf > hdev->num_req_vfs) {
6651                         dev_err(&hdev->pdev->dev,
6652                                 "Error: vf id (%u) should be less than %u\n",
6653                                 vf - 1, hdev->num_req_vfs);
6654                         return -EINVAL;
6655                 }
6656
6657                 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6658                 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6659
6660                 if (ring >= tqps) {
6661                         dev_err(&hdev->pdev->dev,
6662                                 "Error: queue id (%u) > max tqp num (%u)\n",
6663                                 ring, tqps - 1);
6664                         return -EINVAL;
6665                 }
6666
6667                 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6668                 *queue_id = ring;
6669         }
6670
6671         return 0;
6672 }
6673
6674 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6675                               struct ethtool_rxnfc *cmd)
6676 {
6677         struct hclge_vport *vport = hclge_get_vport(handle);
6678         struct hclge_dev *hdev = vport->back;
6679         struct hclge_fd_user_def_info info;
6680         u16 dst_vport_id = 0, q_index = 0;
6681         struct ethtool_rx_flow_spec *fs;
6682         struct hclge_fd_rule *rule;
6683         u32 unused = 0;
6684         u8 action;
6685         int ret;
6686
6687         if (!hnae3_dev_fd_supported(hdev)) {
6688                 dev_err(&hdev->pdev->dev,
6689                         "flow director is not supported\n");
6690                 return -EOPNOTSUPP;
6691         }
6692
6693         if (!hdev->fd_en) {
6694                 dev_err(&hdev->pdev->dev,
6695                         "please enable flow director first\n");
6696                 return -EOPNOTSUPP;
6697         }
6698
6699         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6700
6701         ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6702         if (ret)
6703                 return ret;
6704
6705         ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6706                                          &action, &q_index);
6707         if (ret)
6708                 return ret;
6709
6710         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6711         if (!rule)
6712                 return -ENOMEM;
6713
6714         ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6715         if (ret) {
6716                 kfree(rule);
6717                 return ret;
6718         }
6719
6720         rule->flow_type = fs->flow_type;
6721         rule->location = fs->location;
6722         rule->unused_tuple = unused;
6723         rule->vf_id = dst_vport_id;
6724         rule->queue_id = q_index;
6725         rule->action = action;
6726         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6727
6728         ret = hclge_add_fd_entry_common(hdev, rule);
6729         if (ret)
6730                 kfree(rule);
6731
6732         return ret;
6733 }
6734
6735 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6736                               struct ethtool_rxnfc *cmd)
6737 {
6738         struct hclge_vport *vport = hclge_get_vport(handle);
6739         struct hclge_dev *hdev = vport->back;
6740         struct ethtool_rx_flow_spec *fs;
6741         int ret;
6742
6743         if (!hnae3_dev_fd_supported(hdev))
6744                 return -EOPNOTSUPP;
6745
6746         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6747
6748         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6749                 return -EINVAL;
6750
6751         spin_lock_bh(&hdev->fd_rule_lock);
6752         if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6753             !test_bit(fs->location, hdev->fd_bmap)) {
6754                 dev_err(&hdev->pdev->dev,
6755                         "Delete fail, rule %u does not exist\n", fs->location);
6756                 spin_unlock_bh(&hdev->fd_rule_lock);
6757                 return -ENOENT;
6758         }
6759
6760         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6761                                    NULL, false);
6762         if (ret)
6763                 goto out;
6764
6765         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6766
6767 out:
6768         spin_unlock_bh(&hdev->fd_rule_lock);
6769         return ret;
6770 }
6771
6772 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6773                                          bool clear_list)
6774 {
6775         struct hclge_fd_rule *rule;
6776         struct hlist_node *node;
6777         u16 location;
6778
6779         if (!hnae3_dev_fd_supported(hdev))
6780                 return;
6781
6782         spin_lock_bh(&hdev->fd_rule_lock);
6783
6784         for_each_set_bit(location, hdev->fd_bmap,
6785                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6786                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6787                                      NULL, false);
6788
6789         if (clear_list) {
6790                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6791                                           rule_node) {
6792                         hlist_del(&rule->rule_node);
6793                         kfree(rule);
6794                 }
6795                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6796                 hdev->hclge_fd_rule_num = 0;
6797                 bitmap_zero(hdev->fd_bmap,
6798                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6799         }
6800
6801         spin_unlock_bh(&hdev->fd_rule_lock);
6802 }
6803
6804 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6805 {
6806         hclge_clear_fd_rules_in_list(hdev, true);
6807         hclge_fd_disable_user_def(hdev);
6808 }
6809
6810 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6811 {
6812         struct hclge_vport *vport = hclge_get_vport(handle);
6813         struct hclge_dev *hdev = vport->back;
6814         struct hclge_fd_rule *rule;
6815         struct hlist_node *node;
6816
6817         /* Return ok here, because reset error handling will check this
6818          * return value. If an error is returned here, the reset process will
6819          * fail.
6820          */
6821         if (!hnae3_dev_fd_supported(hdev))
6822                 return 0;
6823
6824         /* if fd is disabled, the rules should not be restored during reset */
6825         if (!hdev->fd_en)
6826                 return 0;
6827
6828         spin_lock_bh(&hdev->fd_rule_lock);
6829         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6830                 if (rule->state == HCLGE_FD_ACTIVE)
6831                         rule->state = HCLGE_FD_TO_ADD;
6832         }
6833         spin_unlock_bh(&hdev->fd_rule_lock);
6834         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6835
6836         return 0;
6837 }
6838
6839 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6840                                  struct ethtool_rxnfc *cmd)
6841 {
6842         struct hclge_vport *vport = hclge_get_vport(handle);
6843         struct hclge_dev *hdev = vport->back;
6844
6845         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6846                 return -EOPNOTSUPP;
6847
6848         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6849         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6850
6851         return 0;
6852 }
6853
6854 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6855                                      struct ethtool_tcpip4_spec *spec,
6856                                      struct ethtool_tcpip4_spec *spec_mask)
6857 {
6858         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6859         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6860                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6861
6862         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6863         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6864                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6865
6866         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6867         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6868                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6869
6870         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6871         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6872                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6873
6874         spec->tos = rule->tuples.ip_tos;
6875         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6876                         0 : rule->tuples_mask.ip_tos;
6877 }
6878
6879 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6880                                   struct ethtool_usrip4_spec *spec,
6881                                   struct ethtool_usrip4_spec *spec_mask)
6882 {
6883         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6884         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6885                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6886
6887         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6888         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6889                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6890
6891         spec->tos = rule->tuples.ip_tos;
6892         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6893                         0 : rule->tuples_mask.ip_tos;
6894
6895         spec->proto = rule->tuples.ip_proto;
6896         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6897                         0 : rule->tuples_mask.ip_proto;
6898
6899         spec->ip_ver = ETH_RX_NFC_IP4;
6900 }
6901
6902 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6903                                      struct ethtool_tcpip6_spec *spec,
6904                                      struct ethtool_tcpip6_spec *spec_mask)
6905 {
6906         cpu_to_be32_array(spec->ip6src,
6907                           rule->tuples.src_ip, IPV6_SIZE);
6908         cpu_to_be32_array(spec->ip6dst,
6909                           rule->tuples.dst_ip, IPV6_SIZE);
6910         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6911                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6912         else
6913                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6914                                   IPV6_SIZE);
6915
6916         if (rule->unused_tuple & BIT(INNER_DST_IP))
6917                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6918         else
6919                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6920                                   IPV6_SIZE);
6921
6922         spec->tclass = rule->tuples.ip_tos;
6923         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6924                         0 : rule->tuples_mask.ip_tos;
6925
6926         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6927         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6928                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6929
6930         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6931         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6932                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6933 }
6934
6935 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6936                                   struct ethtool_usrip6_spec *spec,
6937                                   struct ethtool_usrip6_spec *spec_mask)
6938 {
6939         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6940         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6941         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6942                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6943         else
6944                 cpu_to_be32_array(spec_mask->ip6src,
6945                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6946
6947         if (rule->unused_tuple & BIT(INNER_DST_IP))
6948                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6949         else
6950                 cpu_to_be32_array(spec_mask->ip6dst,
6951                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6952
6953         spec->tclass = rule->tuples.ip_tos;
6954         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6955                         0 : rule->tuples_mask.ip_tos;
6956
6957         spec->l4_proto = rule->tuples.ip_proto;
6958         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6959                         0 : rule->tuples_mask.ip_proto;
6960 }
6961
6962 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6963                                     struct ethhdr *spec,
6964                                     struct ethhdr *spec_mask)
6965 {
6966         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6967         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6968
6969         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6970                 eth_zero_addr(spec_mask->h_source);
6971         else
6972                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6973
6974         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6975                 eth_zero_addr(spec_mask->h_dest);
6976         else
6977                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6978
6979         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6980         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6981                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6982 }
6983
6984 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6985                                        struct hclge_fd_rule *rule)
6986 {
6987         if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6988             HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6989                 fs->h_ext.data[0] = 0;
6990                 fs->h_ext.data[1] = 0;
6991                 fs->m_ext.data[0] = 0;
6992                 fs->m_ext.data[1] = 0;
6993         } else {
6994                 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6995                 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6996                 fs->m_ext.data[0] =
6997                                 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6998                 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6999         }
7000 }
7001
7002 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
7003                                   struct hclge_fd_rule *rule)
7004 {
7005         if (fs->flow_type & FLOW_EXT) {
7006                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
7007                 fs->m_ext.vlan_tci =
7008                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
7009                                 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
7010
7011                 hclge_fd_get_user_def_info(fs, rule);
7012         }
7013
7014         if (fs->flow_type & FLOW_MAC_EXT) {
7015                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
7016                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
7017                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
7018                 else
7019                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
7020                                         rule->tuples_mask.dst_mac);
7021         }
7022 }
7023
7024 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7025                                   struct ethtool_rxnfc *cmd)
7026 {
7027         struct hclge_vport *vport = hclge_get_vport(handle);
7028         struct hclge_fd_rule *rule = NULL;
7029         struct hclge_dev *hdev = vport->back;
7030         struct ethtool_rx_flow_spec *fs;
7031         struct hlist_node *node2;
7032
7033         if (!hnae3_dev_fd_supported(hdev))
7034                 return -EOPNOTSUPP;
7035
7036         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7037
7038         spin_lock_bh(&hdev->fd_rule_lock);
7039
7040         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7041                 if (rule->location >= fs->location)
7042                         break;
7043         }
7044
7045         if (!rule || fs->location != rule->location) {
7046                 spin_unlock_bh(&hdev->fd_rule_lock);
7047
7048                 return -ENOENT;
7049         }
7050
7051         fs->flow_type = rule->flow_type;
7052         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7053         case SCTP_V4_FLOW:
7054         case TCP_V4_FLOW:
7055         case UDP_V4_FLOW:
7056                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7057                                          &fs->m_u.tcp_ip4_spec);
7058                 break;
7059         case IP_USER_FLOW:
7060                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7061                                       &fs->m_u.usr_ip4_spec);
7062                 break;
7063         case SCTP_V6_FLOW:
7064         case TCP_V6_FLOW:
7065         case UDP_V6_FLOW:
7066                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7067                                          &fs->m_u.tcp_ip6_spec);
7068                 break;
7069         case IPV6_USER_FLOW:
7070                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7071                                       &fs->m_u.usr_ip6_spec);
7072                 break;
7073         /* The flow type of the fd rule has been checked before it was added
7074          * to the rule list. Since all other flow types have been handled above,
7075          * the default case must be ETHER_FLOW
7076          */
7077         default:
7078                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7079                                         &fs->m_u.ether_spec);
7080                 break;
7081         }
7082
7083         hclge_fd_get_ext_info(fs, rule);
7084
7085         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7086                 fs->ring_cookie = RX_CLS_FLOW_DISC;
7087         } else {
7088                 u64 vf_id;
7089
7090                 fs->ring_cookie = rule->queue_id;
7091                 vf_id = rule->vf_id;
7092                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7093                 fs->ring_cookie |= vf_id;
7094         }
7095
7096         spin_unlock_bh(&hdev->fd_rule_lock);
7097
7098         return 0;
7099 }
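
/* The ring_cookie reported above uses the standard ethtool encoding: the low
 * bits hold the queue id and the VF id is shifted up by
 * ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF. As an illustrative sketch (the values are
 * hypothetical, not taken from the driver), a rule steering to queue 5 of
 * VF 2 would be reported as
 *
 *	cookie = 5 | ((u64)2 << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF);
 *
 * which user space can decode again with ethtool_get_flow_spec_ring() and
 * ethtool_get_flow_spec_ring_vf().
 */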
7100
7101 static int hclge_get_all_rules(struct hnae3_handle *handle,
7102                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
7103 {
7104         struct hclge_vport *vport = hclge_get_vport(handle);
7105         struct hclge_dev *hdev = vport->back;
7106         struct hclge_fd_rule *rule;
7107         struct hlist_node *node2;
7108         int cnt = 0;
7109
7110         if (!hnae3_dev_fd_supported(hdev))
7111                 return -EOPNOTSUPP;
7112
7113         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7114
7115         spin_lock_bh(&hdev->fd_rule_lock);
7116         hlist_for_each_entry_safe(rule, node2,
7117                                   &hdev->fd_rule_list, rule_node) {
7118                 if (cnt == cmd->rule_cnt) {
7119                         spin_unlock_bh(&hdev->fd_rule_lock);
7120                         return -EMSGSIZE;
7121                 }
7122
7123                 if (rule->state == HCLGE_FD_TO_DEL)
7124                         continue;
7125
7126                 rule_locs[cnt] = rule->location;
7127                 cnt++;
7128         }
7129
7130         spin_unlock_bh(&hdev->fd_rule_lock);
7131
7132         cmd->rule_cnt = cnt;
7133
7134         return 0;
7135 }
7136
7137 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7138                                      struct hclge_fd_rule_tuples *tuples)
7139 {
7140 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7141 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7142
7143         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7144         tuples->ip_proto = fkeys->basic.ip_proto;
7145         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7146
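        /* For IPv4, only the last u32 of the src_ip/dst_ip arrays is used,
         * the same slot that IPV4_INDEX selects elsewhere in this file.
         */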
7147         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7148                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7149                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7150         } else {
7151                 int i;
7152
7153                 for (i = 0; i < IPV6_SIZE; i++) {
7154                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7155                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7156                 }
7157         }
7158 }
7159
7160 /* traverse all rules, check whether an existing rule has the same tuples */
7161 static struct hclge_fd_rule *
7162 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7163                           const struct hclge_fd_rule_tuples *tuples)
7164 {
7165         struct hclge_fd_rule *rule = NULL;
7166         struct hlist_node *node;
7167
7168         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7169                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7170                         return rule;
7171         }
7172
7173         return NULL;
7174 }
7175
7176 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7177                                      struct hclge_fd_rule *rule)
7178 {
7179         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7180                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7181                              BIT(INNER_SRC_PORT);
7182         rule->action = 0;
7183         rule->vf_id = 0;
7184         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7185         rule->state = HCLGE_FD_TO_ADD;
7186         if (tuples->ether_proto == ETH_P_IP) {
7187                 if (tuples->ip_proto == IPPROTO_TCP)
7188                         rule->flow_type = TCP_V4_FLOW;
7189                 else
7190                         rule->flow_type = UDP_V4_FLOW;
7191         } else {
7192                 if (tuples->ip_proto == IPPROTO_TCP)
7193                         rule->flow_type = TCP_V6_FLOW;
7194                 else
7195                         rule->flow_type = UDP_V6_FLOW;
7196         }
7197         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7198         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7199 }
7200
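/* arfs entry point: called with the dissected flow keys of a received packet
 * and the queue the flow should be steered to (typically reached through the
 * hns3 client's ndo_rx_flow_steer handler). On success it returns the rule
 * location, which the stack keeps as the filter id and later passes back via
 * rps_may_expire_flow() in hclge_rfs_filter_expire().
 */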
7201 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7202                                       u16 flow_id, struct flow_keys *fkeys)
7203 {
7204         struct hclge_vport *vport = hclge_get_vport(handle);
7205         struct hclge_fd_rule_tuples new_tuples = {};
7206         struct hclge_dev *hdev = vport->back;
7207         struct hclge_fd_rule *rule;
7208         u16 bit_id;
7209
7210         if (!hnae3_dev_fd_supported(hdev))
7211                 return -EOPNOTSUPP;
7212
7213         /* when there is already an fd rule added by the user,
7214          * arfs should not take effect
7215          */
7216         spin_lock_bh(&hdev->fd_rule_lock);
7217         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7218             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7219                 spin_unlock_bh(&hdev->fd_rule_lock);
7220                 return -EOPNOTSUPP;
7221         }
7222
7223         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7224
7225         /* check whether a flow director filter already exists for this flow:
7226          * if not, create a new filter for it;
7227          * if a filter exists with a different queue id, modify the filter;
7228          * if a filter exists with the same queue id, do nothing
7229          */
7230         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7231         if (!rule) {
7232                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7233                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7234                         spin_unlock_bh(&hdev->fd_rule_lock);
7235                         return -ENOSPC;
7236                 }
7237
7238                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7239                 if (!rule) {
7240                         spin_unlock_bh(&hdev->fd_rule_lock);
7241                         return -ENOMEM;
7242                 }
7243
7244                 rule->location = bit_id;
7245                 rule->arfs.flow_id = flow_id;
7246                 rule->queue_id = queue_id;
7247                 hclge_fd_build_arfs_rule(&new_tuples, rule);
7248                 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7249                 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7250         } else if (rule->queue_id != queue_id) {
7251                 rule->queue_id = queue_id;
7252                 rule->state = HCLGE_FD_TO_ADD;
7253                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7254                 hclge_task_schedule(hdev, 0);
7255         }
7256         spin_unlock_bh(&hdev->fd_rule_lock);
7257         return rule->location;
7258 }
7259
7260 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7261 {
7262 #ifdef CONFIG_RFS_ACCEL
7263         struct hnae3_handle *handle = &hdev->vport[0].nic;
7264         struct hclge_fd_rule *rule;
7265         struct hlist_node *node;
7266
7267         spin_lock_bh(&hdev->fd_rule_lock);
7268         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7269                 spin_unlock_bh(&hdev->fd_rule_lock);
7270                 return;
7271         }
7272         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7273                 if (rule->state != HCLGE_FD_ACTIVE)
7274                         continue;
7275                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7276                                         rule->arfs.flow_id, rule->location)) {
7277                         rule->state = HCLGE_FD_TO_DEL;
7278                         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7279                 }
7280         }
7281         spin_unlock_bh(&hdev->fd_rule_lock);
7282 #endif
7283 }
7284
7285 /* must be called with fd_rule_lock held */
7286 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7287 {
7288 #ifdef CONFIG_RFS_ACCEL
7289         struct hclge_fd_rule *rule;
7290         struct hlist_node *node;
7291         int ret;
7292
7293         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7294                 return 0;
7295
7296         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7297                 switch (rule->state) {
7298                 case HCLGE_FD_TO_DEL:
7299                 case HCLGE_FD_ACTIVE:
7300                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7301                                                    rule->location, NULL, false);
7302                         if (ret)
7303                                 return ret;
7304                         fallthrough;
7305                 case HCLGE_FD_TO_ADD:
7306                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7307                         hlist_del(&rule->rule_node);
7308                         kfree(rule);
7309                         break;
7310                 default:
7311                         break;
7312                 }
7313         }
7314         hclge_sync_fd_state(hdev);
7315
7316 #endif
7317         return 0;
7318 }
7319
7320 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7321                                     struct hclge_fd_rule *rule)
7322 {
7323         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7324                 struct flow_match_basic match;
7325                 u16 ethtype_key, ethtype_mask;
7326
7327                 flow_rule_match_basic(flow, &match);
7328                 ethtype_key = ntohs(match.key->n_proto);
7329                 ethtype_mask = ntohs(match.mask->n_proto);
7330
7331                 if (ethtype_key == ETH_P_ALL) {
7332                         ethtype_key = 0;
7333                         ethtype_mask = 0;
7334                 }
7335                 rule->tuples.ether_proto = ethtype_key;
7336                 rule->tuples_mask.ether_proto = ethtype_mask;
7337                 rule->tuples.ip_proto = match.key->ip_proto;
7338                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7339         } else {
7340                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7341                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7342         }
7343 }
7344
7345 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7346                                   struct hclge_fd_rule *rule)
7347 {
7348         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7349                 struct flow_match_eth_addrs match;
7350
7351                 flow_rule_match_eth_addrs(flow, &match);
7352                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7353                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7354                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7355                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7356         } else {
7357                 rule->unused_tuple |= BIT(INNER_DST_MAC);
7358                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7359         }
7360 }
7361
7362 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7363                                    struct hclge_fd_rule *rule)
7364 {
7365         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7366                 struct flow_match_vlan match;
7367
7368                 flow_rule_match_vlan(flow, &match);
7369                 rule->tuples.vlan_tag1 = match.key->vlan_id |
7370                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7371                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7372                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7373         } else {
7374                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7375         }
7376 }
7377
7378 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7379                                  struct hclge_fd_rule *rule)
7380 {
7381         u16 addr_type = 0;
7382
7383         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7384                 struct flow_match_control match;
7385
7386                 flow_rule_match_control(flow, &match);
7387                 addr_type = match.key->addr_type;
7388         }
7389
7390         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7391                 struct flow_match_ipv4_addrs match;
7392
7393                 flow_rule_match_ipv4_addrs(flow, &match);
7394                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7395                 rule->tuples_mask.src_ip[IPV4_INDEX] =
7396                                                 be32_to_cpu(match.mask->src);
7397                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7398                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7399                                                 be32_to_cpu(match.mask->dst);
7400         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7401                 struct flow_match_ipv6_addrs match;
7402
7403                 flow_rule_match_ipv6_addrs(flow, &match);
7404                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7405                                   IPV6_SIZE);
7406                 be32_to_cpu_array(rule->tuples_mask.src_ip,
7407                                   match.mask->src.s6_addr32, IPV6_SIZE);
7408                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7409                                   IPV6_SIZE);
7410                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7411                                   match.mask->dst.s6_addr32, IPV6_SIZE);
7412         } else {
7413                 rule->unused_tuple |= BIT(INNER_SRC_IP);
7414                 rule->unused_tuple |= BIT(INNER_DST_IP);
7415         }
7416 }
7417
7418 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7419                                    struct hclge_fd_rule *rule)
7420 {
7421         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7422                 struct flow_match_ports match;
7423
7424                 flow_rule_match_ports(flow, &match);
7425
7426                 rule->tuples.src_port = be16_to_cpu(match.key->src);
7427                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7428                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7429                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7430         } else {
7431                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7432                 rule->unused_tuple |= BIT(INNER_DST_PORT);
7433         }
7434 }
7435
7436 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7437                                   struct flow_cls_offload *cls_flower,
7438                                   struct hclge_fd_rule *rule)
7439 {
7440         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7441         struct flow_dissector *dissector = flow->match.dissector;
7442
7443         if (dissector->used_keys &
7444             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7445               BIT(FLOW_DISSECTOR_KEY_BASIC) |
7446               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7447               BIT(FLOW_DISSECTOR_KEY_VLAN) |
7448               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7449               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7450               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7451                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7452                         dissector->used_keys);
7453                 return -EOPNOTSUPP;
7454         }
7455
7456         hclge_get_cls_key_basic(flow, rule);
7457         hclge_get_cls_key_mac(flow, rule);
7458         hclge_get_cls_key_vlan(flow, rule);
7459         hclge_get_cls_key_ip(flow, rule);
7460         hclge_get_cls_key_port(flow, rule);
7461
7462         return 0;
7463 }
7464
7465 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7466                                   struct flow_cls_offload *cls_flower, int tc)
7467 {
7468         u32 prio = cls_flower->common.prio;
7469
7470         if (tc < 0 || tc > hdev->tc_max) {
7471                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7472                 return -EINVAL;
7473         }
7474
7475         if (prio == 0 ||
7476             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7477                 dev_err(&hdev->pdev->dev,
7478                         "prio %u should be in range[1, %u]\n",
7479                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7480                 return -EINVAL;
7481         }
7482
7483         if (test_bit(prio - 1, hdev->fd_bmap)) {
7484                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7485                 return -EINVAL;
7486         }
7487         return 0;
7488 }
7489
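/* tc flower rules share the flow director stage-1 TCAM with ethtool rules:
 * the (1-based) flower priority maps directly to the rule location as
 * prio - 1, which is why the priority must be unique and no larger than the
 * stage-1 rule count checked above.
 */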
7490 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7491                                 struct flow_cls_offload *cls_flower,
7492                                 int tc)
7493 {
7494         struct hclge_vport *vport = hclge_get_vport(handle);
7495         struct hclge_dev *hdev = vport->back;
7496         struct hclge_fd_rule *rule;
7497         int ret;
7498
7499         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7500         if (ret) {
7501                 dev_err(&hdev->pdev->dev,
7502                         "failed to check cls flower params, ret = %d\n", ret);
7503                 return ret;
7504         }
7505
7506         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7507         if (!rule)
7508                 return -ENOMEM;
7509
7510         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7511         if (ret) {
7512                 kfree(rule);
7513                 return ret;
7514         }
7515
7516         rule->action = HCLGE_FD_ACTION_SELECT_TC;
7517         rule->cls_flower.tc = tc;
7518         rule->location = cls_flower->common.prio - 1;
7519         rule->vf_id = 0;
7520         rule->cls_flower.cookie = cls_flower->cookie;
7521         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7522
7523         ret = hclge_add_fd_entry_common(hdev, rule);
7524         if (ret)
7525                 kfree(rule);
7526
7527         return ret;
7528 }
7529
7530 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7531                                                    unsigned long cookie)
7532 {
7533         struct hclge_fd_rule *rule;
7534         struct hlist_node *node;
7535
7536         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7537                 if (rule->cls_flower.cookie == cookie)
7538                         return rule;
7539         }
7540
7541         return NULL;
7542 }
7543
7544 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7545                                 struct flow_cls_offload *cls_flower)
7546 {
7547         struct hclge_vport *vport = hclge_get_vport(handle);
7548         struct hclge_dev *hdev = vport->back;
7549         struct hclge_fd_rule *rule;
7550         int ret;
7551
7552         spin_lock_bh(&hdev->fd_rule_lock);
7553
7554         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7555         if (!rule) {
7556                 spin_unlock_bh(&hdev->fd_rule_lock);
7557                 return -EINVAL;
7558         }
7559
7560         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7561                                    NULL, false);
7562         if (ret) {
7563                 spin_unlock_bh(&hdev->fd_rule_lock);
7564                 return ret;
7565         }
7566
7567         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7568         spin_unlock_bh(&hdev->fd_rule_lock);
7569
7570         return 0;
7571 }
7572
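/* Flow director rules are programmed asynchronously: entries marked
 * HCLGE_FD_TO_ADD are written to the stage-1 TCAM and become HCLGE_FD_ACTIVE,
 * while entries marked HCLGE_FD_TO_DEL are removed from the TCAM and freed.
 * On failure the HCLGE_STATE_FD_TBL_CHANGED bit is set again so that the
 * periodic service task retries the sync later.
 */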
7573 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7574 {
7575         struct hclge_fd_rule *rule;
7576         struct hlist_node *node;
7577         int ret = 0;
7578
7579         if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7580                 return;
7581
7582         spin_lock_bh(&hdev->fd_rule_lock);
7583
7584         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7585                 switch (rule->state) {
7586                 case HCLGE_FD_TO_ADD:
7587                         ret = hclge_fd_config_rule(hdev, rule);
7588                         if (ret)
7589                                 goto out;
7590                         rule->state = HCLGE_FD_ACTIVE;
7591                         break;
7592                 case HCLGE_FD_TO_DEL:
7593                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7594                                                    rule->location, NULL, false);
7595                         if (ret)
7596                                 goto out;
7597                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7598                         hclge_fd_free_node(hdev, rule);
7599                         break;
7600                 default:
7601                         break;
7602                 }
7603         }
7604
7605 out:
7606         if (ret)
7607                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7608
7609         spin_unlock_bh(&hdev->fd_rule_lock);
7610 }
7611
7612 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7613 {
7614         if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7615                 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7616
7617                 hclge_clear_fd_rules_in_list(hdev, clear_list);
7618         }
7619
7620         hclge_sync_fd_user_def_cfg(hdev, false);
7621
7622         hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7623 }
7624
7625 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7626 {
7627         struct hclge_vport *vport = hclge_get_vport(handle);
7628         struct hclge_dev *hdev = vport->back;
7629
7630         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7631                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7632 }
7633
7634 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7635 {
7636         struct hclge_vport *vport = hclge_get_vport(handle);
7637         struct hclge_dev *hdev = vport->back;
7638
7639         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7640 }
7641
7642 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7643 {
7644         struct hclge_vport *vport = hclge_get_vport(handle);
7645         struct hclge_dev *hdev = vport->back;
7646
7647         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7648 }
7649
7650 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7651 {
7652         struct hclge_vport *vport = hclge_get_vport(handle);
7653         struct hclge_dev *hdev = vport->back;
7654
7655         return hdev->rst_stats.hw_reset_done_cnt;
7656 }
7657
7658 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7659 {
7660         struct hclge_vport *vport = hclge_get_vport(handle);
7661         struct hclge_dev *hdev = vport->back;
7662
7663         hdev->fd_en = enable;
7664
7665         if (!enable)
7666                 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7667         else
7668                 hclge_restore_fd_entries(handle);
7669
7670         hclge_task_schedule(hdev, 0);
7671 }
7672
7673 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7674 {
7675         struct hclge_desc desc;
7676         struct hclge_config_mac_mode_cmd *req =
7677                 (struct hclge_config_mac_mode_cmd *)desc.data;
7678         u32 loop_en = 0;
7679         int ret;
7680
7681         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7682
7683         if (enable) {
7684                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7685                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7686                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7687                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7688                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7689                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7690                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7691                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7692                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7693                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7694         }
7695
7696         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7697
7698         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7699         if (ret)
7700                 dev_err(&hdev->pdev->dev,
7701                         "mac enable fail, ret =%d.\n", ret);
7702 }
7703
7704 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7705                                      u8 switch_param, u8 param_mask)
7706 {
7707         struct hclge_mac_vlan_switch_cmd *req;
7708         struct hclge_desc desc;
7709         u32 func_id;
7710         int ret;
7711
7712         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7713         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7714
7715         /* read current config parameter */
7716         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7717                                    true);
7718         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7719         req->func_id = cpu_to_le32(func_id);
7720
7721         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7722         if (ret) {
7723                 dev_err(&hdev->pdev->dev,
7724                         "read mac vlan switch parameter fail, ret = %d\n", ret);
7725                 return ret;
7726         }
7727
7728         /* modify and write new config parameter */
7729         hclge_cmd_reuse_desc(&desc, false);
7730         req->switch_param = (req->switch_param & param_mask) | switch_param;
7731         req->param_mask = param_mask;
7732
7733         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7734         if (ret)
7735                 dev_err(&hdev->pdev->dev,
7736                         "set mac vlan switch parameter fail, ret = %d\n", ret);
7737         return ret;
7738 }
7739
7740 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7741                                        int link_ret)
7742 {
7743 #define HCLGE_PHY_LINK_STATUS_NUM  200
7744
7745         struct phy_device *phydev = hdev->hw.mac.phydev;
7746         int i = 0;
7747         int ret;
7748
7749         do {
7750                 ret = phy_read_status(phydev);
7751                 if (ret) {
7752                         dev_err(&hdev->pdev->dev,
7753                                 "phy update link status fail, ret = %d\n", ret);
7754                         return;
7755                 }
7756
7757                 if (phydev->link == link_ret)
7758                         break;
7759
7760                 msleep(HCLGE_LINK_STATUS_MS);
7761         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7762 }
7763
7764 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7765 {
7766 #define HCLGE_MAC_LINK_STATUS_NUM  100
7767
7768         int link_status;
7769         int i = 0;
7770         int ret;
7771
7772         do {
7773                 ret = hclge_get_mac_link_status(hdev, &link_status);
7774                 if (ret)
7775                         return ret;
7776                 if (link_status == link_ret)
7777                         return 0;
7778
7779                 msleep(HCLGE_LINK_STATUS_MS);
7780         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7781         return -EBUSY;
7782 }
7783
7784 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7785                                           bool is_phy)
7786 {
7787         int link_ret;
7788
7789         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7790
7791         if (is_phy)
7792                 hclge_phy_link_status_wait(hdev, link_ret);
7793
7794         return hclge_mac_link_status_wait(hdev, link_ret);
7795 }
7796
7797 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7798 {
7799         struct hclge_config_mac_mode_cmd *req;
7800         struct hclge_desc desc;
7801         u32 loop_en;
7802         int ret;
7803
7804         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7805         /* 1 Read out the MAC mode config first */
7806         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7807         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7808         if (ret) {
7809                 dev_err(&hdev->pdev->dev,
7810                         "mac loopback get fail, ret =%d.\n", ret);
7811                 return ret;
7812         }
7813
7814         /* 2 Then setup the loopback flag */
7815         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7816         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7817
7818         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7819
7820         /* 3 Config mac work mode with the loopback flag
7821          * and its original configuration parameters
7822          */
7823         hclge_cmd_reuse_desc(&desc, false);
7824         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7825         if (ret)
7826                 dev_err(&hdev->pdev->dev,
7827                         "mac loopback set fail, ret =%d.\n", ret);
7828         return ret;
7829 }
7830
7831 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7832                                      enum hnae3_loop loop_mode)
7833 {
7834 #define HCLGE_COMMON_LB_RETRY_MS        10
7835 #define HCLGE_COMMON_LB_RETRY_NUM       100
7836
7837         struct hclge_common_lb_cmd *req;
7838         struct hclge_desc desc;
7839         int ret, i = 0;
7840         u8 loop_mode_b;
7841
7842         req = (struct hclge_common_lb_cmd *)desc.data;
7843         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7844
7845         switch (loop_mode) {
7846         case HNAE3_LOOP_SERIAL_SERDES:
7847                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7848                 break;
7849         case HNAE3_LOOP_PARALLEL_SERDES:
7850                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7851                 break;
7852         case HNAE3_LOOP_PHY:
7853                 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7854                 break;
7855         default:
7856                 dev_err(&hdev->pdev->dev,
7857                         "unsupported common loopback mode %d\n", loop_mode);
7858                 return -ENOTSUPP;
7859         }
7860
7861         if (en) {
7862                 req->enable = loop_mode_b;
7863                 req->mask = loop_mode_b;
7864         } else {
7865                 req->mask = loop_mode_b;
7866         }
7867
7868         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7869         if (ret) {
7870                 dev_err(&hdev->pdev->dev,
7871                         "common loopback set fail, ret = %d\n", ret);
7872                 return ret;
7873         }
7874
7875         do {
7876                 msleep(HCLGE_COMMON_LB_RETRY_MS);
7877                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7878                                            true);
7879                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7880                 if (ret) {
7881                         dev_err(&hdev->pdev->dev,
7882                                 "common loopback get fail, ret = %d\n");
7883                         return ret;
7884                 }
7885         } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7886                  !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7887
7888         if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7889                 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7890                 return -EBUSY;
7891         } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7892                 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7893                 return -EIO;
7894         }
7895         return ret;
7896 }
7897
7898 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7899                                      enum hnae3_loop loop_mode)
7900 {
7901         int ret;
7902
7903         ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7904         if (ret)
7905                 return ret;
7906
7907         hclge_cfg_mac_mode(hdev, en);
7908
7909         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7910         if (ret)
7911                 dev_err(&hdev->pdev->dev,
7912                         "serdes loopback config mac mode timeout\n");
7913
7914         return ret;
7915 }
7916
7917 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7918                                      struct phy_device *phydev)
7919 {
7920         int ret;
7921
7922         if (!phydev->suspended) {
7923                 ret = phy_suspend(phydev);
7924                 if (ret)
7925                         return ret;
7926         }
7927
7928         ret = phy_resume(phydev);
7929         if (ret)
7930                 return ret;
7931
7932         return phy_loopback(phydev, true);
7933 }
7934
7935 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7936                                       struct phy_device *phydev)
7937 {
7938         int ret;
7939
7940         ret = phy_loopback(phydev, false);
7941         if (ret)
7942                 return ret;
7943
7944         return phy_suspend(phydev);
7945 }
7946
7947 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7948 {
7949         struct phy_device *phydev = hdev->hw.mac.phydev;
7950         int ret;
7951
7952         if (!phydev) {
7953                 if (hnae3_dev_phy_imp_supported(hdev))
7954                         return hclge_set_common_loopback(hdev, en,
7955                                                          HNAE3_LOOP_PHY);
7956                 return -ENOTSUPP;
7957         }
7958
7959         if (en)
7960                 ret = hclge_enable_phy_loopback(hdev, phydev);
7961         else
7962                 ret = hclge_disable_phy_loopback(hdev, phydev);
7963         if (ret) {
7964                 dev_err(&hdev->pdev->dev,
7965                         "set phy loopback fail, ret = %d\n", ret);
7966                 return ret;
7967         }
7968
7969         hclge_cfg_mac_mode(hdev, en);
7970
7971         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7972         if (ret)
7973                 dev_err(&hdev->pdev->dev,
7974                         "phy loopback config mac mode timeout\n");
7975
7976         return ret;
7977 }
7978
7979 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7980                                      u16 stream_id, bool enable)
7981 {
7982         struct hclge_desc desc;
7983         struct hclge_cfg_com_tqp_queue_cmd *req =
7984                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7985
7986         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7987         req->tqp_id = cpu_to_le16(tqp_id);
7988         req->stream_id = cpu_to_le16(stream_id);
7989         if (enable)
7990                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7991
7992         return hclge_cmd_send(&hdev->hw, &desc, 1);
7993 }
7994
7995 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7996 {
7997         struct hclge_vport *vport = hclge_get_vport(handle);
7998         struct hclge_dev *hdev = vport->back;
7999         int ret;
8000         u16 i;
8001
8002         for (i = 0; i < handle->kinfo.num_tqps; i++) {
8003                 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
8004                 if (ret)
8005                         return ret;
8006         }
8007         return 0;
8008 }
8009
8010 static int hclge_set_loopback(struct hnae3_handle *handle,
8011                               enum hnae3_loop loop_mode, bool en)
8012 {
8013         struct hclge_vport *vport = hclge_get_vport(handle);
8014         struct hclge_dev *hdev = vport->back;
8015         int ret;
8016
8017         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
8018          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
8019          * the same, the packets are looped back in the SSU. If SSU loopback
8020          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8021          */
8022         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8023                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8024
8025                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8026                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
8027                 if (ret)
8028                         return ret;
8029         }
8030
8031         switch (loop_mode) {
8032         case HNAE3_LOOP_APP:
8033                 ret = hclge_set_app_loopback(hdev, en);
8034                 break;
8035         case HNAE3_LOOP_SERIAL_SERDES:
8036         case HNAE3_LOOP_PARALLEL_SERDES:
8037                 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8038                 break;
8039         case HNAE3_LOOP_PHY:
8040                 ret = hclge_set_phy_loopback(hdev, en);
8041                 break;
8042         default:
8043                 ret = -ENOTSUPP;
8044                 dev_err(&hdev->pdev->dev,
8045                         "loop_mode %d is not supported\n", loop_mode);
8046                 break;
8047         }
8048
8049         if (ret)
8050                 return ret;
8051
8052         ret = hclge_tqp_enable(handle, en);
8053         if (ret)
8054                 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8055                         en ? "enable" : "disable", ret);
8056
8057         return ret;
8058 }
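
/* The loopback op above is exposed through hnae3_ae_ops and is driven by the
 * ethtool self-test path in the hns3 client. A minimal sketch of a caller
 * (hypothetical, for illustration only; the real self-test also stops the
 * queues and builds and verifies test packets) could look like:
 *
 *	ret = h->ae_algo->ops->set_loopback(h, HNAE3_LOOP_SERIAL_SERDES, true);
 *	if (!ret)
 *		ret = run_lb_test(h);	// hypothetical helper
 *	h->ae_algo->ops->set_loopback(h, HNAE3_LOOP_SERIAL_SERDES, false);
 */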
8059
8060 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8061 {
8062         int ret;
8063
8064         ret = hclge_set_app_loopback(hdev, false);
8065         if (ret)
8066                 return ret;
8067
8068         ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8069         if (ret)
8070                 return ret;
8071
8072         return hclge_cfg_common_loopback(hdev, false,
8073                                          HNAE3_LOOP_PARALLEL_SERDES);
8074 }
8075
8076 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8077 {
8078         struct hclge_vport *vport = hclge_get_vport(handle);
8079         struct hnae3_knic_private_info *kinfo;
8080         struct hnae3_queue *queue;
8081         struct hclge_tqp *tqp;
8082         int i;
8083
8084         kinfo = &vport->nic.kinfo;
8085         for (i = 0; i < kinfo->num_tqps; i++) {
8086                 queue = handle->kinfo.tqp[i];
8087                 tqp = container_of(queue, struct hclge_tqp, q);
8088                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8089         }
8090 }
8091
8092 static void hclge_flush_link_update(struct hclge_dev *hdev)
8093 {
8094 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
8095
8096         unsigned long last = hdev->serv_processed_cnt;
8097         int i = 0;
8098
8099         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8100                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8101                last == hdev->serv_processed_cnt)
8102                 usleep_range(1, 1);
8103 }
8104
8105 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8106 {
8107         struct hclge_vport *vport = hclge_get_vport(handle);
8108         struct hclge_dev *hdev = vport->back;
8109
8110         if (enable) {
8111                 hclge_task_schedule(hdev, 0);
8112         } else {
8113                 /* Set the DOWN flag here to disable link updating */
8114                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8115
8116                 /* flush memory to make sure DOWN is seen by service task */
8117                 smp_mb__before_atomic();
8118                 hclge_flush_link_update(hdev);
8119         }
8120 }
8121
8122 static int hclge_ae_start(struct hnae3_handle *handle)
8123 {
8124         struct hclge_vport *vport = hclge_get_vport(handle);
8125         struct hclge_dev *hdev = vport->back;
8126
8127         /* mac enable */
8128         hclge_cfg_mac_mode(hdev, true);
8129         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8130         hdev->hw.mac.link = 0;
8131
8132         /* reset tqp stats */
8133         hclge_reset_tqp_stats(handle);
8134
8135         hclge_mac_start_phy(hdev);
8136
8137         return 0;
8138 }
8139
8140 static void hclge_ae_stop(struct hnae3_handle *handle)
8141 {
8142         struct hclge_vport *vport = hclge_get_vport(handle);
8143         struct hclge_dev *hdev = vport->back;
8144
8145         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8146         spin_lock_bh(&hdev->fd_rule_lock);
8147         hclge_clear_arfs_rules(hdev);
8148         spin_unlock_bh(&hdev->fd_rule_lock);
8149
8150         /* If it is not a PF reset or FLR, the firmware will disable the MAC,
8151          * so it only needs to stop the phy here.
8152          */
8153         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8154             hdev->reset_type != HNAE3_FUNC_RESET &&
8155             hdev->reset_type != HNAE3_FLR_RESET) {
8156                 hclge_mac_stop_phy(hdev);
8157                 hclge_update_link_status(hdev);
8158                 return;
8159         }
8160
8161         hclge_reset_tqp(handle);
8162
8163         hclge_config_mac_tnl_int(hdev, false);
8164
8165         /* mac disable */
8166         hclge_cfg_mac_mode(hdev, false);
8167
8168         hclge_mac_stop_phy(hdev);
8169
8170         /* reset tqp stats */
8171         hclge_reset_tqp_stats(handle);
8172         hclge_update_link_status(hdev);
8173 }
8174
8175 int hclge_vport_start(struct hclge_vport *vport)
8176 {
8177         struct hclge_dev *hdev = vport->back;
8178
8179         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8180         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8181         vport->last_active_jiffies = jiffies;
8182
8183         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8184                 if (vport->vport_id) {
8185                         hclge_restore_mac_table_common(vport);
8186                         hclge_restore_vport_vlan_table(vport);
8187                 } else {
8188                         hclge_restore_hw_table(hdev);
8189                 }
8190         }
8191
8192         clear_bit(vport->vport_id, hdev->vport_config_block);
8193
8194         return 0;
8195 }
8196
8197 void hclge_vport_stop(struct hclge_vport *vport)
8198 {
8199         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8200 }
8201
8202 static int hclge_client_start(struct hnae3_handle *handle)
8203 {
8204         struct hclge_vport *vport = hclge_get_vport(handle);
8205
8206         return hclge_vport_start(vport);
8207 }
8208
8209 static void hclge_client_stop(struct hnae3_handle *handle)
8210 {
8211         struct hclge_vport *vport = hclge_get_vport(handle);
8212
8213         hclge_vport_stop(vport);
8214 }
8215
8216 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8217                                          u16 cmdq_resp, u8  resp_code,
8218                                          enum hclge_mac_vlan_tbl_opcode op)
8219 {
8220         struct hclge_dev *hdev = vport->back;
8221
8222         if (cmdq_resp) {
8223                 dev_err(&hdev->pdev->dev,
8224                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8225                         cmdq_resp);
8226                 return -EIO;
8227         }
8228
8229         if (op == HCLGE_MAC_VLAN_ADD) {
8230                 if (!resp_code || resp_code == 1)
8231                         return 0;
8232                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8233                          resp_code == HCLGE_ADD_MC_OVERFLOW)
8234                         return -ENOSPC;
8235
8236                 dev_err(&hdev->pdev->dev,
8237                         "add mac addr failed for undefined, code=%u.\n",
8238                         resp_code);
8239                 return -EIO;
8240         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8241                 if (!resp_code) {
8242                         return 0;
8243                 } else if (resp_code == 1) {
8244                         dev_dbg(&hdev->pdev->dev,
8245                                 "remove mac addr failed for miss.\n");
8246                         return -ENOENT;
8247                 }
8248
8249                 dev_err(&hdev->pdev->dev,
8250                         "remove mac addr failed for undefined, code=%u.\n",
8251                         resp_code);
8252                 return -EIO;
8253         } else if (op == HCLGE_MAC_VLAN_LKUP) {
8254                 if (!resp_code) {
8255                         return 0;
8256                 } else if (resp_code == 1) {
8257                         dev_dbg(&hdev->pdev->dev,
8258                                 "lookup mac addr failed for miss.\n");
8259                         return -ENOENT;
8260                 }
8261
8262                 dev_err(&hdev->pdev->dev,
8263                         "lookup mac addr failed for undefined, code=%u.\n",
8264                         resp_code);
8265                 return -EIO;
8266         }
8267
8268         dev_err(&hdev->pdev->dev,
8269                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8270
8271         return -EINVAL;
8272 }
8273
8274 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8275 {
8276 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8277
8278         unsigned int word_num;
8279         unsigned int bit_num;
8280
8281         if (vfid > 255 || vfid < 0)
8282                 return -EIO;
8283
8284         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8285                 word_num = vfid / 32;
8286                 bit_num  = vfid % 32;
8287                 if (clr)
8288                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8289                 else
8290                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8291         } else {
8292                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8293                 bit_num  = vfid % 32;
8294                 if (clr)
8295                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8296                 else
8297                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8298         }
8299
8300         return 0;
8301 }
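
/* Worked example for the vfid mapping above (values chosen for illustration):
 * vfid 200 lies beyond the first 192 function ids, so it lands in desc[2]
 * with word_num = (200 - 192) / 32 = 0 and bit_num = 200 % 32 = 8, i.e.
 * bit 8 of desc[2].data[0]; vfid 50 stays in desc[1] with
 * word_num = 50 / 32 = 1 and bit_num = 50 % 32 = 18.
 */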
8302
8303 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8304 {
8305 #define HCLGE_DESC_NUMBER 3
8306 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8307         int i, j;
8308
8309         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8310                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8311                         if (desc[i].data[j])
8312                                 return false;
8313
8314         return true;
8315 }
8316
8317 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8318                                    const u8 *addr, bool is_mc)
8319 {
8320         const unsigned char *mac_addr = addr;
8321         u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
8322                        (mac_addr[2] << 16) | (mac_addr[3] << 24);
8323         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8324
8325         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8326         if (is_mc) {
8327                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8328                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8329         }
8330
8331         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8332         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8333 }
8334
8335 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8336                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
8337 {
8338         struct hclge_dev *hdev = vport->back;
8339         struct hclge_desc desc;
8340         u8 resp_code;
8341         u16 retval;
8342         int ret;
8343
8344         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8345
8346         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8347
8348         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8349         if (ret) {
8350                 dev_err(&hdev->pdev->dev,
8351                         "del mac addr failed for cmd_send, ret =%d.\n",
8352                         ret);
8353                 return ret;
8354         }
8355         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8356         retval = le16_to_cpu(desc.retval);
8357
8358         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8359                                              HCLGE_MAC_VLAN_REMOVE);
8360 }
8361
8362 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8363                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
8364                                      struct hclge_desc *desc,
8365                                      bool is_mc)
8366 {
8367         struct hclge_dev *hdev = vport->back;
8368         u8 resp_code;
8369         u16 retval;
8370         int ret;
8371
8372         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8373         if (is_mc) {
8374                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8375                 memcpy(desc[0].data,
8376                        req,
8377                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8378                 hclge_cmd_setup_basic_desc(&desc[1],
8379                                            HCLGE_OPC_MAC_VLAN_ADD,
8380                                            true);
8381                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8382                 hclge_cmd_setup_basic_desc(&desc[2],
8383                                            HCLGE_OPC_MAC_VLAN_ADD,
8384                                            true);
8385                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8386         } else {
8387                 memcpy(desc[0].data,
8388                        req,
8389                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8390                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8391         }
8392         if (ret) {
8393                 dev_err(&hdev->pdev->dev,
8394                         "lookup mac addr failed for cmd_send, ret =%d.\n",
8395                         ret);
8396                 return ret;
8397         }
8398         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8399         retval = le16_to_cpu(desc[0].retval);
8400
8401         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8402                                              HCLGE_MAC_VLAN_LKUP);
8403 }
8404
8405 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8406                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
8407                                   struct hclge_desc *mc_desc)
8408 {
8409         struct hclge_dev *hdev = vport->back;
8410         int cfg_status;
8411         u8 resp_code;
8412         u16 retval;
8413         int ret;
8414
8415         if (!mc_desc) {
8416                 struct hclge_desc desc;
8417
8418                 hclge_cmd_setup_basic_desc(&desc,
8419                                            HCLGE_OPC_MAC_VLAN_ADD,
8420                                            false);
8421                 memcpy(desc.data, req,
8422                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8423                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8424                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8425                 retval = le16_to_cpu(desc.retval);
8426
8427                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8428                                                            resp_code,
8429                                                            HCLGE_MAC_VLAN_ADD);
8430         } else {
8431                 hclge_cmd_reuse_desc(&mc_desc[0], false);
8432                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8433                 hclge_cmd_reuse_desc(&mc_desc[1], false);
8434                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8435                 hclge_cmd_reuse_desc(&mc_desc[2], false);
8436                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8437                 memcpy(mc_desc[0].data, req,
8438                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8439                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8440                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8441                 retval = le16_to_cpu(mc_desc[0].retval);
8442
8443                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8444                                                            resp_code,
8445                                                            HCLGE_MAC_VLAN_ADD);
8446         }
8447
8448         if (ret) {
8449                 dev_err(&hdev->pdev->dev,
8450                         "add mac addr failed for cmd_send, ret =%d.\n",
8451                         ret);
8452                 return ret;
8453         }
8454
8455         return cfg_status;
8456 }
8457
8458 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8459                                u16 *allocated_size)
8460 {
8461         struct hclge_umv_spc_alc_cmd *req;
8462         struct hclge_desc desc;
8463         int ret;
8464
8465         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8466         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8467
8468         req->space_size = cpu_to_le32(space_size);
8469
8470         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8471         if (ret) {
8472                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8473                         ret);
8474                 return ret;
8475         }
8476
8477         *allocated_size = le32_to_cpu(desc.data[1]);
8478
8479         return 0;
8480 }
8481
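/* Request the umv space from firmware and split it into
 * (num_alloc_vport + 1) equal shares: each vport gets one share as its
 * private quota, and the extra share plus the division remainder form the
 * pool shared by all vports.
 */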
8482 static int hclge_init_umv_space(struct hclge_dev *hdev)
8483 {
8484         u16 allocated_size = 0;
8485         int ret;
8486
8487         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8488         if (ret)
8489                 return ret;
8490
8491         if (allocated_size < hdev->wanted_umv_size)
8492                 dev_warn(&hdev->pdev->dev,
8493                          "failed to alloc umv space, want %u, get %u\n",
8494                          hdev->wanted_umv_size, allocated_size);
8495
8496         hdev->max_umv_size = allocated_size;
8497         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8498         hdev->share_umv_size = hdev->priv_umv_size +
8499                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8500
8501         return 0;
8502 }
8503
8504 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8505 {
8506         struct hclge_vport *vport;
8507         int i;
8508
8509         for (i = 0; i < hdev->num_alloc_vport; i++) {
8510                 vport = &hdev->vport[i];
8511                 vport->used_umv_num = 0;
8512         }
8513
8514         mutex_lock(&hdev->vport_lock);
8515         hdev->share_umv_size = hdev->priv_umv_size +
8516                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8517         mutex_unlock(&hdev->vport_lock);
8518 }
8519
8520 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8521 {
8522         struct hclge_dev *hdev = vport->back;
8523         bool is_full;
8524
8525         if (need_lock)
8526                 mutex_lock(&hdev->vport_lock);
8527
8528         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8529                    hdev->share_umv_size == 0);
8530
8531         if (need_lock)
8532                 mutex_unlock(&hdev->vport_lock);
8533
8534         return is_full;
8535 }
8536
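/* Account for one unicast mac entry being added to or freed from the vport:
 * the private quota is consumed first and the shared pool only once the
 * quota is exhausted; freeing returns space in the reverse order.
 */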
8537 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8538 {
8539         struct hclge_dev *hdev = vport->back;
8540
8541         if (is_free) {
8542                 if (vport->used_umv_num > hdev->priv_umv_size)
8543                         hdev->share_umv_size++;
8544
8545                 if (vport->used_umv_num > 0)
8546                         vport->used_umv_num--;
8547         } else {
8548                 if (vport->used_umv_num >= hdev->priv_umv_size &&
8549                     hdev->share_umv_size > 0)
8550                         hdev->share_umv_size--;
8551                 vport->used_umv_num++;
8552         }
8553 }
8554
8555 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8556                                                   const u8 *mac_addr)
8557 {
8558         struct hclge_mac_node *mac_node, *tmp;
8559
8560         list_for_each_entry_safe(mac_node, tmp, list, node)
8561                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8562                         return mac_node;
8563
8564         return NULL;
8565 }
8566
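/* Merge a new request into an existing mac node: TO_ADD cancels a pending
 * TO_DEL (the address is still in hardware, so it becomes ACTIVE); TO_DEL
 * drops a node that was never programmed (TO_ADD) and otherwise marks it
 * for deletion; ACTIVE promotes a TO_ADD node once it has been written to
 * hardware.
 */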
8567 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8568                                   enum HCLGE_MAC_NODE_STATE state)
8569 {
8570         switch (state) {
8571         /* from set_rx_mode or tmp_add_list */
8572         case HCLGE_MAC_TO_ADD:
8573                 if (mac_node->state == HCLGE_MAC_TO_DEL)
8574                         mac_node->state = HCLGE_MAC_ACTIVE;
8575                 break;
8576         /* only from set_rx_mode */
8577         case HCLGE_MAC_TO_DEL:
8578                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8579                         list_del(&mac_node->node);
8580                         kfree(mac_node);
8581                 } else {
8582                         mac_node->state = HCLGE_MAC_TO_DEL;
8583                 }
8584                 break;
8585         /* only from tmp_add_list, the mac_node->state won't be
8586          * ACTIVE.
8587          */
8588         case HCLGE_MAC_ACTIVE:
8589                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8590                         mac_node->state = HCLGE_MAC_ACTIVE;
8591
8592                 break;
8593         }
8594 }
8595
8596 int hclge_update_mac_list(struct hclge_vport *vport,
8597                           enum HCLGE_MAC_NODE_STATE state,
8598                           enum HCLGE_MAC_ADDR_TYPE mac_type,
8599                           const unsigned char *addr)
8600 {
8601         struct hclge_dev *hdev = vport->back;
8602         struct hclge_mac_node *mac_node;
8603         struct list_head *list;
8604
8605         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8606                 &vport->uc_mac_list : &vport->mc_mac_list;
8607
8608         spin_lock_bh(&vport->mac_list_lock);
8609
8610         /* If the mac addr is already in the mac list, there is no need to
8611          * add a new node; just update the existing node's state, which may
8612          * convert it to another state, remove it, or leave it unchanged.
8613          */
8614         mac_node = hclge_find_mac_node(list, addr);
8615         if (mac_node) {
8616                 hclge_update_mac_node(mac_node, state);
8617                 spin_unlock_bh(&vport->mac_list_lock);
8618                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8619                 return 0;
8620         }
8621
8622         /* if this address was never added, there is nothing to delete */
8623         if (state == HCLGE_MAC_TO_DEL) {
8624                 spin_unlock_bh(&vport->mac_list_lock);
8625                 dev_err(&hdev->pdev->dev,
8626                         "failed to delete address %pM from mac list\n",
8627                         addr);
8628                 return -ENOENT;
8629         }
8630
8631         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8632         if (!mac_node) {
8633                 spin_unlock_bh(&vport->mac_list_lock);
8634                 return -ENOMEM;
8635         }
8636
8637         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8638
8639         mac_node->state = state;
8640         ether_addr_copy(mac_node->mac_addr, addr);
8641         list_add_tail(&mac_node->node, list);
8642
8643         spin_unlock_bh(&vport->mac_list_lock);
8644
8645         return 0;
8646 }
8647
8648 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8649                              const unsigned char *addr)
8650 {
8651         struct hclge_vport *vport = hclge_get_vport(handle);
8652
8653         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8654                                      addr);
8655 }
8656
8657 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8658                              const unsigned char *addr)
8659 {
8660         struct hclge_dev *hdev = vport->back;
8661         struct hclge_mac_vlan_tbl_entry_cmd req;
8662         struct hclge_desc desc;
8663         u16 egress_port = 0;
8664         int ret;
8665
8666         /* mac addr check */
8667         if (is_zero_ether_addr(addr) ||
8668             is_broadcast_ether_addr(addr) ||
8669             is_multicast_ether_addr(addr)) {
8670                 dev_err(&hdev->pdev->dev,
8671                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8672                          addr, is_zero_ether_addr(addr),
8673                          is_broadcast_ether_addr(addr),
8674                          is_multicast_ether_addr(addr));
8675                 return -EINVAL;
8676         }
8677
8678         memset(&req, 0, sizeof(req));
8679
8680         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8681                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8682
8683         req.egress_port = cpu_to_le16(egress_port);
8684
8685         hclge_prepare_mac_addr(&req, addr, false);
8686
8687         /* Look up the mac address in the mac_vlan table, and add
8688          * it if the entry does not exist. Duplicate unicast entries
8689          * are not allowed in the mac vlan table.
8690          */
8691         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8692         if (ret == -ENOENT) {
8693                 mutex_lock(&hdev->vport_lock);
8694                 if (!hclge_is_umv_space_full(vport, false)) {
8695                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8696                         if (!ret)
8697                                 hclge_update_umv_space(vport, false);
8698                         mutex_unlock(&hdev->vport_lock);
8699                         return ret;
8700                 }
8701                 mutex_unlock(&hdev->vport_lock);
8702
8703                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8704                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8705                                 hdev->priv_umv_size);
8706
8707                 return -ENOSPC;
8708         }
8709
8710         /* check if we just hit the duplicate */
8711         if (!ret)
8712                 return -EEXIST;
8713
8714         return ret;
8715 }
8716
8717 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8718                             const unsigned char *addr)
8719 {
8720         struct hclge_vport *vport = hclge_get_vport(handle);
8721
8722         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8723                                      addr);
8724 }
8725
8726 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8727                             const unsigned char *addr)
8728 {
8729         struct hclge_dev *hdev = vport->back;
8730         struct hclge_mac_vlan_tbl_entry_cmd req;
8731         int ret;
8732
8733         /* mac addr check */
8734         if (is_zero_ether_addr(addr) ||
8735             is_broadcast_ether_addr(addr) ||
8736             is_multicast_ether_addr(addr)) {
8737                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8738                         addr);
8739                 return -EINVAL;
8740         }
8741
8742         memset(&req, 0, sizeof(req));
8743         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8744         hclge_prepare_mac_addr(&req, addr, false);
8745         ret = hclge_remove_mac_vlan_tbl(vport, &req);
8746         if (!ret) {
8747                 mutex_lock(&hdev->vport_lock);
8748                 hclge_update_umv_space(vport, true);
8749                 mutex_unlock(&hdev->vport_lock);
8750         } else if (ret == -ENOENT) {
8751                 ret = 0;
8752         }
8753
8754         return ret;
8755 }
8756
8757 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8758                              const unsigned char *addr)
8759 {
8760         struct hclge_vport *vport = hclge_get_vport(handle);
8761
8762         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8763                                      addr);
8764 }
8765
8766 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8767                              const unsigned char *addr)
8768 {
8769         struct hclge_dev *hdev = vport->back;
8770         struct hclge_mac_vlan_tbl_entry_cmd req;
8771         struct hclge_desc desc[3];
8772         int status;
8773
8774         /* mac addr check */
8775         if (!is_multicast_ether_addr(addr)) {
8776                 dev_err(&hdev->pdev->dev,
8777                         "Add mc mac err! invalid mac:%pM.\n",
8778                          addr);
8779                 return -EINVAL;
8780         }
8781         memset(&req, 0, sizeof(req));
8782         hclge_prepare_mac_addr(&req, addr, true);
8783         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8784         if (status) {
8785                 /* This mac addr does not exist; add a new entry for it */
8786                 memset(desc[0].data, 0, sizeof(desc[0].data));
8787                 memset(desc[1].data, 0, sizeof(desc[0].data));
8788                 memset(desc[2].data, 0, sizeof(desc[0].data));
8789         }
8790         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8791         if (status)
8792                 return status;
8793         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8794         /* if the table already overflowed, do not print the error each time */
8795         if (status == -ENOSPC &&
8796             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8797                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8798
8799         return status;
8800 }
8801
8802 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8803                             const unsigned char *addr)
8804 {
8805         struct hclge_vport *vport = hclge_get_vport(handle);
8806
8807         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8808                                      addr);
8809 }
8810
8811 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8812                             const unsigned char *addr)
8813 {
8814         struct hclge_dev *hdev = vport->back;
8815         struct hclge_mac_vlan_tbl_entry_cmd req;
8816         int status;
8817         struct hclge_desc desc[3];
8818
8819         /* mac addr check */
8820         if (!is_multicast_ether_addr(addr)) {
8821                 dev_dbg(&hdev->pdev->dev,
8822                         "Remove mc mac err! invalid mac:%pM.\n",
8823                          addr);
8824                 return -EINVAL;
8825         }
8826
8827         memset(&req, 0, sizeof(req));
8828         hclge_prepare_mac_addr(&req, addr, true);
8829         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8830         if (!status) {
8831                 /* This mac addr exists; remove this handle's VFID from it */
8832                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8833                 if (status)
8834                         return status;
8835
8836                 if (hclge_is_all_function_id_zero(desc))
8837                         /* All vfid bits are zero, so delete this entry */
8838                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8839                 else
8840                         /* Not all vfid bits are zero, so just update the entry */
8841                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8842         } else if (status == -ENOENT) {
8843                 status = 0;
8844         }
8845
8846         return status;
8847 }
8848
8849 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8850                                       struct list_head *list,
8851                                       int (*sync)(struct hclge_vport *,
8852                                                   const unsigned char *))
8853 {
8854         struct hclge_mac_node *mac_node, *tmp;
8855         int ret;
8856
8857         list_for_each_entry_safe(mac_node, tmp, list, node) {
8858                 ret = sync(vport, mac_node->mac_addr);
8859                 if (!ret) {
8860                         mac_node->state = HCLGE_MAC_ACTIVE;
8861                 } else {
8862                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8863                                 &vport->state);
8864
8865                         /* If one unicast mac address already exists in hardware,
8866                          * keep going: the remaining unicast addresses may be new
8867                          * ones that can still be added.
8868                          */
8869                         if (ret != -EEXIST)
8870                                 break;
8871                 }
8872         }
8873 }
8874
8875 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8876                                         struct list_head *list,
8877                                         int (*unsync)(struct hclge_vport *,
8878                                                       const unsigned char *))
8879 {
8880         struct hclge_mac_node *mac_node, *tmp;
8881         int ret;
8882
8883         list_for_each_entry_safe(mac_node, tmp, list, node) {
8884                 ret = unsync(vport, mac_node->mac_addr);
8885                 if (!ret || ret == -ENOENT) {
8886                         list_del(&mac_node->node);
8887                         kfree(mac_node);
8888                 } else {
8889                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8890                                 &vport->state);
8891                         break;
8892                 }
8893         }
8894 }
8895
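/* Merge the temporary add list back into the vport mac list after the
 * hardware sync, and report whether every address was actually added
 * (used to update the overflow promiscuous flags).
 */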
8896 static bool hclge_sync_from_add_list(struct list_head *add_list,
8897                                      struct list_head *mac_list)
8898 {
8899         struct hclge_mac_node *mac_node, *tmp, *new_node;
8900         bool all_added = true;
8901
8902         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8903                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8904                         all_added = false;
8905
8906                 /* If the mac address from tmp_add_list is not in the
8907                  * uc/mc_mac_list, it means a TO_DEL request was received
8908                  * during the time window of adding the address to the mac
8909                  * table. If the mac_node state is ACTIVE, change it to TO_DEL
8910                  * so it is removed on the next sync; otherwise the state must
8911                  * be TO_ADD, meaning the address was never written to the mac
8912                  * table, so just drop the node.
8913                  */
8914                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8915                 if (new_node) {
8916                         hclge_update_mac_node(new_node, mac_node->state);
8917                         list_del(&mac_node->node);
8918                         kfree(mac_node);
8919                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8920                         mac_node->state = HCLGE_MAC_TO_DEL;
8921                         list_move_tail(&mac_node->node, mac_list);
8922                 } else {
8923                         list_del(&mac_node->node);
8924                         kfree(mac_node);
8925                 }
8926         }
8927
8928         return all_added;
8929 }
8930
8931 static void hclge_sync_from_del_list(struct list_head *del_list,
8932                                      struct list_head *mac_list)
8933 {
8934         struct hclge_mac_node *mac_node, *tmp, *new_node;
8935
8936         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8937                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8938                 if (new_node) {
8939                         /* If the mac addr exists in the mac list, a new TO_ADD
8940                          * request was received during the time window of
8941                          * configuring the mac address. The mac node state is
8942                          * TO_ADD and the address is still present in hardware
8943                          * (because the delete failed), so we just need to change
8944                          * the mac node state to ACTIVE.
8945                          */
8946                         new_node->state = HCLGE_MAC_ACTIVE;
8947                         list_del(&mac_node->node);
8948                         kfree(mac_node);
8949                 } else {
8950                         list_move_tail(&mac_node->node, mac_list);
8951                 }
8952         }
8953 }
8954
8955 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8956                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
8957                                         bool is_all_added)
8958 {
8959         if (mac_type == HCLGE_MAC_ADDR_UC) {
8960                 if (is_all_added)
8961                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8962                 else
8963                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8964         } else {
8965                 if (is_all_added)
8966                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8967                 else
8968                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8969         }
8970 }
8971
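/* Sync one vport's unicast or multicast mac list to hardware: snapshot the
 * pending TO_DEL/TO_ADD entries under the list lock, delete first to free
 * table space and then add outside the lock, and finally merge the results
 * back into the list and update the overflow flags.
 */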
8972 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8973                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
8974 {
8975         struct hclge_mac_node *mac_node, *tmp, *new_node;
8976         struct list_head tmp_add_list, tmp_del_list;
8977         struct list_head *list;
8978         bool all_added;
8979
8980         INIT_LIST_HEAD(&tmp_add_list);
8981         INIT_LIST_HEAD(&tmp_del_list);
8982
8983         /* Move the pending mac addrs onto tmp_add_list and tmp_del_list so
8984          * they can be added/deleted outside the spin lock.
8985          */
8986         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8987                 &vport->uc_mac_list : &vport->mc_mac_list;
8988
8989         spin_lock_bh(&vport->mac_list_lock);
8990
8991         list_for_each_entry_safe(mac_node, tmp, list, node) {
8992                 switch (mac_node->state) {
8993                 case HCLGE_MAC_TO_DEL:
8994                         list_move_tail(&mac_node->node, &tmp_del_list);
8995                         break;
8996                 case HCLGE_MAC_TO_ADD:
8997                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8998                         if (!new_node)
8999                                 goto stop_traverse;
9000                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
9001                         new_node->state = mac_node->state;
9002                         list_add_tail(&new_node->node, &tmp_add_list);
9003                         break;
9004                 default:
9005                         break;
9006                 }
9007         }
9008
9009 stop_traverse:
9010         spin_unlock_bh(&vport->mac_list_lock);
9011
9012         /* delete first, in order to get max mac table space for adding */
9013         if (mac_type == HCLGE_MAC_ADDR_UC) {
9014                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9015                                             hclge_rm_uc_addr_common);
9016                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9017                                           hclge_add_uc_addr_common);
9018         } else {
9019                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9020                                             hclge_rm_mc_addr_common);
9021                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9022                                           hclge_add_mc_addr_common);
9023         }
9024
9025         /* If adding/deleting some mac addresses failed, move them back to
9026          * the mac_list and retry on the next sync.
9027          */
9028         spin_lock_bh(&vport->mac_list_lock);
9029
9030         hclge_sync_from_del_list(&tmp_del_list, list);
9031         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9032
9033         spin_unlock_bh(&vport->mac_list_lock);
9034
9035         hclge_update_overflow_flags(vport, mac_type, all_added);
9036 }
9037
9038 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9039 {
9040         struct hclge_dev *hdev = vport->back;
9041
9042         if (test_bit(vport->vport_id, hdev->vport_config_block))
9043                 return false;
9044
9045         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9046                 return true;
9047
9048         return false;
9049 }
9050
9051 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9052 {
9053         int i;
9054
9055         for (i = 0; i < hdev->num_alloc_vport; i++) {
9056                 struct hclge_vport *vport = &hdev->vport[i];
9057
9058                 if (!hclge_need_sync_mac_table(vport))
9059                         continue;
9060
9061                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9062                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9063         }
9064 }
9065
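/* Collect the entries that must be removed from hardware (TO_DEL and
 * ACTIVE) onto @tmp_del_list; when @is_del_list is true, pending TO_ADD
 * entries are simply dropped since they were never programmed.
 */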
9066 static void hclge_build_del_list(struct list_head *list,
9067                                  bool is_del_list,
9068                                  struct list_head *tmp_del_list)
9069 {
9070         struct hclge_mac_node *mac_cfg, *tmp;
9071
9072         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9073                 switch (mac_cfg->state) {
9074                 case HCLGE_MAC_TO_DEL:
9075                 case HCLGE_MAC_ACTIVE:
9076                         list_move_tail(&mac_cfg->node, tmp_del_list);
9077                         break;
9078                 case HCLGE_MAC_TO_ADD:
9079                         if (is_del_list) {
9080                                 list_del(&mac_cfg->node);
9081                                 kfree(mac_cfg);
9082                         }
9083                         break;
9084                 }
9085         }
9086 }
9087
9088 static void hclge_unsync_del_list(struct hclge_vport *vport,
9089                                   int (*unsync)(struct hclge_vport *vport,
9090                                                 const unsigned char *addr),
9091                                   bool is_del_list,
9092                                   struct list_head *tmp_del_list)
9093 {
9094         struct hclge_mac_node *mac_cfg, *tmp;
9095         int ret;
9096
9097         list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9098                 ret = unsync(vport, mac_cfg->mac_addr);
9099                 if (!ret || ret == -ENOENT) {
9100                         /* Clear all mac addrs from hardware, but keep them in
9101                          * the mac list so they can be restored after the VF
9102                          * reset finishes.
9103                          */
9104                         if (!is_del_list &&
9105                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
9106                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
9107                         } else {
9108                                 list_del(&mac_cfg->node);
9109                                 kfree(mac_cfg);
9110                         }
9111                 } else if (is_del_list) {
9112                         mac_cfg->state = HCLGE_MAC_TO_DEL;
9113                 }
9114         }
9115 }
9116
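/* Remove all of a vport's unicast or multicast addresses from hardware.
 * When @is_del_list is false (e.g. across a VF reset), entries that were
 * active in hardware stay in the software list marked TO_ADD so they can
 * be restored later, and the vport_config_block bit keeps the periodic
 * sync from touching the table in the meantime.
 */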
9117 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9118                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
9119 {
9120         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9121         struct hclge_dev *hdev = vport->back;
9122         struct list_head tmp_del_list, *list;
9123
9124         if (mac_type == HCLGE_MAC_ADDR_UC) {
9125                 list = &vport->uc_mac_list;
9126                 unsync = hclge_rm_uc_addr_common;
9127         } else {
9128                 list = &vport->mc_mac_list;
9129                 unsync = hclge_rm_mc_addr_common;
9130         }
9131
9132         INIT_LIST_HEAD(&tmp_del_list);
9133
9134         if (!is_del_list)
9135                 set_bit(vport->vport_id, hdev->vport_config_block);
9136
9137         spin_lock_bh(&vport->mac_list_lock);
9138
9139         hclge_build_del_list(list, is_del_list, &tmp_del_list);
9140
9141         spin_unlock_bh(&vport->mac_list_lock);
9142
9143         hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9144
9145         spin_lock_bh(&vport->mac_list_lock);
9146
9147         hclge_sync_from_del_list(&tmp_del_list, list);
9148
9149         spin_unlock_bh(&vport->mac_list_lock);
9150 }
9151
9152 /* remove all mac addresses at uninitialization */
9153 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9154                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
9155 {
9156         struct hclge_mac_node *mac_node, *tmp;
9157         struct hclge_dev *hdev = vport->back;
9158         struct list_head tmp_del_list, *list;
9159
9160         INIT_LIST_HEAD(&tmp_del_list);
9161
9162         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9163                 &vport->uc_mac_list : &vport->mc_mac_list;
9164
9165         spin_lock_bh(&vport->mac_list_lock);
9166
9167         list_for_each_entry_safe(mac_node, tmp, list, node) {
9168                 switch (mac_node->state) {
9169                 case HCLGE_MAC_TO_DEL:
9170                 case HCLGE_MAC_ACTIVE:
9171                         list_move_tail(&mac_node->node, &tmp_del_list);
9172                         break;
9173                 case HCLGE_MAC_TO_ADD:
9174                         list_del(&mac_node->node);
9175                         kfree(mac_node);
9176                         break;
9177                 }
9178         }
9179
9180         spin_unlock_bh(&vport->mac_list_lock);
9181
9182         if (mac_type == HCLGE_MAC_ADDR_UC)
9183                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9184                                             hclge_rm_uc_addr_common);
9185         else
9186                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9187                                             hclge_rm_mc_addr_common);
9188
9189         if (!list_empty(&tmp_del_list))
9190                 dev_warn(&hdev->pdev->dev,
9191                          "failed to fully uninit %s mac list for vport %u\n",
9192                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9193                          vport->vport_id);
9194
9195         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9196                 list_del(&mac_node->node);
9197                 kfree(mac_node);
9198         }
9199 }
9200
9201 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9202 {
9203         struct hclge_vport *vport;
9204         int i;
9205
9206         for (i = 0; i < hdev->num_alloc_vport; i++) {
9207                 vport = &hdev->vport[i];
9208                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9209                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9210         }
9211 }
9212
9213 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9214                                               u16 cmdq_resp, u8 resp_code)
9215 {
9216 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
9217 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
9218 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
9219 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
9220
9221         int return_status;
9222
9223         if (cmdq_resp) {
9224                 dev_err(&hdev->pdev->dev,
9225                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9226                         cmdq_resp);
9227                 return -EIO;
9228         }
9229
9230         switch (resp_code) {
9231         case HCLGE_ETHERTYPE_SUCCESS_ADD:
9232         case HCLGE_ETHERTYPE_ALREADY_ADD:
9233                 return_status = 0;
9234                 break;
9235         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9236                 dev_err(&hdev->pdev->dev,
9237                         "add mac ethertype failed for manager table overflow.\n");
9238                 return_status = -EIO;
9239                 break;
9240         case HCLGE_ETHERTYPE_KEY_CONFLICT:
9241                 dev_err(&hdev->pdev->dev,
9242                         "add mac ethertype failed for key conflict.\n");
9243                 return_status = -EIO;
9244                 break;
9245         default:
9246                 dev_err(&hdev->pdev->dev,
9247                         "add mac ethertype failed for undefined, code=%u.\n",
9248                         resp_code);
9249                 return_status = -EIO;
9250         }
9251
9252         return return_status;
9253 }
9254
9255 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9256                                      u8 *mac_addr)
9257 {
9258         struct hclge_mac_vlan_tbl_entry_cmd req;
9259         struct hclge_dev *hdev = vport->back;
9260         struct hclge_desc desc;
9261         u16 egress_port = 0;
9262         int i;
9263
9264         if (is_zero_ether_addr(mac_addr))
9265                 return false;
9266
9267         memset(&req, 0, sizeof(req));
9268         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9269                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9270         req.egress_port = cpu_to_le16(egress_port);
9271         hclge_prepare_mac_addr(&req, mac_addr, false);
9272
9273         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9274                 return true;
9275
9276         vf_idx += HCLGE_VF_VPORT_START_NUM;
9277         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9278                 if (i != vf_idx &&
9279                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9280                         return true;
9281
9282         return false;
9283 }
9284
9285 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9286                             u8 *mac_addr)
9287 {
9288         struct hclge_vport *vport = hclge_get_vport(handle);
9289         struct hclge_dev *hdev = vport->back;
9290
9291         vport = hclge_get_vf_vport(hdev, vf);
9292         if (!vport)
9293                 return -EINVAL;
9294
9295         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9296                 dev_info(&hdev->pdev->dev,
9297                          "Specified MAC(=%pM) is the same as before, no change committed!\n",
9298                          mac_addr);
9299                 return 0;
9300         }
9301
9302         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9303                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9304                         mac_addr);
9305                 return -EEXIST;
9306         }
9307
9308         ether_addr_copy(vport->vf_info.mac, mac_addr);
9309
9310         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9311                 dev_info(&hdev->pdev->dev,
9312                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9313                          vf, mac_addr);
9314                 return hclge_inform_reset_assert_to_vf(vport);
9315         }
9316
9317         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9318                  vf, mac_addr);
9319         return 0;
9320 }
9321
9322 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9323                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
9324 {
9325         struct hclge_desc desc;
9326         u8 resp_code;
9327         u16 retval;
9328         int ret;
9329
9330         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9331         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9332
9333         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9334         if (ret) {
9335                 dev_err(&hdev->pdev->dev,
9336                         "add mac ethertype failed for cmd_send, ret =%d.\n",
9337                         ret);
9338                 return ret;
9339         }
9340
9341         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9342         retval = le16_to_cpu(desc.retval);
9343
9344         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9345 }
9346
9347 static int init_mgr_tbl(struct hclge_dev *hdev)
9348 {
9349         int ret;
9350         int i;
9351
9352         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9353                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9354                 if (ret) {
9355                         dev_err(&hdev->pdev->dev,
9356                                 "add mac ethertype failed, ret =%d.\n",
9357                                 ret);
9358                         return ret;
9359                 }
9360         }
9361
9362         return 0;
9363 }
9364
9365 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9366 {
9367         struct hclge_vport *vport = hclge_get_vport(handle);
9368         struct hclge_dev *hdev = vport->back;
9369
9370         ether_addr_copy(p, hdev->hw.mac.mac_addr);
9371 }
9372
9373 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9374                                        const u8 *old_addr, const u8 *new_addr)
9375 {
9376         struct list_head *list = &vport->uc_mac_list;
9377         struct hclge_mac_node *old_node, *new_node;
9378
9379         new_node = hclge_find_mac_node(list, new_addr);
9380         if (!new_node) {
9381                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9382                 if (!new_node)
9383                         return -ENOMEM;
9384
9385                 new_node->state = HCLGE_MAC_TO_ADD;
9386                 ether_addr_copy(new_node->mac_addr, new_addr);
9387                 list_add(&new_node->node, list);
9388         } else {
9389                 if (new_node->state == HCLGE_MAC_TO_DEL)
9390                         new_node->state = HCLGE_MAC_ACTIVE;
9391
9392                 /* Make sure the new addr is at the head of the list; otherwise
9393                  * the dev addr may not be re-added to the mac table, due to the
9394                  * umv space limitation, after a global/IMP reset which clears
9395                  * the mac table in hardware.
9396                  */
9397                 list_move(&new_node->node, list);
9398         }
9399
9400         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9401                 old_node = hclge_find_mac_node(list, old_addr);
9402                 if (old_node) {
9403                         if (old_node->state == HCLGE_MAC_TO_ADD) {
9404                                 list_del(&old_node->node);
9405                                 kfree(old_node);
9406                         } else {
9407                                 old_node->state = HCLGE_MAC_TO_DEL;
9408                         }
9409                 }
9410         }
9411
9412         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9413
9414         return 0;
9415 }
9416
9417 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9418                               bool is_first)
9419 {
9420         const unsigned char *new_addr = (const unsigned char *)p;
9421         struct hclge_vport *vport = hclge_get_vport(handle);
9422         struct hclge_dev *hdev = vport->back;
9423         unsigned char *old_addr = NULL;
9424         int ret;
9425
9426         /* mac addr check */
9427         if (is_zero_ether_addr(new_addr) ||
9428             is_broadcast_ether_addr(new_addr) ||
9429             is_multicast_ether_addr(new_addr)) {
9430                 dev_err(&hdev->pdev->dev,
9431                         "change uc mac err! invalid mac: %pM.\n",
9432                          new_addr);
9433                 return -EINVAL;
9434         }
9435
9436         ret = hclge_pause_addr_cfg(hdev, new_addr);
9437         if (ret) {
9438                 dev_err(&hdev->pdev->dev,
9439                         "failed to configure mac pause address, ret = %d\n",
9440                         ret);
9441                 return ret;
9442         }
9443
9444         if (!is_first)
9445                 old_addr = hdev->hw.mac.mac_addr;
9446
9447         spin_lock_bh(&vport->mac_list_lock);
9448         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9449         if (ret) {
9450                 dev_err(&hdev->pdev->dev,
9451                         "failed to change the mac addr:%pM, ret = %d\n",
9452                         new_addr, ret);
9453                 spin_unlock_bh(&vport->mac_list_lock);
9454
9455                 if (!is_first)
9456                         hclge_pause_addr_cfg(hdev, old_addr);
9457
9458                 return ret;
9459         }
9460         /* We must update the dev addr under the spin lock to prevent it from
9461          * being removed by the set_rx_mode path.
9462          */
9463         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9464         spin_unlock_bh(&vport->mac_list_lock);
9465
9466         hclge_task_schedule(hdev, 0);
9467
9468         return 0;
9469 }
9470
9471 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9472 {
9473         struct mii_ioctl_data *data = if_mii(ifr);
9474
9475         if (!hnae3_dev_phy_imp_supported(hdev))
9476                 return -EOPNOTSUPP;
9477
9478         switch (cmd) {
9479         case SIOCGMIIPHY:
9480                 data->phy_id = hdev->hw.mac.phy_addr;
9481                 /* this command reads phy id and register at the same time */
9482                 fallthrough;
9483         case SIOCGMIIREG:
9484                 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9485                 return 0;
9486
9487         case SIOCSMIIREG:
9488                 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9489         default:
9490                 return -EOPNOTSUPP;
9491         }
9492 }
9493
9494 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9495                           int cmd)
9496 {
9497         struct hclge_vport *vport = hclge_get_vport(handle);
9498         struct hclge_dev *hdev = vport->back;
9499
9500         switch (cmd) {
9501         case SIOCGHWTSTAMP:
9502                 return hclge_ptp_get_cfg(hdev, ifr);
9503         case SIOCSHWTSTAMP:
9504                 return hclge_ptp_set_cfg(hdev, ifr);
9505         default:
9506                 if (!hdev->hw.mac.phydev)
9507                         return hclge_mii_ioctl(hdev, ifr, cmd);
9508         }
9509
9510         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9511 }
9512
9513 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9514                                              bool bypass_en)
9515 {
9516         struct hclge_port_vlan_filter_bypass_cmd *req;
9517         struct hclge_desc desc;
9518         int ret;
9519
9520         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9521         req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9522         req->vf_id = vf_id;
9523         hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9524                       bypass_en ? 1 : 0);
9525
9526         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9527         if (ret)
9528                 dev_err(&hdev->pdev->dev,
9529                         "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9530                         vf_id, ret);
9531
9532         return ret;
9533 }
9534
9535 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9536                                       u8 fe_type, bool filter_en, u8 vf_id)
9537 {
9538         struct hclge_vlan_filter_ctrl_cmd *req;
9539         struct hclge_desc desc;
9540         int ret;
9541
9542         /* read current vlan filter parameter */
9543         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9544         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9545         req->vlan_type = vlan_type;
9546         req->vf_id = vf_id;
9547
9548         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9549         if (ret) {
9550                 dev_err(&hdev->pdev->dev,
9551                         "failed to get vlan filter config, ret = %d.\n", ret);
9552                 return ret;
9553         }
9554
9555         /* modify and write new config parameter */
9556         hclge_cmd_reuse_desc(&desc, false);
9557         req->vlan_fe = filter_en ?
9558                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9559
9560         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9561         if (ret)
9562                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9563                         ret);
9564
9565         return ret;
9566 }
9567
9568 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9569 {
9570         struct hclge_dev *hdev = vport->back;
9571         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9572         int ret;
9573
9574         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9575                 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9576                                                   HCLGE_FILTER_FE_EGRESS_V1_B,
9577                                                   enable, vport->vport_id);
9578
9579         ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9580                                          HCLGE_FILTER_FE_EGRESS, enable,
9581                                          vport->vport_id);
9582         if (ret)
9583                 return ret;
9584
9585         if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9586                 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9587                                                         !enable);
9588         } else if (!vport->vport_id) {
9589                 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9590                         enable = false;
9591
9592                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9593                                                  HCLGE_FILTER_FE_INGRESS,
9594                                                  enable, 0);
9595         }
9596
9597         return ret;
9598 }
9599
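/* Decide whether the hardware vlan filter should be enabled for this vport:
 * a VF needs it while a port based vlan is configured, but not when it is
 * trusted and has requested unicast promiscuous mode; the PF skips it in
 * user promiscuous mode. Otherwise honour the requested state, and on
 * devices that support modifying the vlan filter only enable it when a
 * non-zero vlan is actually in use.
 */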
9600 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9601 {
9602         struct hnae3_handle *handle = &vport->nic;
9603         struct hclge_vport_vlan_cfg *vlan, *tmp;
9604         struct hclge_dev *hdev = vport->back;
9605
9606         if (vport->vport_id) {
9607                 if (vport->port_base_vlan_cfg.state !=
9608                         HNAE3_PORT_BASE_VLAN_DISABLE)
9609                         return true;
9610
9611                 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9612                         return false;
9613         } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9614                 return false;
9615         }
9616
9617         if (!vport->req_vlan_fltr_en)
9618                 return false;
9619
9620         /* for compatibility with older devices, always enable the vlan filter */
9621         if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9622                 return true;
9623
9624         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9625                 if (vlan->vlan_id != 0)
9626                         return true;
9627
9628         return false;
9629 }
9630
9631 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9632 {
9633         struct hclge_dev *hdev = vport->back;
9634         bool need_en;
9635         int ret;
9636
9637         mutex_lock(&hdev->vport_lock);
9638
9639         vport->req_vlan_fltr_en = request_en;
9640
9641         need_en = hclge_need_enable_vport_vlan_filter(vport);
9642         if (need_en == vport->cur_vlan_fltr_en) {
9643                 mutex_unlock(&hdev->vport_lock);
9644                 return 0;
9645         }
9646
9647         ret = hclge_set_vport_vlan_filter(vport, need_en);
9648         if (ret) {
9649                 mutex_unlock(&hdev->vport_lock);
9650                 return ret;
9651         }
9652
9653         vport->cur_vlan_fltr_en = need_en;
9654
9655         mutex_unlock(&hdev->vport_lock);
9656
9657         return 0;
9658 }
9659
9660 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9661 {
9662         struct hclge_vport *vport = hclge_get_vport(handle);
9663
9664         return hclge_enable_vport_vlan_filter(vport, enable);
9665 }
9666
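/* Build and send the two-descriptor VF vlan filter command: the target VF
 * is selected by setting a single bit in a bitmap whose first
 * HCLGE_MAX_VF_BYTES bytes live in the first descriptor and the remaining
 * bytes in the second one.
 */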
9667 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9668                                         bool is_kill, u16 vlan,
9669                                         struct hclge_desc *desc)
9670 {
9671         struct hclge_vlan_filter_vf_cfg_cmd *req0;
9672         struct hclge_vlan_filter_vf_cfg_cmd *req1;
9673         u8 vf_byte_val;
9674         u8 vf_byte_off;
9675         int ret;
9676
9677         hclge_cmd_setup_basic_desc(&desc[0],
9678                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9679         hclge_cmd_setup_basic_desc(&desc[1],
9680                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9681
9682         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9683
9684         vf_byte_off = vfid / 8;
9685         vf_byte_val = 1 << (vfid % 8);
9686
9687         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9688         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9689
9690         req0->vlan_id  = cpu_to_le16(vlan);
9691         req0->vlan_cfg = is_kill;
9692
9693         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9694                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9695         else
9696                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9697
9698         ret = hclge_cmd_send(&hdev->hw, desc, 2);
9699         if (ret) {
9700                 dev_err(&hdev->pdev->dev,
9701                         "Send vf vlan command fail, ret =%d.\n",
9702                         ret);
9703                 return ret;
9704         }
9705
9706         return 0;
9707 }
9708
9709 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9710                                           bool is_kill, struct hclge_desc *desc)
9711 {
9712         struct hclge_vlan_filter_vf_cfg_cmd *req;
9713
9714         req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9715
9716         if (!is_kill) {
9717 #define HCLGE_VF_VLAN_NO_ENTRY  2
9718                 if (!req->resp_code || req->resp_code == 1)
9719                         return 0;
9720
9721                 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9722                         set_bit(vfid, hdev->vf_vlan_full);
9723                         dev_warn(&hdev->pdev->dev,
9724                                  "vf vlan table is full, vf vlan filter is disabled\n");
9725                         return 0;
9726                 }
9727
9728                 dev_err(&hdev->pdev->dev,
9729                         "Add vf vlan filter fail, ret =%u.\n",
9730                         req->resp_code);
9731         } else {
9732 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
9733                 if (!req->resp_code)
9734                         return 0;
9735
9736                 /* vf vlan filter is disabled when the vf vlan table is
9737                  * full, so new vlan ids will not be added to the vf vlan
9738                  * table. Just return 0 without a warning, to avoid
9739                  * printing massive verbose logs on unload.
9740                  */
9741                 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9742                         return 0;
9743
9744                 dev_err(&hdev->pdev->dev,
9745                         "Kill vf vlan filter fail, ret =%u.\n",
9746                         req->resp_code);
9747         }
9748
9749         return -EIO;
9750 }
9751
9752 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9753                                     bool is_kill, u16 vlan)
9754 {
9755         struct hclge_vport *vport = &hdev->vport[vfid];
9756         struct hclge_desc desc[2];
9757         int ret;
9758
9759         /* If the vf vlan table is full, firmware closes the vf vlan
9760          * filter, so adding a new vlan id is neither possible nor needed.
9761          * If spoof check is enabled and the table is full, don't add a
9762          * new vlan: tx packets with that vlan id would be dropped.
9763          */
9764         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9765                 if (vport->vf_info.spoofchk && vlan) {
9766                         dev_err(&hdev->pdev->dev,
9767                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9768                         return -EPERM;
9769                 }
9770                 return 0;
9771         }
9772
9773         ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9774         if (ret)
9775                 return ret;
9776
9777         return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9778 }
9779
9780 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9781                                       u16 vlan_id, bool is_kill)
9782 {
9783         struct hclge_vlan_filter_pf_cfg_cmd *req;
9784         struct hclge_desc desc;
9785         u8 vlan_offset_byte_val;
9786         u8 vlan_offset_byte;
9787         u8 vlan_offset_160;
9788         int ret;
9789
9790         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9791
9792         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9793         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9794                            HCLGE_VLAN_BYTE_SIZE;
9795         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9796
9797         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9798         req->vlan_offset = vlan_offset_160;
9799         req->vlan_cfg = is_kill;
9800         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9801
9802         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9803         if (ret)
9804                 dev_err(&hdev->pdev->dev,
9805                         "port vlan command, send fail, ret =%d.\n", ret);
9806         return ret;
9807 }
9808
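/* Update both the per-function VF vlan table and the port vlan filter. The
 * port level entry is only touched when the first vport joins the vlan or
 * the last vport leaves it, tracked via the hdev->vlan_table bitmaps.
 */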
9809 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9810                                     u16 vport_id, u16 vlan_id,
9811                                     bool is_kill)
9812 {
9813         u16 vport_idx, vport_num = 0;
9814         int ret;
9815
9816         if (is_kill && !vlan_id)
9817                 return 0;
9818
9819         if (vlan_id >= VLAN_N_VID)
9820                 return -EINVAL;
9821
9822         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9823         if (ret) {
9824                 dev_err(&hdev->pdev->dev,
9825                         "Set %u vport vlan filter config fail, ret =%d.\n",
9826                         vport_id, ret);
9827                 return ret;
9828         }
9829
9830         /* vlan 0 may be added twice when 8021q module is enabled */
9831         if (!is_kill && !vlan_id &&
9832             test_bit(vport_id, hdev->vlan_table[vlan_id]))
9833                 return 0;
9834
9835         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9836                 dev_err(&hdev->pdev->dev,
9837                         "Add port vlan failed, vport %u is already in vlan %u\n",
9838                         vport_id, vlan_id);
9839                 return -EINVAL;
9840         }
9841
9842         if (is_kill &&
9843             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9844                 dev_err(&hdev->pdev->dev,
9845                         "Delete port vlan failed, vport %u is not in vlan %u\n",
9846                         vport_id, vlan_id);
9847                 return -EINVAL;
9848         }
9849
9850         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9851                 vport_num++;
9852
9853         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9854                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9855                                                  is_kill);
9856
9857         return ret;
9858 }
9859
9860 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9861 {
9862         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9863         struct hclge_vport_vtag_tx_cfg_cmd *req;
9864         struct hclge_dev *hdev = vport->back;
9865         struct hclge_desc desc;
9866         u16 bmap_index;
9867         int status;
9868
9869         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9870
9871         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9872         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9873         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9874         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9875                       vcfg->accept_tag1 ? 1 : 0);
9876         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9877                       vcfg->accept_untag1 ? 1 : 0);
9878         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9879                       vcfg->accept_tag2 ? 1 : 0);
9880         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9881                       vcfg->accept_untag2 ? 1 : 0);
9882         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9883                       vcfg->insert_tag1_en ? 1 : 0);
9884         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9885                       vcfg->insert_tag2_en ? 1 : 0);
9886         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9887                       vcfg->tag_shift_mode_en ? 1 : 0);
9888         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9889
9890         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9891         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9892                         HCLGE_VF_NUM_PER_BYTE;
9893         req->vf_bitmap[bmap_index] =
9894                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9895
9896         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9897         if (status)
9898                 dev_err(&hdev->pdev->dev,
9899                         "Send port txvlan cfg command fail, ret =%d\n",
9900                         status);
9901
9902         return status;
9903 }
9904
9905 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9906 {
9907         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9908         struct hclge_vport_vtag_rx_cfg_cmd *req;
9909         struct hclge_dev *hdev = vport->back;
9910         struct hclge_desc desc;
9911         u16 bmap_index;
9912         int status;
9913
9914         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9915
9916         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9917         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9918                       vcfg->strip_tag1_en ? 1 : 0);
9919         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9920                       vcfg->strip_tag2_en ? 1 : 0);
9921         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9922                       vcfg->vlan1_vlan_prionly ? 1 : 0);
9923         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9924                       vcfg->vlan2_vlan_prionly ? 1 : 0);
9925         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9926                       vcfg->strip_tag1_discard_en ? 1 : 0);
9927         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9928                       vcfg->strip_tag2_discard_en ? 1 : 0);
9929
9930         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9931         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9932                         HCLGE_VF_NUM_PER_BYTE;
9933         req->vf_bitmap[bmap_index] =
9934                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9935
9936         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9937         if (status)
9938                 dev_err(&hdev->pdev->dev,
9939                         "Send port rxvlan cfg command fail, ret =%d\n",
9940                         status);
9941
9942         return status;
9943 }
9944
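/* Configure tx vlan tag insertion and rx vlan tag stripping for a vport,
 * depending on whether a port based vlan is in use. With port based vlan
 * enabled, tag1 carries the port vlan (qos + vlan id) and is inserted on tx,
 * while tag2 is stripped and discarded on rx.
 */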
9945 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9946                                   u16 port_base_vlan_state,
9947                                   u16 vlan_tag, u8 qos)
9948 {
9949         int ret;
9950
9951         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9952                 vport->txvlan_cfg.accept_tag1 = true;
9953                 vport->txvlan_cfg.insert_tag1_en = false;
9954                 vport->txvlan_cfg.default_tag1 = 0;
9955         } else {
9956                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9957
9958                 vport->txvlan_cfg.accept_tag1 =
9959                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9960                 vport->txvlan_cfg.insert_tag1_en = true;
9961                 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9962                                                  vlan_tag;
9963         }
9964
9965         vport->txvlan_cfg.accept_untag1 = true;
9966
9967         /* accept_tag2 and accept_untag2 are not supported on
9968          * pdev revision(0x20); newer revisions support them, but
9969          * these two fields cannot be configured by the user.
9970          */
9971         vport->txvlan_cfg.accept_tag2 = true;
9972         vport->txvlan_cfg.accept_untag2 = true;
9973         vport->txvlan_cfg.insert_tag2_en = false;
9974         vport->txvlan_cfg.default_tag2 = 0;
9975         vport->txvlan_cfg.tag_shift_mode_en = true;
9976
9977         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9978                 vport->rxvlan_cfg.strip_tag1_en = false;
9979                 vport->rxvlan_cfg.strip_tag2_en =
9980                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9981                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9982         } else {
9983                 vport->rxvlan_cfg.strip_tag1_en =
9984                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9985                 vport->rxvlan_cfg.strip_tag2_en = true;
9986                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9987         }
9988
9989         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9990         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9991         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9992
9993         ret = hclge_set_vlan_tx_offload_cfg(vport);
9994         if (ret)
9995                 return ret;
9996
9997         return hclge_set_vlan_rx_offload_cfg(vport);
9998 }
9999
10000 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
10001 {
10002         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
10003         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
10004         struct hclge_desc desc;
10005         int status;
10006
10007         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
10008         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
10009         rx_req->ot_fst_vlan_type =
10010                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
10011         rx_req->ot_sec_vlan_type =
10012                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
10013         rx_req->in_fst_vlan_type =
10014                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
10015         rx_req->in_sec_vlan_type =
10016                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
10017
10018         status = hclge_cmd_send(&hdev->hw, &desc, 1);
10019         if (status) {
10020                 dev_err(&hdev->pdev->dev,
10021                         "Send rxvlan protocol type command fail, ret =%d\n",
10022                         status);
10023                 return status;
10024         }
10025
10026         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10027
10028         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10029         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10030         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10031
10032         status = hclge_cmd_send(&hdev->hw, &desc, 1);
10033         if (status)
10034                 dev_err(&hdev->pdev->dev,
10035                         "Send txvlan protocol type command fail, ret =%d\n",
10036                         status);
10037
10038         return status;
10039 }
10040
10041 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10042 {
10043 #define HCLGE_DEF_VLAN_TYPE             0x8100
10044
10045         struct hnae3_handle *handle = &hdev->vport[0].nic;
10046         struct hclge_vport *vport;
10047         int ret;
10048         int i;
10049
10050         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10051                 /* for revision 0x21, vf vlan filter is per function */
10052                 for (i = 0; i < hdev->num_alloc_vport; i++) {
10053                         vport = &hdev->vport[i];
10054                         ret = hclge_set_vlan_filter_ctrl(hdev,
10055                                                          HCLGE_FILTER_TYPE_VF,
10056                                                          HCLGE_FILTER_FE_EGRESS,
10057                                                          true,
10058                                                          vport->vport_id);
10059                         if (ret)
10060                                 return ret;
10061                         vport->cur_vlan_fltr_en = true;
10062                 }
10063
10064                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10065                                                  HCLGE_FILTER_FE_INGRESS, true,
10066                                                  0);
10067                 if (ret)
10068                         return ret;
10069         } else {
10070                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10071                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
10072                                                  true, 0);
10073                 if (ret)
10074                         return ret;
10075         }
10076
10077         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10078         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10079         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10080         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10081         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10082         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10083
10084         ret = hclge_set_vlan_protocol_type(hdev);
10085         if (ret)
10086                 return ret;
10087
10088         for (i = 0; i < hdev->num_alloc_vport; i++) {
10089                 u16 vlan_tag;
10090                 u8 qos;
10091
10092                 vport = &hdev->vport[i];
10093                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10094                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10095
10096                 ret = hclge_vlan_offload_cfg(vport,
10097                                              vport->port_base_vlan_cfg.state,
10098                                              vlan_tag, qos);
10099                 if (ret)
10100                         return ret;
10101         }
10102
10103         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10104 }
10105
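/* Track a vlan id in the vport's software vlan list; hd_tbl_status records
 * whether the entry has also been written to the hardware filter table.
 */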
10106 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10107                                        bool writen_to_tbl)
10108 {
10109         struct hclge_vport_vlan_cfg *vlan, *tmp;
10110
10111         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10112                 if (vlan->vlan_id == vlan_id)
10113                         return;
10114
10115         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10116         if (!vlan)
10117                 return;
10118
10119         vlan->hd_tbl_status = writen_to_tbl;
10120         vlan->vlan_id = vlan_id;
10121
10122         list_add_tail(&vlan->node, &vport->vlan_list);
10123 }
10124
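/* Write every vlan in the vport's software vlan list that has not yet been
 * written to hardware into the vlan filter table, and mark each entry as
 * present in hardware.
 */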
10125 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10126 {
10127         struct hclge_vport_vlan_cfg *vlan, *tmp;
10128         struct hclge_dev *hdev = vport->back;
10129         int ret;
10130
10131         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10132                 if (!vlan->hd_tbl_status) {
10133                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10134                                                        vport->vport_id,
10135                                                        vlan->vlan_id, false);
10136                         if (ret) {
10137                                 dev_err(&hdev->pdev->dev,
10138                                         "restore vport vlan list failed, ret=%d\n",
10139                                         ret);
10140                                 return ret;
10141                         }
10142                 }
10143                 vlan->hd_tbl_status = true;
10144         }
10145
10146         return 0;
10147 }
10148
10149 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10150                                       bool is_write_tbl)
10151 {
10152         struct hclge_vport_vlan_cfg *vlan, *tmp;
10153         struct hclge_dev *hdev = vport->back;
10154
10155         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10156                 if (vlan->vlan_id == vlan_id) {
10157                         if (is_write_tbl && vlan->hd_tbl_status)
10158                                 hclge_set_vlan_filter_hw(hdev,
10159                                                          htons(ETH_P_8021Q),
10160                                                          vport->vport_id,
10161                                                          vlan_id,
10162                                                          true);
10163
10164                         list_del(&vlan->node);
10165                         kfree(vlan);
10166                         break;
10167                 }
10168         }
10169 }
10170
10171 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10172 {
10173         struct hclge_vport_vlan_cfg *vlan, *tmp;
10174         struct hclge_dev *hdev = vport->back;
10175
10176         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10177                 if (vlan->hd_tbl_status)
10178                         hclge_set_vlan_filter_hw(hdev,
10179                                                  htons(ETH_P_8021Q),
10180                                                  vport->vport_id,
10181                                                  vlan->vlan_id,
10182                                                  true);
10183
10184                 vlan->hd_tbl_status = false;
10185                 if (is_del_list) {
10186                         list_del(&vlan->node);
10187                         kfree(vlan);
10188                 }
10189         }
10190         clear_bit(vport->vport_id, hdev->vf_vlan_full);
10191 }
10192
10193 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10194 {
10195         struct hclge_vport_vlan_cfg *vlan, *tmp;
10196         struct hclge_vport *vport;
10197         int i;
10198
10199         for (i = 0; i < hdev->num_alloc_vport; i++) {
10200                 vport = &hdev->vport[i];
10201                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10202                         list_del(&vlan->node);
10203                         kfree(vlan);
10204                 }
10205         }
10206 }
10207
10208 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10209 {
10210         struct hclge_vport_vlan_cfg *vlan, *tmp;
10211         struct hclge_dev *hdev = vport->back;
10212         u16 vlan_proto;
10213         u16 vlan_id;
10214         u16 state;
10215         int ret;
10216
10217         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10218         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10219         state = vport->port_base_vlan_cfg.state;
10220
10221         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10222                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10223                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10224                                          vport->vport_id, vlan_id,
10225                                          false);
10226                 return;
10227         }
10228
10229         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10230                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10231                                                vport->vport_id,
10232                                                vlan->vlan_id, false);
10233                 if (ret)
10234                         break;
10235                 vlan->hd_tbl_status = true;
10236         }
10237 }
10238
10239 /* For global reset and imp reset, hardware will clear the mac table,
10240  * so we change the mac address state from ACTIVE to TO_ADD; they can
10241  * then be restored by the service task after the reset completes.
10242  * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
10243  * to be restored after reset, so just remove those nodes from mac_list.
10244  */
10245 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10246 {
10247         struct hclge_mac_node *mac_node, *tmp;
10248
10249         list_for_each_entry_safe(mac_node, tmp, list, node) {
10250                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10251                         mac_node->state = HCLGE_MAC_TO_ADD;
10252                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10253                         list_del(&mac_node->node);
10254                         kfree(mac_node);
10255                 }
10256         }
10257 }
10258
10259 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10260 {
10261         spin_lock_bh(&vport->mac_list_lock);
10262
10263         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10264         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10265         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10266
10267         spin_unlock_bh(&vport->mac_list_lock);
10268 }
10269
10270 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10271 {
10272         struct hclge_vport *vport = &hdev->vport[0];
10273         struct hnae3_handle *handle = &vport->nic;
10274
10275         hclge_restore_mac_table_common(vport);
10276         hclge_restore_vport_vlan_table(vport);
10277         set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10278         hclge_restore_fd_entries(handle);
10279 }
10280
10281 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10282 {
10283         struct hclge_vport *vport = hclge_get_vport(handle);
10284
10285         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10286                 vport->rxvlan_cfg.strip_tag1_en = false;
10287                 vport->rxvlan_cfg.strip_tag2_en = enable;
10288                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10289         } else {
10290                 vport->rxvlan_cfg.strip_tag1_en = enable;
10291                 vport->rxvlan_cfg.strip_tag2_en = true;
10292                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10293         }
10294
10295         vport->rxvlan_cfg.strip_tag1_discard_en = false;
10296         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10297         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10298         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10299
10300         return hclge_set_vlan_rx_offload_cfg(vport);
10301 }
10302
10303 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10304 {
10305         struct hclge_dev *hdev = vport->back;
10306
10307         if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10308                 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10309 }
10310
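/* Switch the hardware vlan filter entries between the vport's vlan list and
 * the port based vlan: when enabling port based vlan, drop the list entries
 * and vlan 0 and install the new port vlan; when disabling, restore vlan 0,
 * remove the old port vlan and re-add the entries from the vport vlan list.
 */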
10311 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10312                                             u16 port_base_vlan_state,
10313                                             struct hclge_vlan_info *new_info,
10314                                             struct hclge_vlan_info *old_info)
10315 {
10316         struct hclge_dev *hdev = vport->back;
10317         int ret;
10318
10319         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10320                 hclge_rm_vport_all_vlan_table(vport, false);
10321                 /* force clear VLAN 0 */
10322                 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10323                 if (ret)
10324                         return ret;
10325                 return hclge_set_vlan_filter_hw(hdev,
10326                                                  htons(new_info->vlan_proto),
10327                                                  vport->vport_id,
10328                                                  new_info->vlan_tag,
10329                                                  false);
10330         }
10331
10332         /* force add VLAN 0 */
10333         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10334         if (ret)
10335                 return ret;
10336
10337         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10338                                        vport->vport_id, old_info->vlan_tag,
10339                                        true);
10340         if (ret)
10341                 return ret;
10342
10343         return hclge_add_vport_all_vlan_table(vport);
10344 }
10345
10346 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10347                                           const struct hclge_vlan_info *old_cfg)
10348 {
10349         if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10350                 return true;
10351
10352         if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10353                 return true;
10354
10355         return false;
10356 }
10357
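/* Apply a new port based vlan configuration to a vport: update the tx/rx
 * vlan offload settings, update the hardware filter entries when
 * hclge_need_update_vlan_filter() reports a change, and record the new
 * state and vlan info in the vport.
 */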
10358 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10359                                     struct hclge_vlan_info *vlan_info)
10360 {
10361         struct hnae3_handle *nic = &vport->nic;
10362         struct hclge_vlan_info *old_vlan_info;
10363         struct hclge_dev *hdev = vport->back;
10364         int ret;
10365
10366         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10367
10368         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10369                                      vlan_info->qos);
10370         if (ret)
10371                 return ret;
10372
10373         if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10374                 goto out;
10375
10376         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10377                 /* add new VLAN tag */
10378                 ret = hclge_set_vlan_filter_hw(hdev,
10379                                                htons(vlan_info->vlan_proto),
10380                                                vport->vport_id,
10381                                                vlan_info->vlan_tag,
10382                                                false);
10383                 if (ret)
10384                         return ret;
10385
10386                 /* remove old VLAN tag */
10387                 if (old_vlan_info->vlan_tag == 0)
10388                         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10389                                                        true, 0);
10390                 else
10391                         ret = hclge_set_vlan_filter_hw(hdev,
10392                                                        htons(ETH_P_8021Q),
10393                                                        vport->vport_id,
10394                                                        old_vlan_info->vlan_tag,
10395                                                        true);
10396                 if (ret) {
10397                         dev_err(&hdev->pdev->dev,
10398                                 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10399                                 vport->vport_id, old_vlan_info->vlan_tag, ret);
10400                         return ret;
10401                 }
10402
10403                 goto out;
10404         }
10405
10406         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10407                                                old_vlan_info);
10408         if (ret)
10409                 return ret;
10410
10411 out:
10412         vport->port_base_vlan_cfg.state = state;
10413         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10414                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10415         else
10416                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10417
10418         vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10419         hclge_set_vport_vlan_fltr_change(vport);
10420
10421         return 0;
10422 }
10423
10424 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10425                                           enum hnae3_port_base_vlan_state state,
10426                                           u16 vlan, u8 qos)
10427 {
10428         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10429                 if (!vlan && !qos)
10430                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10431
10432                 return HNAE3_PORT_BASE_VLAN_ENABLE;
10433         }
10434
10435         if (!vlan && !qos)
10436                 return HNAE3_PORT_BASE_VLAN_DISABLE;
10437
10438         if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10439             vport->port_base_vlan_cfg.vlan_info.qos == qos)
10440                 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10441
10442         return HNAE3_PORT_BASE_VLAN_MODIFY;
10443 }
10444
10445 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10446                                     u16 vlan, u8 qos, __be16 proto)
10447 {
10448         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10449         struct hclge_vport *vport = hclge_get_vport(handle);
10450         struct hclge_dev *hdev = vport->back;
10451         struct hclge_vlan_info vlan_info;
10452         u16 state;
10453         int ret;
10454
10455         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10456                 return -EOPNOTSUPP;
10457
10458         vport = hclge_get_vf_vport(hdev, vfid);
10459         if (!vport)
10460                 return -EINVAL;
10461
10462         /* qos is a 3-bit value, so it cannot be bigger than 7 */
10463         if (vlan > VLAN_N_VID - 1 || qos > 7)
10464                 return -EINVAL;
10465         if (proto != htons(ETH_P_8021Q))
10466                 return -EPROTONOSUPPORT;
10467
10468         state = hclge_get_port_base_vlan_state(vport,
10469                                                vport->port_base_vlan_cfg.state,
10470                                                vlan, qos);
10471         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10472                 return 0;
10473
10474         vlan_info.vlan_tag = vlan;
10475         vlan_info.qos = qos;
10476         vlan_info.vlan_proto = ntohs(proto);
10477
10478         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10479         if (ret) {
10480                 dev_err(&hdev->pdev->dev,
10481                         "failed to update port base vlan for vf %d, ret = %d\n",
10482                         vfid, ret);
10483                 return ret;
10484         }
10485
10486         /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10487          * VLAN state.
10488          */
10489         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10490             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10491                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10492                                                   vport->vport_id, state,
10493                                                   &vlan_info);
10494
10495         return 0;
10496 }
10497
10498 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10499 {
10500         struct hclge_vlan_info *vlan_info;
10501         struct hclge_vport *vport;
10502         int ret;
10503         int vf;
10504
10505         /* clear port based vlan for all vfs */
10506         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10507                 vport = &hdev->vport[vf];
10508                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10509
10510                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10511                                                vport->vport_id,
10512                                                vlan_info->vlan_tag, true);
10513                 if (ret)
10514                         dev_err(&hdev->pdev->dev,
10515                                 "failed to clear vf vlan for vf%d, ret = %d\n",
10516                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10517         }
10518 }
10519
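/* Add or remove a vlan filter entry on behalf of the stack. The hardware
 * table is only written while port based vlan is disabled; otherwise only
 * the vport vlan list is updated. Failed deletions are recorded in
 * vlan_del_fail_bmap and retried later by hclge_sync_vlan_filter().
 */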
10520 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10521                           u16 vlan_id, bool is_kill)
10522 {
10523         struct hclge_vport *vport = hclge_get_vport(handle);
10524         struct hclge_dev *hdev = vport->back;
10525         bool writen_to_tbl = false;
10526         int ret = 0;
10527
10528         /* When the device is resetting or reset has failed, firmware is
10529          * unable to handle the mailbox. Just record the vlan id, and
10530          * remove it after the reset finishes.
10531          */
10532         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10533              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10534                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10535                 return -EBUSY;
10536         }
10537
10538         /* When port based vlan is enabled, the port based vlan is used as
10539          * the vlan filter entry. In this case the vlan filter table is not
10540          * updated when the user adds or removes a vlan; only the vport
10541          * vlan list is updated. The vlan ids in the list are written to
10542          * the vlan filter table once port based vlan is disabled.
10543          */
10544         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10545                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10546                                                vlan_id, is_kill);
10547                 writen_to_tbl = true;
10548         }
10549
10550         if (!ret) {
10551                 if (is_kill)
10552                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10553                 else
10554                         hclge_add_vport_vlan_table(vport, vlan_id,
10555                                                    writen_to_tbl);
10556         } else if (is_kill) {
10557                 /* when removing the hw vlan filter failed, record the vlan
10558                  * id and try to remove it from hw later, to stay consistent
10559                  * with the stack
10560                  */
10561                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10562         }
10563
10564         hclge_set_vport_vlan_fltr_change(vport);
10565
10566         return ret;
10567 }
10568
10569 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10570 {
10571         struct hclge_vport *vport;
10572         int ret;
10573         u16 i;
10574
10575         for (i = 0; i < hdev->num_alloc_vport; i++) {
10576                 vport = &hdev->vport[i];
10577                 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10578                                         &vport->state))
10579                         continue;
10580
10581                 ret = hclge_enable_vport_vlan_filter(vport,
10582                                                      vport->req_vlan_fltr_en);
10583                 if (ret) {
10584                         dev_err(&hdev->pdev->dev,
10585                                 "failed to sync vlan filter state for vport%u, ret = %d\n",
10586                                 vport->vport_id, ret);
10587                         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10588                                 &vport->state);
10589                         return;
10590                 }
10591         }
10592 }
10593
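/* Retry the vlan deletions that previously failed (recorded in each vport's
 * vlan_del_fail_bmap), limited to HCLGE_MAX_SYNC_COUNT entries per call,
 * then sync the vlan filter enable state for all vports.
 */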
10594 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10595 {
10596 #define HCLGE_MAX_SYNC_COUNT    60
10597
10598         int i, ret, sync_cnt = 0;
10599         u16 vlan_id;
10600
10601         /* start from vport 1 for PF is always alive */
10602         for (i = 0; i < hdev->num_alloc_vport; i++) {
10603                 struct hclge_vport *vport = &hdev->vport[i];
10604
10605                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10606                                          VLAN_N_VID);
10607                 while (vlan_id != VLAN_N_VID) {
10608                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10609                                                        vport->vport_id, vlan_id,
10610                                                        true);
10611                         if (ret && ret != -EINVAL)
10612                                 return;
10613
10614                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10615                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10616                         hclge_set_vport_vlan_fltr_change(vport);
10617
10618                         sync_cnt++;
10619                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10620                                 return;
10621
10622                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10623                                                  VLAN_N_VID);
10624                 }
10625         }
10626
10627         hclge_sync_vlan_fltr_state(hdev);
10628 }
10629
10630 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10631 {
10632         struct hclge_config_max_frm_size_cmd *req;
10633         struct hclge_desc desc;
10634
10635         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10636
10637         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10638         req->max_frm_size = cpu_to_le16(new_mps);
10639         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10640
10641         return hclge_cmd_send(&hdev->hw, &desc, 1);
10642 }
10643
10644 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10645 {
10646         struct hclge_vport *vport = hclge_get_vport(handle);
10647
10648         return hclge_set_vport_mtu(vport, new_mtu);
10649 }
10650
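/* Change the MTU of a vport. The MTU is converted into a maximum frame size
 * that also covers the Ethernet header, FCS and two vlan tags. A VF's frame
 * size must not exceed the PF's; changing the PF's frame size takes the
 * client down, updates the MAC and re-allocates the packet buffers.
 */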
10651 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10652 {
10653         struct hclge_dev *hdev = vport->back;
10654         int i, max_frm_size, ret;
10655
10656         /* HW supports 2 layers of vlan */
10657         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10658         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10659             max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10660                 return -EINVAL;
10661
10662         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10663         mutex_lock(&hdev->vport_lock);
10664         /* VF's mps must fit within hdev->mps */
10665         if (vport->vport_id && max_frm_size > hdev->mps) {
10666                 mutex_unlock(&hdev->vport_lock);
10667                 return -EINVAL;
10668         } else if (vport->vport_id) {
10669                 vport->mps = max_frm_size;
10670                 mutex_unlock(&hdev->vport_lock);
10671                 return 0;
10672         }
10673
10674         /* PF's mps must be no smaller than any VF's mps */
10675         for (i = 1; i < hdev->num_alloc_vport; i++)
10676                 if (max_frm_size < hdev->vport[i].mps) {
10677                         mutex_unlock(&hdev->vport_lock);
10678                         return -EINVAL;
10679                 }
10680
10681         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10682
10683         ret = hclge_set_mac_mtu(hdev, max_frm_size);
10684         if (ret) {
10685                 dev_err(&hdev->pdev->dev,
10686                         "Change mtu fail, ret =%d\n", ret);
10687                 goto out;
10688         }
10689
10690         hdev->mps = max_frm_size;
10691         vport->mps = max_frm_size;
10692
10693         ret = hclge_buffer_alloc(hdev);
10694         if (ret)
10695                 dev_err(&hdev->pdev->dev,
10696                         "Allocate buffer fail, ret =%d\n", ret);
10697
10698 out:
10699         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10700         mutex_unlock(&hdev->vport_lock);
10701         return ret;
10702 }
10703
10704 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10705                                     bool enable)
10706 {
10707         struct hclge_reset_tqp_queue_cmd *req;
10708         struct hclge_desc desc;
10709         int ret;
10710
10711         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10712
10713         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10714         req->tqp_id = cpu_to_le16(queue_id);
10715         if (enable)
10716                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10717
10718         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10719         if (ret) {
10720                 dev_err(&hdev->pdev->dev,
10721                         "Send tqp reset cmd error, status =%d\n", ret);
10722                 return ret;
10723         }
10724
10725         return 0;
10726 }
10727
10728 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10729                                   u8 *reset_status)
10730 {
10731         struct hclge_reset_tqp_queue_cmd *req;
10732         struct hclge_desc desc;
10733         int ret;
10734
10735         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10736
10737         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10738         req->tqp_id = cpu_to_le16(queue_id);
10739
10740         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10741         if (ret) {
10742                 dev_err(&hdev->pdev->dev,
10743                         "Get reset status error, status =%d\n", ret);
10744                 return ret;
10745         }
10746
10747         *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10748
10749         return 0;
10750 }
10751
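/* Convert a queue id that is local to the handle into the global tqp index
 * used by the hardware.
 */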
10752 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10753 {
10754         struct hnae3_queue *queue;
10755         struct hclge_tqp *tqp;
10756
10757         queue = handle->kinfo.tqp[queue_id];
10758         tqp = container_of(queue, struct hclge_tqp, q);
10759
10760         return tqp->index;
10761 }
10762
10763 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10764 {
10765         struct hclge_vport *vport = hclge_get_vport(handle);
10766         struct hclge_dev *hdev = vport->back;
10767         u16 reset_try_times = 0;
10768         u8 reset_status;
10769         u16 queue_gid;
10770         int ret;
10771         u16 i;
10772
10773         for (i = 0; i < handle->kinfo.num_tqps; i++) {
10774                 queue_gid = hclge_covert_handle_qid_global(handle, i);
10775                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10776                 if (ret) {
10777                         dev_err(&hdev->pdev->dev,
10778                                 "failed to send reset tqp cmd, ret = %d\n",
10779                                 ret);
10780                         return ret;
10781                 }
10782
10783                 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10784                         ret = hclge_get_reset_status(hdev, queue_gid,
10785                                                      &reset_status);
10786                         if (ret)
10787                                 return ret;
10788
10789                         if (reset_status)
10790                                 break;
10791
10792                         /* Wait for tqp hw reset */
10793                         usleep_range(1000, 1200);
10794                 }
10795
10796                 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10797                         dev_err(&hdev->pdev->dev,
10798                                 "wait for tqp hw reset timeout\n");
10799                         return -ETIME;
10800                 }
10801
10802                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10803                 if (ret) {
10804                         dev_err(&hdev->pdev->dev,
10805                                 "failed to deassert soft reset, ret = %d\n",
10806                                 ret);
10807                         return ret;
10808                 }
10809                 reset_try_times = 0;
10810         }
10811         return 0;
10812 }
10813
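/* Reset the RCB of all queues belonging to the handle with a single command.
 * Firmware that does not support this command reports
 * HCLGE_RESET_RCB_NOT_SUPPORT, in which case each tqp is reset individually
 * via hclge_reset_tqp_cmd().
 */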
10814 static int hclge_reset_rcb(struct hnae3_handle *handle)
10815 {
10816 #define HCLGE_RESET_RCB_NOT_SUPPORT     0U
10817 #define HCLGE_RESET_RCB_SUCCESS         1U
10818
10819         struct hclge_vport *vport = hclge_get_vport(handle);
10820         struct hclge_dev *hdev = vport->back;
10821         struct hclge_reset_cmd *req;
10822         struct hclge_desc desc;
10823         u8 return_status;
10824         u16 queue_gid;
10825         int ret;
10826
10827         queue_gid = hclge_covert_handle_qid_global(handle, 0);
10828
10829         req = (struct hclge_reset_cmd *)desc.data;
10830         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10831         hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10832         req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10833         req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10834
10835         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10836         if (ret) {
10837                 dev_err(&hdev->pdev->dev,
10838                         "failed to send rcb reset cmd, ret = %d\n", ret);
10839                 return ret;
10840         }
10841
10842         return_status = req->fun_reset_rcb_return_status;
10843         if (return_status == HCLGE_RESET_RCB_SUCCESS)
10844                 return 0;
10845
10846         if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10847                 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10848                         return_status);
10849                 return -EIO;
10850         }
10851
10852         /* if the reset rcb cmd is unsupported, fall back to sending the
10853          * reset tqp cmd for each tqp instead
10854          */
10855         return hclge_reset_tqp_cmd(handle);
10856 }
10857
10858 int hclge_reset_tqp(struct hnae3_handle *handle)
10859 {
10860         struct hclge_vport *vport = hclge_get_vport(handle);
10861         struct hclge_dev *hdev = vport->back;
10862         int ret;
10863
10864         /* only the PF's tqps need to be disabled here */
10865         if (!vport->vport_id) {
10866                 ret = hclge_tqp_enable(handle, false);
10867                 if (ret) {
10868                         dev_err(&hdev->pdev->dev,
10869                                 "failed to disable tqp, ret = %d\n", ret);
10870                         return ret;
10871                 }
10872         }
10873
10874         return hclge_reset_rcb(handle);
10875 }
10876
10877 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10878 {
10879         struct hclge_vport *vport = hclge_get_vport(handle);
10880         struct hclge_dev *hdev = vport->back;
10881
10882         return hdev->fw_version;
10883 }
10884
10885 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10886 {
10887         struct phy_device *phydev = hdev->hw.mac.phydev;
10888
10889         if (!phydev)
10890                 return;
10891
10892         phy_set_asym_pause(phydev, rx_en, tx_en);
10893 }
10894
10895 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10896 {
10897         int ret;
10898
10899         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10900                 return 0;
10901
10902         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10903         if (ret)
10904                 dev_err(&hdev->pdev->dev,
10905                         "configure pauseparam error, ret = %d.\n", ret);
10906
10907         return ret;
10908 }
10909
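/* Resolve the pause configuration from the local and link partner
 * advertisements after autoneg completes on a PHY-attached port, then apply
 * it to the MAC. Half duplex links get pause disabled in both directions.
 */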
10910 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10911 {
10912         struct phy_device *phydev = hdev->hw.mac.phydev;
10913         u16 remote_advertising = 0;
10914         u16 local_advertising;
10915         u32 rx_pause, tx_pause;
10916         u8 flowctl;
10917
10918         if (!phydev->link || !phydev->autoneg)
10919                 return 0;
10920
10921         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10922
10923         if (phydev->pause)
10924                 remote_advertising = LPA_PAUSE_CAP;
10925
10926         if (phydev->asym_pause)
10927                 remote_advertising |= LPA_PAUSE_ASYM;
10928
10929         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10930                                            remote_advertising);
10931         tx_pause = flowctl & FLOW_CTRL_TX;
10932         rx_pause = flowctl & FLOW_CTRL_RX;
10933
10934         if (phydev->duplex == HCLGE_MAC_HALF) {
10935                 tx_pause = 0;
10936                 rx_pause = 0;
10937         }
10938
10939         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10940 }
10941
10942 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10943                                  u32 *rx_en, u32 *tx_en)
10944 {
10945         struct hclge_vport *vport = hclge_get_vport(handle);
10946         struct hclge_dev *hdev = vport->back;
10947         u8 media_type = hdev->hw.mac.media_type;
10948
10949         *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10950                     hclge_get_autoneg(handle) : 0;
10951
10952         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10953                 *rx_en = 0;
10954                 *tx_en = 0;
10955                 return;
10956         }
10957
10958         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10959                 *rx_en = 1;
10960                 *tx_en = 0;
10961         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10962                 *tx_en = 1;
10963                 *rx_en = 0;
10964         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10965                 *rx_en = 1;
10966                 *tx_en = 1;
10967         } else {
10968                 *rx_en = 0;
10969                 *tx_en = 0;
10970         }
10971 }
10972
10973 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10974                                          u32 rx_en, u32 tx_en)
10975 {
10976         if (rx_en && tx_en)
10977                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10978         else if (rx_en && !tx_en)
10979                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10980         else if (!rx_en && tx_en)
10981                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10982         else
10983                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10984
10985         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10986 }
10987
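/* ethtool set_pauseparam handler: reject the request when PFC is active or
 * when the requested autoneg state does not match the configured one, record
 * the user's setting, then either apply it directly or restart autoneg on
 * the PHY so the link partner is informed.
 */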
10988 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10989                                 u32 rx_en, u32 tx_en)
10990 {
10991         struct hclge_vport *vport = hclge_get_vport(handle);
10992         struct hclge_dev *hdev = vport->back;
10993         struct phy_device *phydev = hdev->hw.mac.phydev;
10994         u32 fc_autoneg;
10995
10996         if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10997                 fc_autoneg = hclge_get_autoneg(handle);
10998                 if (auto_neg != fc_autoneg) {
10999                         dev_info(&hdev->pdev->dev,
11000                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11001                         return -EOPNOTSUPP;
11002                 }
11003         }
11004
11005         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11006                 dev_info(&hdev->pdev->dev,
11007                          "Priority flow control enabled. Cannot set link flow control.\n");
11008                 return -EOPNOTSUPP;
11009         }
11010
11011         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11012
11013         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11014
11015         if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11016                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11017
11018         if (phydev)
11019                 return phy_start_aneg(phydev);
11020
11021         return -EOPNOTSUPP;
11022 }
11023
11024 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
11025                                           u8 *auto_neg, u32 *speed, u8 *duplex)
11026 {
11027         struct hclge_vport *vport = hclge_get_vport(handle);
11028         struct hclge_dev *hdev = vport->back;
11029
11030         if (speed)
11031                 *speed = hdev->hw.mac.speed;
11032         if (duplex)
11033                 *duplex = hdev->hw.mac.duplex;
11034         if (auto_neg)
11035                 *auto_neg = hdev->hw.mac.autoneg;
11036 }
11037
11038 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11039                                  u8 *module_type)
11040 {
11041         struct hclge_vport *vport = hclge_get_vport(handle);
11042         struct hclge_dev *hdev = vport->back;
11043
11044         /* When the nic is down, the service task does not refresh the
11045          * port information every second. Query it here before returning
11046          * the media type so the information is up to date.
11047          */
11048         hclge_update_port_info(hdev);
11049
11050         if (media_type)
11051                 *media_type = hdev->hw.mac.media_type;
11052
11053         if (module_type)
11054                 *module_type = hdev->hw.mac.module_type;
11055 }
11056
11057 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11058                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11059 {
11060         struct hclge_vport *vport = hclge_get_vport(handle);
11061         struct hclge_dev *hdev = vport->back;
11062         struct phy_device *phydev = hdev->hw.mac.phydev;
11063         int mdix_ctrl, mdix, is_resolved;
11064         unsigned int retval;
11065
11066         if (!phydev) {
11067                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11068                 *tp_mdix = ETH_TP_MDI_INVALID;
11069                 return;
11070         }
11071
11072         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11073
11074         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11075         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11076                                     HCLGE_PHY_MDIX_CTRL_S);
11077
11078         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11079         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11080         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11081
11082         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11083
11084         switch (mdix_ctrl) {
11085         case 0x0:
11086                 *tp_mdix_ctrl = ETH_TP_MDI;
11087                 break;
11088         case 0x1:
11089                 *tp_mdix_ctrl = ETH_TP_MDI_X;
11090                 break;
11091         case 0x3:
11092                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11093                 break;
11094         default:
11095                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11096                 break;
11097         }
11098
11099         if (!is_resolved)
11100                 *tp_mdix = ETH_TP_MDI_INVALID;
11101         else if (mdix)
11102                 *tp_mdix = ETH_TP_MDI_X;
11103         else
11104                 *tp_mdix = ETH_TP_MDI;
11105 }
11106
11107 static void hclge_info_show(struct hclge_dev *hdev)
11108 {
11109         struct device *dev = &hdev->pdev->dev;
11110
11111         dev_info(dev, "PF info begin:\n");
11112
11113         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11114         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11115         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11116         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11117         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11118         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11119         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11120         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11121         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11122         dev_info(dev, "This is %s PF\n",
11123                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11124         dev_info(dev, "DCB %s\n",
11125                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11126         dev_info(dev, "MQPRIO %s\n",
11127                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11128         dev_info(dev, "Default tx spare buffer size: %u\n",
11129                  hdev->tx_spare_buf_size);
11130
11131         dev_info(dev, "PF info end.\n");
11132 }
11133
11134 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11135                                           struct hclge_vport *vport)
11136 {
11137         struct hnae3_client *client = vport->nic.client;
11138         struct hclge_dev *hdev = ae_dev->priv;
11139         int rst_cnt = hdev->rst_stats.reset_cnt;
11140         int ret;
11141
11142         ret = client->ops->init_instance(&vport->nic);
11143         if (ret)
11144                 return ret;
11145
11146         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11147         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11148             rst_cnt != hdev->rst_stats.reset_cnt) {
11149                 ret = -EBUSY;
11150                 goto init_nic_err;
11151         }
11152
11153         /* Enable nic hw error interrupts */
11154         ret = hclge_config_nic_hw_error(hdev, true);
11155         if (ret) {
11156                 dev_err(&ae_dev->pdev->dev,
11157                         "fail(%d) to enable hw error interrupts\n", ret);
11158                 goto init_nic_err;
11159         }
11160
11161         hnae3_set_client_init_flag(client, ae_dev, 1);
11162
11163         if (netif_msg_drv(&hdev->vport->nic))
11164                 hclge_info_show(hdev);
11165
11166         return ret;
11167
11168 init_nic_err:
11169         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11170         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11171                 msleep(HCLGE_WAIT_RESET_DONE);
11172
11173         client->ops->uninit_instance(&vport->nic, 0);
11174
11175         return ret;
11176 }
11177
11178 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11179                                            struct hclge_vport *vport)
11180 {
11181         struct hclge_dev *hdev = ae_dev->priv;
11182         struct hnae3_client *client;
11183         int rst_cnt;
11184         int ret;
11185
11186         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11187             !hdev->nic_client)
11188                 return 0;
11189
11190         client = hdev->roce_client;
11191         ret = hclge_init_roce_base_info(vport);
11192         if (ret)
11193                 return ret;
11194
11195         rst_cnt = hdev->rst_stats.reset_cnt;
11196         ret = client->ops->init_instance(&vport->roce);
11197         if (ret)
11198                 return ret;
11199
11200         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11201         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11202             rst_cnt != hdev->rst_stats.reset_cnt) {
11203                 ret = -EBUSY;
11204                 goto init_roce_err;
11205         }
11206
11207         /* Enable roce ras interrupts */
11208         ret = hclge_config_rocee_ras_interrupt(hdev, true);
11209         if (ret) {
11210                 dev_err(&ae_dev->pdev->dev,
11211                         "fail(%d) to enable roce ras interrupts\n", ret);
11212                 goto init_roce_err;
11213         }
11214
11215         hnae3_set_client_init_flag(client, ae_dev, 1);
11216
11217         return 0;
11218
11219 init_roce_err:
11220         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11221         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11222                 msleep(HCLGE_WAIT_RESET_DONE);
11223
11224         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11225
11226         return ret;
11227 }
11228
11229 static int hclge_init_client_instance(struct hnae3_client *client,
11230                                       struct hnae3_ae_dev *ae_dev)
11231 {
11232         struct hclge_dev *hdev = ae_dev->priv;
11233         struct hclge_vport *vport = &hdev->vport[0];
11234         int ret;
11235
11236         switch (client->type) {
11237         case HNAE3_CLIENT_KNIC:
11238                 hdev->nic_client = client;
11239                 vport->nic.client = client;
11240                 ret = hclge_init_nic_client_instance(ae_dev, vport);
11241                 if (ret)
11242                         goto clear_nic;
11243
11244                 ret = hclge_init_roce_client_instance(ae_dev, vport);
11245                 if (ret)
11246                         goto clear_roce;
11247
11248                 break;
11249         case HNAE3_CLIENT_ROCE:
11250                 if (hnae3_dev_roce_supported(hdev)) {
11251                         hdev->roce_client = client;
11252                         vport->roce.client = client;
11253                 }
11254
11255                 ret = hclge_init_roce_client_instance(ae_dev, vport);
11256                 if (ret)
11257                         goto clear_roce;
11258
11259                 break;
11260         default:
11261                 return -EINVAL;
11262         }
11263
11264         return 0;
11265
11266 clear_nic:
11267         hdev->nic_client = NULL;
11268         vport->nic.client = NULL;
11269         return ret;
11270 clear_roce:
11271         hdev->roce_client = NULL;
11272         vport->roce.client = NULL;
11273         return ret;
11274 }
11275
11276 static void hclge_uninit_client_instance(struct hnae3_client *client,
11277                                          struct hnae3_ae_dev *ae_dev)
11278 {
11279         struct hclge_dev *hdev = ae_dev->priv;
11280         struct hclge_vport *vport = &hdev->vport[0];
11281
11282         if (hdev->roce_client) {
11283                 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11284                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11285                         msleep(HCLGE_WAIT_RESET_DONE);
11286
11287                 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11288                 hdev->roce_client = NULL;
11289                 vport->roce.client = NULL;
11290         }
11291         if (client->type == HNAE3_CLIENT_ROCE)
11292                 return;
11293         if (hdev->nic_client && client->ops->uninit_instance) {
11294                 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11295                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11296                         msleep(HCLGE_WAIT_RESET_DONE);
11297
11298                 client->ops->uninit_instance(&vport->nic, 0);
11299                 hdev->nic_client = NULL;
11300                 vport->nic.client = NULL;
11301         }
11302 }
11303
11304 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11305 {
11306 #define HCLGE_MEM_BAR           4
11307
11308         struct pci_dev *pdev = hdev->pdev;
11309         struct hclge_hw *hw = &hdev->hw;
11310
11311         /* if the device does not have device memory, return directly */
11312         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11313                 return 0;
11314
11315         hw->mem_base = devm_ioremap_wc(&pdev->dev,
11316                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
11317                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
11318         if (!hw->mem_base) {
11319                 dev_err(&pdev->dev, "failed to map device memory\n");
11320                 return -EFAULT;
11321         }
11322
11323         return 0;
11324 }
11325
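/* Note on the BAR probe above: pci_select_bars() returns a bitmask of BARs
 * whose resources match the given flags, so testing BIT(HCLGE_MEM_BAR) simply
 * asks "does this device expose the optional device-memory BAR (BAR 4)?".
 * Devices without it skip the write-combined mapping entirely.
 */
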
11326 static int hclge_pci_init(struct hclge_dev *hdev)
11327 {
11328         struct pci_dev *pdev = hdev->pdev;
11329         struct hclge_hw *hw;
11330         int ret;
11331
11332         ret = pci_enable_device(pdev);
11333         if (ret) {
11334                 dev_err(&pdev->dev, "failed to enable PCI device\n");
11335                 return ret;
11336         }
11337
11338         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11339         if (ret) {
11340                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11341                 if (ret) {
11342                         dev_err(&pdev->dev,
11343                                 "can't set consistent PCI DMA");
11344                         goto err_disable_device;
11345                 }
11346                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11347         }
11348
11349         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11350         if (ret) {
11351                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11352                 goto err_disable_device;
11353         }
11354
11355         pci_set_master(pdev);
11356         hw = &hdev->hw;
11357         hw->io_base = pcim_iomap(pdev, 2, 0);
11358         if (!hw->io_base) {
11359                 dev_err(&pdev->dev, "Can't map configuration register space\n");
11360                 ret = -ENOMEM;
11361                 goto err_clr_master;
11362         }
11363
11364         ret = hclge_dev_mem_map(hdev);
11365         if (ret)
11366                 goto err_unmap_io_base;
11367
11368         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11369
11370         return 0;
11371
11372 err_unmap_io_base:
11373         pcim_iounmap(pdev, hdev->hw.io_base);
11374 err_clr_master:
11375         pci_clear_master(pdev);
11376         pci_release_regions(pdev);
11377 err_disable_device:
11378         pci_disable_device(pdev);
11379
11380         return ret;
11381 }
11382
11383 static void hclge_pci_uninit(struct hclge_dev *hdev)
11384 {
11385         struct pci_dev *pdev = hdev->pdev;
11386
11387         if (hdev->hw.mem_base)
11388                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11389
11390         pcim_iounmap(pdev, hdev->hw.io_base);
11391         pci_free_irq_vectors(pdev);
11392         pci_clear_master(pdev);
11393         pci_release_mem_regions(pdev);
11394         pci_disable_device(pdev);
11395 }
11396
11397 static void hclge_state_init(struct hclge_dev *hdev)
11398 {
11399         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11400         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11401         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11402         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11403         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11404         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11405         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11406 }
11407
11408 static void hclge_state_uninit(struct hclge_dev *hdev)
11409 {
11410         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11411         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11412
11413         if (hdev->reset_timer.function)
11414                 del_timer_sync(&hdev->reset_timer);
11415         if (hdev->service_task.work.func)
11416                 cancel_delayed_work_sync(&hdev->service_task);
11417 }
11418
11419 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11420                                         enum hnae3_reset_type rst_type)
11421 {
11422 #define HCLGE_RESET_RETRY_WAIT_MS       500
11423 #define HCLGE_RESET_RETRY_CNT   5
11424
11425         struct hclge_dev *hdev = ae_dev->priv;
11426         int retry_cnt = 0;
11427         int ret;
11428
11429 retry:
11430         down(&hdev->reset_sem);
11431         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11432         hdev->reset_type = rst_type;
11433         ret = hclge_reset_prepare(hdev);
11434         if (ret || hdev->reset_pending) {
11435                 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11436                         ret);
11437                 if (hdev->reset_pending ||
11438                     retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11439                         dev_err(&hdev->pdev->dev,
11440                                 "reset_pending:0x%lx, retry_cnt:%d\n",
11441                                 hdev->reset_pending, retry_cnt);
11442                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11443                         up(&hdev->reset_sem);
11444                         msleep(HCLGE_RESET_RETRY_WAIT_MS);
11445                         goto retry;
11446                 }
11447         }
11448
11449         /* disable misc vector before reset done */
11450         hclge_enable_vector(&hdev->misc_vector, false);
11451         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11452
11453         if (hdev->reset_type == HNAE3_FLR_RESET)
11454                 hdev->rst_stats.flr_rst_cnt++;
11455 }
11456
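/* Retry note for the prepare path above: a failed prepare is retried up to
 * HCLGE_RESET_RETRY_CNT times with a HCLGE_RESET_RETRY_WAIT_MS sleep in
 * between, dropping reset_sem across each retry; while a reset is still
 * pending the loop keeps retrying regardless of the retry count.
 */
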
11457 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11458 {
11459         struct hclge_dev *hdev = ae_dev->priv;
11460         int ret;
11461
11462         hclge_enable_vector(&hdev->misc_vector, true);
11463
11464         ret = hclge_reset_rebuild(hdev);
11465         if (ret)
11466                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11467
11468         hdev->reset_type = HNAE3_NONE_RESET;
11469         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11470         up(&hdev->reset_sem);
11471 }
11472
11473 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11474 {
11475         u16 i;
11476
11477         for (i = 0; i < hdev->num_alloc_vport; i++) {
11478                 struct hclge_vport *vport = &hdev->vport[i];
11479                 int ret;
11480
11481                  /* Send cmd to clear vport's FUNC_RST_ING */
11482                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11483                 if (ret)
11484                         dev_warn(&hdev->pdev->dev,
11485                                  "clear vport(%u) rst failed %d!\n",
11486                                  vport->vport_id, ret);
11487         }
11488 }
11489
11490 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11491 {
11492         struct hclge_desc desc;
11493         int ret;
11494
11495         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11496
11497         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11498         /* This new command is only supported by new firmware and will
11499          * fail with older firmware. The error value -EOPNOTSUPP can only
11500          * be returned by older firmware running this command, so to keep
11501          * the code backward compatible we override this value and return
11502          * success.
11503          */
11504         if (ret && ret != -EOPNOTSUPP) {
11505                 dev_err(&hdev->pdev->dev,
11506                         "failed to clear hw resource, ret = %d\n", ret);
11507                 return ret;
11508         }
11509         return 0;
11510 }
11511
11512 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11513 {
11514         if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11515                 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11516 }
11517
11518 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11519 {
11520         if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11521                 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11522 }
11523
11524 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11525 {
11526         struct pci_dev *pdev = ae_dev->pdev;
11527         struct hclge_dev *hdev;
11528         int ret;
11529
11530         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11531         if (!hdev)
11532                 return -ENOMEM;
11533
11534         hdev->pdev = pdev;
11535         hdev->ae_dev = ae_dev;
11536         hdev->reset_type = HNAE3_NONE_RESET;
11537         hdev->reset_level = HNAE3_FUNC_RESET;
11538         ae_dev->priv = hdev;
11539
11540         /* HW supports 2-layer vlan */
11541         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11542
11543         mutex_init(&hdev->vport_lock);
11544         spin_lock_init(&hdev->fd_rule_lock);
11545         sema_init(&hdev->reset_sem, 1);
11546
11547         ret = hclge_pci_init(hdev);
11548         if (ret)
11549                 goto out;
11550
11551         ret = hclge_devlink_init(hdev);
11552         if (ret)
11553                 goto err_pci_uninit;
11554
11555         /* Firmware command queue initialization */
11556         ret = hclge_cmd_queue_init(hdev);
11557         if (ret)
11558                 goto err_devlink_uninit;
11559
11560         /* Firmware command initialization */
11561         ret = hclge_cmd_init(hdev);
11562         if (ret)
11563                 goto err_cmd_uninit;
11564
11565         ret  = hclge_clear_hw_resource(hdev);
11566         if (ret)
11567                 goto err_cmd_uninit;
11568
11569         ret = hclge_get_cap(hdev);
11570         if (ret)
11571                 goto err_cmd_uninit;
11572
11573         ret = hclge_query_dev_specs(hdev);
11574         if (ret) {
11575                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11576                         ret);
11577                 goto err_cmd_uninit;
11578         }
11579
11580         ret = hclge_configure(hdev);
11581         if (ret) {
11582                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11583                 goto err_cmd_uninit;
11584         }
11585
11586         ret = hclge_init_msi(hdev);
11587         if (ret) {
11588                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11589                 goto err_cmd_uninit;
11590         }
11591
11592         ret = hclge_misc_irq_init(hdev);
11593         if (ret)
11594                 goto err_msi_uninit;
11595
11596         ret = hclge_alloc_tqps(hdev);
11597         if (ret) {
11598                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11599                 goto err_msi_irq_uninit;
11600         }
11601
11602         ret = hclge_alloc_vport(hdev);
11603         if (ret)
11604                 goto err_msi_irq_uninit;
11605
11606         ret = hclge_map_tqp(hdev);
11607         if (ret)
11608                 goto err_msi_irq_uninit;
11609
11610         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11611             !hnae3_dev_phy_imp_supported(hdev)) {
11612                 ret = hclge_mac_mdio_config(hdev);
11613                 if (ret)
11614                         goto err_msi_irq_uninit;
11615         }
11616
11617         ret = hclge_init_umv_space(hdev);
11618         if (ret)
11619                 goto err_mdiobus_unreg;
11620
11621         ret = hclge_mac_init(hdev);
11622         if (ret) {
11623                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11624                 goto err_mdiobus_unreg;
11625         }
11626
11627         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11628         if (ret) {
11629                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11630                 goto err_mdiobus_unreg;
11631         }
11632
11633         ret = hclge_config_gro(hdev);
11634         if (ret)
11635                 goto err_mdiobus_unreg;
11636
11637         ret = hclge_init_vlan_config(hdev);
11638         if (ret) {
11639                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11640                 goto err_mdiobus_unreg;
11641         }
11642
11643         ret = hclge_tm_schd_init(hdev);
11644         if (ret) {
11645                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11646                 goto err_mdiobus_unreg;
11647         }
11648
11649         ret = hclge_rss_init_cfg(hdev);
11650         if (ret) {
11651                 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11652                 goto err_mdiobus_unreg;
11653         }
11654
11655         ret = hclge_rss_init_hw(hdev);
11656         if (ret) {
11657                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11658                 goto err_mdiobus_unreg;
11659         }
11660
11661         ret = init_mgr_tbl(hdev);
11662         if (ret) {
11663                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11664                 goto err_mdiobus_unreg;
11665         }
11666
11667         ret = hclge_init_fd_config(hdev);
11668         if (ret) {
11669                 dev_err(&pdev->dev,
11670                         "fd table init fail, ret=%d\n", ret);
11671                 goto err_mdiobus_unreg;
11672         }
11673
11674         ret = hclge_ptp_init(hdev);
11675         if (ret)
11676                 goto err_mdiobus_unreg;
11677
11678         INIT_KFIFO(hdev->mac_tnl_log);
11679
11680         hclge_dcb_ops_set(hdev);
11681
11682         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11683         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11684
11685         /* Set up affinity after the service timer setup because add_timer_on
11686          * is called in the affinity notify callback.
11687          */
11688         hclge_misc_affinity_setup(hdev);
11689
11690         hclge_clear_all_event_cause(hdev);
11691         hclge_clear_resetting_state(hdev);
11692
11693         /* Log and clear the hw errors that have already occurred */
11694         if (hnae3_dev_ras_imp_supported(hdev))
11695                 hclge_handle_occurred_error(hdev);
11696         else
11697                 hclge_handle_all_hns_hw_errors(ae_dev);
11698
11699         /* Request a delayed reset for error recovery: an immediate global
11700          * reset on one PF would affect the pending initialization of other PFs.
11701          */
11702         if (ae_dev->hw_err_reset_req) {
11703                 enum hnae3_reset_type reset_level;
11704
11705                 reset_level = hclge_get_reset_level(ae_dev,
11706                                                     &ae_dev->hw_err_reset_req);
11707                 hclge_set_def_reset_request(ae_dev, reset_level);
11708                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11709         }
11710
11711         hclge_init_rxd_adv_layout(hdev);
11712
11713         /* Enable MISC vector(vector0) */
11714         hclge_enable_vector(&hdev->misc_vector, true);
11715
11716         hclge_state_init(hdev);
11717         hdev->last_reset_time = jiffies;
11718
11719         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11720                  HCLGE_DRIVER_NAME);
11721
11722         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11723
11724         return 0;
11725
11726 err_mdiobus_unreg:
11727         if (hdev->hw.mac.phydev)
11728                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11729 err_msi_irq_uninit:
11730         hclge_misc_irq_uninit(hdev);
11731 err_msi_uninit:
11732         pci_free_irq_vectors(pdev);
11733 err_cmd_uninit:
11734         hclge_cmd_uninit(hdev);
11735 err_devlink_uninit:
11736         hclge_devlink_uninit(hdev);
11737 err_pci_uninit:
11738         pcim_iounmap(pdev, hdev->hw.io_base);
11739         pci_clear_master(pdev);
11740         pci_release_regions(pdev);
11741         pci_disable_device(pdev);
11742 out:
11743         mutex_destroy(&hdev->vport_lock);
11744         return ret;
11745 }
11746
11747 static void hclge_stats_clear(struct hclge_dev *hdev)
11748 {
11749         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11750 }
11751
11752 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11753 {
11754         return hclge_config_switch_param(hdev, vf, enable,
11755                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
11756 }
11757
11758 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11759 {
11760         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11761                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
11762                                           enable, vf);
11763 }
11764
11765 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11766 {
11767         int ret;
11768
11769         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11770         if (ret) {
11771                 dev_err(&hdev->pdev->dev,
11772                         "Set vf %d mac spoof check %s failed, ret=%d\n",
11773                         vf, enable ? "on" : "off", ret);
11774                 return ret;
11775         }
11776
11777         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11778         if (ret)
11779                 dev_err(&hdev->pdev->dev,
11780                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
11781                         vf, enable ? "on" : "off", ret);
11782
11783         return ret;
11784 }
11785
11786 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11787                                  bool enable)
11788 {
11789         struct hclge_vport *vport = hclge_get_vport(handle);
11790         struct hclge_dev *hdev = vport->back;
11791         u32 new_spoofchk = enable ? 1 : 0;
11792         int ret;
11793
11794         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11795                 return -EOPNOTSUPP;
11796
11797         vport = hclge_get_vf_vport(hdev, vf);
11798         if (!vport)
11799                 return -EINVAL;
11800
11801         if (vport->vf_info.spoofchk == new_spoofchk)
11802                 return 0;
11803
11804         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11805                 dev_warn(&hdev->pdev->dev,
11806                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11807                          vf);
11808         else if (enable && hclge_is_umv_space_full(vport, true))
11809                 dev_warn(&hdev->pdev->dev,
11810                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11811                          vf);
11812
11813         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11814         if (ret)
11815                 return ret;
11816
11817         vport->vf_info.spoofchk = new_spoofchk;
11818         return 0;
11819 }
11820
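/* Userspace sketch (device name and VF index are placeholders): spoof
 * checking is toggled per VF through the standard ndo, e.g.
 * "ip link set dev eth0 vf 0 spoofchk on". Note the warnings above: with a
 * full VF VLAN or MAC table, enabling the check may cause that VF's packets
 * to be dropped.
 */
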
11821 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11822 {
11823         struct hclge_vport *vport = hdev->vport;
11824         int ret;
11825         int i;
11826
11827         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11828                 return 0;
11829
11830         /* resume the vf spoof check state after reset */
11831         for (i = 0; i < hdev->num_alloc_vport; i++) {
11832                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11833                                                vport->vf_info.spoofchk);
11834                 if (ret)
11835                         return ret;
11836
11837                 vport++;
11838         }
11839
11840         return 0;
11841 }
11842
11843 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11844 {
11845         struct hclge_vport *vport = hclge_get_vport(handle);
11846         struct hclge_dev *hdev = vport->back;
11847         u32 new_trusted = enable ? 1 : 0;
11848
11849         vport = hclge_get_vf_vport(hdev, vf);
11850         if (!vport)
11851                 return -EINVAL;
11852
11853         if (vport->vf_info.trusted == new_trusted)
11854                 return 0;
11855
11856         vport->vf_info.trusted = new_trusted;
11857         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11858         hclge_task_schedule(hdev, 0);
11859
11860         return 0;
11861 }
11862
11863 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11864 {
11865         int ret;
11866         int vf;
11867
11868         /* reset vf rate to default value */
11869         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11870                 struct hclge_vport *vport = &hdev->vport[vf];
11871
11872                 vport->vf_info.max_tx_rate = 0;
11873                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11874                 if (ret)
11875                         dev_err(&hdev->pdev->dev,
11876                                 "vf%d failed to reset to default, ret=%d\n",
11877                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
11878         }
11879 }
11880
11881 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11882                                      int min_tx_rate, int max_tx_rate)
11883 {
11884         if (min_tx_rate != 0 ||
11885             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11886                 dev_err(&hdev->pdev->dev,
11887                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11888                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11889                 return -EINVAL;
11890         }
11891
11892         return 0;
11893 }
11894
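/* Userspace sketch (device name and VF index are placeholders): these limits
 * are exercised through the VF rate ndo, e.g.
 *
 *   ip link set dev eth0 vf 0 max_tx_rate 1000
 *
 * This device requires min_tx_rate to be 0 (the check above), and max_tx_rate
 * is capped at the MAC's maximum speed in Mbit/s.
 */
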
11895 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11896                              int min_tx_rate, int max_tx_rate, bool force)
11897 {
11898         struct hclge_vport *vport = hclge_get_vport(handle);
11899         struct hclge_dev *hdev = vport->back;
11900         int ret;
11901
11902         ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11903         if (ret)
11904                 return ret;
11905
11906         vport = hclge_get_vf_vport(hdev, vf);
11907         if (!vport)
11908                 return -EINVAL;
11909
11910         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11911                 return 0;
11912
11913         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11914         if (ret)
11915                 return ret;
11916
11917         vport->vf_info.max_tx_rate = max_tx_rate;
11918
11919         return 0;
11920 }
11921
11922 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11923 {
11924         struct hnae3_handle *handle = &hdev->vport->nic;
11925         struct hclge_vport *vport;
11926         int ret;
11927         int vf;
11928
11929         /* resume the vf max_tx_rate after reset */
11930         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11931                 vport = hclge_get_vf_vport(hdev, vf);
11932                 if (!vport)
11933                         return -EINVAL;
11934
11935                 /* Zero means max rate. After a reset the firmware has already
11936                  * set it to max rate, so just continue.
11937                  */
11938                 if (!vport->vf_info.max_tx_rate)
11939                         continue;
11940
11941                 ret = hclge_set_vf_rate(handle, vf, 0,
11942                                         vport->vf_info.max_tx_rate, true);
11943                 if (ret) {
11944                         dev_err(&hdev->pdev->dev,
11945                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11946                                 vf, vport->vf_info.max_tx_rate, ret);
11947                         return ret;
11948                 }
11949         }
11950
11951         return 0;
11952 }
11953
11954 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11955 {
11956         struct hclge_vport *vport = hdev->vport;
11957         int i;
11958
11959         for (i = 0; i < hdev->num_alloc_vport; i++) {
11960                 hclge_vport_stop(vport);
11961                 vport++;
11962         }
11963 }
11964
11965 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11966 {
11967         struct hclge_dev *hdev = ae_dev->priv;
11968         struct pci_dev *pdev = ae_dev->pdev;
11969         int ret;
11970
11971         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11972
11973         hclge_stats_clear(hdev);
11974         /* NOTE: a pf reset does not need to clear or restore the pf and vf
11975          * table entries, so the tables in memory should not be cleaned here.
11976          */
11977         if (hdev->reset_type == HNAE3_IMP_RESET ||
11978             hdev->reset_type == HNAE3_GLOBAL_RESET) {
11979                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11980                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11981                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11982                 hclge_reset_umv_space(hdev);
11983         }
11984
11985         ret = hclge_cmd_init(hdev);
11986         if (ret) {
11987                 dev_err(&pdev->dev, "Cmd queue init failed\n");
11988                 return ret;
11989         }
11990
11991         ret = hclge_map_tqp(hdev);
11992         if (ret) {
11993                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11994                 return ret;
11995         }
11996
11997         ret = hclge_mac_init(hdev);
11998         if (ret) {
11999                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
12000                 return ret;
12001         }
12002
12003         ret = hclge_tp_port_init(hdev);
12004         if (ret) {
12005                 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
12006                         ret);
12007                 return ret;
12008         }
12009
12010         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12011         if (ret) {
12012                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
12013                 return ret;
12014         }
12015
12016         ret = hclge_config_gro(hdev);
12017         if (ret)
12018                 return ret;
12019
12020         ret = hclge_init_vlan_config(hdev);
12021         if (ret) {
12022                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12023                 return ret;
12024         }
12025
12026         ret = hclge_tm_init_hw(hdev, true);
12027         if (ret) {
12028                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12029                 return ret;
12030         }
12031
12032         ret = hclge_rss_init_hw(hdev);
12033         if (ret) {
12034                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12035                 return ret;
12036         }
12037
12038         ret = init_mgr_tbl(hdev);
12039         if (ret) {
12040                 dev_err(&pdev->dev,
12041                         "failed to reinit manager table, ret = %d\n", ret);
12042                 return ret;
12043         }
12044
12045         ret = hclge_init_fd_config(hdev);
12046         if (ret) {
12047                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12048                 return ret;
12049         }
12050
12051         ret = hclge_ptp_init(hdev);
12052         if (ret)
12053                 return ret;
12054
12055         /* Log and clear the hw errors that have already occurred */
12056         if (hnae3_dev_ras_imp_supported(hdev))
12057                 hclge_handle_occurred_error(hdev);
12058         else
12059                 hclge_handle_all_hns_hw_errors(ae_dev);
12060
12061         /* Re-enable the hw error interrupts because
12062          * the interrupts get disabled on global reset.
12063          */
12064         ret = hclge_config_nic_hw_error(hdev, true);
12065         if (ret) {
12066                 dev_err(&pdev->dev,
12067                         "fail(%d) to re-enable NIC hw error interrupts\n",
12068                         ret);
12069                 return ret;
12070         }
12071
12072         if (hdev->roce_client) {
12073                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12074                 if (ret) {
12075                         dev_err(&pdev->dev,
12076                                 "fail(%d) to re-enable roce ras interrupts\n",
12077                                 ret);
12078                         return ret;
12079                 }
12080         }
12081
12082         hclge_reset_vport_state(hdev);
12083         ret = hclge_reset_vport_spoofchk(hdev);
12084         if (ret)
12085                 return ret;
12086
12087         ret = hclge_resume_vf_rate(hdev);
12088         if (ret)
12089                 return ret;
12090
12091         hclge_init_rxd_adv_layout(hdev);
12092
12093         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12094                  HCLGE_DRIVER_NAME);
12095
12096         return 0;
12097 }
12098
12099 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12100 {
12101         struct hclge_dev *hdev = ae_dev->priv;
12102         struct hclge_mac *mac = &hdev->hw.mac;
12103
12104         hclge_reset_vf_rate(hdev);
12105         hclge_clear_vf_vlan(hdev);
12106         hclge_misc_affinity_teardown(hdev);
12107         hclge_state_uninit(hdev);
12108         hclge_ptp_uninit(hdev);
12109         hclge_uninit_rxd_adv_layout(hdev);
12110         hclge_uninit_mac_table(hdev);
12111         hclge_del_all_fd_entries(hdev);
12112
12113         if (mac->phydev)
12114                 mdiobus_unregister(mac->mdio_bus);
12115
12116         /* Disable MISC vector(vector0) */
12117         hclge_enable_vector(&hdev->misc_vector, false);
12118         synchronize_irq(hdev->misc_vector.vector_irq);
12119
12120         /* Disable all hw interrupts */
12121         hclge_config_mac_tnl_int(hdev, false);
12122         hclge_config_nic_hw_error(hdev, false);
12123         hclge_config_rocee_ras_interrupt(hdev, false);
12124
12125         hclge_cmd_uninit(hdev);
12126         hclge_misc_irq_uninit(hdev);
12127         hclge_devlink_uninit(hdev);
12128         hclge_pci_uninit(hdev);
12129         mutex_destroy(&hdev->vport_lock);
12130         hclge_uninit_vport_vlan_table(hdev);
12131         ae_dev->priv = NULL;
12132 }
12133
12134 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12135 {
12136         struct hclge_vport *vport = hclge_get_vport(handle);
12137         struct hclge_dev *hdev = vport->back;
12138
12139         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12140 }
12141
12142 static void hclge_get_channels(struct hnae3_handle *handle,
12143                                struct ethtool_channels *ch)
12144 {
12145         ch->max_combined = hclge_get_max_channels(handle);
12146         ch->other_count = 1;
12147         ch->max_other = 1;
12148         ch->combined_count = handle->kinfo.rss_size;
12149 }
12150
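/* Userspace sketch ("eth0" is a placeholder): these callbacks back the
 * ethtool channels API, e.g. "ethtool -l eth0" to read the limits reported
 * above and "ethtool -L eth0 combined 8" to request a new queue count, which
 * reaches hclge_set_channels() below through the hns3 ethtool layer.
 */
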
12151 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12152                                         u16 *alloc_tqps, u16 *max_rss_size)
12153 {
12154         struct hclge_vport *vport = hclge_get_vport(handle);
12155         struct hclge_dev *hdev = vport->back;
12156
12157         *alloc_tqps = vport->alloc_tqps;
12158         *max_rss_size = hdev->pf_rss_size_max;
12159 }
12160
12161 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12162                               bool rxfh_configured)
12163 {
12164         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12165         struct hclge_vport *vport = hclge_get_vport(handle);
12166         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12167         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12168         struct hclge_dev *hdev = vport->back;
12169         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12170         u16 cur_rss_size = kinfo->rss_size;
12171         u16 cur_tqps = kinfo->num_tqps;
12172         u16 tc_valid[HCLGE_MAX_TC_NUM];
12173         u16 roundup_size;
12174         u32 *rss_indir;
12175         unsigned int i;
12176         int ret;
12177
12178         kinfo->req_rss_size = new_tqps_num;
12179
12180         ret = hclge_tm_vport_map_update(hdev);
12181         if (ret) {
12182                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12183                 return ret;
12184         }
12185
12186         roundup_size = roundup_pow_of_two(kinfo->rss_size);
12187         roundup_size = ilog2(roundup_size);
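        /* Worked example: kinfo->rss_size = 10 -> roundup_pow_of_two() = 16
         * -> ilog2() = 4, so each valid TC below gets tc_size = 4 (a log2
         * value) with tc_offset spaced rss_size entries apart.
         */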
12188         /* Set the RSS TC mode according to the new RSS size */
12189         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12190                 tc_valid[i] = 0;
12191
12192                 if (!(hdev->hw_tc_map & BIT(i)))
12193                         continue;
12194
12195                 tc_valid[i] = 1;
12196                 tc_size[i] = roundup_size;
12197                 tc_offset[i] = kinfo->rss_size * i;
12198         }
12199         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12200         if (ret)
12201                 return ret;
12202
12203         /* RSS indirection table has been configured by user */
12204         if (rxfh_configured)
12205                 goto out;
12206
12207         /* Reinitialize the rss indirection table according to the new RSS size */
12208         rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12209                             GFP_KERNEL);
12210         if (!rss_indir)
12211                 return -ENOMEM;
12212
12213         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12214                 rss_indir[i] = i % kinfo->rss_size;
12215
12216         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12217         if (ret)
12218                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12219                         ret);
12220
12221         kfree(rss_indir);
12222
12223 out:
12224         if (!ret)
12225                 dev_info(&hdev->pdev->dev,
12226                          "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12227                          cur_rss_size, kinfo->rss_size,
12228                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12229
12230         return ret;
12231 }
12232
12233 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12234                               u32 *regs_num_64_bit)
12235 {
12236         struct hclge_desc desc;
12237         u32 total_num;
12238         int ret;
12239
12240         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12241         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12242         if (ret) {
12243                 dev_err(&hdev->pdev->dev,
12244                         "Query register number cmd failed, ret = %d.\n", ret);
12245                 return ret;
12246         }
12247
12248         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12249         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12250
12251         total_num = *regs_num_32_bit + *regs_num_64_bit;
12252         if (!total_num)
12253                 return -EINVAL;
12254
12255         return 0;
12256 }
12257
12258 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12259                                  void *data)
12260 {
12261 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12262 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12263
12264         struct hclge_desc *desc;
12265         u32 *reg_val = data;
12266         __le32 *desc_data;
12267         int nodata_num;
12268         int cmd_num;
12269         int i, k, n;
12270         int ret;
12271
12272         if (regs_num == 0)
12273                 return 0;
12274
12275         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12276         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12277                                HCLGE_32_BIT_REG_RTN_DATANUM);
12278         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12279         if (!desc)
12280                 return -ENOMEM;
12281
12282         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12283         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12284         if (ret) {
12285                 dev_err(&hdev->pdev->dev,
12286                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
12287                 kfree(desc);
12288                 return ret;
12289         }
12290
12291         for (i = 0; i < cmd_num; i++) {
12292                 if (i == 0) {
12293                         desc_data = (__le32 *)(&desc[i].data[0]);
12294                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12295                 } else {
12296                         desc_data = (__le32 *)(&desc[i]);
12297                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
12298                 }
12299                 for (k = 0; k < n; k++) {
12300                         *reg_val++ = le32_to_cpu(*desc_data++);
12301
12302                         regs_num--;
12303                         if (!regs_num)
12304                                 break;
12305                 }
12306         }
12307
12308         kfree(desc);
12309         return 0;
12310 }
12311
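/* A worked example of the descriptor math above: with 8 data words per BD and
 * 2 words of the first BD reserved (nodata), reading e.g. 100 registers needs
 * DIV_ROUND_UP(100 + 2, 8) = 13 BDs; the first BD yields 6 values and each
 * following BD yields 8, which covers all 100 before the loop breaks.
 */
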
12312 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12313                                  void *data)
12314 {
12315 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12316 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12317
12318         struct hclge_desc *desc;
12319         u64 *reg_val = data;
12320         __le64 *desc_data;
12321         int nodata_len;
12322         int cmd_num;
12323         int i, k, n;
12324         int ret;
12325
12326         if (regs_num == 0)
12327                 return 0;
12328
12329         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12330         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12331                                HCLGE_64_BIT_REG_RTN_DATANUM);
12332         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12333         if (!desc)
12334                 return -ENOMEM;
12335
12336         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12337         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12338         if (ret) {
12339                 dev_err(&hdev->pdev->dev,
12340                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
12341                 kfree(desc);
12342                 return ret;
12343         }
12344
12345         for (i = 0; i < cmd_num; i++) {
12346                 if (i == 0) {
12347                         desc_data = (__le64 *)(&desc[i].data[0]);
12348                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12349                 } else {
12350                         desc_data = (__le64 *)(&desc[i]);
12351                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
12352                 }
12353                 for (k = 0; k < n; k++) {
12354                         *reg_val++ = le64_to_cpu(*desc_data++);
12355
12356                         regs_num--;
12357                         if (!regs_num)
12358                                 break;
12359                 }
12360         }
12361
12362         kfree(desc);
12363         return 0;
12364 }
12365
12366 #define MAX_SEPARATE_NUM        4
12367 #define SEPARATOR_VALUE         0xFDFCFBFA
12368 #define REG_NUM_PER_LINE        4
12369 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
12370 #define REG_SEPARATOR_LINE      1
12371 #define REG_NUM_REMAIN_MASK     3
12372
12373 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12374 {
12375         int i;
12376
12377         /* initialize the command BDs except the last one */
12378         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12379                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12380                                            true);
12381                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12382         }
12383
12384         /* initialize the last command BD */
12385         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12386
12387         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12388 }
12389
12390 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12391                                     int *bd_num_list,
12392                                     u32 type_num)
12393 {
12394         u32 entries_per_desc, desc_index, index, offset, i;
12395         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12396         int ret;
12397
12398         ret = hclge_query_bd_num_cmd_send(hdev, desc);
12399         if (ret) {
12400                 dev_err(&hdev->pdev->dev,
12401                         "Get dfx bd num fail, status is %d.\n", ret);
12402                 return ret;
12403         }
12404
12405         entries_per_desc = ARRAY_SIZE(desc[0].data);
12406         for (i = 0; i < type_num; i++) {
12407                 offset = hclge_dfx_bd_offset_list[i];
12408                 index = offset % entries_per_desc;
12409                 desc_index = offset / entries_per_desc;
12410                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12411         }
12412
12413         return ret;
12414 }
12415
12416 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12417                                   struct hclge_desc *desc_src, int bd_num,
12418                                   enum hclge_opcode_type cmd)
12419 {
12420         struct hclge_desc *desc = desc_src;
12421         int i, ret;
12422
12423         hclge_cmd_setup_basic_desc(desc, cmd, true);
12424         for (i = 0; i < bd_num - 1; i++) {
12425                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12426                 desc++;
12427                 hclge_cmd_setup_basic_desc(desc, cmd, true);
12428         }
12429
12430         desc = desc_src;
12431         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12432         if (ret)
12433                 dev_err(&hdev->pdev->dev,
12434                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12435                         cmd, ret);
12436
12437         return ret;
12438 }
12439
12440 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12441                                     void *data)
12442 {
12443         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12444         struct hclge_desc *desc = desc_src;
12445         u32 *reg = data;
12446
12447         entries_per_desc = ARRAY_SIZE(desc->data);
12448         reg_num = entries_per_desc * bd_num;
12449         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12450         for (i = 0; i < reg_num; i++) {
12451                 index = i % entries_per_desc;
12452                 desc_index = i / entries_per_desc;
12453                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12454         }
12455         for (i = 0; i < separator_num; i++)
12456                 *reg++ = SEPARATOR_VALUE;
12457
12458         return reg_num + separator_num;
12459 }
12460
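/* Padding note for the fetch above: separator_num rounds the output up to the
 * 4-word line size, so reg_num + separator_num is always a multiple of
 * REG_NUM_PER_LINE; a reg_num that is already a multiple of 4 still gets a
 * full line of SEPARATOR_VALUE words, so every register block ends with at
 * least one visible separator in the dump.
 */
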
12461 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12462 {
12463         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12464         int data_len_per_desc, bd_num, i;
12465         int *bd_num_list;
12466         u32 data_len;
12467         int ret;
12468
12469         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12470         if (!bd_num_list)
12471                 return -ENOMEM;
12472
12473         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12474         if (ret) {
12475                 dev_err(&hdev->pdev->dev,
12476                         "Get dfx reg bd num fail, status is %d.\n", ret);
12477                 goto out;
12478         }
12479
12480         data_len_per_desc = sizeof_field(struct hclge_desc, data);
12481         *len = 0;
12482         for (i = 0; i < dfx_reg_type_num; i++) {
12483                 bd_num = bd_num_list[i];
12484                 data_len = data_len_per_desc * bd_num;
12485                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12486         }
12487
12488 out:
12489         kfree(bd_num_list);
12490         return ret;
12491 }
12492
12493 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12494 {
12495         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12496         int bd_num, bd_num_max, buf_len, i;
12497         struct hclge_desc *desc_src;
12498         int *bd_num_list;
12499         u32 *reg = data;
12500         int ret;
12501
12502         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12503         if (!bd_num_list)
12504                 return -ENOMEM;
12505
12506         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12507         if (ret) {
12508                 dev_err(&hdev->pdev->dev,
12509                         "Get dfx reg bd num fail, status is %d.\n", ret);
12510                 goto out;
12511         }
12512
12513         bd_num_max = bd_num_list[0];
12514         for (i = 1; i < dfx_reg_type_num; i++)
12515                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12516
12517         buf_len = sizeof(*desc_src) * bd_num_max;
12518         desc_src = kzalloc(buf_len, GFP_KERNEL);
12519         if (!desc_src) {
12520                 ret = -ENOMEM;
12521                 goto out;
12522         }
12523
12524         for (i = 0; i < dfx_reg_type_num; i++) {
12525                 bd_num = bd_num_list[i];
12526                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12527                                              hclge_dfx_reg_opcode_list[i]);
12528                 if (ret) {
12529                         dev_err(&hdev->pdev->dev,
12530                                 "Get dfx reg fail, status is %d.\n", ret);
12531                         break;
12532                 }
12533
12534                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12535         }
12536
12537         kfree(desc_src);
12538 out:
12539         kfree(bd_num_list);
12540         return ret;
12541 }
12542
12543 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12544                               struct hnae3_knic_private_info *kinfo)
12545 {
12546 #define HCLGE_RING_REG_OFFSET           0x200
12547 #define HCLGE_RING_INT_REG_OFFSET       0x4
12548
12549         int i, j, reg_num, separator_num;
12550         int data_num_sum;
12551         u32 *reg = data;
12552
12553         /* fetch per-PF register values from the PF PCIe register space */
12554         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12555         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12556         for (i = 0; i < reg_num; i++)
12557                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12558         for (i = 0; i < separator_num; i++)
12559                 *reg++ = SEPARATOR_VALUE;
12560         data_num_sum = reg_num + separator_num;
12561
12562         reg_num = ARRAY_SIZE(common_reg_addr_list);
12563         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12564         for (i = 0; i < reg_num; i++)
12565                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12566         for (i = 0; i < separator_num; i++)
12567                 *reg++ = SEPARATOR_VALUE;
12568         data_num_sum += reg_num + separator_num;
12569
12570         reg_num = ARRAY_SIZE(ring_reg_addr_list);
12571         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12572         for (j = 0; j < kinfo->num_tqps; j++) {
12573                 for (i = 0; i < reg_num; i++)
12574                         *reg++ = hclge_read_dev(&hdev->hw,
12575                                                 ring_reg_addr_list[i] +
12576                                                 HCLGE_RING_REG_OFFSET * j);
12577                 for (i = 0; i < separator_num; i++)
12578                         *reg++ = SEPARATOR_VALUE;
12579         }
12580         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12581
12582         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12583         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12584         for (j = 0; j < hdev->num_msi_used - 1; j++) {
12585                 for (i = 0; i < reg_num; i++)
12586                         *reg++ = hclge_read_dev(&hdev->hw,
12587                                                 tqp_intr_reg_addr_list[i] +
12588                                                 HCLGE_RING_INT_REG_OFFSET * j);
12589                 for (i = 0; i < separator_num; i++)
12590                         *reg++ = SEPARATOR_VALUE;
12591         }
12592         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12593
12594         return data_num_sum;
12595 }
12596
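/* Return the buffer size (in bytes) that hclge_get_regs() needs: whole
 * REG_LEN_PER_LINE lines for the cmdq/common/ring/interrupt register groups,
 * plus the 32-bit and 64-bit register areas queried from firmware, plus the
 * DFX register length reported by hclge_get_dfx_reg_len().
 */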
12597 static int hclge_get_regs_len(struct hnae3_handle *handle)
12598 {
12599         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12600         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12601         struct hclge_vport *vport = hclge_get_vport(handle);
12602         struct hclge_dev *hdev = vport->back;
12603         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12604         int regs_lines_32_bit, regs_lines_64_bit;
12605         int ret;
12606
12607         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12608         if (ret) {
12609                 dev_err(&hdev->pdev->dev,
12610                         "Get register number failed, ret = %d.\n", ret);
12611                 return ret;
12612         }
12613
12614         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12615         if (ret) {
12616                 dev_err(&hdev->pdev->dev,
12617                         "Get dfx reg len failed, ret = %d.\n", ret);
12618                 return ret;
12619         }
12620
12621         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12622                 REG_SEPARATOR_LINE;
12623         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12624                 REG_SEPARATOR_LINE;
12625         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12626                 REG_SEPARATOR_LINE;
12627         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12628                 REG_SEPARATOR_LINE;
12629         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12630                 REG_SEPARATOR_LINE;
12631         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12632                 REG_SEPARATOR_LINE;
12633
12634         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12635                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12636                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12637 }
12638
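/* Fill @data for the ethtool register dump in the same order accounted for
 * by hclge_get_regs_len(): direct PF registers first, then the 32-bit and
 * 64-bit registers read over the command queue (each padded with separator
 * words), and finally the DFX registers. @version is set to the firmware
 * version.
 */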
12639 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12640                            void *data)
12641 {
12642         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12643         struct hclge_vport *vport = hclge_get_vport(handle);
12644         struct hclge_dev *hdev = vport->back;
12645         u32 regs_num_32_bit, regs_num_64_bit;
12646         int i, reg_num, separator_num, ret;
12647         u32 *reg = data;
12648
12649         *version = hdev->fw_version;
12650
12651         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12652         if (ret) {
12653                 dev_err(&hdev->pdev->dev,
12654                         "Get register number failed, ret = %d.\n", ret);
12655                 return;
12656         }
12657
12658         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12659
12660         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12661         if (ret) {
12662                 dev_err(&hdev->pdev->dev,
12663                         "Get 32 bit register failed, ret = %d.\n", ret);
12664                 return;
12665         }
12666         reg_num = regs_num_32_bit;
12667         reg += reg_num;
12668         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12669         for (i = 0; i < separator_num; i++)
12670                 *reg++ = SEPARATOR_VALUE;
12671
12672         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12673         if (ret) {
12674                 dev_err(&hdev->pdev->dev,
12675                         "Get 64 bit register failed, ret = %d.\n", ret);
12676                 return;
12677         }
12678         reg_num = regs_num_64_bit * 2;
12679         reg += reg_num;
12680         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12681         for (i = 0; i < separator_num; i++)
12682                 *reg++ = SEPARATOR_VALUE;
12683
12684         ret = hclge_get_dfx_reg(hdev, reg);
12685         if (ret)
12686                 dev_err(&hdev->pdev->dev,
12687                         "Get dfx register failed, ret = %d.\n", ret);
12688 }
12689
12690 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12691 {
12692         struct hclge_set_led_state_cmd *req;
12693         struct hclge_desc desc;
12694         int ret;
12695
12696         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12697
12698         req = (struct hclge_set_led_state_cmd *)desc.data;
12699         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12700                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12701
12702         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12703         if (ret)
12704                 dev_err(&hdev->pdev->dev,
12705                         "Send set led state cmd error, ret = %d\n", ret);
12706
12707         return ret;
12708 }
12709
12710 enum hclge_led_status {
12711         HCLGE_LED_OFF,
12712         HCLGE_LED_ON,
12713         HCLGE_LED_NO_CHANGE = 0xFF,
12714 };
12715
12716 static int hclge_set_led_id(struct hnae3_handle *handle,
12717                             enum ethtool_phys_id_state status)
12718 {
12719         struct hclge_vport *vport = hclge_get_vport(handle);
12720         struct hclge_dev *hdev = vport->back;
12721
12722         switch (status) {
12723         case ETHTOOL_ID_ACTIVE:
12724                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12725         case ETHTOOL_ID_INACTIVE:
12726                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12727         default:
12728                 return -EINVAL;
12729         }
12730 }
12731
12732 static void hclge_get_link_mode(struct hnae3_handle *handle,
12733                                 unsigned long *supported,
12734                                 unsigned long *advertising)
12735 {
12736         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12737         struct hclge_vport *vport = hclge_get_vport(handle);
12738         struct hclge_dev *hdev = vport->back;
12739         unsigned int idx = 0;
12740
12741         for (; idx < size; idx++) {
12742                 supported[idx] = hdev->hw.mac.supported[idx];
12743                 advertising[idx] = hdev->hw.mac.advertising[idx];
12744         }
12745 }
12746
12747 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12748 {
12749         struct hclge_vport *vport = hclge_get_vport(handle);
12750         struct hclge_dev *hdev = vport->back;
12751         bool gro_en_old = hdev->gro_en;
12752         int ret;
12753
12754         hdev->gro_en = enable;
12755         ret = hclge_config_gro(hdev);
12756         if (ret)
12757                 hdev->gro_en = gro_en_old;
12758
12759         return ret;
12760 }
12761
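/* Re-apply promiscuous configuration for vports whose PROMISC_CHANGE flag is
 * set: the PF (vport 0) uses its netdev flags plus any overflow promiscuous
 * flags, while VFs get unicast/multicast promiscuous mode only when trusted;
 * broadcast follows the VF's own request. A vport that fails keeps its change
 * flag so the sync is retried later.
 */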
12762 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12763 {
12764         struct hclge_vport *vport = &hdev->vport[0];
12765         struct hnae3_handle *handle = &vport->nic;
12766         u8 tmp_flags;
12767         int ret;
12768         u16 i;
12769
12770         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12771                 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12772                 vport->last_promisc_flags = vport->overflow_promisc_flags;
12773         }
12774
12775         if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12776                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12777                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12778                                              tmp_flags & HNAE3_MPE);
12779                 if (!ret) {
12780                         clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12781                                   &vport->state);
12782                         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12783                                 &vport->state);
12784                 }
12785         }
12786
12787         for (i = 1; i < hdev->num_alloc_vport; i++) {
12788                 bool uc_en = false;
12789                 bool mc_en = false;
12790                 bool bc_en;
12791
12792                 vport = &hdev->vport[i];
12793
12794                 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12795                                         &vport->state))
12796                         continue;
12797
12798                 if (vport->vf_info.trusted) {
12799                         uc_en = vport->vf_info.request_uc_en > 0 ||
12800                                 vport->overflow_promisc_flags &
12801                                 HNAE3_OVERFLOW_UPE;
12802                         mc_en = vport->vf_info.request_mc_en > 0 ||
12803                                 vport->overflow_promisc_flags &
12804                                 HNAE3_OVERFLOW_MPE;
12805                 }
12806                 bc_en = vport->vf_info.request_bc_en > 0;
12807
12808                 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12809                                                  mc_en, bc_en);
12810                 if (ret) {
12811                         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12812                                 &vport->state);
12813                         return;
12814                 }
12815                 hclge_set_vport_vlan_fltr_change(vport);
12816         }
12817 }
12818
12819 static bool hclge_module_existed(struct hclge_dev *hdev)
12820 {
12821         struct hclge_desc desc;
12822         u32 existed;
12823         int ret;
12824
12825         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12826         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12827         if (ret) {
12828                 dev_err(&hdev->pdev->dev,
12829                         "failed to get SFP exist state, ret = %d\n", ret);
12830                 return false;
12831         }
12832
12833         existed = le32_to_cpu(desc.data[0]);
12834
12835         return existed != 0;
12836 }
12837
12838 /* need 6 bds (140 bytes in total) in one reading,
12839  * return the number of bytes actually read, 0 means the read failed.
12840  */
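/* Rough size check, assuming the BD data sizes defined in hclge_main.h
 * (20 bytes of data in bd0, 24 bytes in each of bd1~bd5):
 *   20 + 5 * 24 = 140 bytes = HCLGE_SFP_INFO_MAX_LEN per read.
 */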
12841 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12842                                      u32 len, u8 *data)
12843 {
12844         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12845         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12846         u16 read_len;
12847         u16 copy_len;
12848         int ret;
12849         int i;
12850
12851         /* setup all 6 bds to read module eeprom info. */
12852         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12853                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12854                                            true);
12855
12856                 /* bd0~bd4 need next flag */
12857                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12858                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12859         }
12860
12861         /* setup bd0, this bd contains offset and read length. */
12862         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12863         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12864         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12865         sfp_info_bd0->read_len = cpu_to_le16(read_len);
12866
12867         ret = hclge_cmd_send(&hdev->hw, desc, i);
12868         if (ret) {
12869                 dev_err(&hdev->pdev->dev,
12870                         "failed to get SFP eeprom info, ret = %d\n", ret);
12871                 return 0;
12872         }
12873
12874         /* copy sfp info from bd0 to out buffer. */
12875         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12876         memcpy(data, sfp_info_bd0->data, copy_len);
12877         read_len = copy_len;
12878
12879         /* copy sfp info from bd1~bd5 to out buffer if needed. */
12880         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12881                 if (read_len >= len)
12882                         return read_len;
12883
12884                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12885                 memcpy(data + read_len, desc[i].data, copy_len);
12886                 read_len += copy_len;
12887         }
12888
12889         return read_len;
12890 }
12891
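/* ethtool module EEPROM read, only supported on fiber ports: the requested
 * range is read in chunks of at most HCLGE_SFP_INFO_MAX_LEN bytes per
 * firmware command, and a zero-length chunk is treated as an I/O error.
 */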
12892 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12893                                    u32 len, u8 *data)
12894 {
12895         struct hclge_vport *vport = hclge_get_vport(handle);
12896         struct hclge_dev *hdev = vport->back;
12897         u32 read_len = 0;
12898         u16 data_len;
12899
12900         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12901                 return -EOPNOTSUPP;
12902
12903         if (!hclge_module_existed(hdev))
12904                 return -ENXIO;
12905
12906         while (read_len < len) {
12907                 data_len = hclge_get_sfp_eeprom_info(hdev,
12908                                                      offset + read_len,
12909                                                      len - read_len,
12910                                                      data + read_len);
12911                 if (!data_len)
12912                         return -EIO;
12913
12914                 read_len += data_len;
12915         }
12916
12917         return 0;
12918 }
12919
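/* Query the firmware's link diagnosis status code; only devices newer than
 * HNAE3_DEVICE_VERSION_V2 support this command.
 */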
12920 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12921                                          u32 *status_code)
12922 {
12923         struct hclge_vport *vport = hclge_get_vport(handle);
12924         struct hclge_dev *hdev = vport->back;
12925         struct hclge_desc desc;
12926         int ret;
12927
12928         if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12929                 return -EOPNOTSUPP;
12930
12931         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12932         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12933         if (ret) {
12934                 dev_err(&hdev->pdev->dev,
12935                         "failed to query link diagnosis info, ret = %d\n", ret);
12936                 return ret;
12937         }
12938
12939         *status_code = le32_to_cpu(desc.data[0]);
12940         return 0;
12941 }
12942
12943 static const struct hnae3_ae_ops hclge_ops = {
12944         .init_ae_dev = hclge_init_ae_dev,
12945         .uninit_ae_dev = hclge_uninit_ae_dev,
12946         .reset_prepare = hclge_reset_prepare_general,
12947         .reset_done = hclge_reset_done,
12948         .init_client_instance = hclge_init_client_instance,
12949         .uninit_client_instance = hclge_uninit_client_instance,
12950         .map_ring_to_vector = hclge_map_ring_to_vector,
12951         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12952         .get_vector = hclge_get_vector,
12953         .put_vector = hclge_put_vector,
12954         .set_promisc_mode = hclge_set_promisc_mode,
12955         .request_update_promisc_mode = hclge_request_update_promisc_mode,
12956         .set_loopback = hclge_set_loopback,
12957         .start = hclge_ae_start,
12958         .stop = hclge_ae_stop,
12959         .client_start = hclge_client_start,
12960         .client_stop = hclge_client_stop,
12961         .get_status = hclge_get_status,
12962         .get_ksettings_an_result = hclge_get_ksettings_an_result,
12963         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12964         .get_media_type = hclge_get_media_type,
12965         .check_port_speed = hclge_check_port_speed,
12966         .get_fec = hclge_get_fec,
12967         .set_fec = hclge_set_fec,
12968         .get_rss_key_size = hclge_get_rss_key_size,
12969         .get_rss = hclge_get_rss,
12970         .set_rss = hclge_set_rss,
12971         .set_rss_tuple = hclge_set_rss_tuple,
12972         .get_rss_tuple = hclge_get_rss_tuple,
12973         .get_tc_size = hclge_get_tc_size,
12974         .get_mac_addr = hclge_get_mac_addr,
12975         .set_mac_addr = hclge_set_mac_addr,
12976         .do_ioctl = hclge_do_ioctl,
12977         .add_uc_addr = hclge_add_uc_addr,
12978         .rm_uc_addr = hclge_rm_uc_addr,
12979         .add_mc_addr = hclge_add_mc_addr,
12980         .rm_mc_addr = hclge_rm_mc_addr,
12981         .set_autoneg = hclge_set_autoneg,
12982         .get_autoneg = hclge_get_autoneg,
12983         .restart_autoneg = hclge_restart_autoneg,
12984         .halt_autoneg = hclge_halt_autoneg,
12985         .get_pauseparam = hclge_get_pauseparam,
12986         .set_pauseparam = hclge_set_pauseparam,
12987         .set_mtu = hclge_set_mtu,
12988         .reset_queue = hclge_reset_tqp,
12989         .get_stats = hclge_get_stats,
12990         .get_mac_stats = hclge_get_mac_stat,
12991         .update_stats = hclge_update_stats,
12992         .get_strings = hclge_get_strings,
12993         .get_sset_count = hclge_get_sset_count,
12994         .get_fw_version = hclge_get_fw_version,
12995         .get_mdix_mode = hclge_get_mdix_mode,
12996         .enable_vlan_filter = hclge_enable_vlan_filter,
12997         .set_vlan_filter = hclge_set_vlan_filter,
12998         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12999         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
13000         .reset_event = hclge_reset_event,
13001         .get_reset_level = hclge_get_reset_level,
13002         .set_default_reset_request = hclge_set_def_reset_request,
13003         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
13004         .set_channels = hclge_set_channels,
13005         .get_channels = hclge_get_channels,
13006         .get_regs_len = hclge_get_regs_len,
13007         .get_regs = hclge_get_regs,
13008         .set_led_id = hclge_set_led_id,
13009         .get_link_mode = hclge_get_link_mode,
13010         .add_fd_entry = hclge_add_fd_entry,
13011         .del_fd_entry = hclge_del_fd_entry,
13012         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
13013         .get_fd_rule_info = hclge_get_fd_rule_info,
13014         .get_fd_all_rules = hclge_get_all_rules,
13015         .enable_fd = hclge_enable_fd,
13016         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
13017         .dbg_read_cmd = hclge_dbg_read_cmd,
13018         .handle_hw_ras_error = hclge_handle_hw_ras_error,
13019         .get_hw_reset_stat = hclge_get_hw_reset_stat,
13020         .ae_dev_resetting = hclge_ae_dev_resetting,
13021         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
13022         .set_gro_en = hclge_gro_en,
13023         .get_global_queue_id = hclge_covert_handle_qid_global,
13024         .set_timer_task = hclge_set_timer_task,
13025         .mac_connect_phy = hclge_mac_connect_phy,
13026         .mac_disconnect_phy = hclge_mac_disconnect_phy,
13027         .get_vf_config = hclge_get_vf_config,
13028         .set_vf_link_state = hclge_set_vf_link_state,
13029         .set_vf_spoofchk = hclge_set_vf_spoofchk,
13030         .set_vf_trust = hclge_set_vf_trust,
13031         .set_vf_rate = hclge_set_vf_rate,
13032         .set_vf_mac = hclge_set_vf_mac,
13033         .get_module_eeprom = hclge_get_module_eeprom,
13034         .get_cmdq_stat = hclge_get_cmdq_stat,
13035         .add_cls_flower = hclge_add_cls_flower,
13036         .del_cls_flower = hclge_del_cls_flower,
13037         .cls_flower_active = hclge_is_cls_flower_active,
13038         .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13039         .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13040         .set_tx_hwts_info = hclge_ptp_set_tx_info,
13041         .get_rx_hwts = hclge_ptp_get_rx_hwts,
13042         .get_ts_info = hclge_ptp_get_ts_info,
13043         .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13044 };
13045
13046 static struct hnae3_ae_algo ae_algo = {
13047         .ops = &hclge_ops,
13048         .pdev_id_table = ae_algo_pci_tbl,
13049 };
13050
13051 static int hclge_init(void)
13052 {
13053         pr_info("%s is initializing\n", HCLGE_NAME);
13054
13055         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
13056         if (!hclge_wq) {
13057                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13058                 return -ENOMEM;
13059         }
13060
13061         hnae3_register_ae_algo(&ae_algo);
13062
13063         return 0;
13064 }
13065
13066 static void hclge_exit(void)
13067 {
13068         hnae3_unregister_ae_algo_prepare(&ae_algo);
13069         hnae3_unregister_ae_algo(&ae_algo);
13070         destroy_workqueue(hclge_wq);
13071 }
13072 module_init(hclge_init);
13073 module_exit(hclge_exit);
13074
13075 MODULE_LICENSE("GPL");
13076 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13077 MODULE_DESCRIPTION("HCLGE Driver");
13078 MODULE_VERSION(HCLGE_MOD_VERSION);