1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
23 #include "hclge_err.h"
/* Driver name plus helpers that read a u64 MAC statistic located at a
 * byte offset inside struct hclge_mac_stats.
 */
26 #define HCLGE_NAME "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
/* Packet-buffer sizing parameters: allocation granularity and the
 * scale/percentage factors used when dividing the buffer among TCs.
 */
30 #define HCLGE_BUF_SIZE_UNIT 256U
31 #define HCLGE_BUF_MUL_BY 2
32 #define HCLGE_BUF_DIV_BY 2
33 #define NEED_RESERVE_TC_NUM 2
34 #define BUF_MAX_PERCENT 100
35 #define BUF_RESERVE_PERCENT 90
/* Reset handling: maximum retry count and synchronization wait
 * parameters (times per their names; units used by the reset logic
 * elsewhere in this file — confirm against callers).
 */
37 #define HCLGE_RESET_MAX_FAIL_CNT 5
38 #define HCLGE_RESET_SYNC_TIME 100
39 #define HCLGE_PF_RESET_SYNC_TIME 20
40 #define HCLGE_PF_RESET_SYNC_CNT 1500
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET 1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
46 #define HCLGE_DFX_IGU_BD_OFFSET 4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
49 #define HCLGE_DFX_NCSI_BD_OFFSET 7
50 #define HCLGE_DFX_RTC_BD_OFFSET 8
51 #define HCLGE_DFX_PPP_BD_OFFSET 9
52 #define HCLGE_DFX_RCB_BD_OFFSET 10
53 #define HCLGE_DFX_TQP_BD_OFFSET 11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
/* Link-status poll interval (ms per name) and index of the first VF vport. */
56 #define HCLGE_LINK_STATUS_MS 10
58 #define HCLGE_VF_VPORT_START_NUM 1
/* Forward declarations for static helpers defined later in this file. */
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66 u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
/* AE algorithm descriptor registered with the hnae3 framework;
 * its ops are filled in elsewhere in this file.
 */
73 static struct hnae3_ae_algo ae_algo;
/* PCI device IDs this PF driver binds to (HiSilicon GE/25GE/50GE/100GE
 * NIC variants, with or without RDMA/MACsec capability).
 */
75 static const struct pci_device_id ae_algo_pci_tbl[] = {
76 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
77 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
78 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
79 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
83 /* required last entry */
87 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
/* Command-queue register addresses collected for register dumps. */
89 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
90 HCLGE_CMDQ_TX_ADDR_H_REG,
91 HCLGE_CMDQ_TX_DEPTH_REG,
92 HCLGE_CMDQ_TX_TAIL_REG,
93 HCLGE_CMDQ_TX_HEAD_REG,
94 HCLGE_CMDQ_RX_ADDR_L_REG,
95 HCLGE_CMDQ_RX_ADDR_H_REG,
96 HCLGE_CMDQ_RX_DEPTH_REG,
97 HCLGE_CMDQ_RX_TAIL_REG,
98 HCLGE_CMDQ_RX_HEAD_REG,
99 HCLGE_VECTOR0_CMDQ_SRC_REG,
100 HCLGE_CMDQ_INTR_STS_REG,
101 HCLGE_CMDQ_INTR_EN_REG,
102 HCLGE_CMDQ_INTR_GEN_REG};
/* Miscellaneous/common register addresses collected for register dumps. */
104 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
105 HCLGE_VECTOR0_OTER_EN_REG,
106 HCLGE_MISC_RESET_STS_REG,
107 HCLGE_MISC_VECTOR_INT_STS,
108 HCLGE_GLOBAL_RESET_REG,
/* Per-ring RX/TX register addresses collected for register dumps. */
112 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
113 HCLGE_RING_RX_ADDR_H_REG,
114 HCLGE_RING_RX_BD_NUM_REG,
115 HCLGE_RING_RX_BD_LENGTH_REG,
116 HCLGE_RING_RX_MERGE_EN_REG,
117 HCLGE_RING_RX_TAIL_REG,
118 HCLGE_RING_RX_HEAD_REG,
119 HCLGE_RING_RX_FBD_NUM_REG,
120 HCLGE_RING_RX_OFFSET_REG,
121 HCLGE_RING_RX_FBD_OFFSET_REG,
122 HCLGE_RING_RX_STASH_REG,
123 HCLGE_RING_RX_BD_ERR_REG,
124 HCLGE_RING_TX_ADDR_L_REG,
125 HCLGE_RING_TX_ADDR_H_REG,
126 HCLGE_RING_TX_BD_NUM_REG,
127 HCLGE_RING_TX_PRIORITY_REG,
128 HCLGE_RING_TX_TC_REG,
129 HCLGE_RING_TX_MERGE_EN_REG,
130 HCLGE_RING_TX_TAIL_REG,
131 HCLGE_RING_TX_HEAD_REG,
132 HCLGE_RING_TX_FBD_NUM_REG,
133 HCLGE_RING_TX_OFFSET_REG,
134 HCLGE_RING_TX_EBD_NUM_REG,
135 HCLGE_RING_TX_EBD_OFFSET_REG,
136 HCLGE_RING_TX_BD_ERR_REG,
/* Per-TQP interrupt-coalescing register addresses for register dumps. */
139 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
140 HCLGE_TQP_INTR_GL0_REG,
141 HCLGE_TQP_INTR_GL1_REG,
142 HCLGE_TQP_INTR_GL2_REG,
143 HCLGE_TQP_INTR_RL_REG};
/* ethtool self-test name strings; copied into the ETH_SS_TEST string set
 * by hclge_get_strings() below, indexed by HNAE3_LOOP_* loopback type.
 */
145 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
147 "Serdes serial Loopback test",
148 "Serdes parallel Loopback test",
/* ethtool MAC statistics table: maps each exported stat name to the byte
 * offset of the corresponding u64 field in struct hclge_mac_stats, read
 * via HCLGE_STATS_READ() in hclge_comm_get_stats().
 */
152 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
/* Pause / PFC counters */
153 {"mac_tx_mac_pause_num",
154 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
155 {"mac_rx_mac_pause_num",
156 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
157 {"mac_tx_control_pkt_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
159 {"mac_rx_control_pkt_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
161 {"mac_tx_pfc_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
163 {"mac_tx_pfc_pri0_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
165 {"mac_tx_pfc_pri1_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
167 {"mac_tx_pfc_pri2_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
169 {"mac_tx_pfc_pri3_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
171 {"mac_tx_pfc_pri4_pkt_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
173 {"mac_tx_pfc_pri5_pkt_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
175 {"mac_tx_pfc_pri6_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
177 {"mac_tx_pfc_pri7_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
179 {"mac_rx_pfc_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
181 {"mac_rx_pfc_pri0_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
183 {"mac_rx_pfc_pri1_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
185 {"mac_rx_pfc_pri2_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
187 {"mac_rx_pfc_pri3_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
189 {"mac_rx_pfc_pri4_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
191 {"mac_rx_pfc_pri5_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
193 {"mac_rx_pfc_pri6_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
195 {"mac_rx_pfc_pri7_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
/* TX totals and size-bucket histograms */
197 {"mac_tx_total_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
199 {"mac_tx_total_oct_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
201 {"mac_tx_good_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
203 {"mac_tx_bad_pkt_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
205 {"mac_tx_good_oct_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
207 {"mac_tx_bad_oct_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
209 {"mac_tx_uni_pkt_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
211 {"mac_tx_multi_pkt_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
213 {"mac_tx_broad_pkt_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
215 {"mac_tx_undersize_pkt_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
217 {"mac_tx_oversize_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
219 {"mac_tx_64_oct_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
221 {"mac_tx_65_127_oct_pkt_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
223 {"mac_tx_128_255_oct_pkt_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
225 {"mac_tx_256_511_oct_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
227 {"mac_tx_512_1023_oct_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
229 {"mac_tx_1024_1518_oct_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
231 {"mac_tx_1519_2047_oct_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
233 {"mac_tx_2048_4095_oct_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
235 {"mac_tx_4096_8191_oct_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
237 {"mac_tx_8192_9216_oct_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
239 {"mac_tx_9217_12287_oct_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
241 {"mac_tx_12288_16383_oct_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
243 {"mac_tx_1519_max_good_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
245 {"mac_tx_1519_max_bad_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
/* RX totals and size-bucket histograms */
247 {"mac_rx_total_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
249 {"mac_rx_total_oct_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
251 {"mac_rx_good_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
253 {"mac_rx_bad_pkt_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
255 {"mac_rx_good_oct_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
257 {"mac_rx_bad_oct_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
259 {"mac_rx_uni_pkt_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
261 {"mac_rx_multi_pkt_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
263 {"mac_rx_broad_pkt_num",
264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
265 {"mac_rx_undersize_pkt_num",
266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
267 {"mac_rx_oversize_pkt_num",
268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
269 {"mac_rx_64_oct_pkt_num",
270 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
271 {"mac_rx_65_127_oct_pkt_num",
272 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
273 {"mac_rx_128_255_oct_pkt_num",
274 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
275 {"mac_rx_256_511_oct_pkt_num",
276 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
277 {"mac_rx_512_1023_oct_pkt_num",
278 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
279 {"mac_rx_1024_1518_oct_pkt_num",
280 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
281 {"mac_rx_1519_2047_oct_pkt_num",
282 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
283 {"mac_rx_2048_4095_oct_pkt_num",
284 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
285 {"mac_rx_4096_8191_oct_pkt_num",
286 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
287 {"mac_rx_8192_9216_oct_pkt_num",
288 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
289 {"mac_rx_9217_12287_oct_pkt_num",
290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
291 {"mac_rx_12288_16383_oct_pkt_num",
292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
293 {"mac_rx_1519_max_good_pkt_num",
294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
295 {"mac_rx_1519_max_bad_pkt_num",
296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
/* Error / application-side counters */
298 {"mac_tx_fragment_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
300 {"mac_tx_undermin_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
302 {"mac_tx_jabber_pkt_num",
303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
304 {"mac_tx_err_all_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
306 {"mac_tx_from_app_good_pkt_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
308 {"mac_tx_from_app_bad_pkt_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
310 {"mac_rx_fragment_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
312 {"mac_rx_undermin_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
314 {"mac_rx_jabber_pkt_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
316 {"mac_rx_fcs_err_pkt_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
318 {"mac_rx_send_app_good_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
320 {"mac_rx_send_app_bad_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
/* Static MAC-manager table: matches the LLDP multicast address
 * 01:80:c2:00:00:0e with ethertype ETH_P_LLDP on port bitmap 0x1.
 */
324 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
326 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
327 .ethter_type = cpu_to_le16(ETH_P_LLDP),
328 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
329 .i_port_bitmap = 0x1,
/* Default 40-byte hash key (presumably the RSS Toeplitz key programmed
 * into hardware elsewhere in this file — confirm against callers).
 */
333 static const u8 hclge_hash_key[] = {
334 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
335 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
336 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
337 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
338 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
/* BD-number offsets for each DFX register group; index-aligned with
 * hclge_dfx_reg_opcode_list below (same group order in both tables).
 */
341 static const u32 hclge_dfx_bd_offset_list[] = {
342 HCLGE_DFX_BIOS_BD_OFFSET,
343 HCLGE_DFX_SSU_0_BD_OFFSET,
344 HCLGE_DFX_SSU_1_BD_OFFSET,
345 HCLGE_DFX_IGU_BD_OFFSET,
346 HCLGE_DFX_RPU_0_BD_OFFSET,
347 HCLGE_DFX_RPU_1_BD_OFFSET,
348 HCLGE_DFX_NCSI_BD_OFFSET,
349 HCLGE_DFX_RTC_BD_OFFSET,
350 HCLGE_DFX_PPP_BD_OFFSET,
351 HCLGE_DFX_RCB_BD_OFFSET,
352 HCLGE_DFX_TQP_BD_OFFSET,
353 HCLGE_DFX_SSU_2_BD_OFFSET
/* Firmware command opcodes used to read each DFX register group. */
356 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
357 HCLGE_OPC_DFX_BIOS_COMMON_REG,
358 HCLGE_OPC_DFX_SSU_REG_0,
359 HCLGE_OPC_DFX_SSU_REG_1,
360 HCLGE_OPC_DFX_IGU_EGU_REG,
361 HCLGE_OPC_DFX_RPU_REG_0,
362 HCLGE_OPC_DFX_RPU_REG_1,
363 HCLGE_OPC_DFX_NCSI_REG,
364 HCLGE_OPC_DFX_RTC_REG,
365 HCLGE_OPC_DFX_PPP_REG,
366 HCLGE_OPC_DFX_RCB_REG,
367 HCLGE_OPC_DFX_TQP_REG,
368 HCLGE_OPC_DFX_SSU_REG_2
/* Flow-director key layout: each entry is a field identifier paired with
 * its width in bits (meta-data fields first, then match-tuple fields).
 */
371 static const struct key_info meta_data_key_info[] = {
372 { PACKET_TYPE_ID, 6},
382 static const struct key_info tuple_key_info[] = {
383 { OUTER_DST_MAC, 48},
384 { OUTER_SRC_MAC, 48},
385 { OUTER_VLAN_TAG_FST, 16},
386 { OUTER_VLAN_TAG_SEC, 16},
387 { OUTER_ETH_TYPE, 16},
390 { OUTER_IP_PROTO, 8},
394 { OUTER_SRC_PORT, 16},
395 { OUTER_DST_PORT, 16},
397 { OUTER_TUN_VNI, 24},
398 { OUTER_TUN_FLOW_ID, 8},
399 { INNER_DST_MAC, 48},
400 { INNER_SRC_MAC, 48},
401 { INNER_VLAN_TAG_FST, 16},
402 { INNER_VLAN_TAG_SEC, 16},
403 { INNER_ETH_TYPE, 16},
406 { INNER_IP_PROTO, 8},
410 { INNER_SRC_PORT, 16},
411 { INNER_DST_PORT, 16},
/* Legacy MAC-statistics read path: fetches all counters with opcode
 * HCLGE_OPC_STATS_MAC using a fixed chain of HCLGE_MAC_CMD_NUM
 * descriptors and accumulates the little-endian u64 values into
 * hdev->hw_stats.mac_stats. Returns 0 or a negative error from the
 * command send.
 */
415 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
417 #define HCLGE_MAC_CMD_NUM 21
419 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
420 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
425 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
426 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
428 dev_err(&hdev->pdev->dev,
429 "Get MAC pkt stats fail, status = %d.\n", ret);
434 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
435 /* for special opcode 0032, only the first desc has the head */
436 if (unlikely(i == 0)) {
437 desc_data = (__le64 *)(&desc[i].data[0]);
438 n = HCLGE_RD_FIRST_STATS_NUM;
440 desc_data = (__le64 *)(&desc[i]);
441 n = HCLGE_RD_OTHER_STATS_NUM;
444 for (k = 0; k < n; k++) {
445 *data += le64_to_cpu(*desc_data);
/* Newer MAC-statistics read path: fetches all counters with opcode
 * HCLGE_OPC_STATS_MAC_ALL across a firmware-reported number of
 * descriptors (desc_num from hclge_mac_query_reg_num()) and accumulates
 * them into hdev->hw_stats.mac_stats. The descriptor array is allocated
 * with GFP_ATOMIC because this can run in atomic context.
 */
454 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
456 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
457 struct hclge_desc *desc;
462 /* This may be called inside atomic sections,
463 * so GFP_ATOMIC is more suitable here
465 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
469 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
470 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
476 for (i = 0; i < desc_num; i++) {
477 /* for special opcode 0034, only the first desc has the head */
479 desc_data = (__le64 *)(&desc[i].data[0]);
480 n = HCLGE_RD_FIRST_STATS_NUM;
482 desc_data = (__le64 *)(&desc[i]);
483 n = HCLGE_RD_OTHER_STATS_NUM;
486 for (k = 0; k < n; k++) {
487 *data += le64_to_cpu(*desc_data);
/* Queries the firmware for the MAC statistics register count and derives
 * the number of command descriptors needed to read them all (first
 * descriptor holds fewer stats; remainder packed four per descriptor,
 * rounded up — see the formula below).
 */
498 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
500 struct hclge_desc desc;
505 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
506 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
510 desc_data = (__le32 *)(&desc.data[0]);
511 reg_num = le32_to_cpu(*desc_data);
513 *desc_num = 1 + ((reg_num - 3) >> 2) +
514 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
/* Dispatcher: prefers the "complete" read path when the firmware reports
 * a register count; falls back to the legacy path when the query returns
 * -EOPNOTSUPP (older firmware).
 */
519 static int hclge_mac_update_stats(struct hclge_dev *hdev)
524 ret = hclge_mac_query_reg_num(hdev, &desc_num);
526 /* The firmware supports the new statistics acquisition method */
528 ret = hclge_mac_update_stats_complete(hdev, desc_num);
529 else if (ret == -EOPNOTSUPP)
530 ret = hclge_mac_update_stats_defective(hdev);
532 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
/* Refreshes per-queue packet counters: issues one RX-status query and one
 * TX-status query per TQP and accumulates data[1] of the response into
 * the queue's rcb_rx/tx_ring_pktnum_rcd counters. Returns 0 or the first
 * command-send error.
 */
537 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
539 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
540 struct hclge_vport *vport = hclge_get_vport(handle);
541 struct hclge_dev *hdev = vport->back;
542 struct hnae3_queue *queue;
543 struct hclge_desc desc[1];
544 struct hclge_tqp *tqp;
547 for (i = 0; i < kinfo->num_tqps; i++) {
548 queue = handle->kinfo.tqp[i];
549 tqp = container_of(queue, struct hclge_tqp, q);
550 /* command : HCLGE_OPC_QUERY_IGU_STAT */
551 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
555 ret = hclge_cmd_send(&hdev->hw, desc, 1);
557 dev_err(&hdev->pdev->dev,
558 "Query tqp stat fail, status = %d,queue = %d\n",
562 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
563 le32_to_cpu(desc[0].data[1]);
566 for (i = 0; i < kinfo->num_tqps; i++) {
567 queue = handle->kinfo.tqp[i];
568 tqp = container_of(queue, struct hclge_tqp, q);
569 /* command : HCLGE_OPC_QUERY_IGU_STAT */
570 hclge_cmd_setup_basic_desc(&desc[0],
571 HCLGE_OPC_QUERY_TX_STATUS,
574 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
575 ret = hclge_cmd_send(&hdev->hw, desc, 1);
577 dev_err(&hdev->pdev->dev,
578 "Query tqp stat fail, status = %d,queue = %d\n",
582 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
583 le32_to_cpu(desc[0].data[1]);
/* Copies the cached per-queue counters into the ethtool stats buffer:
 * all TX counters first, then all RX counters. Returns the advanced
 * buffer pointer.
 */
589 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
591 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
592 struct hclge_tqp *tqp;
596 for (i = 0; i < kinfo->num_tqps; i++) {
597 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
598 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601 for (i = 0; i < kinfo->num_tqps; i++) {
602 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
603 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
/* Number of per-queue stats exported (one TX and one RX per TQP). */
609 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
611 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
613 /* each tqp has TX & RX two queues */
614 return kinfo->num_tqps * (2);
/* Writes the per-queue stat names ("txqN_pktnum_rcd" then
 * "rxqN_pktnum_rcd") into the ethtool string buffer, one
 * ETH_GSTRING_LEN slot each; returns the advanced pointer.
 */
617 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
619 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
623 for (i = 0; i < kinfo->num_tqps; i++) {
624 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
625 struct hclge_tqp, q);
626 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
628 buff = buff + ETH_GSTRING_LEN;
631 for (i = 0; i < kinfo->num_tqps; i++) {
632 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
633 struct hclge_tqp, q);
634 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
636 buff = buff + ETH_GSTRING_LEN;
/* Copies `size` stats out of a stats struct into `buf` using each
 * entry's byte offset (HCLGE_STATS_READ); returns the advanced pointer.
 */
642 static u64 *hclge_comm_get_stats(const void *comm_stats,
643 const struct hclge_comm_stats_str strs[],
649 for (i = 0; i < size; i++)
650 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
/* Writes the `size` stat names from `strs` into the ethtool string
 * buffer (ETH_SS_STATS only), one ETH_GSTRING_LEN slot per name.
 */
655 static u8 *hclge_comm_get_strings(u32 stringset,
656 const struct hclge_comm_stats_str strs[],
659 char *buff = (char *)data;
662 if (stringset != ETH_SS_STATS)
665 for (i = 0; i < size; i++) {
666 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
667 buff = buff + ETH_GSTRING_LEN;
/* Refreshes TQP stats (only when a client is attached to vport 0's nic
 * handle) and MAC stats for the whole device; errors are logged, not
 * propagated.
 */
673 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
675 struct hnae3_handle *handle;
678 handle = &hdev->vport[0].nic;
679 if (handle->client) {
680 status = hclge_tqps_update_stats(handle);
682 dev_err(&hdev->pdev->dev,
683 "Update TQPS stats fail, status = %d.\n",
688 status = hclge_mac_update_stats(hdev);
690 dev_err(&hdev->pdev->dev,
691 "Update MAC stats fail, status = %d.\n", status);
/* hnae3 update_stats hook: refreshes MAC and TQP stats for this handle.
 * The HCLGE_STATE_STATISTICS_UPDATING bit makes concurrent callers skip
 * the refresh rather than run it twice; errors are logged only.
 */
694 static void hclge_update_stats(struct hnae3_handle *handle,
695 struct net_device_stats *net_stats)
697 struct hclge_vport *vport = hclge_get_vport(handle);
698 struct hclge_dev *hdev = vport->back;
701 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704 status = hclge_mac_update_stats(hdev);
706 dev_err(&hdev->pdev->dev,
707 "Update MAC stats fail, status = %d.\n",
710 status = hclge_tqps_update_stats(handle);
712 dev_err(&hdev->pdev->dev,
713 "Update TQPS stats fail, status = %d.\n",
716 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
/* ethtool get_sset_count hook. For ETH_SS_TEST it also (re)computes the
 * supported-loopback flag bits on the handle per the rules in the comment
 * below; for ETH_SS_STATS it returns the MAC-stat count plus the
 * per-queue stat count.
 */
719 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
721 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
722 HNAE3_SUPPORT_PHY_LOOPBACK |\
723 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
724 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
726 struct hclge_vport *vport = hclge_get_vport(handle);
727 struct hclge_dev *hdev = vport->back;
730 /* Loopback test support rules:
731 * mac: only GE mode support
732 * serdes: all mac mode will support include GE/XGE/LGE/CGE
733 * phy: only support when phy device exist on board
735 if (stringset == ETH_SS_TEST) {
736 /* clear loopback bit flags at first */
737 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
738 if (hdev->pdev->revision >= 0x21 ||
739 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
740 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
741 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
743 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
747 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
748 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
750 if (hdev->hw.mac.phydev) {
752 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755 } else if (stringset == ETH_SS_STATS) {
756 count = ARRAY_SIZE(g_mac_stats_string) +
757 hclge_tqps_get_sset_count(handle, stringset);
/* ethtool get_strings hook: fills the stat-name strings (MAC table then
 * per-queue names) for ETH_SS_STATS, or the self-test names for
 * ETH_SS_TEST based on the loopback flags set in hclge_get_sset_count().
 */
763 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766 u8 *p = (char *)data;
769 if (stringset == ETH_SS_STATS) {
770 size = ARRAY_SIZE(g_mac_stats_string);
771 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
773 p = hclge_tqps_get_strings(handle, p);
774 } else if (stringset == ETH_SS_TEST) {
775 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
776 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
778 p += ETH_GSTRING_LEN;
780 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
781 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
783 p += ETH_GSTRING_LEN;
785 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
787 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
789 p += ETH_GSTRING_LEN;
791 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
792 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
794 p += ETH_GSTRING_LEN;
/* ethtool get_stats hook: copies MAC stats then per-queue stats into the
 * caller's u64 buffer, in the same order as the strings above.
 */
799 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
801 struct hclge_vport *vport = hclge_get_vport(handle);
802 struct hclge_dev *hdev = vport->back;
805 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
806 ARRAY_SIZE(g_mac_stats_string), data);
807 p = hclge_tqps_get_stats(handle, p);
/* Reports TX/RX MAC pause counters after forcing a stats refresh. */
810 static void hclge_get_mac_stat(struct hnae3_handle *handle,
811 struct hns3_mac_stats *mac_stats)
813 struct hclge_vport *vport = hclge_get_vport(handle);
814 struct hclge_dev *hdev = vport->back;
816 hclge_update_stats(handle, NULL);
818 mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
819 mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
/* Interprets the firmware function-status response: fails until the
 * HCLGE_PF_STATE_DONE bit is set, then records whether this PF is the
 * main PF in hdev->flag.
 */
822 static int hclge_parse_func_status(struct hclge_dev *hdev,
823 struct hclge_func_status_cmd *status)
825 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
828 /* Set the pf to main pf */
829 if (status->pf_state & HCLGE_PF_STATE_MAIN)
830 hdev->flag |= HCLGE_FLAG_MAIN;
832 hdev->flag &= ~HCLGE_FLAG_MAIN;
/* Polls the firmware (up to HCLGE_QUERY_MAX_CNT attempts, sleeping
 * 1-2 ms between tries) until the function status indicates PF reset is
 * done, then parses the final response.
 */
837 static int hclge_query_function_status(struct hclge_dev *hdev)
839 #define HCLGE_QUERY_MAX_CNT 5
841 struct hclge_func_status_cmd *req;
842 struct hclge_desc desc;
846 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
847 req = (struct hclge_func_status_cmd *)desc.data;
850 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
852 dev_err(&hdev->pdev->dev,
853 "query function status failed %d.\n", ret);
857 /* Check pf reset is done */
860 usleep_range(1000, 2000);
861 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
863 ret = hclge_parse_func_status(hdev, req);
/* Queries firmware for this PF's resources and caches them in hdev:
 * TQP count, packet/TX/DV buffer sizes (converted from HCLGE_BUF_UNIT_S
 * units and rounded up to HCLGE_BUF_SIZE_UNIT, with defaults when the
 * firmware reports zero), and MSI-X vector counts. On RoCE-capable
 * devices the NIC vectors precede the RoCE vectors, and the NIC MSI count
 * mirrors the RoCE count. Fails if fewer than HNAE3_MIN_VECTOR_NUM NIC
 * vectors are available.
 */
868 static int hclge_query_pf_resource(struct hclge_dev *hdev)
870 struct hclge_pf_res_cmd *req;
871 struct hclge_desc desc;
874 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
875 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
877 dev_err(&hdev->pdev->dev,
878 "query pf resource failed %d.\n", ret);
882 req = (struct hclge_pf_res_cmd *)desc.data;
883 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
884 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
886 if (req->tx_buf_size)
888 __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
890 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
892 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
894 if (req->dv_buf_size)
896 __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
898 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
900 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
902 if (hnae3_dev_roce_supported(hdev)) {
903 hdev->roce_base_msix_offset =
904 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
905 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
907 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
908 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
910 /* nic's msix numbers is always equals to the roce's. */
911 hdev->num_nic_msi = hdev->num_roce_msi;
913 /* PF should have NIC vectors and Roce vectors,
914 * NIC vectors are queued before Roce vectors.
916 hdev->num_msi = hdev->num_roce_msi +
917 hdev->roce_base_msix_offset;
920 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
921 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
923 hdev->num_nic_msi = hdev->num_msi;
926 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
927 dev_err(&hdev->pdev->dev,
928 "Just %u msi resources, not enough for pf(min:2).\n",
/* Maps a firmware speed command code to an HCLGE_MAC_SPEED_* value
 * written through *speed.
 */
936 static int hclge_parse_speed(int speed_cmd, int *speed)
940 *speed = HCLGE_MAC_SPEED_10M;
943 *speed = HCLGE_MAC_SPEED_100M;
946 *speed = HCLGE_MAC_SPEED_1G;
949 *speed = HCLGE_MAC_SPEED_10G;
952 *speed = HCLGE_MAC_SPEED_25G;
955 *speed = HCLGE_MAC_SPEED_40G;
958 *speed = HCLGE_MAC_SPEED_50G;
961 *speed = HCLGE_MAC_SPEED_100G;
/* Checks whether the port's advertised speed-ability bitmap contains the
 * HCLGE_SUPPORT_*_BIT corresponding to the requested speed.
 */
970 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
972 struct hclge_vport *vport = hclge_get_vport(handle);
973 struct hclge_dev *hdev = vport->back;
974 u32 speed_ability = hdev->hw.mac.speed_ability;
978 case HCLGE_MAC_SPEED_10M:
979 speed_bit = HCLGE_SUPPORT_10M_BIT;
981 case HCLGE_MAC_SPEED_100M:
982 speed_bit = HCLGE_SUPPORT_100M_BIT;
984 case HCLGE_MAC_SPEED_1G:
985 speed_bit = HCLGE_SUPPORT_1G_BIT;
987 case HCLGE_MAC_SPEED_10G:
988 speed_bit = HCLGE_SUPPORT_10G_BIT;
990 case HCLGE_MAC_SPEED_25G:
991 speed_bit = HCLGE_SUPPORT_25G_BIT;
993 case HCLGE_MAC_SPEED_40G:
994 speed_bit = HCLGE_SUPPORT_40G_BIT;
996 case HCLGE_MAC_SPEED_50G:
997 speed_bit = HCLGE_SUPPORT_50G_BIT;
999 case HCLGE_MAC_SPEED_100G:
1000 speed_bit = HCLGE_SUPPORT_100G_BIT;
1006 if (speed_bit & speed_ability)
/* Translates the SR (short-range optical) speed-ability bits into
 * ethtool link-mode bits on mac->supported.
 */
1012 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1014 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1015 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1017 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1018 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1020 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1021 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1023 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1024 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1026 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1027 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
/* Translates the LR (long-range optical) speed-ability bits into ethtool
 * link-mode bits. NOTE(review): 25G maps to the SR link mode here —
 * ethtool defines no 25000baseLR mode, so this appears intentional.
 */
1031 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1033 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1034 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1036 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1037 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1039 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1040 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1042 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1045 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1046 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
/* Translates the CR (copper/direct-attach) speed-ability bits into
 * ethtool link-mode bits on mac->supported.
 */
1050 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1052 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1053 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1055 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1056 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1058 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1061 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1062 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1064 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1065 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
/* Translates the KR/KX (backplane) speed-ability bits into ethtool
 * link-mode bits on mac->supported.
 */
1069 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1071 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1072 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1074 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1075 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1077 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1078 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1080 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1081 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1083 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1084 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1086 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1087 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
/* Sets FEC capability bits from the current MAC speed: 10G/40G support
 * BaseR, 25G/50G support BaseR+RS, 100G supports RS only; other speeds
 * get no FEC ability. Clears the old BaseR/RS bits first.
 */
1091 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1093 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1094 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1096 switch (mac->speed) {
1097 case HCLGE_MAC_SPEED_10G:
1098 case HCLGE_MAC_SPEED_40G:
1099 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1102 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1104 case HCLGE_MAC_SPEED_25G:
1105 case HCLGE_MAC_SPEED_50G:
1106 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1109 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1110 BIT(HNAE3_FEC_AUTO);
1112 case HCLGE_MAC_SPEED_100G:
1113 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1114 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1117 mac->fec_ability = 0;
1122 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1125 struct hclge_mac *mac = &hdev->hw.mac;
1127 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1128 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1131 hclge_convert_setting_sr(mac, speed_ability);
1132 hclge_convert_setting_lr(mac, speed_ability);
1133 hclge_convert_setting_cr(mac, speed_ability);
1134 if (hdev->pdev->revision >= 0x21)
1135 hclge_convert_setting_fec(mac);
1137 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1138 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1139 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
/* Populate mac->supported for a backplane (KR/KX) port: per-speed KR
 * modes, FEC on rev 0x21+ hardware, plus Backplane/Pause/FEC_NONE bits.
 */
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	/* FEC configuration is only available on revision 0x21 and later */
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
/* Populate the supported link modes for a copper (twisted-pair) port.
 * Copper ports additionally advertise half-duplex at 10/100M, autoneg,
 * TP, and both pause modes.
 */
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);

	/* common copper-port modes, independent of speed_ability */
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
/* Dispatch supported-link-mode parsing by media type (fiber, copper or
 * backplane); unknown media types are silently left unparsed.
 */
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
/* Return the highest MAC speed present in the speed_ability bitmap,
 * checked from fastest to slowest; defaults to 1G when no known bit
 * is set.
 */
static u32 hclge_get_max_speed(u8 speed_ability)
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	/* fallback when firmware reports no recognizable speed bit */
	return HCLGE_MAC_SPEED_1G;
/* Decode the raw configuration descriptors returned by the
 * HCLGE_OPC_GET_CFG_PARAM firmware command into the hclge_cfg struct:
 * vport/TC/queue counts, PHY address, media type, buffer length, MAC
 * address, default speed, RSS size, NUMA map, speed ability and UMV
 * table space.
 */
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address: low 32 bits in param[2], high bits in param[3] */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	/* split shift (<<31 then <<1) avoids undefined behavior that a
	 * single 32-bit shift of a 32-bit value could trigger
	 */
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	/* unpack the 48-bit MAC address byte by byte, little-endian order */
	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	/* second descriptor carries NUMA map, speed ability and UMV space */
	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	/* firmware reporting 0 means "use the driver default" */
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;

	/* build one GET_CFG_PARAM descriptor per config chunk, each with
	 * its byte offset and read length encoded in req->offset
	 */
	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);

	/* decode the raw descriptors into *hcfg */
	hclge_parse_cfg(hcfg, desc);
/* Query device capability information from firmware: the PF/VF function
 * status first, then the PF resource allocation (queues, buffers, MSI-X).
 */
static int hclge_get_cap(struct hclge_dev *hdev)

	ret = hclge_query_function_status(hdev);
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
/* When running inside a kdump (crash capture) kernel, shrink resource
 * usage to the minimum: one queue pair per vport and the smallest
 * descriptor ring sizes.  No-op on a normal kernel.
 */
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
/* Read the device configuration from firmware and apply it to the
 * hclge_dev: queue/vport counts, MAC address, media type, speed and
 * link modes, TC/DCB setup, kdump shrinking, and the initial IRQ
 * affinity mask derived from the PCI function number.
 */
static int hclge_configure(struct hclge_dev *hdev)
	struct hclge_cfg cfg;

	ret = hclge_get_cfg(hdev, &cfg);
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);

	/* copy the firmware-reported configuration into the device state */
	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	/* clamp an out-of-range TC count reported by firmware */
	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {

		hdev->pfc_max = hdev->tc_max;

	hdev->tm_info.num_tc = 1;

	/* Currently does not support non-contiguous TCs */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);
/* Program the hardware's generic TSO MSS limits.  Note the max field
 * deliberately reuses the MSS_MIN mask/shift: both 16-bit fields share
 * the same in-field layout, so this matches upstream and is not a bug.
 */
static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
/* Enable or disable hardware GRO.  Silently succeeds on hardware that
 * does not support the GRO feature.
 */
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;

	if (!hnae3_dev_gro_supported(hdev))

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);
/* Allocate the array of task queue pairs (TQPs) for this PF and
 * initialize each entry's ring sizes, buffer size and MMIO base.
 * Memory is device-managed (devm_*), so it is freed automatically
 * on driver detach.
 */
static int hclge_alloc_tqps(struct hclge_dev *hdev)
	struct hclge_tqp *tqp;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		/* each TQP owns a fixed-size register window in BAR space */
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;
/* Tell hardware to map physical queue tqp_pid to virtual queue tqp_vid
 * of function func_id.  is_pf selects PF vs VF mapping type via the
 * MAP_TYPE flag bit.
 */
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
/* Claim num_tqps unallocated TQPs from the PF pool for this vport and
 * wire them into the vport's kinfo.  Afterwards derive rss_size, capped
 * both by rss_size_max and by the number of NIC MSI vectors so that the
 * default keeps a one-to-one irq/queue mapping.
 */
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;

	/* first-fit scan over the PF's TQP pool */
	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;

	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure one to one mapping between irq and queue at default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
/* Initialize the vport's KNIC private info (ring sizes, rx buffer
 * length), allocate the per-queue pointer array, then assign num_tqps
 * queues to the vport.
 */
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)

	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	/* device-managed allocation; freed automatically on detach */
	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);

	ret = hclge_assign_tqp(vport, num_tqps);
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
/* Program the hardware queue mapping for every TQP already assigned to
 * this vport.  vport_id 0 is the PF itself; other ids are VFs.
 */
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);

		/* vport 0 is the PF; non-zero ids are VF vports */
		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
/* Map the queues of every vport (PF main vport + VMDq vports + VFs)
 * into hardware, one vport at a time.
 */
static int hclge_map_tqp(struct hclge_dev *hdev)
	struct hclge_vport *vport = hdev->vport;

	/* +1 accounts for the PF's own main vport */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {

		ret = hclge_map_tqp_to_vport(hdev, vport);
/* Initialize the vport's hnae3 NIC handle (pdev, algo ops, NUMA mask)
 * and set up its KNIC queues using the device-wide ring sizes.
 */
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
/* Allocate and initialize all vports (PF main vport, VMDq vports and
 * VF vports).  TQPs are split evenly across vports, with the remainder
 * given to the PF's main vport.  Fails if there are fewer TQPs than
 * vports.
 */
static int hclge_alloc_vport(struct hclge_dev *hdev)
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	/* PF main vport also takes the leftover queues */
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		/* vport 0 (the PF) gets the larger main-vport share */
			ret = hclge_vport_setup(vport, tqp_main_vport);
			ret = hclge_vport_setup(vport, tqp_per_vport);
				"vport setup failed for vport %d, %d\n",
/* Send the per-TC TX packet-buffer allocation to hardware.  Sizes are
 * reported in 128-byte units with the update-enable bit set in each
 * entry.
 */
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
/* TX buffer size is unit by 128 byte */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		/* convert bytes to 128-byte units and flag the entry valid */
		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
/* Thin wrapper around hclge_cmd_alloc_tx_buff() that logs on failure. */
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
/* Count the TCs enabled in the hardware TC bitmap. */
static u32 hclge_get_tc_num(struct hclge_dev *hdev)

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
/* Get the number of PFC-enabled TCs which have a private buffer. */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
	struct hclge_priv_buf *priv;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		/* TC must be PFC-enabled (and, per the elided condition,
		 * hold a non-zero private buffer) to count
		 */
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
/* Get the number of PFC-disabled TCs which have a private buffer. */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
	struct hclge_priv_buf *priv;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		/* TC is enabled in hardware but not PFC-enabled */
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
/* Sum the RX private buffer sizes currently allocated across all TCs. */
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
	struct hclge_priv_buf *priv;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		rx_priv += priv->buf_size;
/* Sum the TX buffer sizes allocated across all TCs. */
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
/* Check whether the available RX buffer (rx_all) can hold the private
 * buffers plus the required shared buffer.  If it fits, compute the
 * shared buffer size and its global and per-TC high/low thresholds
 * (thresholds differ between DCB and non-DCB hardware).
 */
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;

	/* align MPS up to the 256-byte buffer granularity */
	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
				+ hdev->dv_buf_size;

	/* per-TC requirement: one MPS per TC plus one extra MPS */
	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)

	/* everything left after private buffers becomes the shared buffer */
	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
		buf_alloc->s_buf.self.high = aligned_mps +
			HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		/* with few TCs, keep some headroom in reserve */
		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT

			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;

	/* apply the same thresholds to every TC */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
/* Calculate per-TC TX buffer sizes: every enabled TC receives the fixed
 * tx_buf_size from the remaining packet buffer; disabled TCs get none.
 */
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			/* fail if the remaining budget cannot cover one TC */
			if (total_size < hdev->tx_buf_size)

			priv->tx_buf_size = hdev->tx_buf_size;
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
/* Compute per-TC RX private buffers and watermarks using either the
 * "max" (roomier) or "min" profile, then verify the result fits the
 * remaining packet buffer via hclge_is_rx_buf_ok().
 */
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		/* disabled TCs get no private buffer */
		if (!(hdev->hw_tc_map & BIT(i)))

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* PFC TCs need higher watermarks to avoid drops */
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);

			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
/* Free the private buffers of non-PFC TCs one by one (highest TC first)
 * until the RX buffer budget fits or no such TC remains.
 */
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */

		/* stop early once the budget fits or nothing is left */
		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
/* Free the private buffers of PFC-enabled TCs one by one (highest TC
 * first) until the RX buffer budget fits or no such TC remains.
 */
static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */

		/* stop early once the budget fits or nothing is left */
		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
/* Try an allocation strategy that gives ALL remaining RX buffer to the
 * enabled TCs as private buffers (no shared buffer).  Succeeds only if
 * the per-TC slice clears the minimum (dv_buf + compensation) size.
 */
static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				      struct hclge_pkt_buf_alloc *buf_alloc)
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;

	/* split the leftover evenly across enabled TCs */
	rx_priv = rx_priv / tc_num;

	/* with few TCs, keep some of the slice in reserve */
	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
		      COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (!(hdev->hw_tc_map & BIT(i)))

		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;

	/* this strategy uses no shared buffer at all */
	buf_alloc->s_buf.buf_size = 0;
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 *
 * Strategies are tried in order: private-only, "max" profile, "min"
 * profile, then progressively dropping non-PFC and PFC private buffers.
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
/* Program the computed RX private buffer sizes (per TC) and the shared
 * buffer size into hardware.  Sizes go down in HCLGE_BUF_UNIT_S units
 * with the per-entry enable bit set.
 */
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);

		/* shared buffer entry: size plus enable bit */
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
/* Program the per-TC RX private watermarks (high/low).  The TCs span
 * two chained descriptors of HCLGE_TC_NUM_ONE_DESC entries each; the
 * first descriptor carries the NEXT flag.
 */
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			/* watermarks in HCLGE_BUF_UNIT_S units, enable bit set */
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	/* Send 2 descriptor at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
/* Program the per-TC shared-buffer thresholds (high/low) into hardware,
 * again split across two chained descriptors of HCLGE_TC_NUM_ONE_DESC
 * entries each.
 */
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			/* thresholds in HCLGE_BUF_UNIT_S units, enable bit set */
			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
/* Program the shared buffer's own high/low waterline into hardware. */
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	/* waterlines in HCLGE_BUF_UNIT_S units with the enable bit set */
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
/* Top-level packet buffer setup: calculate and program TX buffers, then
 * RX private/shared buffers; on DCB hardware also program the private
 * waterlines and the common thresholds, finishing with the common
 * waterline.  Uses a temporary heap calculation structure.
 */
int hclge_buffer_alloc(struct hclge_dev *hdev)
	struct hclge_pkt_buf_alloc *pkt_buf;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",

	/* waterline/threshold programming only exists on DCB hardware */
	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",

		ret = hclge_common_thrd_config(hdev, pkt_buf);
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",

	ret = hclge_common_wl_config(hdev, pkt_buf);
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);
/* Initialize the RoCE handle of a vport from the PF's RoCE MSI-X
 * allocation and the NIC handle's device pointers.  Fails when there
 * are not enough MSI vectors left for RoCE.
 */
static int hclge_init_roce_base_info(struct hclge_vport *vport)
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	/* RoCE needs its own slice of the remaining MSI vectors */
	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;
/* Allocate MSI/MSI-X interrupt vectors for the PF and set up the
 * vector bookkeeping arrays (status and irq maps).  Accepts fewer
 * vectors than requested with a warning.  On allocation failure of
 * the bookkeeping arrays the irq vectors are released again.
 */
static int hclge_init_msi(struct hclge_dev *hdev)
	struct pci_dev *pdev = hdev->pdev;

	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
		"failed(%d) to allocate MSI/MSI-X vectors\n",

	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->base_msi_vector = pdev->irq;
	/* RoCE vectors start after the NIC's share of the vector space */
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);

	/* mark all vectors unassigned initially */
	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
/* Half duplex is only meaningful at 10M/100M; force full duplex for
 * every other speed.
 */
static u8 hclge_check_speed_dup(u8 duplex, int speed)
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;
/* Program MAC speed and duplex into hardware.  Each supported speed
 * maps to a fixed firmware encoding (10M=6, 100M=7, 1G=0, 10G=1,
 * 25G=2, 40G=3, 50G=4, 100G=5); unknown speeds are rejected.
 */
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);

	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		/* default: reject unknown speed values */
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
/* Set MAC speed/duplex, normalizing the duplex first and skipping the
 * hardware write when nothing changes; caches the new values on
 * success.
 */
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)

	duplex = hclge_check_speed_dup(duplex, speed);
	/* no-op if hardware is already at the requested setting */
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
/* hnae3 ops wrapper: resolve the vport from the handle and delegate to
 * hclge_cfg_mac_speed_dup().
 */
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
/* Enable/disable MAC autonegotiation via the CONFIG_AN_MODE command. */
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;

	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2500 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2502 struct hclge_vport *vport = hclge_get_vport(handle);
2503 struct hclge_dev *hdev = vport->back;
2505 if (!hdev->hw.mac.support_autoneg) {
2507 dev_err(&hdev->pdev->dev,
2508 "autoneg is not supported by current port\n");
2515 return hclge_set_autoneg_en(hdev, enable);
2518 static int hclge_get_autoneg(struct hnae3_handle *handle)
2520 struct hclge_vport *vport = hclge_get_vport(handle);
2521 struct hclge_dev *hdev = vport->back;
2522 struct phy_device *phydev = hdev->hw.mac.phydev;
2525 return phydev->autoneg;
2527 return hdev->hw.mac.autoneg;
2530 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2532 struct hclge_vport *vport = hclge_get_vport(handle);
2533 struct hclge_dev *hdev = vport->back;
2536 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2538 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2541 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2544 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2546 struct hclge_vport *vport = hclge_get_vport(handle);
2547 struct hclge_dev *hdev = vport->back;
2549 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2550 return hclge_set_autoneg_en(hdev, !halt);
2555 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2557 struct hclge_config_fec_cmd *req;
2558 struct hclge_desc desc;
2561 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2563 req = (struct hclge_config_fec_cmd *)desc.data;
2564 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2565 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2566 if (fec_mode & BIT(HNAE3_FEC_RS))
2567 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2568 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2569 if (fec_mode & BIT(HNAE3_FEC_BASER))
2570 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2571 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2573 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2575 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2580 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2582 struct hclge_vport *vport = hclge_get_vport(handle);
2583 struct hclge_dev *hdev = vport->back;
2584 struct hclge_mac *mac = &hdev->hw.mac;
2587 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2588 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2592 ret = hclge_set_fec_hw(hdev, fec_mode);
2596 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2600 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2603 struct hclge_vport *vport = hclge_get_vport(handle);
2604 struct hclge_dev *hdev = vport->back;
2605 struct hclge_mac *mac = &hdev->hw.mac;
2608 *fec_ability = mac->fec_ability;
2610 *fec_mode = mac->fec_mode;
2613 static int hclge_mac_init(struct hclge_dev *hdev)
2615 struct hclge_mac *mac = &hdev->hw.mac;
2618 hdev->support_sfp_query = true;
2619 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2620 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2621 hdev->hw.mac.duplex);
2623 dev_err(&hdev->pdev->dev,
2624 "Config mac speed dup fail ret=%d\n", ret);
2628 if (hdev->hw.mac.support_autoneg) {
2629 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2631 dev_err(&hdev->pdev->dev,
2632 "Config mac autoneg fail ret=%d\n", ret);
2639 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2640 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2642 dev_err(&hdev->pdev->dev,
2643 "Fec mode init fail, ret = %d\n", ret);
2648 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2650 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2654 ret = hclge_set_default_loopback(hdev);
2658 ret = hclge_buffer_alloc(hdev);
2660 dev_err(&hdev->pdev->dev,
2661 "allocate buffer fail, ret=%d\n", ret);
2666 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2668 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2669 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2670 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2671 &hdev->mbx_service_task);
2674 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2676 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2677 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2678 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2679 &hdev->rst_service_task);
2682 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2684 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2685 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2686 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2687 hdev->hw_stats.stats_timer++;
2688 hdev->fd_arfs_expire_timer++;
2689 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2690 system_wq, &hdev->service_task,
2695 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2697 struct hclge_link_status_cmd *req;
2698 struct hclge_desc desc;
2702 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2703 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2705 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2710 req = (struct hclge_link_status_cmd *)desc.data;
2711 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2713 return !!link_status;
2716 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2718 unsigned int mac_state;
2721 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2724 mac_state = hclge_get_mac_link_status(hdev);
2726 if (hdev->hw.mac.phydev) {
2727 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2728 link_stat = mac_state &
2729 hdev->hw.mac.phydev->link;
2734 link_stat = mac_state;
2740 static void hclge_update_link_status(struct hclge_dev *hdev)
2742 struct hnae3_client *rclient = hdev->roce_client;
2743 struct hnae3_client *client = hdev->nic_client;
2744 struct hnae3_handle *rhandle;
2745 struct hnae3_handle *handle;
2751 state = hclge_get_mac_phy_link(hdev);
2752 if (state != hdev->hw.mac.link) {
2753 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2754 handle = &hdev->vport[i].nic;
2755 client->ops->link_status_change(handle, state);
2756 hclge_config_mac_tnl_int(hdev, state);
2757 rhandle = &hdev->vport[i].roce;
2758 if (rclient && rclient->ops->link_status_change)
2759 rclient->ops->link_status_change(rhandle,
2762 hdev->hw.mac.link = state;
2766 static void hclge_update_port_capability(struct hclge_mac *mac)
2768 /* update fec ability by speed */
2769 hclge_convert_setting_fec(mac);
2771 /* firmware can not identify back plane type, the media type
2772 * read from configuration can help deal it
2774 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2775 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2776 mac->module_type = HNAE3_MODULE_TYPE_KR;
2777 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2778 mac->module_type = HNAE3_MODULE_TYPE_TP;
2780 if (mac->support_autoneg) {
2781 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2782 linkmode_copy(mac->advertising, mac->supported);
2784 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2786 linkmode_zero(mac->advertising);
2790 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2792 struct hclge_sfp_info_cmd *resp;
2793 struct hclge_desc desc;
2796 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2797 resp = (struct hclge_sfp_info_cmd *)desc.data;
2798 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2799 if (ret == -EOPNOTSUPP) {
2800 dev_warn(&hdev->pdev->dev,
2801 "IMP do not support get SFP speed %d\n", ret);
2804 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2808 *speed = le32_to_cpu(resp->speed);
2813 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2815 struct hclge_sfp_info_cmd *resp;
2816 struct hclge_desc desc;
2819 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2820 resp = (struct hclge_sfp_info_cmd *)desc.data;
2822 resp->query_type = QUERY_ACTIVE_SPEED;
2824 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2825 if (ret == -EOPNOTSUPP) {
2826 dev_warn(&hdev->pdev->dev,
2827 "IMP does not support get SFP info %d\n", ret);
2830 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2834 mac->speed = le32_to_cpu(resp->speed);
2835 /* if resp->speed_ability is 0, it means it's an old version
2836 * firmware, do not update these params
2838 if (resp->speed_ability) {
2839 mac->module_type = le32_to_cpu(resp->module_type);
2840 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2841 mac->autoneg = resp->autoneg;
2842 mac->support_autoneg = resp->autoneg_ability;
2843 mac->speed_type = QUERY_ACTIVE_SPEED;
2844 if (!resp->active_fec)
2847 mac->fec_mode = BIT(resp->active_fec);
2849 mac->speed_type = QUERY_SFP_SPEED;
2855 static int hclge_update_port_info(struct hclge_dev *hdev)
2857 struct hclge_mac *mac = &hdev->hw.mac;
2858 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2861 /* get the port info from SFP cmd if not copper port */
2862 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2865 /* if IMP does not support get SFP/qSFP info, return directly */
2866 if (!hdev->support_sfp_query)
2869 if (hdev->pdev->revision >= 0x21)
2870 ret = hclge_get_sfp_info(hdev, mac);
2872 ret = hclge_get_sfp_speed(hdev, &speed);
2874 if (ret == -EOPNOTSUPP) {
2875 hdev->support_sfp_query = false;
2881 if (hdev->pdev->revision >= 0x21) {
2882 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2883 hclge_update_port_capability(mac);
2886 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2889 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2890 return 0; /* do nothing if no SFP */
2892 /* must config full duplex for SFP */
2893 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2897 static int hclge_get_status(struct hnae3_handle *handle)
2899 struct hclge_vport *vport = hclge_get_vport(handle);
2900 struct hclge_dev *hdev = vport->back;
2902 hclge_update_link_status(hdev);
2904 return hdev->hw.mac.link;
2907 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2909 if (pci_num_vf(hdev->pdev) == 0) {
2910 dev_err(&hdev->pdev->dev,
2911 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2915 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2916 dev_err(&hdev->pdev->dev,
2917 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2918 vf, pci_num_vf(hdev->pdev));
2922 /* VF start from 1 in vport */
2923 vf += HCLGE_VF_VPORT_START_NUM;
2924 return &hdev->vport[vf];
2927 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2928 struct ifla_vf_info *ivf)
2930 struct hclge_vport *vport = hclge_get_vport(handle);
2931 struct hclge_dev *hdev = vport->back;
2933 vport = hclge_get_vf_vport(hdev, vf);
2938 ivf->linkstate = vport->vf_info.link_state;
2939 ivf->spoofchk = vport->vf_info.spoofchk;
2940 ivf->trusted = vport->vf_info.trusted;
2941 ivf->min_tx_rate = 0;
2942 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2943 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2948 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2951 struct hclge_vport *vport = hclge_get_vport(handle);
2952 struct hclge_dev *hdev = vport->back;
2954 vport = hclge_get_vf_vport(hdev, vf);
2958 vport->vf_info.link_state = link_state;
2963 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2965 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2967 /* fetch the events from their corresponding regs */
2968 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2969 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2970 msix_src_reg = hclge_read_dev(&hdev->hw,
2971 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2973 /* Assumption: If by any chance reset and mailbox events are reported
2974 * together then we will only process reset event in this go and will
2975 * defer the processing of the mailbox events. Since, we would have not
2976 * cleared RX CMDQ event this time we would receive again another
2977 * interrupt from H/W just for the mailbox.
2979 * check for vector0 reset event sources
2981 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2982 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2983 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2984 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2985 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2986 hdev->rst_stats.imp_rst_cnt++;
2987 return HCLGE_VECTOR0_EVENT_RST;
2990 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2991 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2992 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2993 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2994 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2995 hdev->rst_stats.global_rst_cnt++;
2996 return HCLGE_VECTOR0_EVENT_RST;
2999 /* check for vector0 msix event source */
3000 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3001 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
3003 *clearval = msix_src_reg;
3004 return HCLGE_VECTOR0_EVENT_ERR;
3007 /* check for vector0 mailbox(=CMDQ RX) event source */
3008 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3009 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3010 *clearval = cmdq_src_reg;
3011 return HCLGE_VECTOR0_EVENT_MBX;
3014 /* print other vector0 event source */
3015 dev_info(&hdev->pdev->dev,
3016 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3017 cmdq_src_reg, msix_src_reg);
3018 *clearval = msix_src_reg;
3020 return HCLGE_VECTOR0_EVENT_OTHER;
3023 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3026 switch (event_type) {
3027 case HCLGE_VECTOR0_EVENT_RST:
3028 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3030 case HCLGE_VECTOR0_EVENT_MBX:
3031 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3038 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3040 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3041 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3042 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3043 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3044 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3047 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3049 writel(enable ? 1 : 0, vector->addr);
3052 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3054 struct hclge_dev *hdev = data;
3058 hclge_enable_vector(&hdev->misc_vector, false);
3059 event_cause = hclge_check_event_cause(hdev, &clearval);
3061 /* vector 0 interrupt is shared with reset and mailbox source events.*/
3062 switch (event_cause) {
3063 case HCLGE_VECTOR0_EVENT_ERR:
3064 /* we do not know what type of reset is required now. This could
3065 * only be decided after we fetch the type of errors which
3066 * caused this event. Therefore, we will do below for now:
3067 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3068 * have defered type of reset to be used.
3069 * 2. Schedule the reset serivce task.
3070 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3071 * will fetch the correct type of reset. This would be done
3072 * by first decoding the types of errors.
3074 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3076 case HCLGE_VECTOR0_EVENT_RST:
3077 hclge_reset_task_schedule(hdev);
3079 case HCLGE_VECTOR0_EVENT_MBX:
3080 /* If we are here then,
3081 * 1. Either we are not handling any mbx task and we are not
3084 * 2. We could be handling a mbx task but nothing more is
3086 * In both cases, we should schedule mbx task as there are more
3087 * mbx messages reported by this interrupt.
3089 hclge_mbx_task_schedule(hdev);
3092 dev_warn(&hdev->pdev->dev,
3093 "received unknown or unhandled event of vector0\n");
3097 hclge_clear_event_cause(hdev, event_cause, clearval);
3099 /* Enable interrupt if it is not cause by reset. And when
3100 * clearval equal to 0, it means interrupt status may be
3101 * cleared by hardware before driver reads status register.
3102 * For this case, vector0 interrupt also should be enabled.
3105 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3106 hclge_enable_vector(&hdev->misc_vector, true);
3112 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3114 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3115 dev_warn(&hdev->pdev->dev,
3116 "vector(vector_id %d) has been freed.\n", vector_id);
3120 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3121 hdev->num_msi_left += 1;
3122 hdev->num_msi_used -= 1;
3125 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3127 struct hclge_misc_vector *vector = &hdev->misc_vector;
3129 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3131 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3132 hdev->vector_status[0] = 0;
3134 hdev->num_msi_left -= 1;
3135 hdev->num_msi_used += 1;
3138 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3139 const cpumask_t *mask)
3141 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3144 cpumask_copy(&hdev->affinity_mask, mask);
3147 static void hclge_irq_affinity_release(struct kref *ref)
3151 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3153 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3154 &hdev->affinity_mask);
3156 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3157 hdev->affinity_notify.release = hclge_irq_affinity_release;
3158 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3159 &hdev->affinity_notify);
3162 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3164 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3165 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3168 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3172 hclge_get_misc_vector(hdev);
3174 /* this would be explicitly freed in the end */
3175 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3176 0, "hclge_misc", hdev);
3178 hclge_free_vector(hdev, 0);
3179 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3180 hdev->misc_vector.vector_irq);
3186 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3188 free_irq(hdev->misc_vector.vector_irq, hdev);
3189 hclge_free_vector(hdev, 0);
3192 int hclge_notify_client(struct hclge_dev *hdev,
3193 enum hnae3_reset_notify_type type)
3195 struct hnae3_client *client = hdev->nic_client;
3198 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3201 if (!client->ops->reset_notify)
3204 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3205 struct hnae3_handle *handle = &hdev->vport[i].nic;
3208 ret = client->ops->reset_notify(handle, type);
3210 dev_err(&hdev->pdev->dev,
3211 "notify nic client failed %d(%d)\n", type, ret);
3219 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3220 enum hnae3_reset_notify_type type)
3222 struct hnae3_client *client = hdev->roce_client;
3226 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3229 if (!client->ops->reset_notify)
3232 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3233 struct hnae3_handle *handle = &hdev->vport[i].roce;
3235 ret = client->ops->reset_notify(handle, type);
3237 dev_err(&hdev->pdev->dev,
3238 "notify roce client failed %d(%d)",
3247 static int hclge_reset_wait(struct hclge_dev *hdev)
3249 #define HCLGE_RESET_WATI_MS 100
3250 #define HCLGE_RESET_WAIT_CNT 200
3251 u32 val, reg, reg_bit;
3254 switch (hdev->reset_type) {
3255 case HNAE3_IMP_RESET:
3256 reg = HCLGE_GLOBAL_RESET_REG;
3257 reg_bit = HCLGE_IMP_RESET_BIT;
3259 case HNAE3_GLOBAL_RESET:
3260 reg = HCLGE_GLOBAL_RESET_REG;
3261 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3263 case HNAE3_FUNC_RESET:
3264 reg = HCLGE_FUN_RST_ING;
3265 reg_bit = HCLGE_FUN_RST_ING_B;
3267 case HNAE3_FLR_RESET:
3270 dev_err(&hdev->pdev->dev,
3271 "Wait for unsupported reset type: %d\n",
3276 if (hdev->reset_type == HNAE3_FLR_RESET) {
3277 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3278 cnt++ < HCLGE_RESET_WAIT_CNT)
3279 msleep(HCLGE_RESET_WATI_MS);
3281 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3282 dev_err(&hdev->pdev->dev,
3283 "flr wait timeout: %u\n", cnt);
3290 val = hclge_read_dev(&hdev->hw, reg);
3291 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3292 msleep(HCLGE_RESET_WATI_MS);
3293 val = hclge_read_dev(&hdev->hw, reg);
3297 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3298 dev_warn(&hdev->pdev->dev,
3299 "Wait for reset timeout: %d\n", hdev->reset_type);
3306 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3308 struct hclge_vf_rst_cmd *req;
3309 struct hclge_desc desc;
3311 req = (struct hclge_vf_rst_cmd *)desc.data;
3312 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3313 req->dest_vfid = func_id;
3318 return hclge_cmd_send(&hdev->hw, &desc, 1);
3321 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3325 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3326 struct hclge_vport *vport = &hdev->vport[i];
3329 /* Send cmd to set/clear VF's FUNC_RST_ING */
3330 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3332 dev_err(&hdev->pdev->dev,
3333 "set vf(%u) rst failed %d!\n",
3334 vport->vport_id, ret);
3338 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3341 /* Inform VF to process the reset.
3342 * hclge_inform_reset_assert_to_vf may fail if VF
3343 * driver is not loaded.
3345 ret = hclge_inform_reset_assert_to_vf(vport);
3347 dev_warn(&hdev->pdev->dev,
3348 "inform reset to vf(%u) failed %d!\n",
3349 vport->vport_id, ret);
3355 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3357 struct hclge_pf_rst_sync_cmd *req;
3358 struct hclge_desc desc;
3362 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3363 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3366 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3367 /* for compatible with old firmware, wait
3368 * 100 ms for VF to stop IO
3370 if (ret == -EOPNOTSUPP) {
3371 msleep(HCLGE_RESET_SYNC_TIME);
3374 dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3377 } else if (req->all_vf_ready) {
3380 msleep(HCLGE_PF_RESET_SYNC_TIME);
3381 hclge_cmd_reuse_desc(&desc, true);
3382 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3384 dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3388 void hclge_report_hw_error(struct hclge_dev *hdev,
3389 enum hnae3_hw_error_type type)
3391 struct hnae3_client *client = hdev->nic_client;
3394 if (!client || !client->ops->process_hw_error ||
3395 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3398 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3399 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3402 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3406 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3407 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3408 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3409 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3410 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3413 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3414 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3415 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3416 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3420 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3422 struct hclge_desc desc;
3423 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3426 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3427 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3428 req->fun_reset_vfid = func_id;
3430 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3432 dev_err(&hdev->pdev->dev,
3433 "send function reset cmd fail, status =%d\n", ret);
3438 static void hclge_do_reset(struct hclge_dev *hdev)
3440 struct hnae3_handle *handle = &hdev->vport[0].nic;
3441 struct pci_dev *pdev = hdev->pdev;
3444 if (hclge_get_hw_reset_stat(handle)) {
3445 dev_info(&pdev->dev, "Hardware reset not finish\n");
3446 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3447 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3448 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3452 switch (hdev->reset_type) {
3453 case HNAE3_GLOBAL_RESET:
3454 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3455 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3456 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3457 dev_info(&pdev->dev, "Global Reset requested\n");
3459 case HNAE3_FUNC_RESET:
3460 dev_info(&pdev->dev, "PF Reset requested\n");
3461 /* schedule again to check later */
3462 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3463 hclge_reset_task_schedule(hdev);
3465 case HNAE3_FLR_RESET:
3466 dev_info(&pdev->dev, "FLR requested\n");
3467 /* schedule again to check later */
3468 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3469 hclge_reset_task_schedule(hdev);
3472 dev_warn(&pdev->dev,
3473 "Unsupported reset type: %d\n", hdev->reset_type);
3478 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3479 unsigned long *addr)
3481 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3482 struct hclge_dev *hdev = ae_dev->priv;
3484 /* first, resolve any unknown reset type to the known type(s) */
3485 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3486 /* we will intentionally ignore any errors from this function
3487 * as we will end up in *some* reset request in any case
3489 hclge_handle_hw_msix_error(hdev, addr);
3490 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3491 /* We defered the clearing of the error event which caused
3492 * interrupt since it was not posssible to do that in
3493 * interrupt context (and this is the reason we introduced
3494 * new UNKNOWN reset type). Now, the errors have been
3495 * handled and cleared in hardware we can safely enable
3496 * interrupts. This is an exception to the norm.
3498 hclge_enable_vector(&hdev->misc_vector, true);
3501 /* return the highest priority reset level amongst all */
3502 if (test_bit(HNAE3_IMP_RESET, addr)) {
3503 rst_level = HNAE3_IMP_RESET;
3504 clear_bit(HNAE3_IMP_RESET, addr);
3505 clear_bit(HNAE3_GLOBAL_RESET, addr);
3506 clear_bit(HNAE3_FUNC_RESET, addr);
3507 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3508 rst_level = HNAE3_GLOBAL_RESET;
3509 clear_bit(HNAE3_GLOBAL_RESET, addr);
3510 clear_bit(HNAE3_FUNC_RESET, addr);
3511 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3512 rst_level = HNAE3_FUNC_RESET;
3513 clear_bit(HNAE3_FUNC_RESET, addr);
3514 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3515 rst_level = HNAE3_FLR_RESET;
3516 clear_bit(HNAE3_FLR_RESET, addr);
3519 if (hdev->reset_type != HNAE3_NONE_RESET &&
3520 rst_level < hdev->reset_type)
3521 return HNAE3_NONE_RESET;
3526 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3530 switch (hdev->reset_type) {
3531 case HNAE3_IMP_RESET:
3532 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3534 case HNAE3_GLOBAL_RESET:
3535 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3544 /* For revision 0x20, the reset interrupt source
3545 * can only be cleared after hardware reset done
3547 if (hdev->pdev->revision == 0x20)
3548 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3551 hclge_enable_vector(&hdev->misc_vector, true);
3554 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3558 switch (hdev->reset_type) {
3559 case HNAE3_FUNC_RESET:
3561 case HNAE3_FLR_RESET:
3562 ret = hclge_set_all_vf_rst(hdev, true);
3571 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3575 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3577 reg_val |= HCLGE_NIC_SW_RST_RDY;
3579 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3581 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3584 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3589 switch (hdev->reset_type) {
3590 case HNAE3_FUNC_RESET:
3591 /* to confirm whether all running VF is ready
3592 * before request PF reset
3594 ret = hclge_func_reset_sync_vf(hdev);
3598 ret = hclge_func_reset_cmd(hdev, 0);
3600 dev_err(&hdev->pdev->dev,
3601 "asserting function reset fail %d!\n", ret);
3605 /* After performaning pf reset, it is not necessary to do the
3606 * mailbox handling or send any command to firmware, because
3607 * any mailbox handling or command to firmware is only valid
3608 * after hclge_cmd_init is called.
3610 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3611 hdev->rst_stats.pf_rst_cnt++;
3613 case HNAE3_FLR_RESET:
3614 /* to confirm whether all running VF is ready
3615 * before request PF reset
3617 ret = hclge_func_reset_sync_vf(hdev);
3621 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3622 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3623 hdev->rst_stats.flr_rst_cnt++;
3625 case HNAE3_IMP_RESET:
3626 hclge_handle_imp_error(hdev);
3627 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3628 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3629 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3635 /* inform hardware that preparatory work is done */
3636 msleep(HCLGE_RESET_SYNC_TIME);
3637 hclge_reset_handshake(hdev, true);
3638 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3643 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3645 #define MAX_RESET_FAIL_CNT 5
3647 if (hdev->reset_pending) {
3648 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3649 hdev->reset_pending);
3651 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3652 HCLGE_RESET_INT_M) {
3653 dev_info(&hdev->pdev->dev,
3654 "reset failed because new reset interrupt\n");
3655 hclge_clear_reset_cause(hdev);
3657 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3658 hdev->rst_stats.reset_fail_cnt++;
3659 set_bit(hdev->reset_type, &hdev->reset_pending);
3660 dev_info(&hdev->pdev->dev,
3661 "re-schedule reset task(%u)\n",
3662 hdev->rst_stats.reset_fail_cnt);
3666 hclge_clear_reset_cause(hdev);
3668 /* recover the handshake status when reset fail */
3669 hclge_reset_handshake(hdev, true);
3671 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3673 hclge_dbg_dump_rst_info(hdev);
3678 static int hclge_set_rst_done(struct hclge_dev *hdev)
3680 struct hclge_pf_rst_done_cmd *req;
3681 struct hclge_desc desc;
3684 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3685 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3686 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3688 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3689 /* To be compatible with the old firmware, which does not support
3690 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3693 if (ret == -EOPNOTSUPP) {
3694 dev_warn(&hdev->pdev->dev,
3695 "current firmware does not support command(0x%x)!\n",
3696 HCLGE_OPC_PF_RST_DONE);
3699 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3706 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3710 switch (hdev->reset_type) {
3711 case HNAE3_FUNC_RESET:
3713 case HNAE3_FLR_RESET:
3714 ret = hclge_set_all_vf_rst(hdev, false);
3716 case HNAE3_GLOBAL_RESET:
3718 case HNAE3_IMP_RESET:
3719 ret = hclge_set_rst_done(hdev);
3725 /* clear up the handshake status after re-initialize done */
3726 hclge_reset_handshake(hdev, false);
3731 static int hclge_reset_stack(struct hclge_dev *hdev)
3735 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3739 ret = hclge_reset_ae_dev(hdev->ae_dev);
3743 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3747 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
/* hclge_reset() - run the full PF reset sequence for the level recorded in
 * hdev->reset_type: bring clients down, wait for the hardware reset to
 * complete, rebuild the stack, bring clients back up and update statistics.
 * NOTE(review): this extraction is lossy — the 'ret' declaration, rtnl
 * locking and the err_reset/err_reset_lock labels targeted by the gotos
 * below are not visible here; verify against the complete file.
 */
3750 static void hclge_reset(struct hclge_dev *hdev)
3752 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3753 enum hnae3_reset_type reset_level;
/* publish the reset type so the enet layer can observe the reset */
3756 /* Initialize ae_dev reset status as well, in case enet layer wants to
3757 * know if device is undergoing reset
3759 ae_dev->reset_type = hdev->reset_type;
3760 hdev->rst_stats.reset_cnt++;
3761 /* perform reset of the stack & ae device for a client */
3762 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3766 ret = hclge_reset_prepare_down(hdev);
3771 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3773 goto err_reset_lock;
/* arm the hardware reset and wait for hardware completion */
3777 ret = hclge_reset_prepare_wait(hdev);
3781 if (hclge_reset_wait(hdev))
3784 hdev->rst_stats.hw_reset_done_cnt++;
3786 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
/* re-initialize ae device and clients */
3792 ret = hclge_reset_stack(hdev);
3794 goto err_reset_lock;
3796 hclge_clear_reset_cause(hdev);
3798 ret = hclge_reset_prepare_up(hdev);
3800 goto err_reset_lock;
3804 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3805 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3809 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3814 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3816 goto err_reset_lock;
3820 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
/* success: refresh reset bookkeeping and clear the published state */
3824 hdev->last_reset_time = jiffies;
3825 hdev->rst_stats.reset_fail_cnt = 0;
3826 hdev->rst_stats.reset_done_cnt++;
3827 ae_dev->reset_type = HNAE3_NONE_RESET;
3829 /* if default_reset_request has a higher level reset request,
3830 * it should be handled as soon as possible. since some errors
3831 * need this kind of reset to fix.
3833 reset_level = hclge_get_reset_level(ae_dev,
3834 &hdev->default_reset_request);
3835 if (reset_level != HNAE3_NONE_RESET)
3836 set_bit(reset_level, &hdev->reset_request);
/* error path: let the error handler decide whether to retry the reset */
3843 if (hclge_reset_err_handle(hdev))
3844 hclge_reset_task_schedule(hdev);
/* hclge_reset_event() - entry point for reset requests from APEI error
 * handling or stack timeouts.  Throttles repeat requests, picks the reset
 * level, records it in hdev->reset_request and schedules the reset task.
 * NOTE(review): lossy extraction — the 'if (!handle)' guard before the
 * handle assignment and the 'hdev->reset_level =' target of the
 * hclge_get_reset_level() call below are not visible; verify against the
 * complete file.
 */
3847 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3849 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3850 struct hclge_dev *hdev = ae_dev->priv;
3852 /* We might end up getting called broadly because of 2 below cases:
3853 * 1. Recoverable error was conveyed through APEI and only way to bring
3854 * normalcy is to reset.
3855 * 2. A new reset request from the stack due to timeout
3857 * For the first case,error event might not have ae handle available.
3858 * check if this is a new reset request and we are not here just because
3859 * last reset attempt did not succeed and watchdog hit us again. We will
3860 * know this if last reset request did not occur very recently (watchdog
3861 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
3862 * In case of new request we reset the "reset level" to PF reset.
3863 * And if it is a repeat reset request of the most recent one then we
3864 * want to make sure we throttle the reset request. Therefore, we will
3865 * not allow it again before 3*HZ times.
3868 handle = &hdev->vport[0].nic;
/* throttle: re-arm the timer instead of resetting again too soon */
3870 if (time_before(jiffies, (hdev->last_reset_time +
3871 HCLGE_RESET_INTERVAL))) {
3872 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3874 } else if (hdev->default_reset_request) {
3876 hclge_get_reset_level(ae_dev,
3877 &hdev->default_reset_request);
3878 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3879 hdev->reset_level = HNAE3_FUNC_RESET;
3882 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3885 /* request reset & schedule reset task */
3886 set_bit(hdev->reset_level, &hdev->reset_request);
3887 hclge_reset_task_schedule(hdev);
/* escalate the level for the next request if this one does not recover */
3889 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3890 hdev->reset_level++;
3893 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3894 enum hnae3_reset_type rst_type)
3896 struct hclge_dev *hdev = ae_dev->priv;
3898 set_bit(rst_type, &hdev->default_reset_request);
3901 static void hclge_reset_timer(struct timer_list *t)
3903 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3905 /* if default_reset_request has no value, it means that this reset
3906 * request has already be handled, so just return here
3908 if (!hdev->default_reset_request)
3911 dev_info(&hdev->pdev->dev,
3912 "triggering reset in reset timer\n");
3913 hclge_reset_event(hdev->pdev, NULL);
3916 static void hclge_reset_subtask(struct hclge_dev *hdev)
3918 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3920 /* check if there is any ongoing reset in the hardware. This status can
3921 * be checked from reset_pending. If there is then, we need to wait for
3922 * hardware to complete reset.
3923 * a. If we are able to figure out in reasonable time that hardware
3924 * has fully resetted then, we can proceed with driver, client
3926 * b. else, we can come back later to check this status so re-sched
3929 hdev->last_reset_time = jiffies;
3930 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3931 if (hdev->reset_type != HNAE3_NONE_RESET)
3934 /* check if we got any *new* reset requests to be honored */
3935 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3936 if (hdev->reset_type != HNAE3_NONE_RESET)
3937 hclge_do_reset(hdev);
3939 hdev->reset_type = HNAE3_NONE_RESET;
3942 static void hclge_reset_service_task(struct work_struct *work)
3944 struct hclge_dev *hdev =
3945 container_of(work, struct hclge_dev, rst_service_task);
3947 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3950 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3952 hclge_reset_subtask(hdev);
3954 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3957 static void hclge_mailbox_service_task(struct work_struct *work)
3959 struct hclge_dev *hdev =
3960 container_of(work, struct hclge_dev, mbx_service_task);
3962 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3965 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3967 hclge_mbx_handler(hdev);
3969 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3972 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3976 /* start from vport 1 for PF is always alive */
3977 for (i = 1; i < hdev->num_alloc_vport; i++) {
3978 struct hclge_vport *vport = &hdev->vport[i];
3980 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3981 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3983 /* If vf is not alive, set to default value */
3984 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3985 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3989 static void hclge_service_task(struct work_struct *work)
3991 struct hclge_dev *hdev =
3992 container_of(work, struct hclge_dev, service_task.work);
3994 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3996 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3997 hclge_update_stats_for_all(hdev);
3998 hdev->hw_stats.stats_timer = 0;
4001 hclge_update_port_info(hdev);
4002 hclge_update_link_status(hdev);
4003 hclge_update_vport_alive(hdev);
4004 hclge_sync_vlan_filter(hdev);
4006 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
4007 hclge_rfs_filter_expire(hdev);
4008 hdev->fd_arfs_expire_timer = 0;
4011 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
4014 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4016 /* VF handle has no client */
4017 if (!handle->client)
4018 return container_of(handle, struct hclge_vport, nic);
4019 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4020 return container_of(handle, struct hclge_vport, roce);
4022 return container_of(handle, struct hclge_vport, nic);
/* hclge_get_vector() - allocate up to vector_num MSI vectors for the vport,
 * filling vector_info with the irq number and the per-vector register base.
 * Vector 0 is reserved (loop starts at 1).  Returns the count allocated.
 * NOTE(review): lossy extraction — the declarations of i/j/alloc, the
 * vector_info advance / alloc++ inside the loop and the final return are
 * not visible here; verify against the complete file.
 */
4025 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4026 struct hnae3_vector_info *vector_info)
4028 struct hclge_vport *vport = hclge_get_vport(handle);
4029 struct hnae3_vector_info *vector = vector_info;
4030 struct hclge_dev *hdev = vport->back;
/* bound the request by NIC vectors (minus the misc vector) and by what
 * is still unallocated
 */
4034 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4035 vector_num = min(hdev->num_msi_left, vector_num);
4037 for (j = 0; j < vector_num; j++) {
4038 for (i = 1; i < hdev->num_msi; i++) {
4039 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4040 vector->vector = pci_irq_vector(hdev->pdev, i);
4041 vector->io_addr = hdev->hw.io_base +
4042 HCLGE_VECTOR_REG_BASE +
4043 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4045 HCLGE_VECTOR_VF_OFFSET;
/* claim this vector slot for the requesting vport */
4046 hdev->vector_status[i] = vport->vport_id;
4047 hdev->vector_irq[i] = vector->vector;
/* update the allocation accounting before returning */
4056 hdev->num_msi_left -= alloc;
4057 hdev->num_msi_used += alloc;
4062 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4066 for (i = 0; i < hdev->num_msi; i++)
4067 if (vector == hdev->vector_irq[i])
4073 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4075 struct hclge_vport *vport = hclge_get_vport(handle);
4076 struct hclge_dev *hdev = vport->back;
4079 vector_id = hclge_get_vector_index(hdev, vector);
4080 if (vector_id < 0) {
4081 dev_err(&hdev->pdev->dev,
4082 "Get vector index fail. vector_id =%d\n", vector_id);
4086 hclge_free_vector(hdev, vector_id);
4091 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4093 return HCLGE_RSS_KEY_SIZE;
4096 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4098 return HCLGE_RSS_IND_TBL_SIZE;
4101 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4102 const u8 hfunc, const u8 *key)
4104 struct hclge_rss_config_cmd *req;
4105 unsigned int key_offset = 0;
4106 struct hclge_desc desc;
4111 key_counts = HCLGE_RSS_KEY_SIZE;
4112 req = (struct hclge_rss_config_cmd *)desc.data;
4114 while (key_counts) {
4115 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4118 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4119 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4121 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4122 memcpy(req->hash_key,
4123 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4125 key_counts -= key_size;
4127 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4129 dev_err(&hdev->pdev->dev,
4130 "Configure RSS config fail, status = %d\n",
4138 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4140 struct hclge_rss_indirection_table_cmd *req;
4141 struct hclge_desc desc;
4145 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4147 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4148 hclge_cmd_setup_basic_desc
4149 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4151 req->start_table_index =
4152 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4153 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4155 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4156 req->rss_result[j] =
4157 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4159 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4161 dev_err(&hdev->pdev->dev,
4162 "Configure rss indir table fail,status = %d\n",
4170 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4171 u16 *tc_size, u16 *tc_offset)
4173 struct hclge_rss_tc_mode_cmd *req;
4174 struct hclge_desc desc;
4178 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4179 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4181 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4184 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4185 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4186 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4187 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4188 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4190 req->rss_tc_mode[i] = cpu_to_le16(mode);
4193 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4195 dev_err(&hdev->pdev->dev,
4196 "Configure rss tc mode fail, status = %d\n", ret);
4201 static void hclge_get_rss_type(struct hclge_vport *vport)
4203 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4204 vport->rss_tuple_sets.ipv4_udp_en ||
4205 vport->rss_tuple_sets.ipv4_sctp_en ||
4206 vport->rss_tuple_sets.ipv6_tcp_en ||
4207 vport->rss_tuple_sets.ipv6_udp_en ||
4208 vport->rss_tuple_sets.ipv6_sctp_en)
4209 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4210 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4211 vport->rss_tuple_sets.ipv6_fragment_en)
4212 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4214 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4217 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4219 struct hclge_rss_input_tuple_cmd *req;
4220 struct hclge_desc desc;
4223 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4225 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4227 /* Get the tuple cfg from pf */
4228 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4229 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4230 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4231 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4232 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4233 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4234 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4235 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4236 hclge_get_rss_type(&hdev->vport[0]);
4237 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4239 dev_err(&hdev->pdev->dev,
4240 "Configure rss input fail, status = %d\n", ret);
4244 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4247 struct hclge_vport *vport = hclge_get_vport(handle);
4250 /* Get hash algorithm */
4252 switch (vport->rss_algo) {
4253 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4254 *hfunc = ETH_RSS_HASH_TOP;
4256 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4257 *hfunc = ETH_RSS_HASH_XOR;
4260 *hfunc = ETH_RSS_HASH_UNKNOWN;
4265 /* Get the RSS Key required by the user */
4267 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4269 /* Get indirect table */
4271 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4272 indir[i] = vport->rss_indirection_tbl[i];
4277 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4278 const u8 *key, const u8 hfunc)
4280 struct hclge_vport *vport = hclge_get_vport(handle);
4281 struct hclge_dev *hdev = vport->back;
4285 /* Set the RSS Hash Key if specififed by the user */
4288 case ETH_RSS_HASH_TOP:
4289 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4291 case ETH_RSS_HASH_XOR:
4292 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4294 case ETH_RSS_HASH_NO_CHANGE:
4295 hash_algo = vport->rss_algo;
4301 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4305 /* Update the shadow RSS key with user specified qids */
4306 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4307 vport->rss_algo = hash_algo;
4310 /* Update the shadow RSS table with user specified qids */
4311 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4312 vport->rss_indirection_tbl[i] = indir[i];
4314 /* Update the hardware */
4315 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4318 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4320 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4322 if (nfc->data & RXH_L4_B_2_3)
4323 hash_sets |= HCLGE_D_PORT_BIT;
4325 hash_sets &= ~HCLGE_D_PORT_BIT;
4327 if (nfc->data & RXH_IP_SRC)
4328 hash_sets |= HCLGE_S_IP_BIT;
4330 hash_sets &= ~HCLGE_S_IP_BIT;
4332 if (nfc->data & RXH_IP_DST)
4333 hash_sets |= HCLGE_D_IP_BIT;
4335 hash_sets &= ~HCLGE_D_IP_BIT;
4337 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4338 hash_sets |= HCLGE_V_TAG_BIT;
/* hclge_set_rss_tuple() - ethtool set-rxnfc handler: update the RSS input
 * tuple for one flow type in hardware, then mirror the whole tuple set in
 * the vport shadow copy on success.
 * NOTE(review): lossy extraction — the 'int ret; u8 tuple_sets;'
 * declarations, the flow-type case labels (TCP_V4_FLOW etc.), 'break's,
 * the -EINVAL returns and the final 'return 0;' are not visible here;
 * verify against the complete file.
 */
4343 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4344 struct ethtool_rxnfc *nfc)
4346 struct hclge_vport *vport = hclge_get_vport(handle);
4347 struct hclge_dev *hdev = vport->back;
4348 struct hclge_rss_input_tuple_cmd *req;
4349 struct hclge_desc desc;
/* reject flags other than src/dst IP and the two L4 port halves */
4353 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4354 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4357 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4358 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
/* start from the current shadow configuration for every flow type */
4360 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4361 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4362 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4363 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4364 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4365 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4366 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4367 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
/* overwrite only the field matching nfc->flow_type (case labels lost) */
4369 tuple_sets = hclge_get_rss_hash_bits(nfc);
4370 switch (nfc->flow_type) {
4372 req->ipv4_tcp_en = tuple_sets;
4375 req->ipv6_tcp_en = tuple_sets;
4378 req->ipv4_udp_en = tuple_sets;
4381 req->ipv6_udp_en = tuple_sets;
4384 req->ipv4_sctp_en = tuple_sets;
/* IPv6 SCTP cannot hash on L4 ports on this hardware */
4387 if ((nfc->data & RXH_L4_B_0_1) ||
4388 (nfc->data & RXH_L4_B_2_3))
4391 req->ipv6_sctp_en = tuple_sets;
4394 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4397 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4403 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4405 dev_err(&hdev->pdev->dev,
4406 "Set rss tuple fail, status = %d\n", ret);
/* command succeeded: commit the new tuples to the shadow copy */
4410 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4411 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4412 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4413 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4414 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4415 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4416 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4417 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4418 hclge_get_rss_type(vport);
/* hclge_get_rss_tuple() - ethtool get-rxnfc handler: report which RXH_*
 * tuple flags are enabled for the requested flow type, read from the
 * vport's shadow tuple configuration.
 * NOTE(review): lossy extraction — the 'u8 tuple_sets;' declaration,
 * 'nfc->data = 0;' initialization, the flow-type case labels, 'break's
 * and the returns are not visible here; verify against the complete file.
 */
4422 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4423 struct ethtool_rxnfc *nfc)
4425 struct hclge_vport *vport = hclge_get_vport(handle);
/* pick the shadow tuple byte for the flow type (case labels lost) */
4430 switch (nfc->flow_type) {
4432 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4435 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4438 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4441 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4444 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4447 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4451 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
/* translate the hardware bits back into ethtool RXH_* flags */
4460 if (tuple_sets & HCLGE_D_PORT_BIT)
4461 nfc->data |= RXH_L4_B_2_3;
4462 if (tuple_sets & HCLGE_S_PORT_BIT)
4463 nfc->data |= RXH_L4_B_0_1;
4464 if (tuple_sets & HCLGE_D_IP_BIT)
4465 nfc->data |= RXH_IP_DST;
4466 if (tuple_sets & HCLGE_S_IP_BIT)
4467 nfc->data |= RXH_IP_SRC;
4472 static int hclge_get_tc_size(struct hnae3_handle *handle)
4474 struct hclge_vport *vport = hclge_get_vport(handle);
4475 struct hclge_dev *hdev = vport->back;
4477 return hdev->rss_size_max;
/* hclge_rss_init_hw() - program the PF's shadow RSS state (indirection
 * table, key/algorithm, input tuples) into hardware and configure the
 * per-TC RSS mode from the enabled TC bitmap.
 * NOTE(review): lossy extraction — the 'u16 roundup_size; int i, ret;'
 * declarations, the early 'return ret;' checks, the 'return -EINVAL;' for
 * an invalid rss_size and the per-TC tc_valid[] assignments inside the
 * loop are not visible here; verify against the complete file.
 */
4480 int hclge_rss_init_hw(struct hclge_dev *hdev)
4482 struct hclge_vport *vport = hdev->vport;
4483 u8 *rss_indir = vport[0].rss_indirection_tbl;
4484 u16 rss_size = vport[0].alloc_rss_size;
4485 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4486 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4487 u8 *key = vport[0].rss_hash_key;
4488 u8 hfunc = vport[0].rss_algo;
4489 u16 tc_valid[HCLGE_MAX_TC_NUM];
4494 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4498 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4502 ret = hclge_set_rss_input_tuple(hdev);
4506 /* Each TC have the same queue size, and tc_size set to hardware is
4507 * the log2 of roundup power of two of rss_size, the acutal queue
4508 * size is limited by indirection table.
4510 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4511 dev_err(&hdev->pdev->dev,
4512 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4517 roundup_size = roundup_pow_of_two(rss_size);
4518 roundup_size = ilog2(roundup_size);
/* enable RSS only for TCs present in the hardware TC map */
4520 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4523 if (!(hdev->hw_tc_map & BIT(i)))
4527 tc_size[i] = roundup_size;
4528 tc_offset[i] = rss_size * i;
4531 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4534 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4536 struct hclge_vport *vport = hdev->vport;
4539 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4540 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4541 vport[j].rss_indirection_tbl[i] =
4542 i % vport[j].alloc_rss_size;
4546 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4548 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4549 struct hclge_vport *vport = hdev->vport;
4551 if (hdev->pdev->revision >= 0x21)
4552 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4554 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4555 vport[i].rss_tuple_sets.ipv4_tcp_en =
4556 HCLGE_RSS_INPUT_TUPLE_OTHER;
4557 vport[i].rss_tuple_sets.ipv4_udp_en =
4558 HCLGE_RSS_INPUT_TUPLE_OTHER;
4559 vport[i].rss_tuple_sets.ipv4_sctp_en =
4560 HCLGE_RSS_INPUT_TUPLE_SCTP;
4561 vport[i].rss_tuple_sets.ipv4_fragment_en =
4562 HCLGE_RSS_INPUT_TUPLE_OTHER;
4563 vport[i].rss_tuple_sets.ipv6_tcp_en =
4564 HCLGE_RSS_INPUT_TUPLE_OTHER;
4565 vport[i].rss_tuple_sets.ipv6_udp_en =
4566 HCLGE_RSS_INPUT_TUPLE_OTHER;
4567 vport[i].rss_tuple_sets.ipv6_sctp_en =
4568 HCLGE_RSS_INPUT_TUPLE_SCTP;
4569 vport[i].rss_tuple_sets.ipv6_fragment_en =
4570 HCLGE_RSS_INPUT_TUPLE_OTHER;
4572 vport[i].rss_algo = rss_algo;
4574 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4575 HCLGE_RSS_KEY_SIZE);
4578 hclge_rss_indir_init_cfg(hdev);
/* hclge_bind_ring_with_vector() - map (en=true) or unmap (en=false) a
 * chain of TQP rings to/from an interrupt vector, batching up to
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries per firmware command.
 * NOTE(review): lossy extraction — the 'int i;'/'i = 0;' initialization,
 * the loop-continuation after a full command is flushed, the 'if (i > 0)'
 * guard around the tail command, the opcode argument of the second
 * hclge_cmd_setup_basic_desc() call and the returns are not visible here;
 * verify against the complete file.
 */
4581 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4582 int vector_id, bool en,
4583 struct hnae3_ring_chain_node *ring_chain)
4585 struct hclge_dev *hdev = vport->back;
4586 struct hnae3_ring_chain_node *node;
4587 struct hclge_desc desc;
4588 struct hclge_ctrl_vector_chain_cmd *req =
4589 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4590 enum hclge_cmd_status status;
4591 enum hclge_opcode_type op;
4592 u16 tqp_type_and_id;
/* one opcode adds rings to the vector, the other removes them */
4595 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4596 hclge_cmd_setup_basic_desc(&desc, op, false);
4597 req->int_vector_id = vector_id;
4600 for (node = ring_chain; node; node = node->next) {
/* encode ring type, TQP index and GL index into one 16-bit field */
4601 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4602 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4604 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4605 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4606 HCLGE_TQP_ID_S, node->tqp_index);
4607 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4609 hnae3_get_field(node->int_gl_idx,
4610 HNAE3_RING_GL_IDX_M,
4611 HNAE3_RING_GL_IDX_S));
4612 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
/* command full: send it and start a fresh descriptor */
4613 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4614 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4615 req->vfid = vport->vport_id;
4617 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4619 dev_err(&hdev->pdev->dev,
4620 "Map TQP fail, status is %d.\n",
4626 hclge_cmd_setup_basic_desc(&desc,
4629 req->int_vector_id = vector_id;
/* flush any remaining partially-filled command */
4634 req->int_cause_num = i;
4635 req->vfid = vport->vport_id;
4636 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4638 dev_err(&hdev->pdev->dev,
4639 "Map TQP fail, status is %d.\n", status);
4647 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4648 struct hnae3_ring_chain_node *ring_chain)
4650 struct hclge_vport *vport = hclge_get_vport(handle);
4651 struct hclge_dev *hdev = vport->back;
4654 vector_id = hclge_get_vector_index(hdev, vector);
4655 if (vector_id < 0) {
4656 dev_err(&hdev->pdev->dev,
4657 "Get vector index fail. vector_id =%d\n", vector_id);
4661 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4664 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4665 struct hnae3_ring_chain_node *ring_chain)
4667 struct hclge_vport *vport = hclge_get_vport(handle);
4668 struct hclge_dev *hdev = vport->back;
4671 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4674 vector_id = hclge_get_vector_index(hdev, vector);
4675 if (vector_id < 0) {
4676 dev_err(&handle->pdev->dev,
4677 "Get vector index fail. ret =%d\n", vector_id);
4681 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4683 dev_err(&handle->pdev->dev,
4684 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4690 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4691 struct hclge_promisc_param *param)
4693 struct hclge_promisc_cfg_cmd *req;
4694 struct hclge_desc desc;
4697 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4699 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4700 req->vf_id = param->vf_id;
4702 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4703 * pdev revision(0x20), new revision support them. The
4704 * value of this two fields will not return error when driver
4705 * send command to fireware in revision(0x20).
4707 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4708 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4710 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4712 dev_err(&hdev->pdev->dev,
4713 "Set promisc mode fail, status is %d.\n", ret);
4718 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4719 bool en_uc, bool en_mc, bool en_bc,
4725 memset(param, 0, sizeof(struct hclge_promisc_param));
4727 param->enable = HCLGE_PROMISC_EN_UC;
4729 param->enable |= HCLGE_PROMISC_EN_MC;
4731 param->enable |= HCLGE_PROMISC_EN_BC;
4732 param->vf_id = vport_id;
4735 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4736 bool en_mc_pmc, bool en_bc_pmc)
4738 struct hclge_dev *hdev = vport->back;
4739 struct hclge_promisc_param param;
4741 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4743 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4746 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4749 struct hclge_vport *vport = hclge_get_vport(handle);
4750 bool en_bc_pmc = true;
4752 /* For revision 0x20, if broadcast promisc enabled, vlan filter is
4753 * always bypassed. So broadcast promisc should be disabled until
4754 * user enable promisc mode
4756 if (handle->pdev->revision == 0x20)
4757 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4759 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4763 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4765 struct hclge_get_fd_mode_cmd *req;
4766 struct hclge_desc desc;
4769 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4771 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4773 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4775 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4779 *fd_mode = req->mode;
4784 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4785 u32 *stage1_entry_num,
4786 u32 *stage2_entry_num,
4787 u16 *stage1_counter_num,
4788 u16 *stage2_counter_num)
4790 struct hclge_get_fd_allocation_cmd *req;
4791 struct hclge_desc desc;
4794 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4796 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4798 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4800 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4805 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4806 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4807 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4808 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4813 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4815 struct hclge_set_fd_key_config_cmd *req;
4816 struct hclge_fd_key_cfg *stage;
4817 struct hclge_desc desc;
4820 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4822 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4823 stage = &hdev->fd_cfg.key_cfg[stage_num];
4824 req->stage = stage_num;
4825 req->key_select = stage->key_sel;
4826 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4827 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4828 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4829 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4830 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4831 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4833 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4835 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4840 static int hclge_init_fd_config(struct hclge_dev *hdev)
4842 #define LOW_2_WORDS 0x03
4843 struct hclge_fd_key_cfg *key_cfg;
4846 if (!hnae3_dev_fd_supported(hdev))
4849 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4853 switch (hdev->fd_cfg.fd_mode) {
4854 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4855 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4857 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4858 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4861 dev_err(&hdev->pdev->dev,
4862 "Unsupported flow director mode %u\n",
4863 hdev->fd_cfg.fd_mode);
4867 hdev->fd_cfg.proto_support =
4868 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4869 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4870 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4871 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
4872 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4873 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4874 key_cfg->outer_sipv6_word_en = 0;
4875 key_cfg->outer_dipv6_word_en = 0;
4877 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4878 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4879 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4880 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4882 /* If use max 400bit key, we can support tuples for ether type */
4883 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4884 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4885 key_cfg->tuple_active |=
4886 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4889 /* roce_type is used to filter roce frames
4890 * dst_vport is used to specify the rule
4892 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4894 ret = hclge_get_fd_allocation(hdev,
4895 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4896 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4897 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4898 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4902 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
/* hclge_fd_tcam_config() - write (or invalidate) one flow-director TCAM
 * entry at 'loc' using a chained 3-descriptor command; the key bytes are
 * split across the three descriptors' tcam_data areas.  sel_x picks the
 * X/Y half of the TCAM pair; entry_vld is set only for X-side adds.
 * NOTE(review): lossy extraction — the 'int ret;' declaration, the guard
 * around the memcpy block (likely 'if (key)') and the final 'return ret;'
 * are not visible here; verify against the complete file.
 */
4905 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4906 int loc, u8 *key, bool is_add)
4908 struct hclge_fd_tcam_config_1_cmd *req1;
4909 struct hclge_fd_tcam_config_2_cmd *req2;
4910 struct hclge_fd_tcam_config_3_cmd *req3;
4911 struct hclge_desc desc[3];
/* chain three descriptors into one firmware transaction */
4914 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4915 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4916 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4917 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4918 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4920 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4921 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4922 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4924 req1->stage = stage;
4925 req1->xy_sel = sel_x ? 1 : 0;
4926 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4927 req1->index = cpu_to_le32(loc);
4928 req1->entry_vld = sel_x ? is_add : 0;
/* scatter the key bytes over the three descriptors' data areas */
4931 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4932 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4933 sizeof(req2->tcam_data));
4934 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4935 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4938 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4940 dev_err(&hdev->pdev->dev,
4941 "config tcam key fail, ret=%d\n",
/* hclge_fd_ad_config() - program the action data (drop / direct-to-queue,
 * counter, next-stage) for the flow-director rule at 'loc'.
 * NOTE(review): lossy extraction — the 'u64 ad_data = 0;' and 'int ret;'
 * declarations and the final 'return ret;' are not visible here; verify
 * against the complete file.
 */
4947 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4948 struct hclge_fd_ad_data *action)
4950 struct hclge_fd_ad_config_cmd *req;
4951 struct hclge_desc desc;
4955 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4957 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4958 req->index = cpu_to_le32(loc);
4961 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4962 action->write_rule_id_to_bd);
4963 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4966 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4967 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4968 action->forward_to_direct_queue);
4969 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4971 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4972 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4973 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4974 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
/* NOTE(review): counter_id is also written into the NXT_KEY field below —
 * looks suspicious (a dedicated next-stage key value would be expected);
 * confirm against the hardware spec / later upstream fixes.
 */
4975 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4976 action->counter_id);
4978 req->ad_data = cpu_to_le64(ad_data);
4979 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4981 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4986 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4987 struct hclge_fd_rule *rule)
4989 u16 tmp_x_s, tmp_y_s;
4990 u32 tmp_x_l, tmp_y_l;
4993 if (rule->unused_tuple & tuple_bit)
4996 switch (tuple_bit) {
4999 case BIT(INNER_DST_MAC):
5000 for (i = 0; i < ETH_ALEN; i++) {
5001 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5002 rule->tuples_mask.dst_mac[i]);
5003 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5004 rule->tuples_mask.dst_mac[i]);
5008 case BIT(INNER_SRC_MAC):
5009 for (i = 0; i < ETH_ALEN; i++) {
5010 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5011 rule->tuples.src_mac[i]);
5012 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5013 rule->tuples.src_mac[i]);
5017 case BIT(INNER_VLAN_TAG_FST):
5018 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5019 rule->tuples_mask.vlan_tag1);
5020 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5021 rule->tuples_mask.vlan_tag1);
5022 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5023 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5026 case BIT(INNER_ETH_TYPE):
5027 calc_x(tmp_x_s, rule->tuples.ether_proto,
5028 rule->tuples_mask.ether_proto);
5029 calc_y(tmp_y_s, rule->tuples.ether_proto,
5030 rule->tuples_mask.ether_proto);
5031 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5032 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5035 case BIT(INNER_IP_TOS):
5036 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5037 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5040 case BIT(INNER_IP_PROTO):
5041 calc_x(*key_x, rule->tuples.ip_proto,
5042 rule->tuples_mask.ip_proto);
5043 calc_y(*key_y, rule->tuples.ip_proto,
5044 rule->tuples_mask.ip_proto);
5047 case BIT(INNER_SRC_IP):
5048 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5049 rule->tuples_mask.src_ip[IPV4_INDEX]);
5050 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5051 rule->tuples_mask.src_ip[IPV4_INDEX]);
5052 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5053 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5056 case BIT(INNER_DST_IP):
5057 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5058 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5059 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5060 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5061 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5062 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5065 case BIT(INNER_SRC_PORT):
5066 calc_x(tmp_x_s, rule->tuples.src_port,
5067 rule->tuples_mask.src_port);
5068 calc_y(tmp_y_s, rule->tuples.src_port,
5069 rule->tuples_mask.src_port);
5070 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5071 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5074 case BIT(INNER_DST_PORT):
5075 calc_x(tmp_x_s, rule->tuples.dst_port,
5076 rule->tuples_mask.dst_port);
5077 calc_y(tmp_y_s, rule->tuples.dst_port,
5078 rule->tuples_mask.dst_port);
5079 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5080 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5088 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5089 u8 vf_id, u8 network_port_id)
5091 u32 port_number = 0;
5093 if (port_type == HOST_PORT) {
5094 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5096 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5098 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5100 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5101 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5102 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5108 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5109 __le32 *key_x, __le32 *key_y,
5110 struct hclge_fd_rule *rule)
5112 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5113 u8 cur_pos = 0, tuple_size, shift_bits;
5116 for (i = 0; i < MAX_META_DATA; i++) {
5117 tuple_size = meta_data_key_info[i].key_length;
5118 tuple_bit = key_cfg->meta_data_active & BIT(i);
5120 switch (tuple_bit) {
5121 case BIT(ROCE_TYPE):
5122 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5123 cur_pos += tuple_size;
5125 case BIT(DST_VPORT):
5126 port_number = hclge_get_port_number(HOST_PORT, 0,
5128 hnae3_set_field(meta_data,
5129 GENMASK(cur_pos + tuple_size, cur_pos),
5130 cur_pos, port_number);
5131 cur_pos += tuple_size;
5138 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5139 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5140 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5142 *key_x = cpu_to_le32(tmp_x << shift_bits);
5143 *key_y = cpu_to_le32(tmp_y << shift_bits);
5146 /* A complete key is combined with meta data key and tuple key.
5147 * Meta data key is stored at the MSB region, and tuple key is stored at
5148 * the LSB region, unused bits will be filled 0.
5150 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5151 struct hclge_fd_rule *rule)
5153 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5154 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5155 u8 *cur_key_x, *cur_key_y;
5157 int ret, tuple_size;
5158 u8 meta_data_region;
5160 memset(key_x, 0, sizeof(key_x));
5161 memset(key_y, 0, sizeof(key_y));
5165 for (i = 0 ; i < MAX_TUPLE; i++) {
5169 tuple_size = tuple_key_info[i].key_length / 8;
5170 check_tuple = key_cfg->tuple_active & BIT(i);
5172 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5175 cur_key_x += tuple_size;
5176 cur_key_y += tuple_size;
5180 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5181 MAX_META_DATA_LENGTH / 8;
5183 hclge_fd_convert_meta_data(key_cfg,
5184 (__le32 *)(key_x + meta_data_region),
5185 (__le32 *)(key_y + meta_data_region),
5188 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5191 dev_err(&hdev->pdev->dev,
5192 "fd key_y config fail, loc=%u, ret=%d\n",
5193 rule->queue_id, ret);
5197 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5200 dev_err(&hdev->pdev->dev,
5201 "fd key_x config fail, loc=%u, ret=%d\n",
5202 rule->queue_id, ret);
5206 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5207 struct hclge_fd_rule *rule)
5209 struct hclge_fd_ad_data ad_data;
5211 ad_data.ad_id = rule->location;
5213 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5214 ad_data.drop_packet = true;
5215 ad_data.forward_to_direct_queue = false;
5216 ad_data.queue_id = 0;
5218 ad_data.drop_packet = false;
5219 ad_data.forward_to_direct_queue = true;
5220 ad_data.queue_id = rule->queue_id;
5223 ad_data.use_counter = false;
5224 ad_data.counter_id = 0;
5226 ad_data.use_next_stage = false;
5227 ad_data.next_input_key = 0;
5229 ad_data.write_rule_id_to_bd = true;
5230 ad_data.rule_id = rule->location;
5232 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
/* Validate an ethtool flow spec against the flow director capabilities and
 * compute the bitmap of tuple fields the rule does NOT use (via @unused):
 * any field the user left zero is marked unused so the key converter skips
 * its mask. Error-return statements are interleaved with the checks below.
 */
5235 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5236 struct ethtool_rx_flow_spec *fs, u32 *unused)
5238 struct ethtool_tcpip4_spec *tcp_ip4_spec;
5239 struct ethtool_usrip4_spec *usr_ip4_spec;
5240 struct ethtool_tcpip6_spec *tcp_ip6_spec;
5241 struct ethtool_usrip6_spec *usr_ip6_spec;
5242 struct ethhdr *ether_spec;
/* location must fit in stage-1 rule space; flow type must be supported */
5244 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5247 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5250 if ((fs->flow_type & FLOW_EXT) &&
5251 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5252 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
/* per flow type: mark every zero-valued field as an unused tuple */
5256 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5260 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5261 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5263 if (!tcp_ip4_spec->ip4src)
5264 *unused |= BIT(INNER_SRC_IP);
5266 if (!tcp_ip4_spec->ip4dst)
5267 *unused |= BIT(INNER_DST_IP);
5269 if (!tcp_ip4_spec->psrc)
5270 *unused |= BIT(INNER_SRC_PORT);
5272 if (!tcp_ip4_spec->pdst)
5273 *unused |= BIT(INNER_DST_PORT);
5275 if (!tcp_ip4_spec->tos)
5276 *unused |= BIT(INNER_IP_TOS);
5280 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5281 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5282 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5284 if (!usr_ip4_spec->ip4src)
5285 *unused |= BIT(INNER_SRC_IP);
5287 if (!usr_ip4_spec->ip4dst)
5288 *unused |= BIT(INNER_DST_IP);
5290 if (!usr_ip4_spec->tos)
5291 *unused |= BIT(INNER_IP_TOS);
5293 if (!usr_ip4_spec->proto)
5294 *unused |= BIT(INNER_IP_PROTO);
/* l4 bytes and non-IPv4 ip_ver are rejected */
5296 if (usr_ip4_spec->l4_4_bytes)
5299 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5306 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5307 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5310 /* check whether src/dst ip address used */
5311 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5312 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5313 *unused |= BIT(INNER_SRC_IP);
5315 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5316 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5317 *unused |= BIT(INNER_DST_IP);
5319 if (!tcp_ip6_spec->psrc)
5320 *unused |= BIT(INNER_SRC_PORT);
5322 if (!tcp_ip6_spec->pdst)
5323 *unused |= BIT(INNER_DST_PORT);
/* traffic class matching is not supported for IPv6 */
5325 if (tcp_ip6_spec->tclass)
5329 case IPV6_USER_FLOW:
5330 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5331 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5332 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5333 BIT(INNER_DST_PORT);
5335 /* check whether src/dst ip address used */
5336 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5337 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5338 *unused |= BIT(INNER_SRC_IP);
5340 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5341 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5342 *unused |= BIT(INNER_DST_IP);
5344 if (!usr_ip6_spec->l4_proto)
5345 *unused |= BIT(INNER_IP_PROTO);
5347 if (usr_ip6_spec->tclass)
5350 if (usr_ip6_spec->l4_4_bytes)
5355 ether_spec = &fs->h_u.ether_spec;
5356 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5357 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5358 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5360 if (is_zero_ether_addr(ether_spec->h_source))
5361 *unused |= BIT(INNER_SRC_MAC);
5363 if (is_zero_ether_addr(ether_spec->h_dest))
5364 *unused |= BIT(INNER_DST_MAC);
5366 if (!ether_spec->h_proto)
5367 *unused |= BIT(INNER_ETH_TYPE);
/* FLOW_EXT: only the first VLAN TCI may be matched; a nonzero
 * vlan_etype is rejected and a VID >= VLAN_N_VID is invalid
 */
5374 if ((fs->flow_type & FLOW_EXT)) {
5375 if (fs->h_ext.vlan_etype)
5377 if (!fs->h_ext.vlan_tci)
5378 *unused |= BIT(INNER_VLAN_TAG_FST)
5380 if (fs->m_ext.vlan_tci) {
5381 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5385 *unused |= BIT(INNER_VLAN_TAG_FST);
/* FLOW_MAC_EXT requires ether-flow support in hardware */
5388 if (fs->flow_type & FLOW_MAC_EXT) {
5389 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5392 if (is_zero_ether_addr(fs->h_ext.h_dest))
5393 *unused |= BIT(INNER_DST_MAC);
5395 *unused &= ~(BIT(INNER_DST_MAC));
5401 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5403 struct hclge_fd_rule *rule = NULL;
5404 struct hlist_node *node2;
5406 spin_lock_bh(&hdev->fd_rule_lock);
5407 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5408 if (rule->location >= location)
5412 spin_unlock_bh(&hdev->fd_rule_lock);
5414 return rule && rule->location == location;
5417 /* make sure being called after lock up with fd_rule_lock */
/* Insert (@is_add) or remove a rule at @location in the sorted rule list,
 * keeping hclge_fd_rule_num, fd_bmap and fd_active_type consistent.
 * The list is ordered by location; @parent tracks the insertion point.
 */
5418 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5419 struct hclge_fd_rule *new_rule,
5423 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5424 struct hlist_node *node2;
5426 if (is_add && !new_rule)
/* walk to the first entry at or past @location */
5429 hlist_for_each_entry_safe(rule, node2,
5430 &hdev->fd_rule_list, rule_node) {
5431 if (rule->location >= location)
/* an existing rule at the same location is always unlinked first */
5436 if (rule && rule->location == location) {
5437 hlist_del(&rule->rule_node);
5439 hdev->hclge_fd_rule_num--;
5442 if (!hdev->hclge_fd_rule_num)
5443 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5444 clear_bit(location, hdev->fd_bmap);
5448 } else if (!is_add) {
5449 dev_err(&hdev->pdev->dev,
5450 "delete fail, rule %u is inexistent\n",
/* insert behind @parent to preserve location ordering */
5455 INIT_HLIST_NODE(&new_rule->rule_node);
5458 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5460 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5462 set_bit(location, hdev->fd_bmap);
5463 hdev->hclge_fd_rule_num++;
5464 hdev->fd_active_type = new_rule->rule_type;
/* Copy the match values and masks from an ethtool flow spec @fs into the
 * driver's tuple representation in @rule. Multi-byte fields are converted
 * from big-endian wire order to host order. The second switch fills in
 * the implied L4 protocol for SCTP/TCP/UDP flow types.
 */
5469 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5470 struct ethtool_rx_flow_spec *fs,
5471 struct hclge_fd_rule *rule)
5473 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5475 switch (flow_type) {
5479 rule->tuples.src_ip[IPV4_INDEX] =
5480 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5481 rule->tuples_mask.src_ip[IPV4_INDEX] =
5482 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5484 rule->tuples.dst_ip[IPV4_INDEX] =
5485 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5486 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5487 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5489 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5490 rule->tuples_mask.src_port =
5491 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5493 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5494 rule->tuples_mask.dst_port =
5495 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5497 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5498 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
/* L3 ethertype is implied by the flow type, matched exactly */
5500 rule->tuples.ether_proto = ETH_P_IP;
5501 rule->tuples_mask.ether_proto = 0xFFFF;
5505 rule->tuples.src_ip[IPV4_INDEX] =
5506 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5507 rule->tuples_mask.src_ip[IPV4_INDEX] =
5508 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5510 rule->tuples.dst_ip[IPV4_INDEX] =
5511 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5512 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5513 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5515 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5516 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5518 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5519 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5521 rule->tuples.ether_proto = ETH_P_IP;
5522 rule->tuples_mask.ether_proto = 0xFFFF;
5528 be32_to_cpu_array(rule->tuples.src_ip,
5529 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5530 be32_to_cpu_array(rule->tuples_mask.src_ip,
5531 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5533 be32_to_cpu_array(rule->tuples.dst_ip,
5534 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5535 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5536 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5538 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5539 rule->tuples_mask.src_port =
5540 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5542 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5543 rule->tuples_mask.dst_port =
5544 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5546 rule->tuples.ether_proto = ETH_P_IPV6;
5547 rule->tuples_mask.ether_proto = 0xFFFF;
5550 case IPV6_USER_FLOW:
5551 be32_to_cpu_array(rule->tuples.src_ip,
5552 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5553 be32_to_cpu_array(rule->tuples_mask.src_ip,
5554 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5556 be32_to_cpu_array(rule->tuples.dst_ip,
5557 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5558 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5559 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5561 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5562 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5564 rule->tuples.ether_proto = ETH_P_IPV6;
5565 rule->tuples_mask.ether_proto = 0xFFFF;
5569 ether_addr_copy(rule->tuples.src_mac,
5570 fs->h_u.ether_spec.h_source);
5571 ether_addr_copy(rule->tuples_mask.src_mac,
5572 fs->m_u.ether_spec.h_source);
5574 ether_addr_copy(rule->tuples.dst_mac,
5575 fs->h_u.ether_spec.h_dest);
5576 ether_addr_copy(rule->tuples_mask.dst_mac,
5577 fs->m_u.ether_spec.h_dest);
5579 rule->tuples.ether_proto =
5580 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5581 rule->tuples_mask.ether_proto =
5582 be16_to_cpu(fs->m_u.ether_spec.h_proto);
/* flow type fixes the L4 protocol for SCTP/TCP/UDP variants */
5589 switch (flow_type) {
5592 rule->tuples.ip_proto = IPPROTO_SCTP;
5593 rule->tuples_mask.ip_proto = 0xFF;
5597 rule->tuples.ip_proto = IPPROTO_TCP;
5598 rule->tuples_mask.ip_proto = 0xFF;
5602 rule->tuples.ip_proto = IPPROTO_UDP;
5603 rule->tuples_mask.ip_proto = 0xFF;
/* extended fields: VLAN TCI and (optionally) an override dst MAC */
5609 if ((fs->flow_type & FLOW_EXT)) {
5610 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5611 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5614 if (fs->flow_type & FLOW_MAC_EXT) {
5615 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5616 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5622 /* make sure being called after lock up with fd_rule_lock */
/* Program @rule into hardware: add to the software list first, then write
 * the action data and TCAM key; on failure the list entry is rolled back.
 */
5623 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5624 struct hclge_fd_rule *rule)
5629 dev_err(&hdev->pdev->dev,
5630 "The flow director rule is NULL\n");
5634 /* it will never fail here, so needn't to check return value */
5635 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5637 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5641 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
/* error path: undo the list insertion done above */
5648 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
/* ethtool -N entry point: validate the flow spec, resolve the destination
 * vport/queue (including VF-directed rules encoded in ring_cookie), build
 * a hclge_fd_rule and program it under fd_rule_lock.
 */
5652 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5653 struct ethtool_rxnfc *cmd)
5655 struct hclge_vport *vport = hclge_get_vport(handle);
5656 struct hclge_dev *hdev = vport->back;
5657 u16 dst_vport_id = 0, q_index = 0;
5658 struct ethtool_rx_flow_spec *fs;
5659 struct hclge_fd_rule *rule;
5664 if (!hnae3_dev_fd_supported(hdev))
5668 dev_warn(&hdev->pdev->dev,
5669 "Please enable flow director first\n");
5673 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5675 ret = hclge_fd_check_spec(hdev, fs, &unused);
5677 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
/* ring_cookie encodes either "drop" or a (vf, ring) destination */
5681 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5682 action = HCLGE_FD_ACTION_DROP_PACKET;
5684 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5685 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5688 if (vf > hdev->num_req_vfs) {
5689 dev_err(&hdev->pdev->dev,
5690 "Error: vf id (%u) > max vf num (%u)\n",
5691 vf, hdev->num_req_vfs);
/* vf == 0 targets the PF itself; otherwise the VF's vport */
5695 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5696 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5699 dev_err(&hdev->pdev->dev,
5700 "Error: queue id (%u) > max tqp num (%u)\n",
5705 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5709 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5713 ret = hclge_fd_get_tuple(hdev, fs, rule);
5719 rule->flow_type = fs->flow_type;
5721 rule->location = fs->location;
5722 rule->unused_tuple = unused;
5723 rule->vf_id = dst_vport_id;
5724 rule->queue_id = q_index;
5725 rule->action = action;
5726 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5728 /* to avoid rule conflict, when user configure rule by ethtool,
5729 * we need to clear all arfs rules
5731 hclge_clear_arfs_rules(handle);
5733 spin_lock_bh(&hdev->fd_rule_lock);
5734 ret = hclge_fd_config_rule(hdev, rule);
5736 spin_unlock_bh(&hdev->fd_rule_lock);
/* ethtool -N delete entry point: clear the TCAM entry at fs->location and
 * drop the matching rule from the software list under fd_rule_lock.
 */
5741 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5742 struct ethtool_rxnfc *cmd)
5744 struct hclge_vport *vport = hclge_get_vport(handle);
5745 struct hclge_dev *hdev = vport->back;
5746 struct ethtool_rx_flow_spec *fs;
5749 if (!hnae3_dev_fd_supported(hdev))
5752 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5754 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5757 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5758 dev_err(&hdev->pdev->dev,
5759 "Delete fail, rule %u is inexistent\n", fs->location);
/* invalidate the hardware TCAM entry before touching the list */
5763 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5768 spin_lock_bh(&hdev->fd_rule_lock);
5769 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5771 spin_unlock_bh(&hdev->fd_rule_lock);
/* Remove every flow director rule. Hardware entries are invalidated for
 * each bit set in fd_bmap; when @clear_list (name elided here) is set the
 * software list, counters and bitmap are reset as well.
 */
5776 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5779 struct hclge_vport *vport = hclge_get_vport(handle);
5780 struct hclge_dev *hdev = vport->back;
5781 struct hclge_fd_rule *rule;
5782 struct hlist_node *node;
5785 if (!hnae3_dev_fd_supported(hdev))
5788 spin_lock_bh(&hdev->fd_rule_lock);
5789 for_each_set_bit(location, hdev->fd_bmap,
5790 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5791 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5795 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5797 hlist_del(&rule->rule_node);
5800 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5801 hdev->hclge_fd_rule_num = 0;
5802 bitmap_zero(hdev->fd_bmap,
5803 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5806 spin_unlock_bh(&hdev->fd_rule_lock);
/* Re-program all software-known FD rules into hardware after a reset.
 * Rules that fail to restore are dropped from the list with a warning
 * rather than failing the whole reset.
 */
5809 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5811 struct hclge_vport *vport = hclge_get_vport(handle);
5812 struct hclge_dev *hdev = vport->back;
5813 struct hclge_fd_rule *rule;
5814 struct hlist_node *node;
5817 /* Return ok here, because reset error handling will check this
5818 * return value. If error is returned here, the reset process will
5821 if (!hnae3_dev_fd_supported(hdev))
5824 /* if fd is disabled, should not restore it when reset */
5828 spin_lock_bh(&hdev->fd_rule_lock);
5829 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5830 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5832 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
/* best-effort: evict any rule hardware refused to take back */
5835 dev_warn(&hdev->pdev->dev,
5836 "Restore rule %u failed, remove it\n",
5838 clear_bit(rule->location, hdev->fd_bmap);
5839 hlist_del(&rule->rule_node);
5841 hdev->hclge_fd_rule_num--;
5845 if (hdev->hclge_fd_rule_num)
5846 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5848 spin_unlock_bh(&hdev->fd_rule_lock);
5853 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5854 struct ethtool_rxnfc *cmd)
5856 struct hclge_vport *vport = hclge_get_vport(handle);
5857 struct hclge_dev *hdev = vport->back;
5859 if (!hnae3_dev_fd_supported(hdev))
5862 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5863 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
/* ethtool ETHTOOL_GRXCLSRULE: find the rule at fs->location and translate
 * the stored tuples/masks back into the ethtool flow-spec layout. Fields
 * flagged in rule->unused_tuple report a zero mask.
 */
5868 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5869 struct ethtool_rxnfc *cmd)
5871 struct hclge_vport *vport = hclge_get_vport(handle);
5872 struct hclge_fd_rule *rule = NULL;
5873 struct hclge_dev *hdev = vport->back;
5874 struct ethtool_rx_flow_spec *fs;
5875 struct hlist_node *node2;
5877 if (!hnae3_dev_fd_supported(hdev))
5880 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5882 spin_lock_bh(&hdev->fd_rule_lock);
/* list is sorted by location: stop at first candidate */
5884 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5885 if (rule->location >= fs->location)
5889 if (!rule || fs->location != rule->location) {
5890 spin_unlock_bh(&hdev->fd_rule_lock);
5895 fs->flow_type = rule->flow_type;
5896 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5900 fs->h_u.tcp_ip4_spec.ip4src =
5901 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5902 fs->m_u.tcp_ip4_spec.ip4src =
5903 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5904 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5906 fs->h_u.tcp_ip4_spec.ip4dst =
5907 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5908 fs->m_u.tcp_ip4_spec.ip4dst =
5909 rule->unused_tuple & BIT(INNER_DST_IP) ?
5910 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5912 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5913 fs->m_u.tcp_ip4_spec.psrc =
5914 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5915 0 : cpu_to_be16(rule->tuples_mask.src_port);
5917 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5918 fs->m_u.tcp_ip4_spec.pdst =
5919 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5920 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5922 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5923 fs->m_u.tcp_ip4_spec.tos =
5924 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5925 0 : rule->tuples_mask.ip_tos;
5929 fs->h_u.usr_ip4_spec.ip4src =
5930 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
/* NOTE(review): tcp_ip4_spec mask written in the usr_ip4 branch;
 * harmless only because both union members alias ip4src at the
 * same offset — should read m_u.usr_ip4_spec.ip4src; confirm
 */
5931 fs->m_u.tcp_ip4_spec.ip4src =
5932 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5933 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5935 fs->h_u.usr_ip4_spec.ip4dst =
5936 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5937 fs->m_u.usr_ip4_spec.ip4dst =
5938 rule->unused_tuple & BIT(INNER_DST_IP) ?
5939 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5941 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5942 fs->m_u.usr_ip4_spec.tos =
5943 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5944 0 : rule->tuples_mask.ip_tos;
5946 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5947 fs->m_u.usr_ip4_spec.proto =
5948 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5949 0 : rule->tuples_mask.ip_proto;
5951 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5957 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5958 rule->tuples.src_ip, IPV6_SIZE);
5959 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5960 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5961 sizeof(int) * IPV6_SIZE);
5963 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5964 rule->tuples_mask.src_ip, IPV6_SIZE);
5966 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5967 rule->tuples.dst_ip, IPV6_SIZE);
5968 if (rule->unused_tuple & BIT(INNER_DST_IP))
5969 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5970 sizeof(int) * IPV6_SIZE);
5972 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5973 rule->tuples_mask.dst_ip, IPV6_SIZE);
5975 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5976 fs->m_u.tcp_ip6_spec.psrc =
5977 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5978 0 : cpu_to_be16(rule->tuples_mask.src_port);
5980 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5981 fs->m_u.tcp_ip6_spec.pdst =
5982 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5983 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5986 case IPV6_USER_FLOW:
5987 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5988 rule->tuples.src_ip, IPV6_SIZE);
5989 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5990 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5991 sizeof(int) * IPV6_SIZE);
5993 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5994 rule->tuples_mask.src_ip, IPV6_SIZE);
5996 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5997 rule->tuples.dst_ip, IPV6_SIZE);
5998 if (rule->unused_tuple & BIT(INNER_DST_IP))
5999 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6000 sizeof(int) * IPV6_SIZE);
6002 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6003 rule->tuples_mask.dst_ip, IPV6_SIZE);
6005 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6006 fs->m_u.usr_ip6_spec.l4_proto =
6007 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6008 0 : rule->tuples_mask.ip_proto;
6012 ether_addr_copy(fs->h_u.ether_spec.h_source,
6013 rule->tuples.src_mac);
6014 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6015 eth_zero_addr(fs->m_u.ether_spec.h_source);
6017 ether_addr_copy(fs->m_u.ether_spec.h_source,
6018 rule->tuples_mask.src_mac);
6020 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6021 rule->tuples.dst_mac);
6022 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6023 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6025 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6026 rule->tuples_mask.dst_mac);
6028 fs->h_u.ether_spec.h_proto =
6029 cpu_to_be16(rule->tuples.ether_proto);
6030 fs->m_u.ether_spec.h_proto =
6031 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6032 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6036 spin_unlock_bh(&hdev->fd_rule_lock);
6040 if (fs->flow_type & FLOW_EXT) {
6041 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6042 fs->m_ext.vlan_tci =
6043 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6044 cpu_to_be16(VLAN_VID_MASK) :
6045 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6048 if (fs->flow_type & FLOW_MAC_EXT) {
6049 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
/* NOTE(review): FLOW_MAC_EXT value goes to h_ext but the mask is
 * written to m_u.ether_spec — m_ext.h_dest looks intended; confirm
 */
6050 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6051 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6053 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6054 rule->tuples_mask.dst_mac);
/* ring_cookie: RX_CLS_FLOW_DISC for drop, else queue id + vf id */
6057 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6058 fs->ring_cookie = RX_CLS_FLOW_DISC;
6062 fs->ring_cookie = rule->queue_id;
6063 vf_id = rule->vf_id;
6064 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6065 fs->ring_cookie |= vf_id;
6068 spin_unlock_bh(&hdev->fd_rule_lock);
6073 static int hclge_get_all_rules(struct hnae3_handle *handle,
6074 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6076 struct hclge_vport *vport = hclge_get_vport(handle);
6077 struct hclge_dev *hdev = vport->back;
6078 struct hclge_fd_rule *rule;
6079 struct hlist_node *node2;
6082 if (!hnae3_dev_fd_supported(hdev))
6085 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6087 spin_lock_bh(&hdev->fd_rule_lock);
6088 hlist_for_each_entry_safe(rule, node2,
6089 &hdev->fd_rule_list, rule_node) {
6090 if (cnt == cmd->rule_cnt) {
6091 spin_unlock_bh(&hdev->fd_rule_lock);
6095 rule_locs[cnt] = rule->location;
6099 spin_unlock_bh(&hdev->fd_rule_lock);
6101 cmd->rule_cnt = cnt;
6106 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6107 struct hclge_fd_rule_tuples *tuples)
6109 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6110 tuples->ip_proto = fkeys->basic.ip_proto;
6111 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6113 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6114 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6115 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6117 memcpy(tuples->src_ip,
6118 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6119 sizeof(tuples->src_ip));
6120 memcpy(tuples->dst_ip,
6121 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6122 sizeof(tuples->dst_ip));
6126 /* traverse all rules, check whether an existed rule has the same tuples */
6127 static struct hclge_fd_rule *
6128 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6129 const struct hclge_fd_rule_tuples *tuples)
6131 struct hclge_fd_rule *rule = NULL;
6132 struct hlist_node *node;
6134 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6135 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6142 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6143 struct hclge_fd_rule *rule)
6145 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6146 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6147 BIT(INNER_SRC_PORT);
6150 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6151 if (tuples->ether_proto == ETH_P_IP) {
6152 if (tuples->ip_proto == IPPROTO_TCP)
6153 rule->flow_type = TCP_V4_FLOW;
6155 rule->flow_type = UDP_V4_FLOW;
6157 if (tuples->ip_proto == IPPROTO_TCP)
6158 rule->flow_type = TCP_V6_FLOW;
6160 rule->flow_type = UDP_V6_FLOW;
6162 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6163 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
/* aRFS callback: steer the flow described by @fkeys to @queue_id. Creates
 * a new FD rule, or updates the queue of an existing one for the same
 * tuples. Disabled while ethtool-configured (EP) rules are active.
 * Returns the rule location (used as aRFS filter id) or a negative errno.
 */
6166 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6167 u16 flow_id, struct flow_keys *fkeys)
6169 struct hclge_vport *vport = hclge_get_vport(handle);
6170 struct hclge_fd_rule_tuples new_tuples;
6171 struct hclge_dev *hdev = vport->back;
6172 struct hclge_fd_rule *rule;
6177 if (!hnae3_dev_fd_supported(hdev))
6180 memset(&new_tuples, 0, sizeof(new_tuples));
6181 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6183 spin_lock_bh(&hdev->fd_rule_lock);
6185 /* when there is already fd rule existed add by user,
6186 * arfs should not work
6188 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6189 spin_unlock_bh(&hdev->fd_rule_lock);
6194 /* check is there flow director filter existed for this flow,
6195 * if not, create a new filter for it;
6196 * if filter exist with different queue id, modify the filter;
6197 * if filter exist with same queue id, do nothing
6199 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6201 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6202 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6203 spin_unlock_bh(&hdev->fd_rule_lock);
/* GFP_ATOMIC: allocated with the rule spinlock held */
6208 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6210 spin_unlock_bh(&hdev->fd_rule_lock);
6215 set_bit(bit_id, hdev->fd_bmap);
6216 rule->location = bit_id;
6217 rule->flow_id = flow_id;
6218 rule->queue_id = queue_id;
6219 hclge_fd_build_arfs_rule(&new_tuples, rule);
6220 ret = hclge_fd_config_rule(hdev, rule);
6222 spin_unlock_bh(&hdev->fd_rule_lock);
6227 return rule->location;
6230 spin_unlock_bh(&hdev->fd_rule_lock);
6232 if (rule->queue_id == queue_id)
6233 return rule->location;
/* same tuples, new queue: rewrite only the action data */
6235 tmp_queue_id = rule->queue_id;
6236 rule->queue_id = queue_id;
6237 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6239 rule->queue_id = tmp_queue_id;
6243 return rule->location;
/* Periodic aRFS expiry: move rules the RFS core says have expired onto a
 * private list under the lock, then invalidate their TCAM entries after
 * dropping the lock (TCAM writes sleep on the command queue).
 */
6246 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6248 #ifdef CONFIG_RFS_ACCEL
6249 struct hnae3_handle *handle = &hdev->vport[0].nic;
6250 struct hclge_fd_rule *rule;
6251 struct hlist_node *node;
6252 HLIST_HEAD(del_list);
6254 spin_lock_bh(&hdev->fd_rule_lock);
6255 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6256 spin_unlock_bh(&hdev->fd_rule_lock);
6259 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6260 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6261 rule->flow_id, rule->location)) {
6262 hlist_del_init(&rule->rule_node);
6263 hlist_add_head(&rule->rule_node, &del_list);
6264 hdev->hclge_fd_rule_num--;
6265 clear_bit(rule->location, hdev->fd_bmap);
6268 spin_unlock_bh(&hdev->fd_rule_lock);
/* hardware teardown happens outside the spinlock */
6270 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6271 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6272 rule->location, NULL, false);
/* Drop every aRFS-installed flow director rule; no-op when aRFS is not
 * the active flow director mode (or CONFIG_RFS_ACCEL is off).
 */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}
6289 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6291 struct hclge_vport *vport = hclge_get_vport(handle);
6292 struct hclge_dev *hdev = vport->back;
6294 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6295 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6298 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6300 struct hclge_vport *vport = hclge_get_vport(handle);
6301 struct hclge_dev *hdev = vport->back;
6303 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6306 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6308 struct hclge_vport *vport = hclge_get_vport(handle);
6309 struct hclge_dev *hdev = vport->back;
6311 return hdev->rst_stats.hw_reset_done_cnt;
6314 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6316 struct hclge_vport *vport = hclge_get_vport(handle);
6317 struct hclge_dev *hdev = vport->back;
6320 hdev->fd_en = enable;
6321 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6323 hclge_del_all_fd_entries(handle, clear);
6325 hclge_restore_fd_entries(handle);
6328 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6330 struct hclge_desc desc;
6331 struct hclge_config_mac_mode_cmd *req =
6332 (struct hclge_config_mac_mode_cmd *)desc.data;
6336 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6339 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6340 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6341 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6342 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6343 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6344 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6345 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6346 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6347 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6348 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6351 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6353 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6355 dev_err(&hdev->pdev->dev,
6356 "mac enable fail, ret =%d.\n", ret);
6359 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6360 u8 switch_param, u8 param_mask)
6362 struct hclge_mac_vlan_switch_cmd *req;
6363 struct hclge_desc desc;
6367 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6368 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6370 /* read current config parameter */
6371 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6373 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6374 req->func_id = cpu_to_le32(func_id);
6376 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6378 dev_err(&hdev->pdev->dev,
6379 "read mac vlan switch parameter fail, ret = %d\n", ret);
6383 /* modify and write new config parameter */
6384 hclge_cmd_reuse_desc(&desc, false);
6385 req->switch_param = (req->switch_param & param_mask) | switch_param;
6386 req->param_mask = param_mask;
6388 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6390 dev_err(&hdev->pdev->dev,
6391 "set mac vlan switch parameter fail, ret = %d\n", ret);
6395 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6398 #define HCLGE_PHY_LINK_STATUS_NUM 200
6400 struct phy_device *phydev = hdev->hw.mac.phydev;
6405 ret = phy_read_status(phydev);
6407 dev_err(&hdev->pdev->dev,
6408 "phy update link status fail, ret = %d\n", ret);
6412 if (phydev->link == link_ret)
6415 msleep(HCLGE_LINK_STATUS_MS);
6416 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6419 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6421 #define HCLGE_MAC_LINK_STATUS_NUM 100
6427 ret = hclge_get_mac_link_status(hdev);
6430 else if (ret == link_ret)
6433 msleep(HCLGE_LINK_STATUS_MS);
6434 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6438 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6441 #define HCLGE_LINK_STATUS_DOWN 0
6442 #define HCLGE_LINK_STATUS_UP 1
6446 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6449 hclge_phy_link_status_wait(hdev, link_ret);
6451 return hclge_mac_link_status_wait(hdev, link_ret);
6454 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6456 struct hclge_config_mac_mode_cmd *req;
6457 struct hclge_desc desc;
6461 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6462 /* 1 Read out the MAC mode config at first */
6463 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6464 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6466 dev_err(&hdev->pdev->dev,
6467 "mac loopback get fail, ret =%d.\n", ret);
6471 /* 2 Then setup the loopback flag */
6472 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6473 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6474 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6475 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6477 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6479 /* 3 Config mac work mode with loopback flag
6480 * and its original configure parameters
6482 hclge_cmd_reuse_desc(&desc, false);
6483 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6485 dev_err(&hdev->pdev->dev,
6486 "mac loopback set fail, ret =%d.\n", ret);
6490 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6491 enum hnae3_loop loop_mode)
6493 #define HCLGE_SERDES_RETRY_MS 10
6494 #define HCLGE_SERDES_RETRY_NUM 100
6496 struct hclge_serdes_lb_cmd *req;
6497 struct hclge_desc desc;
6501 req = (struct hclge_serdes_lb_cmd *)desc.data;
6502 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6504 switch (loop_mode) {
6505 case HNAE3_LOOP_SERIAL_SERDES:
6506 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6508 case HNAE3_LOOP_PARALLEL_SERDES:
6509 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6512 dev_err(&hdev->pdev->dev,
6513 "unsupported serdes loopback mode %d\n", loop_mode);
6518 req->enable = loop_mode_b;
6519 req->mask = loop_mode_b;
6521 req->mask = loop_mode_b;
6524 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6526 dev_err(&hdev->pdev->dev,
6527 "serdes loopback set fail, ret = %d\n", ret);
6532 msleep(HCLGE_SERDES_RETRY_MS);
6533 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6535 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6537 dev_err(&hdev->pdev->dev,
6538 "serdes loopback get, ret = %d\n", ret);
6541 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6542 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6544 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6545 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6547 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6548 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6554 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6555 enum hnae3_loop loop_mode)
6559 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6563 hclge_cfg_mac_mode(hdev, en);
6565 ret = hclge_mac_phy_link_status_wait(hdev, en, FALSE);
6567 dev_err(&hdev->pdev->dev,
6568 "serdes loopback config mac mode timeout\n");
6573 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6574 struct phy_device *phydev)
6578 if (!phydev->suspended) {
6579 ret = phy_suspend(phydev);
6584 ret = phy_resume(phydev);
6588 return phy_loopback(phydev, true);
6591 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6592 struct phy_device *phydev)
6596 ret = phy_loopback(phydev, false);
6600 return phy_suspend(phydev);
6603 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6605 struct phy_device *phydev = hdev->hw.mac.phydev;
6612 ret = hclge_enable_phy_loopback(hdev, phydev);
6614 ret = hclge_disable_phy_loopback(hdev, phydev);
6616 dev_err(&hdev->pdev->dev,
6617 "set phy loopback fail, ret = %d\n", ret);
6621 hclge_cfg_mac_mode(hdev, en);
6623 ret = hclge_mac_phy_link_status_wait(hdev, en, TRUE);
6625 dev_err(&hdev->pdev->dev,
6626 "phy loopback config mac mode timeout\n");
6631 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6632 int stream_id, bool enable)
6634 struct hclge_desc desc;
6635 struct hclge_cfg_com_tqp_queue_cmd *req =
6636 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6639 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6640 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6641 req->stream_id = cpu_to_le16(stream_id);
6643 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6645 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6647 dev_err(&hdev->pdev->dev,
6648 "Tqp enable fail, status =%d.\n", ret);
6652 static int hclge_set_loopback(struct hnae3_handle *handle,
6653 enum hnae3_loop loop_mode, bool en)
6655 struct hclge_vport *vport = hclge_get_vport(handle);
6656 struct hnae3_knic_private_info *kinfo;
6657 struct hclge_dev *hdev = vport->back;
6660 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6661 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6662 * the same, the packets are looped back in the SSU. If SSU loopback
6663 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6665 if (hdev->pdev->revision >= 0x21) {
6666 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6668 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6669 HCLGE_SWITCH_ALW_LPBK_MASK);
6674 switch (loop_mode) {
6675 case HNAE3_LOOP_APP:
6676 ret = hclge_set_app_loopback(hdev, en);
6678 case HNAE3_LOOP_SERIAL_SERDES:
6679 case HNAE3_LOOP_PARALLEL_SERDES:
6680 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6682 case HNAE3_LOOP_PHY:
6683 ret = hclge_set_phy_loopback(hdev, en);
6687 dev_err(&hdev->pdev->dev,
6688 "loop_mode %d is not supported\n", loop_mode);
6695 kinfo = &vport->nic.kinfo;
6696 for (i = 0; i < kinfo->num_tqps; i++) {
6697 ret = hclge_tqp_enable(hdev, i, 0, en);
6705 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6709 ret = hclge_set_app_loopback(hdev, false);
6713 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6717 return hclge_cfg_serdes_loopback(hdev, false,
6718 HNAE3_LOOP_PARALLEL_SERDES);
6721 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6723 struct hclge_vport *vport = hclge_get_vport(handle);
6724 struct hnae3_knic_private_info *kinfo;
6725 struct hnae3_queue *queue;
6726 struct hclge_tqp *tqp;
6729 kinfo = &vport->nic.kinfo;
6730 for (i = 0; i < kinfo->num_tqps; i++) {
6731 queue = handle->kinfo.tqp[i];
6732 tqp = container_of(queue, struct hclge_tqp, q);
6733 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6737 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6739 struct hclge_vport *vport = hclge_get_vport(handle);
6740 struct hclge_dev *hdev = vport->back;
6743 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6745 /* Set the DOWN flag here to disable the service to be
6748 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6749 cancel_delayed_work_sync(&hdev->service_task);
6750 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6754 static int hclge_ae_start(struct hnae3_handle *handle)
6756 struct hclge_vport *vport = hclge_get_vport(handle);
6757 struct hclge_dev *hdev = vport->back;
6760 hclge_cfg_mac_mode(hdev, true);
6761 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6762 hdev->hw.mac.link = 0;
6764 /* reset tqp stats */
6765 hclge_reset_tqp_stats(handle);
6767 hclge_mac_start_phy(hdev);
6772 static void hclge_ae_stop(struct hnae3_handle *handle)
6774 struct hclge_vport *vport = hclge_get_vport(handle);
6775 struct hclge_dev *hdev = vport->back;
6778 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6780 hclge_clear_arfs_rules(handle);
6782 /* If it is not PF reset, the firmware will disable the MAC,
6783 * so it only need to stop phy here.
6785 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6786 hdev->reset_type != HNAE3_FUNC_RESET) {
6787 hclge_mac_stop_phy(hdev);
6788 hclge_update_link_status(hdev);
6792 for (i = 0; i < handle->kinfo.num_tqps; i++)
6793 hclge_reset_tqp(handle, i);
6795 hclge_config_mac_tnl_int(hdev, false);
6798 hclge_cfg_mac_mode(hdev, false);
6800 hclge_mac_stop_phy(hdev);
6802 /* reset tqp stats */
6803 hclge_reset_tqp_stats(handle);
6804 hclge_update_link_status(hdev);
6807 int hclge_vport_start(struct hclge_vport *vport)
6809 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6810 vport->last_active_jiffies = jiffies;
6814 void hclge_vport_stop(struct hclge_vport *vport)
6816 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6819 static int hclge_client_start(struct hnae3_handle *handle)
6821 struct hclge_vport *vport = hclge_get_vport(handle);
6823 return hclge_vport_start(vport);
6826 static void hclge_client_stop(struct hnae3_handle *handle)
6828 struct hclge_vport *vport = hclge_get_vport(handle);
6830 hclge_vport_stop(vport);
6833 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6834 u16 cmdq_resp, u8 resp_code,
6835 enum hclge_mac_vlan_tbl_opcode op)
6837 struct hclge_dev *hdev = vport->back;
6840 dev_err(&hdev->pdev->dev,
6841 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6846 if (op == HCLGE_MAC_VLAN_ADD) {
6847 if ((!resp_code) || (resp_code == 1)) {
6849 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6850 dev_err(&hdev->pdev->dev,
6851 "add mac addr failed for uc_overflow.\n");
6853 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6854 dev_err(&hdev->pdev->dev,
6855 "add mac addr failed for mc_overflow.\n");
6859 dev_err(&hdev->pdev->dev,
6860 "add mac addr failed for undefined, code=%u.\n",
6863 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6866 } else if (resp_code == 1) {
6867 dev_dbg(&hdev->pdev->dev,
6868 "remove mac addr failed for miss.\n");
6872 dev_err(&hdev->pdev->dev,
6873 "remove mac addr failed for undefined, code=%u.\n",
6876 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6879 } else if (resp_code == 1) {
6880 dev_dbg(&hdev->pdev->dev,
6881 "lookup mac addr failed for miss.\n");
6885 dev_err(&hdev->pdev->dev,
6886 "lookup mac addr failed for undefined, code=%u.\n",
6891 dev_err(&hdev->pdev->dev,
6892 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6897 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6899 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6901 unsigned int word_num;
6902 unsigned int bit_num;
6904 if (vfid > 255 || vfid < 0)
6907 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6908 word_num = vfid / 32;
6909 bit_num = vfid % 32;
6911 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6913 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6915 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6916 bit_num = vfid % 32;
6918 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6920 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6926 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6928 #define HCLGE_DESC_NUMBER 3
6929 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6932 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6933 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6934 if (desc[i].data[j])
6940 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6941 const u8 *addr, bool is_mc)
6943 const unsigned char *mac_addr = addr;
6944 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6945 (mac_addr[0]) | (mac_addr[1] << 8);
6946 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6948 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6950 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6951 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6954 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6955 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6958 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6959 struct hclge_mac_vlan_tbl_entry_cmd *req)
6961 struct hclge_dev *hdev = vport->back;
6962 struct hclge_desc desc;
6967 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6969 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6971 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6973 dev_err(&hdev->pdev->dev,
6974 "del mac addr failed for cmd_send, ret =%d.\n",
6978 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6979 retval = le16_to_cpu(desc.retval);
6981 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6982 HCLGE_MAC_VLAN_REMOVE);
6985 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6986 struct hclge_mac_vlan_tbl_entry_cmd *req,
6987 struct hclge_desc *desc,
6990 struct hclge_dev *hdev = vport->back;
6995 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6997 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6998 memcpy(desc[0].data,
7000 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7001 hclge_cmd_setup_basic_desc(&desc[1],
7002 HCLGE_OPC_MAC_VLAN_ADD,
7004 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7005 hclge_cmd_setup_basic_desc(&desc[2],
7006 HCLGE_OPC_MAC_VLAN_ADD,
7008 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7010 memcpy(desc[0].data,
7012 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7013 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7016 dev_err(&hdev->pdev->dev,
7017 "lookup mac addr failed for cmd_send, ret =%d.\n",
7021 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7022 retval = le16_to_cpu(desc[0].retval);
7024 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7025 HCLGE_MAC_VLAN_LKUP);
7028 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7029 struct hclge_mac_vlan_tbl_entry_cmd *req,
7030 struct hclge_desc *mc_desc)
7032 struct hclge_dev *hdev = vport->back;
7039 struct hclge_desc desc;
7041 hclge_cmd_setup_basic_desc(&desc,
7042 HCLGE_OPC_MAC_VLAN_ADD,
7044 memcpy(desc.data, req,
7045 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7046 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7047 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7048 retval = le16_to_cpu(desc.retval);
7050 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7052 HCLGE_MAC_VLAN_ADD);
7054 hclge_cmd_reuse_desc(&mc_desc[0], false);
7055 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7056 hclge_cmd_reuse_desc(&mc_desc[1], false);
7057 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7058 hclge_cmd_reuse_desc(&mc_desc[2], false);
7059 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7060 memcpy(mc_desc[0].data, req,
7061 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7062 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7063 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7064 retval = le16_to_cpu(mc_desc[0].retval);
7066 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7068 HCLGE_MAC_VLAN_ADD);
7072 dev_err(&hdev->pdev->dev,
7073 "add mac addr failed for cmd_send, ret =%d.\n",
7081 static int hclge_init_umv_space(struct hclge_dev *hdev)
7083 u16 allocated_size = 0;
7086 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7091 if (allocated_size < hdev->wanted_umv_size)
7092 dev_warn(&hdev->pdev->dev,
7093 "Alloc umv space failed, want %u, get %u\n",
7094 hdev->wanted_umv_size, allocated_size);
7096 mutex_init(&hdev->umv_mutex);
7097 hdev->max_umv_size = allocated_size;
7098 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7099 * preserve some unicast mac vlan table entries shared by pf
7102 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7103 hdev->share_umv_size = hdev->priv_umv_size +
7104 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7109 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7113 if (hdev->max_umv_size > 0) {
7114 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7118 hdev->max_umv_size = 0;
7120 mutex_destroy(&hdev->umv_mutex);
7125 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7126 u16 *allocated_size, bool is_alloc)
7128 struct hclge_umv_spc_alc_cmd *req;
7129 struct hclge_desc desc;
7132 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7133 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7135 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7137 req->space_size = cpu_to_le32(space_size);
7139 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7141 dev_err(&hdev->pdev->dev,
7142 "%s umv space failed for cmd_send, ret =%d\n",
7143 is_alloc ? "allocate" : "free", ret);
7147 if (is_alloc && allocated_size)
7148 *allocated_size = le32_to_cpu(desc.data[1]);
7153 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7155 struct hclge_vport *vport;
7158 for (i = 0; i < hdev->num_alloc_vport; i++) {
7159 vport = &hdev->vport[i];
7160 vport->used_umv_num = 0;
7163 mutex_lock(&hdev->umv_mutex);
7164 hdev->share_umv_size = hdev->priv_umv_size +
7165 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7166 mutex_unlock(&hdev->umv_mutex);
7169 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7171 struct hclge_dev *hdev = vport->back;
7174 mutex_lock(&hdev->umv_mutex);
7175 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7176 hdev->share_umv_size == 0);
7177 mutex_unlock(&hdev->umv_mutex);
7182 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7184 struct hclge_dev *hdev = vport->back;
7186 mutex_lock(&hdev->umv_mutex);
7188 if (vport->used_umv_num > hdev->priv_umv_size)
7189 hdev->share_umv_size++;
7191 if (vport->used_umv_num > 0)
7192 vport->used_umv_num--;
7194 if (vport->used_umv_num >= hdev->priv_umv_size &&
7195 hdev->share_umv_size > 0)
7196 hdev->share_umv_size--;
7197 vport->used_umv_num++;
7199 mutex_unlock(&hdev->umv_mutex);
/* hnae3 hook: add a unicast MAC address for this handle's vport. */
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}
7210 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7211 const unsigned char *addr)
7213 struct hclge_dev *hdev = vport->back;
7214 struct hclge_mac_vlan_tbl_entry_cmd req;
7215 struct hclge_desc desc;
7216 u16 egress_port = 0;
7219 /* mac addr check */
7220 if (is_zero_ether_addr(addr) ||
7221 is_broadcast_ether_addr(addr) ||
7222 is_multicast_ether_addr(addr)) {
7223 dev_err(&hdev->pdev->dev,
7224 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7225 addr, is_zero_ether_addr(addr),
7226 is_broadcast_ether_addr(addr),
7227 is_multicast_ether_addr(addr));
7231 memset(&req, 0, sizeof(req));
7233 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7234 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7236 req.egress_port = cpu_to_le16(egress_port);
7238 hclge_prepare_mac_addr(&req, addr, false);
7240 /* Lookup the mac address in the mac_vlan table, and add
7241 * it if the entry is inexistent. Repeated unicast entry
7242 * is not allowed in the mac vlan table.
7244 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7245 if (ret == -ENOENT) {
7246 if (!hclge_is_umv_space_full(vport)) {
7247 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7249 hclge_update_umv_space(vport, false);
7253 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7254 hdev->priv_umv_size);
7259 /* check if we just hit the duplicate */
7261 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7262 vport->vport_id, addr);
7266 dev_err(&hdev->pdev->dev,
7267 "PF failed to add unicast entry(%pM) in the MAC table\n",
/* hnae3 hook: remove a unicast MAC address for this handle's vport. */
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}
7281 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7282 const unsigned char *addr)
7284 struct hclge_dev *hdev = vport->back;
7285 struct hclge_mac_vlan_tbl_entry_cmd req;
7288 /* mac addr check */
7289 if (is_zero_ether_addr(addr) ||
7290 is_broadcast_ether_addr(addr) ||
7291 is_multicast_ether_addr(addr)) {
7292 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7297 memset(&req, 0, sizeof(req));
7298 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7299 hclge_prepare_mac_addr(&req, addr, false);
7300 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7302 hclge_update_umv_space(vport, true);
/* hnae3 hook: add a multicast MAC address for this handle's vport. */
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}
7315 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7316 const unsigned char *addr)
7318 struct hclge_dev *hdev = vport->back;
7319 struct hclge_mac_vlan_tbl_entry_cmd req;
7320 struct hclge_desc desc[3];
7323 /* mac addr check */
7324 if (!is_multicast_ether_addr(addr)) {
7325 dev_err(&hdev->pdev->dev,
7326 "Add mc mac err! invalid mac:%pM.\n",
7330 memset(&req, 0, sizeof(req));
7331 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7332 hclge_prepare_mac_addr(&req, addr, true);
7333 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7335 /* This mac addr do not exist, add new entry for it */
7336 memset(desc[0].data, 0, sizeof(desc[0].data));
7337 memset(desc[1].data, 0, sizeof(desc[0].data));
7338 memset(desc[2].data, 0, sizeof(desc[0].data));
7340 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7343 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7345 if (status == -ENOSPC)
7346 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
/* hnae3 hook: remove a multicast MAC address for this handle's vport. */
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}
7359 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7360 const unsigned char *addr)
7362 struct hclge_dev *hdev = vport->back;
7363 struct hclge_mac_vlan_tbl_entry_cmd req;
7364 enum hclge_cmd_status status;
7365 struct hclge_desc desc[3];
7367 /* mac addr check */
7368 if (!is_multicast_ether_addr(addr)) {
7369 dev_dbg(&hdev->pdev->dev,
7370 "Remove mc mac err! invalid mac:%pM.\n",
7375 memset(&req, 0, sizeof(req));
7376 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7377 hclge_prepare_mac_addr(&req, addr, true);
7378 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7380 /* This mac addr exist, remove this handle's VFID for it */
7381 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7385 if (hclge_is_all_function_id_zero(desc))
7386 /* All the vfid is zero, so need to delete this entry */
7387 status = hclge_remove_mac_vlan_tbl(vport, &req);
7389 /* Not all the vfid is zero, update the vfid */
7390 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7393 /* Maybe this mac address is in mta table, but it cannot be
7394 * deleted here because an entry of mta represents an address
7395 * range rather than a specific address. the delete action to
7396 * all entries will take effect in update_mta_status called by
7397 * hns3_nic_set_rx_mode.
7405 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7406 enum HCLGE_MAC_ADDR_TYPE mac_type)
7408 struct hclge_vport_mac_addr_cfg *mac_cfg;
7409 struct list_head *list;
7411 if (!vport->vport_id)
7414 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7418 mac_cfg->hd_tbl_status = true;
7419 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7421 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7422 &vport->uc_mac_list : &vport->mc_mac_list;
7424 list_add_tail(&mac_cfg->node, list);
7427 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7429 enum HCLGE_MAC_ADDR_TYPE mac_type)
7431 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7432 struct list_head *list;
7433 bool uc_flag, mc_flag;
7435 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7436 &vport->uc_mac_list : &vport->mc_mac_list;
7438 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7439 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7441 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7442 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7443 if (uc_flag && mac_cfg->hd_tbl_status)
7444 hclge_rm_uc_addr_common(vport, mac_addr);
7446 if (mc_flag && mac_cfg->hd_tbl_status)
7447 hclge_rm_mc_addr_common(vport, mac_addr);
7449 list_del(&mac_cfg->node);
7456 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7457 enum HCLGE_MAC_ADDR_TYPE mac_type)
7459 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7460 struct list_head *list;
7462 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7463 &vport->uc_mac_list : &vport->mc_mac_list;
7465 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7466 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7467 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7469 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7470 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7472 mac_cfg->hd_tbl_status = false;
7474 list_del(&mac_cfg->node);
7480 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7482 struct hclge_vport_mac_addr_cfg *mac, *tmp;
7483 struct hclge_vport *vport;
7486 mutex_lock(&hdev->vport_cfg_mutex);
7487 for (i = 0; i < hdev->num_alloc_vport; i++) {
7488 vport = &hdev->vport[i];
7489 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7490 list_del(&mac->node);
7494 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7495 list_del(&mac->node);
7499 mutex_unlock(&hdev->vport_cfg_mutex);
7502 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7503 u16 cmdq_resp, u8 resp_code)
7505 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7506 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
7507 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7508 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7513 dev_err(&hdev->pdev->dev,
7514 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7519 switch (resp_code) {
7520 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7521 case HCLGE_ETHERTYPE_ALREADY_ADD:
7524 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7525 dev_err(&hdev->pdev->dev,
7526 "add mac ethertype failed for manager table overflow.\n");
7527 return_status = -EIO;
7529 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7530 dev_err(&hdev->pdev->dev,
7531 "add mac ethertype failed for key conflict.\n");
7532 return_status = -EIO;
7535 dev_err(&hdev->pdev->dev,
7536 "add mac ethertype failed for undefined, code=%u.\n",
7538 return_status = -EIO;
7541 return return_status;
7544 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7547 struct hclge_mac_vlan_tbl_entry_cmd req;
7548 struct hclge_dev *hdev = vport->back;
7549 struct hclge_desc desc;
7550 u16 egress_port = 0;
7553 if (is_zero_ether_addr(mac_addr))
7556 memset(&req, 0, sizeof(req));
7557 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7558 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7559 req.egress_port = cpu_to_le16(egress_port);
7560 hclge_prepare_mac_addr(&req, mac_addr, false);
7562 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7565 vf_idx += HCLGE_VF_VPORT_START_NUM;
7566 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7568 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
/* ndo_set_vf_mac-style handler: record a VF's MAC in vf_info and ask the
 * VF to reset so the new address takes effect.
 * NOTE(review): listing elided — parameter list tail, returns and braces
 * are missing from this excerpt.
 */
7574 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7577 struct hclge_vport *vport = hclge_get_vport(handle);
7578 struct hclge_dev *hdev = vport->back;
7580 vport = hclge_get_vf_vport(hdev, vf);
/* No-op when the requested MAC equals the currently configured one. */
7584 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7585 dev_info(&hdev->pdev->dev,
7586 "Specified MAC(=%pM) is same as before, no change committed!\n",
/* Reject a MAC already used by hardware table or another VF. */
7591 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7592 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7597 ether_addr_copy(vport->vf_info.mac, mac_addr);
7598 dev_info(&hdev->pdev->dev,
7599 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
/* Trigger a VF reset so the VF re-reads its MAC from the PF. */
7602 return hclge_inform_reset_assert_to_vf(vport);
/* Send one MAC manager-table entry to firmware via the ETHTYPE_ADD
 * command, then translate the firmware response code into an errno.
 * NOTE(review): listing elided — braces and the error-path return are
 * missing from this excerpt.
 */
7605 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7606 const struct hclge_mac_mgr_tbl_entry_cmd *req)
7608 struct hclge_desc desc;
7613 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7614 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7616 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7618 dev_err(&hdev->pdev->dev,
7619 "add mac ethertype failed for cmd_send, ret =%d.\n",
/* Response code lives in byte 1 of data[0]; retval is the cmd status. */
7624 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7625 retval = le16_to_cpu(desc.retval);
7627 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
/* Program every entry of the static hclge_mgr_table into firmware,
 * bailing out on the first failure.
 * NOTE(review): listing elided — loop body braces and the final return
 * are missing from this excerpt.
 */
7630 static int init_mgr_tbl(struct hclge_dev *hdev)
7635 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7636 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7638 dev_err(&hdev->pdev->dev,
7639 "add mac ethertype failed, ret =%d.\n",
/* Copy the PF's current MAC address into caller-supplied buffer @p. */
7648 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7650 struct hclge_vport *vport = hclge_get_vport(handle);
7651 struct hclge_dev *hdev = vport->back;
7653 ether_addr_copy(p, hdev->hw.mac.mac_addr);
/* Change the PF's unicast MAC: validate the new address, remove the old
 * UC entry (unless this is the very first set and not a kdump kernel),
 * add the new entry, update the pause MAC, then commit to hw.mac.mac_addr.
 * NOTE(review): listing elided — returns, braces and some conditions are
 * missing from this excerpt.
 */
7656 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7659 const unsigned char *new_addr = (const unsigned char *)p;
7660 struct hclge_vport *vport = hclge_get_vport(handle);
7661 struct hclge_dev *hdev = vport->back;
7664 /* mac addr check */
7665 if (is_zero_ether_addr(new_addr) ||
7666 is_broadcast_ether_addr(new_addr) ||
7667 is_multicast_ether_addr(new_addr)) {
7668 dev_err(&hdev->pdev->dev,
7669 "Change uc mac err! invalid mac:%pM.\n",
/* In kdump the old address may still be programmed, so always remove. */
7674 if ((!is_first || is_kdump_kernel()) &&
7675 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7676 dev_warn(&hdev->pdev->dev,
7677 "remove old uc mac address fail.\n");
7679 ret = hclge_add_uc_addr(handle, new_addr);
7681 dev_err(&hdev->pdev->dev,
7682 "add uc mac address fail, ret =%d.\n",
/* On failure, try to restore the previous UC address (best effort). */
7686 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7687 dev_err(&hdev->pdev->dev,
7688 "restore uc mac address fail.\n");
/* Pause frames carry the station MAC, so it must be updated too. */
7693 ret = hclge_pause_addr_cfg(hdev, new_addr);
7695 dev_err(&hdev->pdev->dev,
7696 "configure mac pause address fail, ret =%d.\n",
7701 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
/* ndo_do_ioctl: forward MII ioctls to the attached PHY; without a PHY
 * there is nothing to handle (elided branch presumably returns an error).
 */
7706 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7709 struct hclge_vport *vport = hclge_get_vport(handle);
7710 struct hclge_dev *hdev = vport->back;
7712 if (!hdev->hw.mac.phydev)
7715 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
/* Enable/disable a hardware VLAN filter stage.
 * @vlan_type: VF- or port-level filter; @fe_type: filter-enable bits;
 * @filter_en: when false the enable bits are cleared; @vf_id: target
 * function.  NOTE(review): listing elided — vf_id assignment, braces and
 * the final return are missing from this excerpt.
 */
7718 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7719 u8 fe_type, bool filter_en, u8 vf_id)
7721 struct hclge_vlan_filter_ctrl_cmd *req;
7722 struct hclge_desc desc;
7725 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7727 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7728 req->vlan_type = vlan_type;
7729 req->vlan_fe = filter_en ? fe_type : 0;
7732 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7734 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
/* VLAN filter type/stage bit definitions used by the filter-ctrl command. */
7740 #define HCLGE_FILTER_TYPE_VF 0
7741 #define HCLGE_FILTER_TYPE_PORT 1
7742 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7743 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7744 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7745 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7746 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7747 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7748 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7749 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7750 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
/* Toggle VLAN filtering; revision >= 0x21 hardware has separate VF egress
 * and port ingress stages, older hardware a single V1 egress bit.  The
 * HNAE3_VLAN_FLTR netdev flag tracks the resulting state.
 * NOTE(review): listing elided — else branch braces and the vf_id
 * argument of the V1 call are missing from this excerpt.
 */
7752 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7754 struct hclge_vport *vport = hclge_get_vport(handle);
7755 struct hclge_dev *hdev = vport->back;
7757 if (hdev->pdev->revision >= 0x21) {
7758 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7759 HCLGE_FILTER_FE_EGRESS, enable, 0);
7760 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7761 HCLGE_FILTER_FE_INGRESS, enable, 0);
7763 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7764 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7768 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7770 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
/* Add or remove (@is_kill) @vlan in the per-VF VLAN filter table for
 * function @vfid, using a two-descriptor command that carries a VF bitmap.
 * Interprets the firmware response: code 1 on add means "already exists",
 * code 2 means the VF table is full (filter then auto-disabled by fw),
 * code 1 on kill means "not found" (silently ignored on unload).
 * NOTE(review): listing elided — returns, braces and several statements
 * are missing from this excerpt.
 */
7773 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7774 bool is_kill, u16 vlan,
7777 struct hclge_vport *vport = &hdev->vport[vfid];
7778 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7779 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7780 struct hclge_desc desc[2];
7785 /* if vf vlan table is full, firmware will close vf vlan filter, it
7786 * is unable and unnecessary to add new vlan id to vf vlan filter.
7787 * If spoof check is enable, and vf vlan is full, it shouldn't add
7788 * new vlan, because tx packets with these vlan id will be dropped.
7790 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7791 if (vport->vf_info.spoofchk && vlan) {
7792 dev_err(&hdev->pdev->dev,
7793 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7799 hclge_cmd_setup_basic_desc(&desc[0],
7800 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7801 hclge_cmd_setup_basic_desc(&desc[1],
7802 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
/* Two chained descriptors: NEXT flag links desc[0] to desc[1]. */
7804 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
/* One bit per VF, 8 VFs per byte of the bitmap. */
7806 vf_byte_off = vfid / 8;
7807 vf_byte_val = 1 << (vfid % 8);
7809 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7810 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7812 req0->vlan_id = cpu_to_le16(vlan);
7813 req0->vlan_cfg = is_kill;
/* Bitmap spills into the second descriptor past HCLGE_MAX_VF_BYTES. */
7815 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7816 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7818 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7820 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7822 dev_err(&hdev->pdev->dev,
7823 "Send vf vlan command fail, ret =%d.\n",
7829 #define HCLGE_VF_VLAN_NO_ENTRY 2
7830 if (!req0->resp_code || req0->resp_code == 1)
7833 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7834 set_bit(vfid, hdev->vf_vlan_full);
7835 dev_warn(&hdev->pdev->dev,
7836 "vf vlan table is full, vf vlan filter is disabled\n");
7840 dev_err(&hdev->pdev->dev,
7841 "Add vf vlan filter fail, ret =%u.\n",
7844 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7845 if (!req0->resp_code)
7848 /* vf vlan filter is disabled when vf vlan table is full,
7849 * then new vlan id will not be added into vf vlan table.
7850 * Just return 0 without warning, avoid massive verbose
7851 * print logs when unload.
7853 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7856 dev_err(&hdev->pdev->dev,
7857 "Kill vf vlan filter fail, ret =%u.\n",
/* Add or remove (@is_kill) @vlan_id in the port-level VLAN filter.
 * The 4K VLAN space is addressed as 160-VLAN pages, then bytes, then a
 * bit within the byte.  NOTE(review): listing elided — braces and the
 * final return are missing from this excerpt.
 */
7864 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7865 u16 vlan_id, bool is_kill)
7867 struct hclge_vlan_filter_pf_cfg_cmd *req;
7868 struct hclge_desc desc;
7869 u8 vlan_offset_byte_val;
7870 u8 vlan_offset_byte;
7874 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
/* page = id / 160, byte within page = (id % 160) / 8, bit = id % 8. */
7876 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7877 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7878 HCLGE_VLAN_BYTE_SIZE;
7879 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7881 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7882 req->vlan_offset = vlan_offset_160;
7883 req->vlan_cfg = is_kill;
7884 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7886 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7888 dev_err(&hdev->pdev->dev,
7889 "port vlan command, send fail, ret =%d.\n", ret);
/* Apply a VLAN add/remove to hardware: first the per-VF table, then the
 * software vlan_table bitmap, and finally the port-level filter — but the
 * port filter is only touched when this vport is the first user (add) or
 * the last user (kill) of the VLAN.  VLAN 0 kill is skipped entirely.
 * NOTE(review): listing elided — returns, braces and some conditions are
 * missing from this excerpt.
 */
7893 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7894 u16 vport_id, u16 vlan_id,
7897 u16 vport_idx, vport_num = 0;
7900 if (is_kill && !vlan_id)
7903 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7906 dev_err(&hdev->pdev->dev,
7907 "Set %u vport vlan filter config fail, ret =%d.\n",
7912 /* vlan 0 may be added twice when 8021q module is enabled */
7913 if (!is_kill && !vlan_id &&
7914 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7917 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7918 dev_err(&hdev->pdev->dev,
7919 "Add port vlan failed, vport %u is already in vlan %u\n",
7925 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7926 dev_err(&hdev->pdev->dev,
7927 "Delete port vlan failed, vport %u is not in vlan %u\n",
/* Count remaining member vports to decide on the port-level filter. */
7932 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7935 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7936 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
/* Push this vport's TX VLAN tag-handling config (accept/insert flags and
 * default tags from vport->txvlan_cfg) to hardware.
 * NOTE(review): listing elided — braces and the final return are missing
 * from this excerpt.
 */
7942 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7944 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7945 struct hclge_vport_vtag_tx_cfg_cmd *req;
7946 struct hclge_dev *hdev = vport->back;
7947 struct hclge_desc desc;
7951 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7953 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7954 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7955 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7956 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7957 vcfg->accept_tag1 ? 1 : 0);
7958 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7959 vcfg->accept_untag1 ? 1 : 0);
7960 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7961 vcfg->accept_tag2 ? 1 : 0);
7962 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7963 vcfg->accept_untag2 ? 1 : 0);
7964 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7965 vcfg->insert_tag1_en ? 1 : 0);
7966 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7967 vcfg->insert_tag2_en ? 1 : 0);
7968 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
/* Address this vport in the command's VF bitmap. */
7970 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7971 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7972 HCLGE_VF_NUM_PER_BYTE;
7973 req->vf_bitmap[bmap_index] =
7974 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7976 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7978 dev_err(&hdev->pdev->dev,
7979 "Send port txvlan cfg command fail, ret =%d\n",
/* Push this vport's RX VLAN strip/show config (from vport->rxvlan_cfg)
 * to hardware; mirror of the TX variant above.
 * NOTE(review): listing elided — braces and the final return are missing
 * from this excerpt.
 */
7985 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7987 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7988 struct hclge_vport_vtag_rx_cfg_cmd *req;
7989 struct hclge_dev *hdev = vport->back;
7990 struct hclge_desc desc;
7994 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7996 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7997 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7998 vcfg->strip_tag1_en ? 1 : 0);
7999 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8000 vcfg->strip_tag2_en ? 1 : 0);
8001 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8002 vcfg->vlan1_vlan_prionly ? 1 : 0);
8003 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8004 vcfg->vlan2_vlan_prionly ? 1 : 0);
/* Address this vport in the command's VF bitmap. */
8006 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8007 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8008 HCLGE_VF_NUM_PER_BYTE;
8009 req->vf_bitmap[bmap_index] =
8010 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8012 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8014 dev_err(&hdev->pdev->dev,
8015 "Send port rxvlan cfg command fail, ret =%d\n",
/* Derive TX/RX VLAN offload settings from the port-based-VLAN state:
 * with port-based VLAN disabled, accept tagged traffic and strip tag2;
 * with it enabled, insert @vlan_tag as tag1 and strip tag1.  Then push
 * both configs to hardware.  NOTE(review): listing elided — else braces
 * and the early return after TX config are missing from this excerpt.
 */
8021 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8022 u16 port_base_vlan_state,
8027 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8028 vport->txvlan_cfg.accept_tag1 = true;
8029 vport->txvlan_cfg.insert_tag1_en = false;
8030 vport->txvlan_cfg.default_tag1 = 0;
8032 vport->txvlan_cfg.accept_tag1 = false;
8033 vport->txvlan_cfg.insert_tag1_en = true;
8034 vport->txvlan_cfg.default_tag1 = vlan_tag;
8037 vport->txvlan_cfg.accept_untag1 = true;
8039 /* accept_tag2 and accept_untag2 are not supported on
8040 * pdev revision(0x20), new revision support them,
8041 * this two fields can not be configured by user.
8043 vport->txvlan_cfg.accept_tag2 = true;
8044 vport->txvlan_cfg.accept_untag2 = true;
8045 vport->txvlan_cfg.insert_tag2_en = false;
8046 vport->txvlan_cfg.default_tag2 = 0;
8048 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8049 vport->rxvlan_cfg.strip_tag1_en = false;
8050 vport->rxvlan_cfg.strip_tag2_en =
8051 vport->rxvlan_cfg.rx_vlan_offload_en;
8053 vport->rxvlan_cfg.strip_tag1_en =
8054 vport->rxvlan_cfg.rx_vlan_offload_en;
8055 vport->rxvlan_cfg.strip_tag2_en = true;
8057 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8058 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8060 ret = hclge_set_vlan_tx_offload_cfg(vport);
8064 return hclge_set_vlan_rx_offload_cfg(vport);
/* Program the RX (outer/inner, first/second) and TX (outer/inner) VLAN
 * TPID values from hdev->vlan_type_cfg into hardware, via two commands.
 * NOTE(review): listing elided — braces and intermediate returns are
 * missing from this excerpt.
 */
8067 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8069 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8070 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8071 struct hclge_desc desc;
8074 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8075 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8076 rx_req->ot_fst_vlan_type =
8077 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8078 rx_req->ot_sec_vlan_type =
8079 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8080 rx_req->in_fst_vlan_type =
8081 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8082 rx_req->in_sec_vlan_type =
8083 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8085 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8087 dev_err(&hdev->pdev->dev,
8088 "Send rxvlan protocol type command fail, ret =%d\n",
/* Second command: TX-side TPIDs. */
8093 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8095 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8096 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8097 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8099 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8101 dev_err(&hdev->pdev->dev,
8102 "Send txvlan protocol type command fail, ret =%d\n",
/* One-time VLAN init: enable the appropriate filter stages per hardware
 * revision, set all TPIDs to 0x8100, apply per-vport offload config, and
 * finally ensure VLAN 0 is present in the filter.
 * NOTE(review): listing elided — loop braces, error returns and several
 * call arguments are missing from this excerpt.
 */
8108 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8110 #define HCLGE_DEF_VLAN_TYPE 0x8100
8112 struct hnae3_handle *handle = &hdev->vport[0].nic;
8113 struct hclge_vport *vport;
8117 if (hdev->pdev->revision >= 0x21) {
8118 /* for revision 0x21, vf vlan filter is per function */
8119 for (i = 0; i < hdev->num_alloc_vport; i++) {
8120 vport = &hdev->vport[i];
8121 ret = hclge_set_vlan_filter_ctrl(hdev,
8122 HCLGE_FILTER_TYPE_VF,
8123 HCLGE_FILTER_FE_EGRESS,
8130 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8131 HCLGE_FILTER_FE_INGRESS, true,
8136 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8137 HCLGE_FILTER_FE_EGRESS_V1_B,
8143 handle->netdev_flags |= HNAE3_VLAN_FLTR;
/* Default every TPID slot to the standard 802.1Q ethertype. */
8145 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8146 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8147 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8148 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8149 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8150 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8152 ret = hclge_set_vlan_protocol_type(hdev);
8156 for (i = 0; i < hdev->num_alloc_vport; i++) {
8159 vport = &hdev->vport[i];
8160 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8162 ret = hclge_vlan_offload_cfg(vport,
8163 vport->port_base_vlan_cfg.state,
8169 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
/* Record @vlan_id in the vport's software VLAN list; @writen_to_tbl
 * notes whether the id is already programmed into the hardware table.
 * NOTE(review): listing elided — the NULL check after kzalloc and braces
 * are missing from this excerpt.
 */
8172 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8175 struct hclge_vport_vlan_cfg *vlan;
8177 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8181 vlan->hd_tbl_status = writen_to_tbl;
8182 vlan->vlan_id = vlan_id;
8184 list_add_tail(&vlan->node, &vport->vlan_list);
/* Write every not-yet-programmed entry of the vport's VLAN list into the
 * hardware filter and mark it programmed; stop on first failure.
 * NOTE(review): listing elided — braces, error return and final return
 * are missing from this excerpt.
 */
8187 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8189 struct hclge_vport_vlan_cfg *vlan, *tmp;
8190 struct hclge_dev *hdev = vport->back;
8193 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8194 if (!vlan->hd_tbl_status) {
8195 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8197 vlan->vlan_id, false);
8199 dev_err(&hdev->pdev->dev,
8200 "restore vport vlan list failed, ret=%d\n",
8205 vlan->hd_tbl_status = true;
/* Remove @vlan_id from the vport's software VLAN list; when
 * @is_write_tbl is set and the entry was programmed, also remove it from
 * the hardware filter.  NOTE(review): listing elided — the hw-remove call
 * arguments, kfree and braces are missing from this excerpt.
 */
8211 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8214 struct hclge_vport_vlan_cfg *vlan, *tmp;
8215 struct hclge_dev *hdev = vport->back;
8217 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8218 if (vlan->vlan_id == vlan_id) {
8219 if (is_write_tbl && vlan->hd_tbl_status)
8220 hclge_set_vlan_filter_hw(hdev,
8226 list_del(&vlan->node);
/* Flush the vport's whole VLAN list: un-program each entry from hardware
 * and, when @is_del_list, also delete (and free) the list nodes; otherwise
 * just clear hd_tbl_status.  NOTE(review): listing elided — call
 * arguments, kfree and braces are missing from this excerpt.
 */
8233 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8235 struct hclge_vport_vlan_cfg *vlan, *tmp;
8236 struct hclge_dev *hdev = vport->back;
8238 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8239 if (vlan->hd_tbl_status)
8240 hclge_set_vlan_filter_hw(hdev,
8246 vlan->hd_tbl_status = false;
8248 list_del(&vlan->node);
/* Teardown helper: free every vport's software VLAN list under
 * vport_cfg_mutex (hardware state is not touched here).
 * NOTE(review): listing elided — kfree and braces are missing from this
 * excerpt.
 */
8254 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8256 struct hclge_vport_vlan_cfg *vlan, *tmp;
8257 struct hclge_vport *vport;
8260 mutex_lock(&hdev->vport_cfg_mutex);
8261 for (i = 0; i < hdev->num_alloc_vport; i++) {
8262 vport = &hdev->vport[i];
8263 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8264 list_del(&vlan->node);
8268 mutex_unlock(&hdev->vport_cfg_mutex);
/* After a reset, re-program hardware VLAN filters from software state:
 * each vport's port-based VLAN (when enabled) and every entry of its
 * VLAN list, all under vport_cfg_mutex.
 * NOTE(review): listing elided — braces, error handling and some call
 * arguments are missing from this excerpt.
 */
8271 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8273 struct hclge_vport *vport = hclge_get_vport(handle);
8274 struct hclge_vport_vlan_cfg *vlan, *tmp;
8275 struct hclge_dev *hdev = vport->back;
8280 mutex_lock(&hdev->vport_cfg_mutex);
8281 for (i = 0; i < hdev->num_alloc_vport; i++) {
8282 vport = &hdev->vport[i];
8283 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8284 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8285 state = vport->port_base_vlan_cfg.state;
8287 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8288 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8289 vport->vport_id, vlan_id,
8294 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8297 if (!vlan->hd_tbl_status)
8299 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8301 vlan->vlan_id, false);
8307 mutex_unlock(&hdev->vport_cfg_mutex);
/* Enable/disable RX VLAN tag stripping.  Which tag gets stripped depends
 * on the port-based VLAN state: tag2 when disabled, tag1 when enabled
 * (tag2 then always stripped).  Pushes the result to hardware.
 * NOTE(review): listing elided — else braces are missing from this
 * excerpt.
 */
8310 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8312 struct hclge_vport *vport = hclge_get_vport(handle);
8314 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8315 vport->rxvlan_cfg.strip_tag1_en = false;
8316 vport->rxvlan_cfg.strip_tag2_en = enable;
8318 vport->rxvlan_cfg.strip_tag1_en = enable;
8319 vport->rxvlan_cfg.strip_tag2_en = true;
8321 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8322 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8323 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8325 return hclge_set_vlan_rx_offload_cfg(vport);
/* Swap filter entries when the port-based VLAN state changes: on enable,
 * drop the list-managed VLANs and install the new port VLAN; on disable,
 * remove the old port VLAN and restore the list-managed VLANs.
 * NOTE(review): listing elided — call arguments, braces and an error
 * return are missing from this excerpt.
 */
8328 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8329 u16 port_base_vlan_state,
8330 struct hclge_vlan_info *new_info,
8331 struct hclge_vlan_info *old_info)
8333 struct hclge_dev *hdev = vport->back;
8336 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8337 hclge_rm_vport_all_vlan_table(vport, false);
8338 return hclge_set_vlan_filter_hw(hdev,
8339 htons(new_info->vlan_proto),
8345 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8346 vport->vport_id, old_info->vlan_tag,
8351 return hclge_add_vport_all_vlan_table(vport);
/* Apply a new port-based VLAN config to a vport: update offload config,
 * swap hardware filter entries (add new / remove old on MODIFY), then
 * commit state and the new vlan_info to the vport and nic handle.
 * NOTE(review): listing elided — error returns, braces and some call
 * arguments are missing from this excerpt.
 */
8354 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8355 struct hclge_vlan_info *vlan_info)
8357 struct hnae3_handle *nic = &vport->nic;
8358 struct hclge_vlan_info *old_vlan_info;
8359 struct hclge_dev *hdev = vport->back;
8362 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8364 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8368 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8369 /* add new VLAN tag */
8370 ret = hclge_set_vlan_filter_hw(hdev,
8371 htons(vlan_info->vlan_proto),
8373 vlan_info->vlan_tag,
8378 /* remove old VLAN tag */
8379 ret = hclge_set_vlan_filter_hw(hdev,
8380 htons(old_vlan_info->vlan_proto),
8382 old_vlan_info->vlan_tag,
8390 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8395 /* update state only when disable/enable port based VLAN */
8396 vport->port_base_vlan_cfg.state = state;
8397 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8398 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8400 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8403 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8404 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8405 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
/* Decide the port-based VLAN transition for a requested VLAN value:
 * from DISABLE: 0 -> NOCHANGE, nonzero -> ENABLE; from ENABLE: 0 ->
 * DISABLE, same tag -> NOCHANGE, different tag -> MODIFY.
 * NOTE(review): listing elided — some conditions and braces are missing
 * from this excerpt.
 */
8410 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8411 enum hnae3_port_base_vlan_state state,
8414 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8416 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8418 return HNAE3_PORT_BASE_VLAN_ENABLE;
8421 return HNAE3_PORT_BASE_VLAN_DISABLE;
8422 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8423 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8425 return HNAE3_PORT_BASE_VLAN_MODIFY;
/* ndo_set_vf_vlan handler: validate vlan/qos/proto, compute the
 * port-based VLAN transition, then either apply it directly (VF not
 * alive) or push the new config to the running VF via mailbox.
 * Not supported on revision 0x20 hardware.
 * NOTE(review): listing elided — returns, braces and trailing call
 * arguments are missing from this excerpt.
 */
8429 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8430 u16 vlan, u8 qos, __be16 proto)
8432 struct hclge_vport *vport = hclge_get_vport(handle);
8433 struct hclge_dev *hdev = vport->back;
8434 struct hclge_vlan_info vlan_info;
8438 if (hdev->pdev->revision == 0x20)
8441 vport = hclge_get_vf_vport(hdev, vfid);
8445 /* qos is a 3 bits value, so can not be bigger than 7 */
8446 if (vlan > VLAN_N_VID - 1 || qos > 7)
8448 if (proto != htons(ETH_P_8021Q))
8449 return -EPROTONOSUPPORT;
8451 state = hclge_get_port_base_vlan_state(vport,
8452 vport->port_base_vlan_cfg.state,
8454 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8457 vlan_info.vlan_tag = vlan;
8458 vlan_info.qos = qos;
8459 vlan_info.vlan_proto = ntohs(proto);
8461 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8462 return hclge_update_port_base_vlan_cfg(vport, state,
8465 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8466 vport->vport_id, state,
/* Add or remove (@is_kill) a VLAN for this vport.  During reset,
 * removals are only recorded in vlan_del_fail_bmap for later retry.
 * With port-based VLAN disabled the hardware table is updated directly;
 * otherwise only the software vlan list changes until it is disabled.
 * NOTE(review): listing elided — returns, braces and an error check are
 * missing from this excerpt.
 */
8473 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8474 u16 vlan_id, bool is_kill)
8476 struct hclge_vport *vport = hclge_get_vport(handle);
8477 struct hclge_dev *hdev = vport->back;
8478 bool writen_to_tbl = false;
8481 /* When device is resetting, firmware is unable to handle
8482 * mailbox. Just record the vlan id, and remove it after
8485 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8486 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8490 /* when port base vlan enabled, we use port base vlan as the vlan
8491 * filter entry. In this case, we don't update vlan filter table
8492 * when user add new vlan or remove exist vlan, just update the vport
8493 * vlan list. The vlan id in vlan list will be writen in vlan filter
8494 * table until port base vlan disabled
8496 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8497 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8499 writen_to_tbl = true;
8504 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8506 hclge_add_vport_vlan_table(vport, vlan_id,
8508 } else if (is_kill) {
8509 /* when remove hw vlan filter failed, record the vlan id,
8510 * and try to remove it from hw later, to be consistence
8513 set_bit(vlan_id, vport->vlan_del_fail_bmap);
/* Periodic worker: retry VLAN removals recorded in each vport's
 * vlan_del_fail_bmap, bounded by HCLGE_MAX_SYNC_COUNT per invocation.
 * -EINVAL from the hw call is tolerated (entry already gone).
 * NOTE(review): listing elided — braces, the sync_cnt increment and a
 * return are missing from this excerpt.
 */
8518 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8520 #define HCLGE_MAX_SYNC_COUNT 60
8522 int i, ret, sync_cnt = 0;
8525 /* start from vport 1 for PF is always alive */
8526 for (i = 0; i < hdev->num_alloc_vport; i++) {
8527 struct hclge_vport *vport = &hdev->vport[i];
8529 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8531 while (vlan_id != VLAN_N_VID) {
8532 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8533 vport->vport_id, vlan_id,
8535 if (ret && ret != -EINVAL)
8538 clear_bit(vlan_id, vport->vlan_del_fail_bmap)
8539 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8542 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8545 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
/* Send the max/min frame size config to firmware.  @new_mps is the max
 * packet size in bytes (already includes headers, see caller).
 */
8551 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8553 struct hclge_config_max_frm_size_cmd *req;
8554 struct hclge_desc desc;
8556 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8558 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8559 req->max_frm_size = cpu_to_le16(new_mps);
8560 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8562 return hclge_cmd_send(&hdev->hw, &desc, 1);
/* ndo_change_mtu-style entry point: delegate to the vport MTU setter. */
8565 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8567 struct hclge_vport *vport = hclge_get_vport(handle);
8569 return hclge_set_vport_mtu(vport, new_mtu);
/* Set a vport's MTU.  Converts MTU to max frame size (+ L2 header, FCS
 * and two VLAN tags), enforces VF mps <= PF mps and PF mps >= every VF
 * mps, then (PF only) quiesces the client, programs hardware, reallocates
 * buffers and brings the client back up — all under vport_lock.
 * NOTE(review): listing elided — error returns, braces and the unlock on
 * some paths are missing from this excerpt.
 */
8572 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8574 struct hclge_dev *hdev = vport->back;
8575 int i, max_frm_size, ret;
8577 /* HW supprt 2 layer vlan */
8578 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8579 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8580 max_frm_size > HCLGE_MAC_MAX_FRAME)
8583 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8584 mutex_lock(&hdev->vport_lock);
8585 /* VF's mps must fit within hdev->mps */
8586 if (vport->vport_id && max_frm_size > hdev->mps) {
8587 mutex_unlock(&hdev->vport_lock);
8589 } else if (vport->vport_id) {
8590 vport->mps = max_frm_size;
8591 mutex_unlock(&hdev->vport_lock);
8595 /* PF's mps must be greater then VF's mps */
8596 for (i = 1; i < hdev->num_alloc_vport; i++)
8597 if (max_frm_size < hdev->vport[i].mps) {
8598 mutex_unlock(&hdev->vport_lock);
/* Quiesce traffic while the frame size and buffers change. */
8602 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8604 ret = hclge_set_mac_mtu(hdev, max_frm_size);
8606 dev_err(&hdev->pdev->dev,
8607 "Change mtu fail, ret =%d\n", ret);
8611 hdev->mps = max_frm_size;
8612 vport->mps = max_frm_size;
/* Buffer allocation depends on mps, so redo it. */
8614 ret = hclge_buffer_alloc(hdev);
8616 dev_err(&hdev->pdev->dev,
8617 "Allocate buffer fail, ret =%d\n", ret);
8620 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8621 mutex_unlock(&hdev->vport_lock);
/* Assert or deassert (elided bool parameter) the reset of one TQP queue
 * via the RESET_TQP_QUEUE command.
 * NOTE(review): listing elided — the enable parameter, braces and final
 * return are missing from this excerpt.
 */
8625 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8628 struct hclge_reset_tqp_queue_cmd *req;
8629 struct hclge_desc desc;
8632 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8634 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8635 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8637 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8639 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8641 dev_err(&hdev->pdev->dev,
8642 "Send tqp reset cmd error, status =%d\n", ret);
/* Query whether a TQP queue's reset has completed; returns the
 * ready_to_reset bit from the firmware response.
 * NOTE(review): listing elided — braces and the error-path return are
 * missing from this excerpt.
 */
8649 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8651 struct hclge_reset_tqp_queue_cmd *req;
8652 struct hclge_desc desc;
8655 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8657 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8658 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8660 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8662 dev_err(&hdev->pdev->dev,
8663 "Get reset status error, status =%d\n", ret);
8667 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
/* Convert a handle-local queue index to the device-global TQP index by
 * looking through the kinfo tqp array.  NOTE(review): the return
 * statement is elided from this excerpt; presumably returns tqp->index.
 */
8670 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8672 struct hnae3_queue *queue;
8673 struct hclge_tqp *tqp;
8675 queue = handle->kinfo.tqp[queue_id];
8676 tqp = container_of(queue, struct hclge_tqp, q);
/* Full TQP reset sequence for the PF: disable the queue, assert reset,
 * poll (up to HCLGE_TQP_RESET_TRY_TIMES, 1ms apart) until hardware
 * reports ready, then deassert the reset.
 * NOTE(review): listing elided — returns, a break on success and braces
 * are missing from this excerpt.
 */
8681 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8683 struct hclge_vport *vport = hclge_get_vport(handle);
8684 struct hclge_dev *hdev = vport->back;
8685 int reset_try_times = 0;
8690 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8692 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8694 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8698 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8700 dev_err(&hdev->pdev->dev,
8701 "Send reset tqp cmd fail, ret = %d\n", ret);
8705 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8706 reset_status = hclge_get_reset_status(hdev, queue_gid);
8710 /* Wait for tqp hw reset */
8711 usleep_range(1000, 1200);
8714 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8715 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8719 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8721 dev_err(&hdev->pdev->dev,
8722 "Deassert the soft reset fail, ret = %d\n", ret);
/* Same reset sequence as hclge_reset_tqp() but for a VF queue and
 * best-effort: failures are only warned about (void return), and the
 * queue-disable step is handled by the VF side.
 * NOTE(review): listing elided — returns/breaks and braces are missing
 * from this excerpt.
 */
8727 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8729 struct hclge_dev *hdev = vport->back;
8730 int reset_try_times = 0;
8735 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8737 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8739 dev_warn(&hdev->pdev->dev,
8740 "Send reset tqp cmd fail, ret = %d\n", ret);
8744 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8745 reset_status = hclge_get_reset_status(hdev, queue_gid);
8749 /* Wait for tqp hw reset */
8750 usleep_range(1000, 1200);
8753 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8754 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8758 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8760 dev_warn(&hdev->pdev->dev,
8761 "Deassert the soft reset fail, ret = %d\n", ret);
/* Report the cached firmware version for ethtool. */
8764 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8766 struct hclge_vport *vport = hclge_get_vport(handle);
8767 struct hclge_dev *hdev = vport->back;
8769 return hdev->fw_version;
/* Advertise the requested pause capabilities on the PHY (no-op without
 * one — the guard is elided from this excerpt).
 */
8772 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8774 struct phy_device *phydev = hdev->hw.mac.phydev;
8779 phy_set_asym_pause(phydev, rx_en, tx_en);
/* Program MAC pause enable bits; PFC mode makes link-level pause a no-op
 * for TX/RX settings (elided branch).
 * NOTE(review): listing elided — braces and the final return are missing
 * from this excerpt.
 */
8782 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8786 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8789 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8791 dev_err(&hdev->pdev->dev,
8792 "configure pauseparam error, ret = %d.\n", ret);
/* Resolve flow control from PHY autoneg results: build local/remote
 * pause advertisements, resolve with mii_resolve_flowctrl_fdx(), force
 * both directions off at half duplex (elided), and apply.
 * NOTE(review): listing elided — guards, braces and the half-duplex
 * assignments are missing from this excerpt.
 */
8797 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8799 struct phy_device *phydev = hdev->hw.mac.phydev;
8800 u16 remote_advertising = 0;
8801 u16 local_advertising;
8802 u32 rx_pause, tx_pause;
8805 if (!phydev->link || !phydev->autoneg)
8808 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8811 remote_advertising = LPA_PAUSE_CAP;
8813 if (phydev->asym_pause)
8814 remote_advertising |= LPA_PAUSE_ASYM;
8816 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8817 remote_advertising);
8818 tx_pause = flowctl & FLOW_CTRL_TX;
8819 rx_pause = flowctl & FLOW_CTRL_RX;
8821 if (phydev->duplex == HCLGE_MAC_HALF) {
8826 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
/* ethtool get_pauseparam: report autoneg (only meaningful with a PHY)
 * and translate the current fc_mode into rx_en/tx_en flags; PFC mode
 * reports both off (assignments elided from this excerpt).
 */
8829 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8830 u32 *rx_en, u32 *tx_en)
8832 struct hclge_vport *vport = hclge_get_vport(handle);
8833 struct hclge_dev *hdev = vport->back;
8834 struct phy_device *phydev = hdev->hw.mac.phydev;
8836 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8838 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8844 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8847 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8850 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8859 }
/* Remember the user's requested pause setting as fc_mode_last_time and
 * make it the active tm fc_mode (the rx_en && tx_en condition for FULL
 * is elided from this excerpt).
 */
8859 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8860 u32 rx_en, u32 tx_en)
8863 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8864 else if (rx_en && !tx_en)
8865 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8866 else if (!rx_en && tx_en)
8867 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8869 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8871 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
/* ethtool set_pauseparam: reject autoneg changes here (use ethtool -s),
 * refuse while PFC is active, record and apply the new pause config, and
 * restart PHY autoneg when applicable.
 * NOTE(review): listing elided — returns, braces and the phydev/autoneg
 * condition around phy_start_aneg are missing from this excerpt.
 */
8874 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8875 u32 rx_en, u32 tx_en)
8877 struct hclge_vport *vport = hclge_get_vport(handle);
8878 struct hclge_dev *hdev = vport->back;
8879 struct phy_device *phydev = hdev->hw.mac.phydev;
8883 fc_autoneg = hclge_get_autoneg(handle);
8884 if (auto_neg != fc_autoneg) {
8885 dev_info(&hdev->pdev->dev,
8886 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8891 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8892 dev_info(&hdev->pdev->dev,
8893 "Priority flow control enabled. Cannot set link flow control.\n");
8897 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8899 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8902 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8905 return phy_start_aneg(phydev);
/* Report the MAC's cached autoneg result (speed/duplex/autoneg) through the
 * optional output pointers.
 * NOTE(review): elided listing — the "if (speed)" style NULL guards between
 * the assignments are missing from view.
 */
8910 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8911 u8 *auto_neg, u32 *speed, u8 *duplex)
8913 struct hclge_vport *vport = hclge_get_vport(handle);
8914 struct hclge_dev *hdev = vport->back;
8917 *speed = hdev->hw.mac.speed;
8919 *duplex = hdev->hw.mac.duplex;
8921 *auto_neg = hdev->hw.mac.autoneg;
/* Report the cached media type and (for fiber) module type.
 * NOTE(review): elided listing — NULL guards on the output pointers are
 * presumably between the assignments; missing from view.
 */
8924 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8927 struct hclge_vport *vport = hclge_get_vport(handle);
8928 struct hclge_dev *hdev = vport->back;
8931 *media_type = hdev->hw.mac.media_type;
8934 *module_type = hdev->hw.mac.module_type;
/* Read MDI/MDI-X control and status from the PHY's MDIX page and translate
 * them into ethtool ETH_TP_MDI* values.
 * NOTE(review): elided listing — the no-PHY early return, the switch case
 * labels, and the final resolved/mdix conditionals are missing from view.
 */
8937 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8938 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8940 struct hclge_vport *vport = hclge_get_vport(handle);
8941 struct hclge_dev *hdev = vport->back;
8942 struct phy_device *phydev = hdev->hw.mac.phydev;
8943 int mdix_ctrl, mdix, is_resolved;
8944 unsigned int retval;
/* defaults reported when the state cannot be determined */
8947 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8948 *tp_mdix = ETH_TP_MDI_INVALID;
/* switch the PHY to the MDIX register page before reading */
8952 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8954 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8955 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8956 HCLGE_PHY_MDIX_CTRL_S);
8958 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8959 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8960 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
/* restore the default copper page */
8962 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8964 switch (mdix_ctrl) {
8966 *tp_mdix_ctrl = ETH_TP_MDI;
8969 *tp_mdix_ctrl = ETH_TP_MDI_X;
8972 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8975 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
/* status is only valid once speed/duplex resolution completed */
8980 *tp_mdix = ETH_TP_MDI_INVALID;
8982 *tp_mdix = ETH_TP_MDI_X;
8984 *tp_mdix = ETH_TP_MDI;
/* Dump a human-readable summary of the PF configuration to the kernel log
 * (queue counts, buffer sizes, VF count, DCB/MQPRIO flags).
 */
8987 static void hclge_info_show(struct hclge_dev *hdev)
8989 struct device *dev = &hdev->pdev->dev;
8991 dev_info(dev, "PF info begin:\n");
8993 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
8994 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
8995 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
8996 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
8997 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
8998 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
8999 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9000 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9001 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9002 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9003 dev_info(dev, "This is %s PF\n",
9004 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9005 dev_info(dev, "DCB %s\n",
9006 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9007 dev_info(dev, "MQPRIO %s\n",
9008 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9010 dev_info(dev, "PF info end.\n");
/* Initialize the NIC client instance on a vport. If a reset happened while
 * initializing, the registration is rolled back (uninit after waiting for
 * the reset handler to finish). Also enables NIC HW error interrupts.
 * NOTE(review): elided listing — error-path labels/gotos and the final
 * "return ret" lines sit in the numbering gaps.
 */
9013 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9014 struct hclge_vport *vport)
9016 struct hnae3_client *client = vport->nic.client;
9017 struct hclge_dev *hdev = ae_dev->priv;
/* snapshot the reset counter to detect a reset racing with init */
9018 int rst_cnt = hdev->rst_stats.reset_cnt;
9021 ret = client->ops->init_instance(&vport->nic);
9025 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9026 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9027 rst_cnt != hdev->rst_stats.reset_cnt) {
9032 /* Enable nic hw error interrupts */
9033 ret = hclge_config_nic_hw_error(hdev, true);
9035 dev_err(&ae_dev->pdev->dev,
9036 "fail(%d) to enable hw error interrupts\n", ret);
9040 hnae3_set_client_init_flag(client, ae_dev, 1);
9042 if (netif_msg_drv(&hdev->vport->nic))
9043 hclge_info_show(hdev);
/* rollback: unregister and wait out any in-flight reset before uninit */
9048 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9049 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9050 msleep(HCLGE_WAIT_RESET_DONE);
9052 client->ops->uninit_instance(&vport->nic, 0);
/* Initialize the RoCE client instance on a vport, mirroring the NIC client
 * init: skip when RoCE is unsupported or no RoCE client is registered,
 * detect a racing reset via the reset counter, enable RoCE RAS interrupts,
 * and roll back on failure.
 * NOTE(review): elided listing — early returns, error labels, and final
 * returns are in the numbering gaps.
 */
9057 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9058 struct hclge_vport *vport)
9060 struct hnae3_client *client = vport->roce.client;
9061 struct hclge_dev *hdev = ae_dev->priv;
9065 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9069 client = hdev->roce_client;
9070 ret = hclge_init_roce_base_info(vport);
9074 rst_cnt = hdev->rst_stats.reset_cnt;
9075 ret = client->ops->init_instance(&vport->roce);
9079 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9080 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9081 rst_cnt != hdev->rst_stats.reset_cnt) {
9086 /* Enable roce ras interrupts */
9087 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9089 dev_err(&ae_dev->pdev->dev,
9090 "fail(%d) to enable roce ras interrupts\n", ret);
9094 hnae3_set_client_init_flag(client, ae_dev, 1);
/* rollback: unregister and wait for any in-flight reset before uninit */
9099 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9100 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9101 msleep(HCLGE_WAIT_RESET_DONE);
9103 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
/* hnae3 framework hook: register a client (KNIC or ROCE) on every vport.
 * For a KNIC client the RoCE instance is also initialized (the RoCE client
 * may already be registered); on failure the client pointers are cleared.
 * NOTE(review): elided listing — loop braces, break/default cases, and the
 * error-label structure at the bottom are in the numbering gaps.
 */
9108 static int hclge_init_client_instance(struct hnae3_client *client,
9109 struct hnae3_ae_dev *ae_dev)
9111 struct hclge_dev *hdev = ae_dev->priv;
9112 struct hclge_vport *vport;
/* vport 0 is the PF itself; vmdq vports follow */
9115 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9116 vport = &hdev->vport[i];
9118 switch (client->type) {
9119 case HNAE3_CLIENT_KNIC:
9120 hdev->nic_client = client;
9121 vport->nic.client = client;
9122 ret = hclge_init_nic_client_instance(ae_dev, vport);
9126 ret = hclge_init_roce_client_instance(ae_dev, vport);
9131 case HNAE3_CLIENT_ROCE:
9132 if (hnae3_dev_roce_supported(hdev)) {
9133 hdev->roce_client = client;
9134 vport->roce.client = client;
9137 ret = hclge_init_roce_client_instance(ae_dev, vport);
/* error unwinding: drop the client references set above */
9150 hdev->nic_client = NULL;
9151 vport->nic.client = NULL;
9154 hdev->roce_client = NULL;
9155 vport->roce.client = NULL;
/* hnae3 framework hook: unregister a client from every vport. RoCE is torn
 * down first (it depends on the NIC); if only the RoCE client is being
 * removed, the NIC instance is left alone. Each uninit waits for any
 * in-flight reset handler to finish before calling into the client.
 * NOTE(review): elided listing — the uninit_instance() second argument and
 * the "continue" after the ROCE-only check are in the numbering gaps.
 */
9159 static void hclge_uninit_client_instance(struct hnae3_client *client,
9160 struct hnae3_ae_dev *ae_dev)
9162 struct hclge_dev *hdev = ae_dev->priv;
9163 struct hclge_vport *vport;
9166 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9167 vport = &hdev->vport[i];
9168 if (hdev->roce_client) {
9169 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9170 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9171 msleep(HCLGE_WAIT_RESET_DONE);
9173 hdev->roce_client->ops->uninit_instance(&vport->roce,
9175 hdev->roce_client = NULL;
9176 vport->roce.client = NULL;
/* a ROCE-only client removal stops here; NIC instance stays up */
9178 if (client->type == HNAE3_CLIENT_ROCE)
9180 if (hdev->nic_client && client->ops->uninit_instance) {
9181 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9182 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9183 msleep(HCLGE_WAIT_RESET_DONE);
9185 client->ops->uninit_instance(&vport->nic, 0);
9186 hdev->nic_client = NULL;
9187 vport->nic.client = NULL;
/* Enable and map the PCI device: enable, set 64-bit DMA mask (falling back
 * to 32-bit), request regions, set bus mastering, iomap BAR2 for register
 * access, and read the SR-IOV total-VF count. Unwinds via the labels at the
 * bottom on failure.
 * NOTE(review): elided listing — "hw = &hdev->hw;", the iomap NULL check,
 * returns, and the err_* labels themselves are in the numbering gaps.
 */
9192 static int hclge_pci_init(struct hclge_dev *hdev)
9194 struct pci_dev *pdev = hdev->pdev;
9195 struct hclge_hw *hw;
9198 ret = pci_enable_device(pdev);
9200 dev_err(&pdev->dev, "failed to enable PCI device\n");
/* prefer 64-bit DMA; fall back to a 32-bit mask if unsupported */
9204 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9206 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9209 "can't set consistent PCI DMA");
9210 goto err_disable_device;
9212 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9215 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9217 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9218 goto err_disable_device;
9221 pci_set_master(pdev);
/* BAR2 holds the command queue / control register space */
9223 hw->io_base = pcim_iomap(pdev, 2, 0);
9225 dev_err(&pdev->dev, "Can't map configuration register space\n");
9227 goto err_clr_master;
9230 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
/* error unwind path */
9234 pci_clear_master(pdev);
9235 pci_release_regions(pdev);
9237 pci_disable_device(pdev);
/* Reverse hclge_pci_init(): unmap registers, free IRQ vectors, drop bus
 * mastering, release regions, and disable the device.
 */
9242 static void hclge_pci_uninit(struct hclge_dev *hdev)
9244 struct pci_dev *pdev = hdev->pdev;
9246 pcim_iounmap(pdev, hdev->hw.io_base);
9247 pci_free_irq_vectors(pdev);
9248 pci_clear_master(pdev);
9249 pci_release_mem_regions(pdev);
9250 pci_disable_device(pdev);
/* Initialize the driver state bitmap: mark the service initialized and the
 * device down, and clear any stale reset/mailbox service flags.
 */
9253 static void hclge_state_init(struct hclge_dev *hdev)
9255 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9256 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9257 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9258 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9259 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9260 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
/* Mark the device down/removing and synchronously cancel the reset timer
 * and all service work items. The ".function"/".func" checks guard against
 * tearing down work that was never initialized.
 */
9263 static void hclge_state_uninit(struct hclge_dev *hdev)
9265 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9266 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9268 if (hdev->reset_timer.function)
9269 del_timer_sync(&hdev->reset_timer);
9270 if (hdev->service_task.work.func)
9271 cancel_delayed_work_sync(&hdev->service_task);
9272 if (hdev->rst_service_task.func)
9273 cancel_work_sync(&hdev->rst_service_task);
9274 if (hdev->mbx_service_task.func)
9275 cancel_work_sync(&hdev->mbx_service_task);
/* PCIe FLR prepare hook: request an FLR reset and poll (up to
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS = 5s) for the reset handler to
 * reach the "down" stage; log a timeout if it never does.
 */
9278 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9280 #define HCLGE_FLR_WAIT_MS 100
9281 #define HCLGE_FLR_WAIT_CNT 50
9282 struct hclge_dev *hdev = ae_dev->priv;
9285 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9286 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9287 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
/* kick the reset machinery; the reset task performs the actual down */
9288 hclge_reset_event(hdev->pdev, NULL);
9290 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9291 cnt++ < HCLGE_FLR_WAIT_CNT)
9292 msleep(HCLGE_FLR_WAIT_MS);
9294 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9295 dev_err(&hdev->pdev->dev,
9296 "flr wait down timeout: %d\n", cnt);
/* PCIe FLR done hook: signal the reset task that the FLR has completed. */
9299 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9301 struct hclge_dev *hdev = ae_dev->priv;
9303 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
/* Clear the FUNC_RST_ING flag for every vport/VF; failures are only
 * warned about since this is best-effort cleanup after init/reset.
 */
9306 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9310 for (i = 0; i < hdev->num_alloc_vport; i++) {
9311 struct hclge_vport *vport = &hdev->vport[i];
9314 /* Send cmd to clear VF's FUNC_RST_ING */
9315 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9317 dev_warn(&hdev->pdev->dev,
9318 "clear vf(%u) rst failed %d!\n",
9319 vport->vport_id, ret);
/* Main probe path for the PF: allocate the hclge_dev, bring up PCI, the
 * firmware command queue, MSI/MSI-X, TQPs, vports, MDIO (copper only),
 * UMV space, MAC, TSO/GRO, VLAN, TM scheduling, RSS, manager table and
 * flow-director tables, then arm the service tasks and the MISC vector.
 * Pre-existing HW errors are logged/cleared and, if they request a reset,
 * a delayed reset is scheduled rather than resetting immediately (an
 * immediate global reset could disturb other PFs still initializing).
 * Unwinds through the err_* labels at the bottom on any failure.
 * NOTE(review): elided listing — allocation checks, "if (ret)" guards,
 * returns, and the err_* label lines themselves are in the numbering gaps.
 */
9323 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9325 struct pci_dev *pdev = ae_dev->pdev;
9326 struct hclge_dev *hdev;
9329 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9336 hdev->ae_dev = ae_dev;
9337 hdev->reset_type = HNAE3_NONE_RESET;
9338 hdev->reset_level = HNAE3_FUNC_RESET;
9339 ae_dev->priv = hdev;
9341 /* HW supprt 2 layer vlan */
9342 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9344 mutex_init(&hdev->vport_lock);
9345 mutex_init(&hdev->vport_cfg_mutex);
9346 spin_lock_init(&hdev->fd_rule_lock);
9348 ret = hclge_pci_init(hdev);
9350 dev_err(&pdev->dev, "PCI init failed\n");
9354 /* Firmware command queue initialize */
9355 ret = hclge_cmd_queue_init(hdev);
9357 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9358 goto err_pci_uninit;
9361 /* Firmware command initialize */
9362 ret = hclge_cmd_init(hdev);
9364 goto err_cmd_uninit;
9366 ret = hclge_get_cap(hdev);
9368 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9370 goto err_cmd_uninit;
9373 ret = hclge_configure(hdev);
9375 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9376 goto err_cmd_uninit;
9379 ret = hclge_init_msi(hdev);
9381 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9382 goto err_cmd_uninit;
9385 ret = hclge_misc_irq_init(hdev);
9388 "Misc IRQ(vector0) init error, ret = %d.\n",
9390 goto err_msi_uninit;
9393 ret = hclge_alloc_tqps(hdev);
9395 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9396 goto err_msi_irq_uninit;
9399 ret = hclge_alloc_vport(hdev);
9401 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9402 goto err_msi_irq_uninit;
9405 ret = hclge_map_tqp(hdev);
9407 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9408 goto err_msi_irq_uninit;
9411 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9412 ret = hclge_mac_mdio_config(hdev);
9414 dev_err(&hdev->pdev->dev,
9415 "mdio config fail ret=%d\n", ret);
9416 goto err_msi_irq_uninit;
9420 ret = hclge_init_umv_space(hdev);
9422 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9423 goto err_mdiobus_unreg;
9426 ret = hclge_mac_init(hdev);
9428 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9429 goto err_mdiobus_unreg;
9432 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9434 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9435 goto err_mdiobus_unreg;
9438 ret = hclge_config_gro(hdev, true);
9440 goto err_mdiobus_unreg;
9442 ret = hclge_init_vlan_config(hdev);
9444 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9445 goto err_mdiobus_unreg;
9448 ret = hclge_tm_schd_init(hdev);
9450 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9451 goto err_mdiobus_unreg;
9454 hclge_rss_init_cfg(hdev);
9455 ret = hclge_rss_init_hw(hdev);
9457 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9458 goto err_mdiobus_unreg;
9461 ret = init_mgr_tbl(hdev);
9463 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9464 goto err_mdiobus_unreg;
9467 ret = hclge_init_fd_config(hdev);
9470 "fd table init fail, ret=%d\n", ret);
9471 goto err_mdiobus_unreg;
9474 INIT_KFIFO(hdev->mac_tnl_log);
9476 hclge_dcb_ops_set(hdev);
9478 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9479 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9480 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
9481 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
9483 /* Setup affinity after service timer setup because add_timer_on
9484 * is called in affinity notify.
9486 hclge_misc_affinity_setup(hdev);
9488 hclge_clear_all_event_cause(hdev);
9489 hclge_clear_resetting_state(hdev);
9491 /* Log and clear the hw errors those already occurred */
9492 hclge_handle_all_hns_hw_errors(ae_dev);
9494 /* request delayed reset for the error recovery because an immediate
9495 * global reset on a PF affecting pending initialization of other PFs
9497 if (ae_dev->hw_err_reset_req) {
9498 enum hnae3_reset_type reset_level;
9500 reset_level = hclge_get_reset_level(ae_dev,
9501 &ae_dev->hw_err_reset_req);
9502 hclge_set_def_reset_request(ae_dev, reset_level);
9503 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9506 /* Enable MISC vector(vector0) */
9507 hclge_enable_vector(&hdev->misc_vector, true);
9509 hclge_state_init(hdev);
9510 hdev->last_reset_time = jiffies;
9512 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
/* ---- error unwind labels (elided) below ---- */
9518 if (hdev->hw.mac.phydev)
9519 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9521 hclge_misc_irq_uninit(hdev);
9523 pci_free_irq_vectors(pdev);
9525 hclge_cmd_uninit(hdev);
9527 pcim_iounmap(pdev, hdev->hw.io_base);
9528 pci_clear_master(pdev);
9529 pci_release_regions(pdev);
9530 pci_disable_device(pdev);
/* Zero the cached HW statistics block (used across resets). */
9535 static void hclge_stats_clear(struct hclge_dev *hdev)
9537 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
/* Toggle MAC anti-spoof checking for one VF via the switch-param command. */
9540 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9542 return hclge_config_switch_param(hdev, vf, enable,
9543 HCLGE_SWITCH_ANTI_SPOOF_MASK);
/* Toggle VLAN anti-spoof checking for one VF via the ingress VLAN filter.
 * NOTE(review): the trailing arguments of the call are elided from view.
 */
9546 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9548 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9549 HCLGE_FILTER_FE_NIC_INGRESS_B,
/* Program both MAC and VLAN spoof checking for a VF in hardware, logging
 * which half failed.
 * NOTE(review): elided listing — the "if (ret)" guards and returns are in
 * the numbering gaps.
 */
9553 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9557 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9559 dev_err(&hdev->pdev->dev,
9560 "Set vf %d mac spoof check %s failed, ret=%d\n",
9561 vf, enable ? "on" : "off", ret);
9565 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9567 dev_err(&hdev->pdev->dev,
9568 "Set vf %d vlan spoof check %s failed, ret=%d\n",
9569 vf, enable ? "on" : "off", ret);
/* ndo_set_vf_spoofchk backend: enable/disable spoof checking for a VF.
 * Unsupported on rev 0x20 hardware; warns when the VF's VLAN or MAC table
 * is full (spoof check could then drop its legitimate traffic).
 * NOTE(review): elided listing — early returns and the vport NULL check
 * are in the numbering gaps.
 */
9574 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9577 struct hclge_vport *vport = hclge_get_vport(handle);
9578 struct hclge_dev *hdev = vport->back;
9579 u32 new_spoofchk = enable ? 1 : 0;
/* rev 0x20 silicon lacks spoof-check support */
9582 if (hdev->pdev->revision == 0x20)
9585 vport = hclge_get_vf_vport(hdev, vf);
/* no-op when the requested state matches the current one */
9589 if (vport->vf_info.spoofchk == new_spoofchk)
9592 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9593 dev_warn(&hdev->pdev->dev,
9594 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9596 else if (enable && hclge_is_umv_space_full(vport))
9597 dev_warn(&hdev->pdev->dev,
9598 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9601 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9605 vport->vf_info.spoofchk = new_spoofchk;
/* After a reset, re-program each vport's saved spoof-check state into HW.
 * Skipped entirely on rev 0x20 hardware.
 * NOTE(review): elided listing — the loop presumably advances "vport" each
 * iteration (vport++); the increment and error handling are not in view.
 */
9609 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9611 struct hclge_vport *vport = hdev->vport;
9615 if (hdev->pdev->revision == 0x20)
9618 /* resume the vf spoof check state after reset */
9619 for (i = 0; i < hdev->num_alloc_vport; i++) {
9620 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9621 vport->vf_info.spoofchk);
/* ndo_set_vf_trust backend: update the VF trusted flag. When trust is
 * revoked from a VF that had promiscuous mode on, promisc is disabled
 * (broadcast kept on non-0x20 revisions) and the VF is informed via mailbox.
 * NOTE(review): elided listing — the vport NULL check, promisc call's
 * trailing arguments, and returns are in the numbering gaps.
 */
9631 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9633 struct hclge_vport *vport = hclge_get_vport(handle);
9634 struct hclge_dev *hdev = vport->back;
9635 u32 new_trusted = enable ? 1 : 0;
9639 vport = hclge_get_vf_vport(hdev, vf);
9643 if (vport->vf_info.trusted == new_trusted)
9646 /* Disable promisc mode for VF if it is not trusted any more. */
9647 if (!enable && vport->vf_info.promisc_enable) {
/* rev 0x20 cannot keep broadcast promisc enabled here */
9648 en_bc_pmc = hdev->pdev->revision != 0x20;
9649 ret = hclge_set_vport_promisc_mode(vport, false, false,
9653 vport->vf_info.promisc_enable = 0;
9654 hclge_inform_vf_promisc_info(vport);
9657 vport->vf_info.trusted = new_trusted;
/* Reset every VF's TX rate limit to 0 (unlimited) in both the cached
 * vf_info and the TM qset shaper; failures are only logged.
 */
9662 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9667 /* reset vf rate to default value */
9668 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9669 struct hclge_vport *vport = &hdev->vport[vf];
9671 vport->vf_info.max_tx_rate = 0;
9672 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9674 dev_err(&hdev->pdev->dev,
9675 "vf%d failed to reset to default, ret=%d\n",
9676 vf - HCLGE_VF_VPORT_START_NUM, ret);
/* Validate ndo_set_vf_rate parameters: min rate must be 0 (not supported)
 * and max rate must be within [0, MAC max speed].
 * NOTE(review): elided listing — the error/success return statements are
 * in the numbering gaps.
 */
9680 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9681 int min_tx_rate, int max_tx_rate)
9683 if (min_tx_rate != 0 ||
9684 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9685 dev_err(&hdev->pdev->dev,
9686 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9687 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
/* ndo_set_vf_rate backend: validate, then program the VF's qset shaper.
 * @force bypasses the "unchanged" short-circuit (used when re-applying the
 * rate after a reset).
 * NOTE(review): elided listing — "if (ret)" guards, the vport NULL check,
 * and return statements are in the numbering gaps.
 */
9694 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9695 int min_tx_rate, int max_tx_rate, bool force)
9697 struct hclge_vport *vport = hclge_get_vport(handle);
9698 struct hclge_dev *hdev = vport->back;
9701 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9705 vport = hclge_get_vf_vport(hdev, vf);
9709 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9712 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9716 vport->vf_info.max_tx_rate = max_tx_rate;
/* After a reset, re-apply each VF's saved max_tx_rate (force=true so the
 * cached value is rewritten to HW even though it "matches"). A rate of 0
 * means unlimited, which firmware already restored, so it is skipped.
 * NOTE(review): elided listing — the vport NULL check, "continue", error
 * return and final return are in the numbering gaps.
 */
9721 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9723 struct hnae3_handle *handle = &hdev->vport->nic;
9724 struct hclge_vport *vport;
9728 /* resume the vf max_tx_rate after reset */
9729 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9730 vport = hclge_get_vf_vport(hdev, vf);
9734 /* zero means max rate, after reset, firmware already set it to
9735 * max rate, so just continue.
9737 if (!vport->vf_info.max_tx_rate)
9740 ret = hclge_set_vf_rate(handle, vf, 0,
9741 vport->vf_info.max_tx_rate, true);
9743 dev_err(&hdev->pdev->dev,
9744 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9745 vf, vport->vf_info.max_tx_rate, ret);
/* Stop every vport as part of reset recovery.
 * NOTE(review): elided listing — the loop presumably advances "vport"
 * (vport++) each iteration; not visible here.
 */
9753 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9755 struct hclge_vport *vport = hdev->vport;
9758 for (i = 0; i < hdev->num_alloc_vport; i++) {
9759 hclge_vport_stop(vport);
/* Re-initialize the device after a reset: clear stats and VLAN tables,
 * re-init the command interface, re-map TQPs, reset UMV space, and re-run
 * MAC/TSO/GRO/VLAN/TM/RSS/FD setup. HW errors that occurred during the
 * reset are logged and cleared, error interrupts are re-enabled (they are
 * disabled by a global reset), and per-vport state (stopped state, spoof
 * check, VF rates) is restored.
 * NOTE(review): elided listing — every "if (ret) return ret" guard and the
 * final return sit in the numbering gaps.
 */
9764 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9766 struct hclge_dev *hdev = ae_dev->priv;
9767 struct pci_dev *pdev = ae_dev->pdev;
9770 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9772 hclge_stats_clear(hdev);
9773 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9774 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9776 ret = hclge_cmd_init(hdev);
9778 dev_err(&pdev->dev, "Cmd queue init failed\n");
9782 ret = hclge_map_tqp(hdev);
9784 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9788 hclge_reset_umv_space(hdev);
9790 ret = hclge_mac_init(hdev);
9792 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9796 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9798 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9802 ret = hclge_config_gro(hdev, true);
9806 ret = hclge_init_vlan_config(hdev);
9808 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9812 ret = hclge_tm_init_hw(hdev, true);
9814 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9818 ret = hclge_rss_init_hw(hdev);
9820 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9824 ret = hclge_init_fd_config(hdev);
9826 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9830 /* Log and clear the hw errors those already occurred */
9831 hclge_handle_all_hns_hw_errors(ae_dev);
9833 /* Re-enable the hw error interrupts because
9834 * the interrupts get disabled on global reset.
9836 ret = hclge_config_nic_hw_error(hdev, true);
9839 "fail(%d) to re-enable NIC hw error interrupts\n",
/* RoCE RAS interrupts only matter when a RoCE client is registered */
9844 if (hdev->roce_client) {
9845 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9848 "fail(%d) to re-enable roce ras interrupts\n",
9854 hclge_reset_vport_state(hdev);
9855 ret = hclge_reset_vport_spoofchk(hdev);
9859 ret = hclge_resume_vf_rate(hdev);
9863 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
/* Remove path: undo hclge_init_ae_dev in reverse order — reset VF rates,
 * tear down affinity and state/tasks, unregister MDIO, free UMV space,
 * quiesce the MISC vector, disable all HW error interrupts, then uninit
 * the command interface, MISC IRQ and PCI, and free the per-vport tables.
 * NOTE(review): elided listing — the "if (mac->phydev)" guard before
 * mdiobus_unregister() is in the numbering gap.
 */
9869 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9871 struct hclge_dev *hdev = ae_dev->priv;
9872 struct hclge_mac *mac = &hdev->hw.mac;
9874 hclge_reset_vf_rate(hdev);
9875 hclge_misc_affinity_teardown(hdev);
9876 hclge_state_uninit(hdev);
9879 mdiobus_unregister(mac->mdio_bus);
9881 hclge_uninit_umv_space(hdev);
9883 /* Disable MISC vector(vector0) */
9884 hclge_enable_vector(&hdev->misc_vector, false);
/* make sure no MISC IRQ handler is still running before uninit */
9885 synchronize_irq(hdev->misc_vector.vector_irq);
9887 /* Disable all hw interrupts */
9888 hclge_config_mac_tnl_int(hdev, false);
9889 hclge_config_nic_hw_error(hdev, false);
9890 hclge_config_rocee_ras_interrupt(hdev, false);
9892 hclge_cmd_uninit(hdev);
9893 hclge_misc_irq_uninit(hdev);
9894 hclge_pci_uninit(hdev);
9895 mutex_destroy(&hdev->vport_lock);
9896 hclge_uninit_vport_mac_table(hdev);
9897 hclge_uninit_vport_vlan_table(hdev);
9898 mutex_destroy(&hdev->vport_cfg_mutex);
9899 ae_dev->priv = NULL;
/* Maximum combined channels reportable to ethtool: bounded by both the HW
 * RSS size limit and the TQPs available per TC on this vport.
 */
9902 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9904 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9905 struct hclge_vport *vport = hclge_get_vport(handle);
9906 struct hclge_dev *hdev = vport->back;
9908 return min_t(u32, hdev->rss_size_max,
9909 vport->alloc_tqps / kinfo->num_tc);
/* ethtool .get_channels: report max/current combined queues; other_count=1
 * accounts for the MISC vector.
 */
9912 static void hclge_get_channels(struct hnae3_handle *handle,
9913 struct ethtool_channels *ch)
9915 ch->max_combined = hclge_get_max_channels(handle);
9916 ch->other_count = 1;
9918 ch->combined_count = handle->kinfo.rss_size;
/* Report the vport's allocated TQP count and the HW RSS size limit. */
9921 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9922 u16 *alloc_tqps, u16 *max_rss_size)
9924 struct hclge_vport *vport = hclge_get_vport(handle);
9925 struct hclge_dev *hdev = vport->back;
9927 *alloc_tqps = vport->alloc_tqps;
9928 *max_rss_size = hdev->rss_size_max;
/* ethtool .set_channels: change the number of queue pairs. Updates the TM
 * vport mapping, reprograms per-TC RSS mode (size rounded up to a power of
 * two), and — unless the user configured the RSS indirection table — rebuilds
 * the indirection table round-robin over the new rss_size.
 * NOTE(review): elided listing — "if (ret)" guards, the kcalloc NULL check,
 * the out/free labels and final return are in the numbering gaps.
 */
9931 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9932 bool rxfh_configured)
9934 struct hclge_vport *vport = hclge_get_vport(handle);
9935 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9936 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9937 struct hclge_dev *hdev = vport->back;
9938 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
/* remember old values for the change log at the end */
9939 u16 cur_rss_size = kinfo->rss_size;
9940 u16 cur_tqps = kinfo->num_tqps;
9941 u16 tc_valid[HCLGE_MAX_TC_NUM];
9947 kinfo->req_rss_size = new_tqps_num;
9949 ret = hclge_tm_vport_map_update(hdev);
9951 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
/* HW wants the RSS TC size as log2 of a power-of-two rounded rss_size */
9955 roundup_size = roundup_pow_of_two(kinfo->rss_size);
9956 roundup_size = ilog2(roundup_size);
9957 /* Set the RSS TC mode according to the new RSS size */
9958 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9961 if (!(hdev->hw_tc_map & BIT(i)))
9965 tc_size[i] = roundup_size;
9966 tc_offset[i] = kinfo->rss_size * i;
9968 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9972 /* RSS indirection table has been configuared by user */
9973 if (rxfh_configured)
9976 /* Reinitializes the rss indirect table according to the new RSS size */
9977 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9981 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9982 rss_indir[i] = i % kinfo->rss_size;
9984 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9986 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9993 dev_info(&hdev->pdev->dev,
9994 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
9995 cur_rss_size, kinfo->rss_size,
9996 cur_tqps, kinfo->rss_size * kinfo->num_tc);
/* Query firmware for the number of 32-bit and 64-bit dump registers.
 * Outputs the two counts; a total is also computed (use elided from view).
 * NOTE(review): elided listing — the error return and final return are in
 * the numbering gaps.
 */
10001 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10002 u32 *regs_num_64_bit)
10004 struct hclge_desc desc;
10008 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10009 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10011 dev_err(&hdev->pdev->dev,
10012 "Query register number cmd failed, ret = %d.\n", ret);
10016 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10017 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10019 total_num = *regs_num_32_bit + *regs_num_64_bit;
/* Read @regs_num 32-bit dump registers from firmware into @data. The first
 * descriptor reserves HCLGE_32_BIT_DESC_NODATA_LEN words of header, so it
 * carries fewer values than the subsequent descriptors.
 * NOTE(review): elided listing — the regs_num==0 early return, kcalloc NULL
 * check, the inner-loop bounds check against regs_num, kfree and returns
 * are in the numbering gaps.
 */
10026 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10029 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10030 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10032 struct hclge_desc *desc;
10033 u32 *reg_val = data;
10043 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10044 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10045 HCLGE_32_BIT_REG_RTN_DATANUM);
10046 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10050 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10051 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10053 dev_err(&hdev->pdev->dev,
10054 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10059 for (i = 0; i < cmd_num; i++) {
/* first descriptor: skip the nodata header words */
10061 desc_data = (__le32 *)(&desc[i].data[0]);
10062 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10064 desc_data = (__le32 *)(&desc[i]);
10065 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10067 for (k = 0; k < n; k++) {
10068 *reg_val++ = le32_to_cpu(*desc_data++);
/* Read @regs_num 64-bit dump registers from firmware into @data; mirrors
 * hclge_get_32_bit_regs but with 64-bit payload words and a 1-word header
 * reserved in the first descriptor.
 * NOTE(review): elided listing — the regs_num==0 early return, kcalloc NULL
 * check, the inner-loop bounds check, kfree and returns are in the
 * numbering gaps.
 */
10080 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10083 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10084 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10086 struct hclge_desc *desc;
10087 u64 *reg_val = data;
10097 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10098 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10099 HCLGE_64_BIT_REG_RTN_DATANUM);
10100 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10104 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10105 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10107 dev_err(&hdev->pdev->dev,
10108 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10113 for (i = 0; i < cmd_num; i++) {
/* first descriptor: skip the nodata header word */
10115 desc_data = (__le64 *)(&desc[i].data[0]);
10116 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10118 desc_data = (__le64 *)(&desc[i]);
10119 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10121 for (k = 0; k < n; k++) {
10122 *reg_val++ = le64_to_cpu(*desc_data++);
/* Constants for the register-dump formatting below: dumps are emitted in
 * lines of REG_NUM_PER_LINE 32-bit words, padded with SEPARATOR_VALUE
 * markers so each section ends on a line boundary.
 */
10134 #define MAX_SEPARATE_NUM 4
10135 #define SEPARATOR_VALUE 0xFDFCFBFA
10136 #define REG_NUM_PER_LINE 4
10137 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10138 #define REG_SEPARATOR_LINE 1
10139 #define REG_NUM_REMAIN_MASK 3
10140 #define BD_LIST_MAX_NUM 30
/* Build and send a 4-descriptor chain querying the DFX BD numbers; the
 * first three descriptors carry the NEXT flag to link the chain.
 * Caller must provide at least 4 descriptors in @desc.
 */
10142 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10144 /*prepare 4 commands to query DFX BD number*/
10145 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10146 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10147 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10148 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10149 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10150 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10151 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10153 return hclge_cmd_send(&hdev->hw, desc, 4);
/* Fill @bd_num_list with the BD count of each DFX register type, using the
 * per-type offsets in hclge_dfx_bd_offset_list to index into the 4-desc
 * query response.
 * NOTE(review): elided listing — the signature's remaining parameters and
 * return statements are in the numbering gaps.
 */
10156 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10160 #define HCLGE_DFX_REG_BD_NUM 4
10162 u32 entries_per_desc, desc_index, index, offset, i;
10163 struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
10166 ret = hclge_query_bd_num_cmd_send(hdev, desc);
10168 dev_err(&hdev->pdev->dev,
10169 "Get dfx bd num fail, status is %d.\n", ret);
10173 entries_per_desc = ARRAY_SIZE(desc[0].data);
10174 for (i = 0; i < type_num; i++) {
/* map the flat offset to (descriptor, data-word) coordinates */
10175 offset = hclge_dfx_bd_offset_list[i];
10176 index = offset % entries_per_desc;
10177 desc_index = offset / entries_per_desc;
10178 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
/* Prepare a chained read of @bd_num descriptors for DFX opcode @cmd and
 * send it; all but the last descriptor carry the NEXT flag.
 * NOTE(review): elided listing — "desc++" in the loop and the return
 * statement are in the numbering gaps.
 */
10184 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10185 struct hclge_desc *desc_src, int bd_num,
10186 enum hclge_opcode_type cmd)
10188 struct hclge_desc *desc = desc_src;
10191 hclge_cmd_setup_basic_desc(desc, cmd, true);
10192 for (i = 0; i < bd_num - 1; i++) {
10193 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10195 hclge_cmd_setup_basic_desc(desc, cmd, true);
10199 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10201 dev_err(&hdev->pdev->dev,
10202 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
/* Copy all data words out of @bd_num response descriptors into @reg and
 * pad with SEPARATOR_VALUE up to a REG_NUM_PER_LINE boundary.
 * Returns the number of u32 values written (data + separators).
 */
10208 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10211 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10212 struct hclge_desc *desc = desc_src;
10215 entries_per_desc = ARRAY_SIZE(desc->data);
10216 reg_num = entries_per_desc * bd_num;
10217 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10218 for (i = 0; i < reg_num; i++) {
10219 index = i % entries_per_desc;
10220 desc_index = i / entries_per_desc;
10221 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10223 for (i = 0; i < separator_num; i++)
10224 *reg++ = SEPARATOR_VALUE;
10226 return reg_num + separator_num;
/* Compute the total DFX register dump length in bytes: each type's data
 * length rounded up by one extra REG_LEN_PER_LINE (separator line).
 * NOTE(review): elided listing — "*len = 0" style initialization and the
 * return statements are in the numbering gaps.
 */
10229 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10231 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10232 int data_len_per_desc, data_len, bd_num, i;
10233 int bd_num_list[BD_LIST_MAX_NUM];
10236 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10238 dev_err(&hdev->pdev->dev,
10239 "Get dfx reg bd num fail, status is %d.\n", ret);
10243 data_len_per_desc = sizeof_field(struct hclge_desc, data);
10245 for (i = 0; i < dfx_reg_type_num; i++) {
10246 bd_num = bd_num_list[i];
10247 data_len = data_len_per_desc * bd_num;
/* "+1" accounts for the per-type separator line */
10248 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
/* Dump all DFX register types into @data: query per-type BD counts, size a
 * descriptor buffer for the largest type, then read and flatten each type
 * via hclge_dfx_reg_fetch_data.
 * NOTE(review): elided listing — the kzalloc NULL check, "reg = data", the
 * kfree and return statements are in the numbering gaps.
 */
10254 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10256 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10257 int bd_num, bd_num_max, buf_len, i;
10258 int bd_num_list[BD_LIST_MAX_NUM];
10259 struct hclge_desc *desc_src;
10263 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10265 dev_err(&hdev->pdev->dev,
10266 "Get dfx reg bd num fail, status is %d.\n", ret);
/* buffer must fit the type with the most BDs; it is reused per type */
10270 bd_num_max = bd_num_list[0];
10271 for (i = 1; i < dfx_reg_type_num; i++)
10272 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10274 buf_len = sizeof(*desc_src) * bd_num_max;
10275 desc_src = kzalloc(buf_len, GFP_KERNEL);
10277 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
10281 for (i = 0; i < dfx_reg_type_num; i++) {
10282 bd_num = bd_num_list[i];
10283 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10284 hclge_dfx_reg_opcode_list[i]);
10286 dev_err(&hdev->pdev->dev,
10287 "Get dfx reg fail, status is %d.\n", ret);
10291 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
/* Read the per-PF register dump straight from PCIe register space into
 * @data: command-queue regs, common regs, per-TQP ring regs (stride
 * HCLGE_RING_REG_OFFSET) and per-vector interrupt regs (stride
 * HCLGE_RING_INT_REG_OFFSET), each section padded with separators up to a
 * MAX_SEPARATE_NUM boundary. Returns the number of u32 values written.
 * NOTE(review): elided listing — "u32 *reg = data" and the local
 * data_num_sum declaration are in the numbering gaps.
 */
10298 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10299 struct hnae3_knic_private_info *kinfo)
10301 #define HCLGE_RING_REG_OFFSET 0x200
10302 #define HCLGE_RING_INT_REG_OFFSET 0x4
10304 int i, j, reg_num, separator_num;
10308 /* fetching per-PF registers valus from PF PCIe register space */
10309 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10310 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10311 for (i = 0; i < reg_num; i++)
10312 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10313 for (i = 0; i < separator_num; i++)
10314 *reg++ = SEPARATOR_VALUE;
10315 data_num_sum = reg_num + separator_num;
10317 reg_num = ARRAY_SIZE(common_reg_addr_list);
10318 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10319 for (i = 0; i < reg_num; i++)
10320 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10321 for (i = 0; i < separator_num; i++)
10322 *reg++ = SEPARATOR_VALUE;
10323 data_num_sum += reg_num + separator_num;
/* one block of ring registers per TQP */
10325 reg_num = ARRAY_SIZE(ring_reg_addr_list);
10326 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10327 for (j = 0; j < kinfo->num_tqps; j++) {
10328 for (i = 0; i < reg_num; i++)
10329 *reg++ = hclge_read_dev(&hdev->hw,
10330 ring_reg_addr_list[i] +
10331 HCLGE_RING_REG_OFFSET * j);
10332 for (i = 0; i < separator_num; i++)
10333 *reg++ = SEPARATOR_VALUE;
10335 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
/* one block of TQP interrupt registers per used MSI-X vector (minus MISC) */
10337 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10338 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10339 for (j = 0; j < hdev->num_msi_used - 1; j++) {
10340 for (i = 0; i < reg_num; i++)
10341 *reg++ = hclge_read_dev(&hdev->hw,
10342 tqp_intr_reg_addr_list[i] +
10343 HCLGE_RING_INT_REG_OFFSET * j);
10344 for (i = 0; i < separator_num; i++)
10345 *reg++ = SEPARATOR_VALUE;
10347 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10349 return data_num_sum;
10352 static int hclge_get_regs_len(struct hnae3_handle *handle)
10354 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10355 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10356 struct hclge_vport *vport = hclge_get_vport(handle);
10357 struct hclge_dev *hdev = vport->back;
10358 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10359 int regs_lines_32_bit, regs_lines_64_bit;
10362 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
10364 dev_err(&hdev->pdev->dev,
10365 "Get register number failed, ret = %d.\n", ret);
10369 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10371 dev_err(&hdev->pdev->dev,
10372 "Get dfx reg len failed, ret = %d.\n", ret);
10376 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10377 REG_SEPARATOR_LINE;
10378 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10379 REG_SEPARATOR_LINE;
10380 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10381 REG_SEPARATOR_LINE;
10382 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10383 REG_SEPARATOR_LINE;
10384 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10385 REG_SEPARATOR_LINE;
10386 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10387 REG_SEPARATOR_LINE;
10389 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10390 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10391 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10394 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10397 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10398 struct hclge_vport *vport = hclge_get_vport(handle);
10399 struct hclge_dev *hdev = vport->back;
10400 u32 regs_num_32_bit, regs_num_64_bit;
10401 int i, reg_num, separator_num, ret;
10404 *version = hdev->fw_version;
10406 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
10408 dev_err(&hdev->pdev->dev,
10409 "Get register number failed, ret = %d.\n", ret);
10413 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10415 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10417 dev_err(&hdev->pdev->dev,
10418 "Get 32 bit register failed, ret = %d.\n", ret);
10421 reg_num = regs_num_32_bit;
10423 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10424 for (i = 0; i < separator_num; i++)
10425 *reg++ = SEPARATOR_VALUE;
10427 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10429 dev_err(&hdev->pdev->dev,
10430 "Get 64 bit register failed, ret = %d.\n", ret);
10433 reg_num = regs_num_64_bit * 2;
10435 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10436 for (i = 0; i < separator_num; i++)
10437 *reg++ = SEPARATOR_VALUE;
10439 ret = hclge_get_dfx_reg(hdev, reg);
10441 dev_err(&hdev->pdev->dev,
10442 "Get dfx register failed, ret = %d.\n", ret);
10445 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10447 struct hclge_set_led_state_cmd *req;
10448 struct hclge_desc desc;
10451 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10453 req = (struct hclge_set_led_state_cmd *)desc.data;
10454 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10455 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10457 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10459 dev_err(&hdev->pdev->dev,
10460 "Send set led state cmd error, ret =%d\n", ret);
/* Locate-LED states accepted by hclge_set_led_status() */
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
10471 static int hclge_set_led_id(struct hnae3_handle *handle,
10472 enum ethtool_phys_id_state status)
10474 struct hclge_vport *vport = hclge_get_vport(handle);
10475 struct hclge_dev *hdev = vport->back;
10478 case ETHTOOL_ID_ACTIVE:
10479 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10480 case ETHTOOL_ID_INACTIVE:
10481 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10487 static void hclge_get_link_mode(struct hnae3_handle *handle,
10488 unsigned long *supported,
10489 unsigned long *advertising)
10491 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10492 struct hclge_vport *vport = hclge_get_vport(handle);
10493 struct hclge_dev *hdev = vport->back;
10494 unsigned int idx = 0;
10496 for (; idx < size; idx++) {
10497 supported[idx] = hdev->hw.mac.supported[idx];
10498 advertising[idx] = hdev->hw.mac.advertising[idx];
10502 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10504 struct hclge_vport *vport = hclge_get_vport(handle);
10505 struct hclge_dev *hdev = vport->back;
10507 return hclge_config_gro(hdev, enable);
10510 static const struct hnae3_ae_ops hclge_ops = {
10511 .init_ae_dev = hclge_init_ae_dev,
10512 .uninit_ae_dev = hclge_uninit_ae_dev,
10513 .flr_prepare = hclge_flr_prepare,
10514 .flr_done = hclge_flr_done,
10515 .init_client_instance = hclge_init_client_instance,
10516 .uninit_client_instance = hclge_uninit_client_instance,
10517 .map_ring_to_vector = hclge_map_ring_to_vector,
10518 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10519 .get_vector = hclge_get_vector,
10520 .put_vector = hclge_put_vector,
10521 .set_promisc_mode = hclge_set_promisc_mode,
10522 .set_loopback = hclge_set_loopback,
10523 .start = hclge_ae_start,
10524 .stop = hclge_ae_stop,
10525 .client_start = hclge_client_start,
10526 .client_stop = hclge_client_stop,
10527 .get_status = hclge_get_status,
10528 .get_ksettings_an_result = hclge_get_ksettings_an_result,
10529 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10530 .get_media_type = hclge_get_media_type,
10531 .check_port_speed = hclge_check_port_speed,
10532 .get_fec = hclge_get_fec,
10533 .set_fec = hclge_set_fec,
10534 .get_rss_key_size = hclge_get_rss_key_size,
10535 .get_rss_indir_size = hclge_get_rss_indir_size,
10536 .get_rss = hclge_get_rss,
10537 .set_rss = hclge_set_rss,
10538 .set_rss_tuple = hclge_set_rss_tuple,
10539 .get_rss_tuple = hclge_get_rss_tuple,
10540 .get_tc_size = hclge_get_tc_size,
10541 .get_mac_addr = hclge_get_mac_addr,
10542 .set_mac_addr = hclge_set_mac_addr,
10543 .do_ioctl = hclge_do_ioctl,
10544 .add_uc_addr = hclge_add_uc_addr,
10545 .rm_uc_addr = hclge_rm_uc_addr,
10546 .add_mc_addr = hclge_add_mc_addr,
10547 .rm_mc_addr = hclge_rm_mc_addr,
10548 .set_autoneg = hclge_set_autoneg,
10549 .get_autoneg = hclge_get_autoneg,
10550 .restart_autoneg = hclge_restart_autoneg,
10551 .halt_autoneg = hclge_halt_autoneg,
10552 .get_pauseparam = hclge_get_pauseparam,
10553 .set_pauseparam = hclge_set_pauseparam,
10554 .set_mtu = hclge_set_mtu,
10555 .reset_queue = hclge_reset_tqp,
10556 .get_stats = hclge_get_stats,
10557 .get_mac_stats = hclge_get_mac_stat,
10558 .update_stats = hclge_update_stats,
10559 .get_strings = hclge_get_strings,
10560 .get_sset_count = hclge_get_sset_count,
10561 .get_fw_version = hclge_get_fw_version,
10562 .get_mdix_mode = hclge_get_mdix_mode,
10563 .enable_vlan_filter = hclge_enable_vlan_filter,
10564 .set_vlan_filter = hclge_set_vlan_filter,
10565 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10566 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10567 .reset_event = hclge_reset_event,
10568 .get_reset_level = hclge_get_reset_level,
10569 .set_default_reset_request = hclge_set_def_reset_request,
10570 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10571 .set_channels = hclge_set_channels,
10572 .get_channels = hclge_get_channels,
10573 .get_regs_len = hclge_get_regs_len,
10574 .get_regs = hclge_get_regs,
10575 .set_led_id = hclge_set_led_id,
10576 .get_link_mode = hclge_get_link_mode,
10577 .add_fd_entry = hclge_add_fd_entry,
10578 .del_fd_entry = hclge_del_fd_entry,
10579 .del_all_fd_entries = hclge_del_all_fd_entries,
10580 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10581 .get_fd_rule_info = hclge_get_fd_rule_info,
10582 .get_fd_all_rules = hclge_get_all_rules,
10583 .restore_fd_rules = hclge_restore_fd_entries,
10584 .enable_fd = hclge_enable_fd,
10585 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10586 .dbg_run_cmd = hclge_dbg_run_cmd,
10587 .handle_hw_ras_error = hclge_handle_hw_ras_error,
10588 .get_hw_reset_stat = hclge_get_hw_reset_stat,
10589 .ae_dev_resetting = hclge_ae_dev_resetting,
10590 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10591 .set_gro_en = hclge_gro_en,
10592 .get_global_queue_id = hclge_covert_handle_qid_global,
10593 .set_timer_task = hclge_set_timer_task,
10594 .mac_connect_phy = hclge_mac_connect_phy,
10595 .mac_disconnect_phy = hclge_mac_disconnect_phy,
10596 .restore_vlan_table = hclge_restore_vlan_table,
10597 .get_vf_config = hclge_get_vf_config,
10598 .set_vf_link_state = hclge_set_vf_link_state,
10599 .set_vf_spoofchk = hclge_set_vf_spoofchk,
10600 .set_vf_trust = hclge_set_vf_trust,
10601 .set_vf_rate = hclge_set_vf_rate,
10602 .set_vf_mac = hclge_set_vf_mac,
10605 static struct hnae3_ae_algo ae_algo = {
10607 .pdev_id_table = ae_algo_pci_tbl,
10610 static int hclge_init(void)
10612 pr_info("%s is initializing\n", HCLGE_NAME);
10614 hnae3_register_ae_algo(&ae_algo);
10619 static void hclge_exit(void)
10621 hnae3_unregister_ae_algo(&ae_algo);
/* Kernel module registration and metadata. */
module_init(hclge_init);
module_exit(hclge_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);