// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
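/* Usage sketch (illustrative, the variable name is ours): the two macros
 * compose into a table-driven field read, e.g.
 *
 *     u64 pause = HCLGE_STATS_READ(&hdev->mac_stats,
 *              HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * which is equivalent to reading hdev->mac_stats.mac_tx_mac_pause_num
 * directly; the byte-offset indirection is what lets g_mac_stats_string
 * below drive the lookup from a table.
 */
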
#define HCLGE_BUF_SIZE_UNIT 256U
#define HCLGE_BUF_MUL_BY 2
#define HCLGE_BUF_DIV_BY 2
#define NEED_RESERVE_TC_NUM 2
#define BUF_MAX_PERCENT 100
#define BUF_RESERVE_PERCENT 90

#define HCLGE_RESET_MAX_FAIL_CNT 5
#define HCLGE_RESET_SYNC_TIME 100
#define HCLGE_PF_RESET_SYNC_TIME 20
#define HCLGE_PF_RESET_SYNC_CNT 1500
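/* Reading of the constants above (inferred from the names and call sites,
 * not a documented contract): the *_SYNC_TIME values are per-iteration
 * waits in milliseconds and *_SYNC_CNT is an iteration bound, so the PF
 * reset handshake polls for roughly 20 ms * 1500 = 30 s at most.
 */
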
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET 1
#define HCLGE_DFX_SSU_0_BD_OFFSET 2
#define HCLGE_DFX_SSU_1_BD_OFFSET 3
#define HCLGE_DFX_IGU_BD_OFFSET 4
#define HCLGE_DFX_RPU_0_BD_OFFSET 5
#define HCLGE_DFX_RPU_1_BD_OFFSET 6
#define HCLGE_DFX_NCSI_BD_OFFSET 7
#define HCLGE_DFX_RTC_BD_OFFSET 8
#define HCLGE_DFX_PPP_BD_OFFSET 9
#define HCLGE_DFX_RCB_BD_OFFSET 10
#define HCLGE_DFX_TQP_BD_OFFSET 11
#define HCLGE_DFX_SSU_2_BD_OFFSET 12

#define HCLGE_LINK_STATUS_MS 10

#define HCLGE_VF_VPORT_START_NUM 1

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
                               u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
                                                   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
    {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
    /* required last entry */
    {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
                                         HCLGE_CMDQ_TX_ADDR_H_REG,
                                         HCLGE_CMDQ_TX_DEPTH_REG,
                                         HCLGE_CMDQ_TX_TAIL_REG,
                                         HCLGE_CMDQ_TX_HEAD_REG,
                                         HCLGE_CMDQ_RX_ADDR_L_REG,
                                         HCLGE_CMDQ_RX_ADDR_H_REG,
                                         HCLGE_CMDQ_RX_DEPTH_REG,
                                         HCLGE_CMDQ_RX_TAIL_REG,
                                         HCLGE_CMDQ_RX_HEAD_REG,
                                         HCLGE_VECTOR0_CMDQ_SRC_REG,
                                         HCLGE_CMDQ_INTR_STS_REG,
                                         HCLGE_CMDQ_INTR_EN_REG,
                                         HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
                                           HCLGE_VECTOR0_OTER_EN_REG,
                                           HCLGE_MISC_RESET_STS_REG,
                                           HCLGE_MISC_VECTOR_INT_STS,
                                           HCLGE_GLOBAL_RESET_REG,
                                           HCLGE_FUN_RST_ING,
                                           HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
                                         HCLGE_RING_RX_ADDR_H_REG,
                                         HCLGE_RING_RX_BD_NUM_REG,
                                         HCLGE_RING_RX_BD_LENGTH_REG,
                                         HCLGE_RING_RX_MERGE_EN_REG,
                                         HCLGE_RING_RX_TAIL_REG,
                                         HCLGE_RING_RX_HEAD_REG,
                                         HCLGE_RING_RX_FBD_NUM_REG,
                                         HCLGE_RING_RX_OFFSET_REG,
                                         HCLGE_RING_RX_FBD_OFFSET_REG,
                                         HCLGE_RING_RX_STASH_REG,
                                         HCLGE_RING_RX_BD_ERR_REG,
                                         HCLGE_RING_TX_ADDR_L_REG,
                                         HCLGE_RING_TX_ADDR_H_REG,
                                         HCLGE_RING_TX_BD_NUM_REG,
                                         HCLGE_RING_TX_PRIORITY_REG,
                                         HCLGE_RING_TX_TC_REG,
                                         HCLGE_RING_TX_MERGE_EN_REG,
                                         HCLGE_RING_TX_TAIL_REG,
                                         HCLGE_RING_TX_HEAD_REG,
                                         HCLGE_RING_TX_FBD_NUM_REG,
                                         HCLGE_RING_TX_OFFSET_REG,
                                         HCLGE_RING_TX_EBD_NUM_REG,
                                         HCLGE_RING_TX_EBD_OFFSET_REG,
                                         HCLGE_RING_TX_BD_ERR_REG,
                                         HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
                                             HCLGE_TQP_INTR_GL0_REG,
                                             HCLGE_TQP_INTR_GL1_REG,
                                             HCLGE_TQP_INTR_GL2_REG,
                                             HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
    "App    Loopback test",
    "Serdes serial Loopback test",
    "Serdes parallel Loopback test",
    "Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
    {"mac_tx_mac_pause_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
    {"mac_rx_mac_pause_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
    {"mac_tx_control_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
    {"mac_rx_control_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
    {"mac_tx_pfc_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
    {"mac_tx_pfc_pri0_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
    {"mac_tx_pfc_pri1_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
    {"mac_tx_pfc_pri2_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
    {"mac_tx_pfc_pri3_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
    {"mac_tx_pfc_pri4_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
    {"mac_tx_pfc_pri5_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
    {"mac_tx_pfc_pri6_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
    {"mac_tx_pfc_pri7_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
    {"mac_rx_pfc_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
    {"mac_rx_pfc_pri0_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
    {"mac_rx_pfc_pri1_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
    {"mac_rx_pfc_pri2_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
    {"mac_rx_pfc_pri3_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
    {"mac_rx_pfc_pri4_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
    {"mac_rx_pfc_pri5_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
    {"mac_rx_pfc_pri6_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
    {"mac_rx_pfc_pri7_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
    {"mac_tx_total_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
    {"mac_tx_total_oct_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
    {"mac_tx_good_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
    {"mac_tx_bad_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
    {"mac_tx_good_oct_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
    {"mac_tx_bad_oct_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
    {"mac_tx_uni_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
    {"mac_tx_multi_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
    {"mac_tx_broad_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
    {"mac_tx_undersize_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
    {"mac_tx_oversize_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
    {"mac_tx_64_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
    {"mac_tx_65_127_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
    {"mac_tx_128_255_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
    {"mac_tx_256_511_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
    {"mac_tx_512_1023_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
    {"mac_tx_1024_1518_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
    {"mac_tx_1519_2047_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
    {"mac_tx_2048_4095_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
    {"mac_tx_4096_8191_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
    {"mac_tx_8192_9216_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
    {"mac_tx_9217_12287_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
    {"mac_tx_12288_16383_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
    {"mac_tx_1519_max_good_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
    {"mac_tx_1519_max_bad_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
    {"mac_rx_total_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
    {"mac_rx_total_oct_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
    {"mac_rx_good_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
    {"mac_rx_bad_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
    {"mac_rx_good_oct_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
    {"mac_rx_bad_oct_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
    {"mac_rx_uni_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
    {"mac_rx_multi_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
    {"mac_rx_broad_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
    {"mac_rx_undersize_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
    {"mac_rx_oversize_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
    {"mac_rx_64_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
    {"mac_rx_65_127_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
    {"mac_rx_128_255_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
    {"mac_rx_256_511_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
    {"mac_rx_512_1023_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
    {"mac_rx_1024_1518_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
    {"mac_rx_1519_2047_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
    {"mac_rx_2048_4095_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
    {"mac_rx_4096_8191_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
    {"mac_rx_8192_9216_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
    {"mac_rx_9217_12287_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
    {"mac_rx_12288_16383_oct_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
    {"mac_rx_1519_max_good_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
    {"mac_rx_1519_max_bad_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

    {"mac_tx_fragment_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
    {"mac_tx_undermin_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
    {"mac_tx_jabber_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
    {"mac_tx_err_all_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
    {"mac_tx_from_app_good_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
    {"mac_tx_from_app_bad_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
    {"mac_rx_fragment_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
    {"mac_rx_undermin_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
    {"mac_rx_jabber_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
    {"mac_rx_fcs_err_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
    {"mac_rx_send_app_good_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
    {"mac_rx_send_app_bad_pkt_num",
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
    {
        .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
        .ethter_type = cpu_to_le16(ETH_P_LLDP),
        .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
        .i_port_bitmap = 0x1,
    },
};

static const u8 hclge_hash_key[] = {
    0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
    0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
    0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
    0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
    0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
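/* This 40-byte array matches the well-known default Toeplitz hash key
 * published in Microsoft's RSS documentation and reused by many NIC
 * drivers; it serves as the default RSS hash key here.
 */
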
static const u32 hclge_dfx_bd_offset_list[] = {
    HCLGE_DFX_BIOS_BD_OFFSET,
    HCLGE_DFX_SSU_0_BD_OFFSET,
    HCLGE_DFX_SSU_1_BD_OFFSET,
    HCLGE_DFX_IGU_BD_OFFSET,
    HCLGE_DFX_RPU_0_BD_OFFSET,
    HCLGE_DFX_RPU_1_BD_OFFSET,
    HCLGE_DFX_NCSI_BD_OFFSET,
    HCLGE_DFX_RTC_BD_OFFSET,
    HCLGE_DFX_PPP_BD_OFFSET,
    HCLGE_DFX_RCB_BD_OFFSET,
    HCLGE_DFX_TQP_BD_OFFSET,
    HCLGE_DFX_SSU_2_BD_OFFSET
};

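/* Index i of hclge_dfx_bd_offset_list above pairs with index i of
 * hclge_dfx_reg_opcode_list below (BIOS with BIOS, SSU_0 with SSU_REG_0,
 * and so on), so the two arrays must be kept in the same order.
 */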
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
    HCLGE_OPC_DFX_BIOS_COMMON_REG,
    HCLGE_OPC_DFX_SSU_REG_0,
    HCLGE_OPC_DFX_SSU_REG_1,
    HCLGE_OPC_DFX_IGU_EGU_REG,
    HCLGE_OPC_DFX_RPU_REG_0,
    HCLGE_OPC_DFX_RPU_REG_1,
    HCLGE_OPC_DFX_NCSI_REG,
    HCLGE_OPC_DFX_RTC_REG,
    HCLGE_OPC_DFX_PPP_REG,
    HCLGE_OPC_DFX_RCB_REG,
    HCLGE_OPC_DFX_TQP_REG,
    HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
    { PACKET_TYPE_ID, 6},
    { IP_FRAGEMENT, 1},
    { ROCE_TYPE, 1},
    { NEXT_KEY, 5},
    { VLAN_NUMBER, 2},
    { SRC_VPORT, 12},
    { DST_VPORT, 12},
    { TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
    { OUTER_DST_MAC, 48},
    { OUTER_SRC_MAC, 48},
    { OUTER_VLAN_TAG_FST, 16},
    { OUTER_VLAN_TAG_SEC, 16},
    { OUTER_ETH_TYPE, 16},
    { OUTER_L2_RSV, 16},
    { OUTER_IP_TOS, 8},
    { OUTER_IP_PROTO, 8},
    { OUTER_SRC_IP, 32},
    { OUTER_DST_IP, 32},
    { OUTER_L3_RSV, 16},
    { OUTER_SRC_PORT, 16},
    { OUTER_DST_PORT, 16},
    { OUTER_L4_RSV, 32},
    { OUTER_TUN_VNI, 24},
    { OUTER_TUN_FLOW_ID, 8},
    { INNER_DST_MAC, 48},
    { INNER_SRC_MAC, 48},
    { INNER_VLAN_TAG_FST, 16},
    { INNER_VLAN_TAG_SEC, 16},
    { INNER_ETH_TYPE, 16},
    { INNER_L2_RSV, 16},
    { INNER_IP_TOS, 8},
    { INNER_IP_PROTO, 8},
    { INNER_SRC_IP, 32},
    { INNER_DST_IP, 32},
    { INNER_L3_RSV, 16},
    { INNER_SRC_PORT, 16},
    { INNER_DST_PORT, 16},
    { INNER_L4_RSV, 32},
};

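/* Query MAC statistics with the legacy fixed-layout command: firmware
 * returns a fixed train of HCLGE_MAC_CMD_NUM descriptors and every counter
 * is accumulated into hdev->mac_stats in declaration order.
 */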
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

    u64 *data = (u64 *)(&hdev->mac_stats);
    struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
    __le64 *desc_data;
    int i, k, n;
    int ret;

    hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
    ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
    if (ret) {
        dev_err(&hdev->pdev->dev,
                "Get MAC pkt stats fail, status = %d.\n", ret);
        return ret;
    }

    for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
        /* for special opcode 0032, only the first desc has the head */
        if (unlikely(i == 0)) {
            desc_data = (__le64 *)(&desc[i].data[0]);
            n = HCLGE_RD_FIRST_STATS_NUM;
        } else {
            desc_data = (__le64 *)(&desc[i]);
            n = HCLGE_RD_OTHER_STATS_NUM;
        }

        for (k = 0; k < n; k++) {
            *data += le64_to_cpu(*desc_data);
            data++;
            desc_data++;
        }
    }

    return 0;
}

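/* Same accumulation as the defective variant above, but for firmware that
 * reports how many registers it exposes: the caller passes the descriptor
 * count obtained from hclge_mac_query_reg_num() and the descriptors are
 * allocated dynamically.
 */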
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
    u64 *data = (u64 *)(&hdev->mac_stats);
    struct hclge_desc *desc;
    __le64 *desc_data;
    u16 i, k, n;
    int ret;

    /* This may be called inside atomic sections,
     * so GFP_ATOMIC is more suitable here
     */
    desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
    if (!desc)
        return -ENOMEM;

    hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
    ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
    if (ret) {
        kfree(desc);
        return ret;
    }

    for (i = 0; i < desc_num; i++) {
        /* for special opcode 0034, only the first desc has the head */
        if (i == 0) {
            desc_data = (__le64 *)(&desc[i].data[0]);
            n = HCLGE_RD_FIRST_STATS_NUM;
        } else {
            desc_data = (__le64 *)(&desc[i]);
            n = HCLGE_RD_OTHER_STATS_NUM;
        }

        for (k = 0; k < n; k++) {
            *data += le64_to_cpu(*desc_data);
            data++;
            desc_data++;
        }
    }

    kfree(desc);
    return 0;
}

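/* Descriptor math, as the formula below implies: the first 32-byte
 * descriptor carries three 64-bit stats after its command header, and each
 * following descriptor is reused whole and carries four, so e.g.
 * reg_num = 12 gives 1 + ((12 - 3) >> 2) + 1 = 4 descriptors
 * (3 + 4 + 4 + 4 = 15 >= 12 slots). This reading is inferred from the
 * HCLGE_RD_FIRST_STATS_NUM / HCLGE_RD_OTHER_STATS_NUM usage above.
 */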
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
    struct hclge_desc desc;
    __le32 *desc_data;
    u32 reg_num;
    int ret;

    hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
    ret = hclge_cmd_send(&hdev->hw, &desc, 1);
    if (ret)
        return ret;

    desc_data = (__le32 *)(&desc.data[0]);
    reg_num = le32_to_cpu(*desc_data);

    *desc_num = 1 + ((reg_num - 3) >> 2) +
                (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

    return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
    u32 desc_num;
    int ret;

    ret = hclge_mac_query_reg_num(hdev, &desc_num);

    /* The firmware supports the new statistics acquisition method */
    if (!ret)
        ret = hclge_mac_update_stats_complete(hdev, desc_num);
    else if (ret == -EOPNOTSUPP)
        ret = hclge_mac_update_stats_defective(hdev);
    else
        dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

    return ret;
}

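/* Per-queue statistics: one HCLGE_OPC_QUERY_RX_STATUS/TX_STATUS command is
 * issued per TQP and the packet counters are accumulated into
 * tqp->tqp_stats, RX first and then TX; note hclge_tqps_get_strings()
 * below emits the names in TX-then-RX order instead.
 */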
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
    struct hnae3_knic_private_info *kinfo = &handle->kinfo;
    struct hclge_vport *vport = hclge_get_vport(handle);
    struct hclge_dev *hdev = vport->back;
    struct hnae3_queue *queue;
    struct hclge_desc desc[1];
    struct hclge_tqp *tqp;
    int ret, i;

    for (i = 0; i < kinfo->num_tqps; i++) {
        queue = handle->kinfo.tqp[i];
        tqp = container_of(queue, struct hclge_tqp, q);
        /* command : HCLGE_OPC_QUERY_IGU_STAT */
        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
                                   true);

        desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
        ret = hclge_cmd_send(&hdev->hw, desc, 1);
        if (ret) {
            dev_err(&hdev->pdev->dev,
                    "Query tqp stat fail, status = %d,queue = %d\n",
                    ret, i);
            return ret;
        }
        tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
            le32_to_cpu(desc[0].data[1]);
    }

    for (i = 0; i < kinfo->num_tqps; i++) {
        queue = handle->kinfo.tqp[i];
        tqp = container_of(queue, struct hclge_tqp, q);
        /* command : HCLGE_OPC_QUERY_IGU_STAT */
        hclge_cmd_setup_basic_desc(&desc[0],
                                   HCLGE_OPC_QUERY_TX_STATUS,
                                   true);

        desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
        ret = hclge_cmd_send(&hdev->hw, desc, 1);
        if (ret) {
            dev_err(&hdev->pdev->dev,
                    "Query tqp stat fail, status = %d,queue = %d\n",
                    ret, i);
            return ret;
        }
        tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
            le32_to_cpu(desc[0].data[1]);
    }

    return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
    struct hnae3_knic_private_info *kinfo = &handle->kinfo;
    struct hclge_tqp *tqp;
    u64 *buff = data;
    int i;

    for (i = 0; i < kinfo->num_tqps; i++) {
        tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
        *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
    }

    for (i = 0; i < kinfo->num_tqps; i++) {
        tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
        *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
    }

    return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
    struct hnae3_knic_private_info *kinfo = &handle->kinfo;

    /* each tqp has TX & RX two queues */
    return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
    struct hnae3_knic_private_info *kinfo = &handle->kinfo;
    u8 *buff = data;
    int i;

    for (i = 0; i < kinfo->num_tqps; i++) {
        struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
                                             struct hclge_tqp, q);
        snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
                 tqp->index);
        buff = buff + ETH_GSTRING_LEN;
    }

    for (i = 0; i < kinfo->num_tqps; i++) {
        struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
                                             struct hclge_tqp, q);
        snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
                 tqp->index);
        buff = buff + ETH_GSTRING_LEN;
    }

    return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
                                 const struct hclge_comm_stats_str strs[],
                                 int size, u64 *data)
{
    u64 *buf = data;
    u32 i;

    for (i = 0; i < size; i++)
        buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

    return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
                                  const struct hclge_comm_stats_str strs[],
                                  int size, u8 *data)
{
    char *buff = (char *)data;
    u32 i;

    if (stringset != ETH_SS_STATS)
        return buff;

    for (i = 0; i < size; i++) {
        snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
        buff = buff + ETH_GSTRING_LEN;
    }

    return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
    struct hnae3_handle *handle;
    int status;

    handle = &hdev->vport[0].nic;
    if (handle->client) {
        status = hclge_tqps_update_stats(handle);
        if (status)
            dev_err(&hdev->pdev->dev,
                    "Update TQPS stats fail, status = %d.\n",
                    status);
    }

    status = hclge_mac_update_stats(hdev);
    if (status)
        dev_err(&hdev->pdev->dev,
                "Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
                               struct net_device_stats *net_stats)
{
    struct hclge_vport *vport = hclge_get_vport(handle);
    struct hclge_dev *hdev = vport->back;
    int status;

    if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
        return;

    status = hclge_mac_update_stats(hdev);
    if (status)
        dev_err(&hdev->pdev->dev,
                "Update MAC stats fail, status = %d.\n",
                status);

    status = hclge_tqps_update_stats(handle);
    if (status)
        dev_err(&hdev->pdev->dev,
                "Update TQPS stats fail, status = %d.\n",
                status);

    clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
        HNAE3_SUPPORT_PHY_LOOPBACK |\
        HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
        HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

    struct hclge_vport *vport = hclge_get_vport(handle);
    struct hclge_dev *hdev = vport->back;
    int count = 0;

    /* Loopback test support rules:
     * mac: only GE mode support
     * serdes: all mac mode will support include GE/XGE/LGE/CGE
     * phy: only support when phy device exist on board
     */
    if (stringset == ETH_SS_TEST) {
        /* clear loopback bit flags at first */
        handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
        if (hdev->pdev->revision >= 0x21 ||
            hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
            hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
            hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
            count += 1;
            handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
        }

        count += 2;
        handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
        handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

        if (hdev->hw.mac.phydev) {
            count += 1;
            handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
        }
    } else if (stringset == ETH_SS_STATS) {
        count = ARRAY_SIZE(g_mac_stats_string) +
                hclge_tqps_get_sset_count(handle, stringset);
    }

    return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
                              u8 *data)
{
    u8 *p = (char *)data;
    int size;

    if (stringset == ETH_SS_STATS) {
        size = ARRAY_SIZE(g_mac_stats_string);
        p = hclge_comm_get_strings(stringset, g_mac_stats_string,
                                   size, p);
        p = hclge_tqps_get_strings(handle, p);
    } else if (stringset == ETH_SS_TEST) {
        if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
            memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
                   ETH_GSTRING_LEN);
            p += ETH_GSTRING_LEN;
        }
        if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
            memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
                   ETH_GSTRING_LEN);
            p += ETH_GSTRING_LEN;
        }
        if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
            memcpy(p,
                   hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
                   ETH_GSTRING_LEN);
            p += ETH_GSTRING_LEN;
        }
        if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
            memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
                   ETH_GSTRING_LEN);
            p += ETH_GSTRING_LEN;
        }
    }
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
    struct hclge_vport *vport = hclge_get_vport(handle);
    struct hclge_dev *hdev = vport->back;
    u64 *p;

    p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
                             ARRAY_SIZE(g_mac_stats_string), data);
    p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
                               struct hns3_mac_stats *mac_stats)
{
    struct hclge_vport *vport = hclge_get_vport(handle);
    struct hclge_dev *hdev = vport->back;

    hclge_update_stats(handle, NULL);

    mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
    mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
                                   struct hclge_func_status_cmd *status)
{
    if (!(status->pf_state & HCLGE_PF_STATE_DONE))
        return -EINVAL;

    /* Set the pf to main pf */
    if (status->pf_state & HCLGE_PF_STATE_MAIN)
        hdev->flag |= HCLGE_FLAG_MAIN;
    else
        hdev->flag &= ~HCLGE_FLAG_MAIN;

    return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT 5

    struct hclge_func_status_cmd *req;
    struct hclge_desc desc;
    int timeout = 0;
    int ret;

    hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
    req = (struct hclge_func_status_cmd *)desc.data;

    do {
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
            dev_err(&hdev->pdev->dev,
                    "query function status failed %d.\n", ret);
            return ret;
        }

        /* Check pf reset is done */
        if (req->pf_state)
            break;
        usleep_range(1000, 2000);
    } while (timeout++ < HCLGE_QUERY_MAX_CNT);

    return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
    struct hclge_pf_res_cmd *req;
    struct hclge_desc desc;
    int ret;

    hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
    ret = hclge_cmd_send(&hdev->hw, &desc, 1);
    if (ret) {
        dev_err(&hdev->pdev->dev,
                "query pf resource failed %d.\n", ret);
        return ret;
    }

    req = (struct hclge_pf_res_cmd *)desc.data;
    hdev->num_tqps = le16_to_cpu(req->tqp_num);
    hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

    if (req->tx_buf_size)
        hdev->tx_buf_size =
            le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
    else
        hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

    hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

    if (req->dv_buf_size)
        hdev->dv_buf_size =
            le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
    else
        hdev->dv_buf_size = HCLGE_DEFAULT_DV;

    hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

    if (hnae3_dev_roce_supported(hdev)) {
        hdev->roce_base_msix_offset =
            hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
                            HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
        hdev->num_roce_msi =
            hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
                            HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

        /* the NIC's MSI-X vector count always equals the RoCE's */
        hdev->num_nic_msi = hdev->num_roce_msi;

        /* PF should have NIC vectors and Roce vectors,
         * NIC vectors are queued before Roce vectors.
         */
        hdev->num_msi = hdev->num_roce_msi +
                        hdev->roce_base_msix_offset;
    } else {
        hdev->num_msi =
            hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
                            HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

        hdev->num_nic_msi = hdev->num_msi;
    }

    if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
        dev_err(&hdev->pdev->dev,
                "Just %u msi resources, not enough for pf(min:2).\n",
                hdev->num_nic_msi);
        return -EINVAL;
    }

    return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
    switch (speed_cmd) {
    case 6:
        *speed = HCLGE_MAC_SPEED_10M;
        break;
    case 7:
        *speed = HCLGE_MAC_SPEED_100M;
        break;
    case 0:
        *speed = HCLGE_MAC_SPEED_1G;
        break;
    case 1:
        *speed = HCLGE_MAC_SPEED_10G;
        break;
    case 2:
        *speed = HCLGE_MAC_SPEED_25G;
        break;
    case 3:
        *speed = HCLGE_MAC_SPEED_40G;
        break;
    case 4:
        *speed = HCLGE_MAC_SPEED_50G;
        break;
    case 5:
        *speed = HCLGE_MAC_SPEED_100G;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
    struct hclge_vport *vport = hclge_get_vport(handle);
    struct hclge_dev *hdev = vport->back;
    u32 speed_ability = hdev->hw.mac.speed_ability;
    u32 speed_bit = 0;

    switch (speed) {
    case HCLGE_MAC_SPEED_10M:
        speed_bit = HCLGE_SUPPORT_10M_BIT;
        break;
    case HCLGE_MAC_SPEED_100M:
        speed_bit = HCLGE_SUPPORT_100M_BIT;
        break;
    case HCLGE_MAC_SPEED_1G:
        speed_bit = HCLGE_SUPPORT_1G_BIT;
        break;
    case HCLGE_MAC_SPEED_10G:
        speed_bit = HCLGE_SUPPORT_10G_BIT;
        break;
    case HCLGE_MAC_SPEED_25G:
        speed_bit = HCLGE_SUPPORT_25G_BIT;
        break;
    case HCLGE_MAC_SPEED_40G:
        speed_bit = HCLGE_SUPPORT_40G_BIT;
        break;
    case HCLGE_MAC_SPEED_50G:
        speed_bit = HCLGE_SUPPORT_50G_BIT;
        break;
    case HCLGE_MAC_SPEED_100G:
        speed_bit = HCLGE_SUPPORT_100G_BIT;
        break;
    default:
        return -EINVAL;
    }

    if (speed_bit & speed_ability)
        return 0;

    return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
    if (speed_ability & HCLGE_SUPPORT_10G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_25G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_40G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_50G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_100G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
                         mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
    if (speed_ability & HCLGE_SUPPORT_10G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_25G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_50G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_40G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_100G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
                         mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
    if (speed_ability & HCLGE_SUPPORT_10G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_25G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_40G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_50G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_100G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
                         mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
    if (speed_ability & HCLGE_SUPPORT_1G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_10G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_25G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_40G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_50G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
                         mac->supported);
    if (speed_ability & HCLGE_SUPPORT_100G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
                         mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
    linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
    linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

    switch (mac->speed) {
    case HCLGE_MAC_SPEED_10G:
    case HCLGE_MAC_SPEED_40G:
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
                         mac->supported);
        mac->fec_ability =
            BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
        break;
    case HCLGE_MAC_SPEED_25G:
    case HCLGE_MAC_SPEED_50G:
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
                         mac->supported);
        mac->fec_ability =
            BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
            BIT(HNAE3_FEC_AUTO);
        break;
    case HCLGE_MAC_SPEED_100G:
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
        mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
        break;
    default:
        mac->fec_ability = 0;
        break;
    }
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
                                        u8 speed_ability)
{
    struct hclge_mac *mac = &hdev->hw.mac;

    if (speed_ability & HCLGE_SUPPORT_1G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
                         mac->supported);

    hclge_convert_setting_sr(mac, speed_ability);
    hclge_convert_setting_lr(mac, speed_ability);
    hclge_convert_setting_cr(mac, speed_ability);
    if (hdev->pdev->revision >= 0x21)
        hclge_convert_setting_fec(mac);

    linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
    linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
    linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
                                            u8 speed_ability)
{
    struct hclge_mac *mac = &hdev->hw.mac;

    hclge_convert_setting_kr(mac, speed_ability);
    if (hdev->pdev->revision >= 0x21)
        hclge_convert_setting_fec(mac);
    linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
    linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
    linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
                                         u8 speed_ability)
{
    unsigned long *supported = hdev->hw.mac.supported;

    /* default to support all speed for GE port */
    if (!speed_ability)
        speed_ability = HCLGE_SUPPORT_GE;

    if (speed_ability & HCLGE_SUPPORT_1G_BIT)
        linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                         supported);

    if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
        linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                         supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
                         supported);
    }

    if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
        linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
    }

    linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
    linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
    linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
    linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
    u8 media_type = hdev->hw.mac.media_type;

    if (media_type == HNAE3_MEDIA_TYPE_FIBER)
        hclge_parse_fiber_link_mode(hdev, speed_ability);
    else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
        hclge_parse_copper_link_mode(hdev, speed_ability);
    else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
        hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u8 speed_ability)
{
    if (speed_ability & HCLGE_SUPPORT_100G_BIT)
        return HCLGE_MAC_SPEED_100G;

    if (speed_ability & HCLGE_SUPPORT_50G_BIT)
        return HCLGE_MAC_SPEED_50G;

    if (speed_ability & HCLGE_SUPPORT_40G_BIT)
        return HCLGE_MAC_SPEED_40G;

    if (speed_ability & HCLGE_SUPPORT_25G_BIT)
        return HCLGE_MAC_SPEED_25G;

    if (speed_ability & HCLGE_SUPPORT_10G_BIT)
        return HCLGE_MAC_SPEED_10G;

    if (speed_ability & HCLGE_SUPPORT_1G_BIT)
        return HCLGE_MAC_SPEED_1G;

    if (speed_ability & HCLGE_SUPPORT_100M_BIT)
        return HCLGE_MAC_SPEED_100M;

    if (speed_ability & HCLGE_SUPPORT_10M_BIT)
        return HCLGE_MAC_SPEED_10M;

    return HCLGE_MAC_SPEED_1G;
}

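/* Unpack the two-descriptor config block fetched by hclge_get_cfg(). The
 * MAC address arrives as a 32-bit low word in param[2] plus a 16-bit high
 * field in param[3]; the (x << 31) << 1 below is just x << 32 written in
 * two steps, so e.g. low 0x44332211 with high 0x6655 yields the address
 * 11:22:33:44:55:66 (least significant byte first).
 */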
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
    struct hclge_cfg_param_cmd *req;
    u64 mac_addr_tmp_high;
    u64 mac_addr_tmp;
    unsigned int i;

    req = (struct hclge_cfg_param_cmd *)desc[0].data;

    /* get the configuration */
    cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                          HCLGE_CFG_VMDQ_M,
                                          HCLGE_CFG_VMDQ_S);
    cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                  HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
    cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                        HCLGE_CFG_TQP_DESC_N_M,
                                        HCLGE_CFG_TQP_DESC_N_S);

    cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                    HCLGE_CFG_PHY_ADDR_M,
                                    HCLGE_CFG_PHY_ADDR_S);
    cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                      HCLGE_CFG_MEDIA_TP_M,
                                      HCLGE_CFG_MEDIA_TP_S);
    cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                      HCLGE_CFG_RX_BUF_LEN_M,
                                      HCLGE_CFG_RX_BUF_LEN_S);
    /* get mac_address */
    mac_addr_tmp = __le32_to_cpu(req->param[2]);
    mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                        HCLGE_CFG_MAC_ADDR_H_M,
                                        HCLGE_CFG_MAC_ADDR_H_S);

    mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

    cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                         HCLGE_CFG_DEFAULT_SPEED_M,
                                         HCLGE_CFG_DEFAULT_SPEED_S);
    cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                        HCLGE_CFG_RSS_SIZE_M,
                                        HCLGE_CFG_RSS_SIZE_S);

    for (i = 0; i < ETH_ALEN; i++)
        cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

    req = (struct hclge_cfg_param_cmd *)desc[1].data;
    cfg->numa_node_map = __le32_to_cpu(req->param[0]);

    cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                         HCLGE_CFG_SPEED_ABILITY_M,
                                         HCLGE_CFG_SPEED_ABILITY_S);
    cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                     HCLGE_CFG_UMV_TBL_SPACE_M,
                                     HCLGE_CFG_UMV_TBL_SPACE_S);
    if (!cfg->umv_space)
        cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
    struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
    struct hclge_cfg_param_cmd *req;
    unsigned int i;
    int ret;

    for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
        u32 offset = 0;

        req = (struct hclge_cfg_param_cmd *)desc[i].data;
        hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
                                   true);
        hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
                        HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
        /* Len is counted in units of 4 bytes when sent to hardware */
        hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
                        HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
        req->offset = cpu_to_le32(offset);
    }

    ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
    if (ret) {
        dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
        return ret;
    }

    hclge_parse_cfg(hcfg, desc);

    return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
    int ret;

    ret = hclge_query_function_status(hdev);
    if (ret) {
        dev_err(&hdev->pdev->dev,
                "query function status error %d.\n", ret);
        return ret;
    }

    /* get pf resource */
    return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC 64
#define HCLGE_MIN_RX_DESC 64

    if (!is_kdump_kernel())
        return;

    dev_info(&hdev->pdev->dev,
             "Running kdump kernel. Using minimal resources\n");

    /* minimal queue pairs equals to the number of vports */
    hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
    hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
    hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
    struct hclge_cfg cfg;
    unsigned int i;
    int ret;

    ret = hclge_get_cfg(hdev, &cfg);
    if (ret) {
        dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
        return ret;
    }

    hdev->num_vmdq_vport = cfg.vmdq_vport_num;
    hdev->base_tqp_pid = 0;
    hdev->rss_size_max = cfg.rss_size_max;
    hdev->rx_buf_len = cfg.rx_buf_len;
    ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
    hdev->hw.mac.media_type = cfg.media_type;
    hdev->hw.mac.phy_addr = cfg.phy_addr;
    hdev->num_tx_desc = cfg.tqp_desc_num;
    hdev->num_rx_desc = cfg.tqp_desc_num;
    hdev->tm_info.num_pg = 1;
    hdev->tc_max = cfg.tc_num;
    hdev->tm_info.hw_pfc_map = 0;
    hdev->wanted_umv_size = cfg.umv_space;

    if (hnae3_dev_fd_supported(hdev)) {
        hdev->fd_en = true;
        hdev->fd_active_type = HCLGE_FD_RULE_NONE;
    }

    ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
    if (ret) {
        dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
        return ret;
    }

    hclge_parse_link_mode(hdev, cfg.speed_ability);

    hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

    if ((hdev->tc_max > HNAE3_MAX_TC) ||
        (hdev->tc_max < 1)) {
        dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
                 hdev->tc_max);
        hdev->tc_max = 1;
    }

    /* Dev does not support DCB */
    if (!hnae3_dev_dcb_supported(hdev)) {
        hdev->tc_max = 1;
        hdev->pfc_max = 0;
    } else {
        hdev->pfc_max = hdev->tc_max;
    }

    hdev->tm_info.num_tc = 1;

    /* Currently not support uncontiguous tc */
    for (i = 0; i < hdev->tm_info.num_tc; i++)
        hnae3_set_bit(hdev->hw_tc_map, i, 1);

    hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

    hclge_init_kdump_kernel_config(hdev);

    /* Set the init affinity based on pci func number */
    i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
    i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
    cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
                    &hdev->affinity_mask);

    return ret;
}

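/* Program the TSO MSS clamp into firmware. Both halves below are packed
 * with the HCLGE_TSO_MSS_MIN_M/_S mask and shift; that looks odd at first
 * glance, but the min and max fields evidently share the same in-register
 * layout, so one mask serves for both (an inference from the code, not a
 * documented guarantee).
 */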
static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
                            unsigned int tso_mss_max)
{
    struct hclge_cfg_tso_status_cmd *req;
    struct hclge_desc desc;
    u16 tso_mss;

    hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

    req = (struct hclge_cfg_tso_status_cmd *)desc.data;

    tso_mss = 0;
    hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
                    HCLGE_TSO_MSS_MIN_S, tso_mss_min);
    req->tso_mss_min = cpu_to_le16(tso_mss);

    tso_mss = 0;
    hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
                    HCLGE_TSO_MSS_MIN_S, tso_mss_max);
    req->tso_mss_max = cpu_to_le16(tso_mss);

    return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
    struct hclge_cfg_gro_status_cmd *req;
    struct hclge_desc desc;
    int ret;

    if (!hnae3_dev_gro_supported(hdev))
        return 0;

    hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
    req = (struct hclge_cfg_gro_status_cmd *)desc.data;

    req->gro_en = cpu_to_le16(en ? 1 : 0);

    ret = hclge_cmd_send(&hdev->hw, &desc, 1);
    if (ret)
        dev_err(&hdev->pdev->dev,
                "GRO hardware config cmd failed, ret = %d\n", ret);

    return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
    struct hclge_tqp *tqp;
    int i;

    hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
                              sizeof(struct hclge_tqp), GFP_KERNEL);
    if (!hdev->htqp)
        return -ENOMEM;

    tqp = hdev->htqp;

    for (i = 0; i < hdev->num_tqps; i++) {
        tqp->dev = &hdev->pdev->dev;
        tqp->index = i;

        tqp->q.ae_algo = &ae_algo;
        tqp->q.buf_size = hdev->rx_buf_len;
        tqp->q.tx_desc_num = hdev->num_tx_desc;
        tqp->q.rx_desc_num = hdev->num_rx_desc;
        tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
            i * HCLGE_TQP_REG_SIZE;

        tqp++;
    }

    return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
                                  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
    struct hclge_tqp_map_cmd *req;
    struct hclge_desc desc;
    int ret;

    hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

    req = (struct hclge_tqp_map_cmd *)desc.data;
    req->tqp_id = cpu_to_le16(tqp_pid);
    req->tqp_vf = func_id;
    req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
    if (!is_pf)
        req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
    req->tqp_vid = cpu_to_le16(tqp_vid);

    ret = hclge_cmd_send(&hdev->hw, &desc, 1);
    if (ret)
        dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

    return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
    struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
    struct hclge_dev *hdev = vport->back;
    int i, alloced;

    for (i = 0, alloced = 0; i < hdev->num_tqps &&
         alloced < num_tqps; i++) {
        if (!hdev->htqp[i].alloced) {
            hdev->htqp[i].q.handle = &vport->nic;
            hdev->htqp[i].q.tqp_index = alloced;
            hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
            hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
            kinfo->tqp[alloced] = &hdev->htqp[i].q;
            hdev->htqp[i].alloced = true;
            alloced++;
        }
    }
    vport->alloc_tqps = alloced;
    kinfo->rss_size = min_t(u16, hdev->rss_size_max,
                            vport->alloc_tqps / hdev->tm_info.num_tc);

    /* ensure one to one mapping between irq and queue at default */
    kinfo->rss_size = min_t(u16, kinfo->rss_size,
                            (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

    return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
                            u16 num_tx_desc, u16 num_rx_desc)
{
    struct hnae3_handle *nic = &vport->nic;
    struct hnae3_knic_private_info *kinfo = &nic->kinfo;
    struct hclge_dev *hdev = vport->back;
    int ret;

    kinfo->num_tx_desc = num_tx_desc;
    kinfo->num_rx_desc = num_rx_desc;

    kinfo->rx_buf_len = hdev->rx_buf_len;

    kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
                              sizeof(struct hnae3_queue *), GFP_KERNEL);
    if (!kinfo->tqp)
        return -ENOMEM;

    ret = hclge_assign_tqp(vport, num_tqps);
    if (ret)
        dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

    return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
                                  struct hclge_vport *vport)
{
    struct hnae3_handle *nic = &vport->nic;
    struct hnae3_knic_private_info *kinfo;
    u16 i;

    kinfo = &nic->kinfo;
    for (i = 0; i < vport->alloc_tqps; i++) {
        struct hclge_tqp *q =
            container_of(kinfo->tqp[i], struct hclge_tqp, q);
        bool is_pf;
        int ret;

        is_pf = !(vport->vport_id);
        ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
                                     i, is_pf);
        if (ret)
            return ret;
    }

    return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
    struct hclge_vport *vport = hdev->vport;
    u16 i, num_vport;

    num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
    for (i = 0; i < num_vport; i++) {
        int ret;

        ret = hclge_map_tqp_to_vport(hdev, vport);
        if (ret)
            return ret;

        vport++;
    }

    return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
    struct hnae3_handle *nic = &vport->nic;
    struct hclge_dev *hdev = vport->back;
    int ret;

    nic->pdev = hdev->pdev;
    nic->ae_algo = &ae_algo;
    nic->numa_node_mask = hdev->numa_node_mask;

    ret = hclge_knic_setup(vport, num_tqps,
                           hdev->num_tx_desc, hdev->num_rx_desc);
    if (ret)
        dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

    return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
    struct pci_dev *pdev = hdev->pdev;
    struct hclge_vport *vport;
    u32 tqp_main_vport;
    u32 tqp_per_vport;
    int num_vport, i;
    int ret;

    /* We need to alloc a vport for main NIC of PF */
    num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

    if (hdev->num_tqps < num_vport) {
        dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
                hdev->num_tqps, num_vport);
        return -EINVAL;
    }

    /* Alloc the same number of TQPs for every vport */
    tqp_per_vport = hdev->num_tqps / num_vport;
    tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

    vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
                         GFP_KERNEL);
    if (!vport)
        return -ENOMEM;

    hdev->vport = vport;
    hdev->num_alloc_vport = num_vport;

    if (IS_ENABLED(CONFIG_PCI_IOV))
        hdev->num_alloc_vfs = hdev->num_req_vfs;

    for (i = 0; i < num_vport; i++) {
        vport->back = hdev;
        vport->vport_id = i;
        vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
        vport->mps = HCLGE_MAC_DEFAULT_FRAME;
        vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
        vport->rxvlan_cfg.rx_vlan_offload_en = true;
        INIT_LIST_HEAD(&vport->vlan_list);
        INIT_LIST_HEAD(&vport->uc_mac_list);
        INIT_LIST_HEAD(&vport->mc_mac_list);

        if (i == 0)
            ret = hclge_vport_setup(vport, tqp_main_vport);
        else
            ret = hclge_vport_setup(vport, tqp_per_vport);
        if (ret) {
            dev_err(&pdev->dev,
                    "vport setup failed for vport %d, %d\n",
                    i, ret);
            return ret;
        }

        vport++;
    }

    return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
                                   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
    struct hclge_tx_buff_alloc_cmd *req;
    struct hclge_desc desc;
    int ret;
    u8 i;

    req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

    hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
    for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
        u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

        req->tx_pkt_buff[i] =
            cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
                        HCLGE_BUF_SIZE_UPDATE_EN_MSK);
    }

    ret = hclge_cmd_send(&hdev->hw, &desc, 1);
    if (ret)
        dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
                ret);

    return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
                                 struct hclge_pkt_buf_alloc *buf_alloc)
{
    int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

    if (ret)
        dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

    return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
    unsigned int i;
    u32 cnt = 0;

    for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
        if (hdev->hw_tc_map & BIT(i))
            cnt++;
    return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
                                  struct hclge_pkt_buf_alloc *buf_alloc)
{
    struct hclge_priv_buf *priv;
    unsigned int i;
    int cnt = 0;

    for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
        priv = &buf_alloc->priv_buf[i];
        if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
            priv->enable)
            cnt++;
    }

    return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
                                     struct hclge_pkt_buf_alloc *buf_alloc)
{
    struct hclge_priv_buf *priv;
    unsigned int i;
    int cnt = 0;

    for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
        priv = &buf_alloc->priv_buf[i];
        if (hdev->hw_tc_map & BIT(i) &&
            !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
            priv->enable)
            cnt++;
    }

    return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
    struct hclge_priv_buf *priv;
    u32 rx_priv = 0;
    int i;

    for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
        priv = &buf_alloc->priv_buf[i];
        if (priv->enable)
            rx_priv += priv->buf_size;
    }
    return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
    u32 i, total_tx_size = 0;

    for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
        total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

    return total_tx_size;
}

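/* Decide whether rx_all (the total rx buffer minus the tx allocation) can
 * cover the private buffers already claimed plus a large-enough shared
 * buffer. The shared floor is the larger of an MPS-based minimum and the
 * per-TC requirement of (tc_num + 1) maximum-size frames; on success the
 * shared buffer size and its per-TC high/low thresholds are filled in.
 */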
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
                               struct hclge_pkt_buf_alloc *buf_alloc,
                               u32 rx_all)
{
    u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
    u32 tc_num = hclge_get_tc_num(hdev);
    u32 shared_buf, aligned_mps;
    u32 rx_priv;
    int i;

    aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

    if (hnae3_dev_dcb_supported(hdev))
        shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
                         hdev->dv_buf_size;
    else
        shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
                         + hdev->dv_buf_size;

    shared_buf_tc = tc_num * aligned_mps + aligned_mps;
    shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
                         HCLGE_BUF_SIZE_UNIT);

    rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
    if (rx_all < rx_priv + shared_std)
        return false;

    shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
    buf_alloc->s_buf.buf_size = shared_buf;
    if (hnae3_dev_dcb_supported(hdev)) {
        buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
        buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
            - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
                      HCLGE_BUF_SIZE_UNIT);
    } else {
        buf_alloc->s_buf.self.high = aligned_mps +
                                     HCLGE_NON_DCB_ADDITIONAL_BUF;
        buf_alloc->s_buf.self.low = aligned_mps;
    }

    if (hnae3_dev_dcb_supported(hdev)) {
        hi_thrd = shared_buf - hdev->dv_buf_size;

        if (tc_num <= NEED_RESERVE_TC_NUM)
            hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
                      / BUF_MAX_PERCENT;

        if (tc_num)
            hi_thrd = hi_thrd / tc_num;

        hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
        hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
        lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
    } else {
        hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
        lo_thrd = aligned_mps;
    }

    for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
        buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
        buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
    }

    return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
                                struct hclge_pkt_buf_alloc *buf_alloc)
{
    u32 i, total_size;

    total_size = hdev->pkt_buf_size;

    /* alloc tx buffer for all enabled tc */
    for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
        struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

        if (hdev->hw_tc_map & BIT(i)) {
            if (total_size < hdev->tx_buf_size)
                return -ENOMEM;

            priv->tx_buf_size = hdev->tx_buf_size;
        } else {
            priv->tx_buf_size = 0;
        }

        total_size -= priv->tx_buf_size;
    }

    return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
                                  struct hclge_pkt_buf_alloc *buf_alloc)
{
    u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
    u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
    unsigned int i;

    for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
        struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

        priv->enable = 0;
        priv->wl.low = 0;
        priv->wl.high = 0;
        priv->buf_size = 0;

        if (!(hdev->hw_tc_map & BIT(i)))
            continue;

        priv->enable = 1;

        if (hdev->tm_info.hw_pfc_map & BIT(i)) {
            priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
            priv->wl.high = roundup(priv->wl.low + aligned_mps,
                                    HCLGE_BUF_SIZE_UNIT);
        } else {
            priv->wl.low = 0;
            priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
                            aligned_mps;
        }

        priv->buf_size = priv->wl.high + hdev->dv_buf_size;
    }

    return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
                                          struct hclge_pkt_buf_alloc *buf_alloc)
{
    u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
    int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
    int i;

    /* let the last to be cleared first */
    for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
        struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
        unsigned int mask = BIT((unsigned int)i);

        if (hdev->hw_tc_map & mask &&
            !(hdev->tm_info.hw_pfc_map & mask)) {
            /* Clear the no pfc TC private buffer */
            priv->wl.low = 0;
            priv->wl.high = 0;
            priv->buf_size = 0;
            priv->enable = 0;
            no_pfc_priv_num--;
        }

        if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
            no_pfc_priv_num == 0)
            break;
    }

    return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
                                        struct hclge_pkt_buf_alloc *buf_alloc)
{
    u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
    int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
    int i;

    /* let the last to be cleared first */
    for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
        struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
        unsigned int mask = BIT((unsigned int)i);

        if (hdev->hw_tc_map & mask &&
            hdev->tm_info.hw_pfc_map & mask) {
            /* Reduce the number of pfc TC with private buffer */
            priv->wl.low = 0;
            priv->wl.high = 0;
            priv->buf_size = 0;
            priv->enable = 0;
            pfc_priv_num--;
        }

        if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
            pfc_priv_num == 0)
            break;
    }

    return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

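/* First-choice strategy: give every enabled TC an equal slice of the rx
 * buffer as private buffer and leave the shared buffer empty. It succeeds
 * only if the per-TC slice clears a floor built from dv_buf_size and the
 * COMPENSATE_* constants defined below; otherwise the caller falls back
 * to the shared-buffer strategies in hclge_rx_buffer_calc().
 */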
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
                                       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER 0x3C00
#define COMPENSATE_HALF_MPS_NUM 5
#define PRIV_WL_GAP 0x1800

    u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
    u32 tc_num = hclge_get_tc_num(hdev);
    u32 half_mps = hdev->mps >> 1;
    u32 min_rx_priv;
    unsigned int i;

    if (tc_num)
        rx_priv = rx_priv / tc_num;

    if (tc_num <= NEED_RESERVE_TC_NUM)
        rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

    min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
                  COMPENSATE_HALF_MPS_NUM * half_mps;
    min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
    rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

    if (rx_priv < min_rx_priv)
        return false;

    for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
        struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

        priv->enable = 0;
        priv->wl.low = 0;
        priv->wl.high = 0;
        priv->buf_size = 0;

        if (!(hdev->hw_tc_map & BIT(i)))
            continue;

        priv->enable = 1;
        priv->buf_size = rx_priv;
        priv->wl.high = rx_priv - hdev->dv_buf_size;
        priv->wl.low = priv->wl.high - PRIV_WL_GAP;
    }

    buf_alloc->s_buf.buf_size = 0;

    return true;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
                                struct hclge_pkt_buf_alloc *buf_alloc)
{
    /* When DCB is not supported, rx private buffer is not allocated. */
    if (!hnae3_dev_dcb_supported(hdev)) {
        u32 rx_all = hdev->pkt_buf_size;

        rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
        if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
            return -ENOMEM;

        return 0;
    }

    if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
        return 0;

    if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
        return 0;

    /* try to decrease the buffer size */
    if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
        return 0;

    if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
        return 0;

    if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
        return 0;

    return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
                                   struct hclge_pkt_buf_alloc *buf_alloc)
{
    struct hclge_rx_priv_buff_cmd *req;
    struct hclge_desc desc;
    int ret;
    int i;

    hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
    req = (struct hclge_rx_priv_buff_cmd *)desc.data;

    /* Alloc private buffer TCs */
    for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
        struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

        req->buf_num[i] =
            cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
        req->buf_num[i] |=
            cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
    }

    req->shared_buf =
        cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
                    (1 << HCLGE_TC0_PRI_BUF_EN_B));

    ret = hclge_cmd_send(&hdev->hw, &desc, 1);
    if (ret)
        dev_err(&hdev->pdev->dev,
                "rx private buffer alloc cmd failed %d\n", ret);

    return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
                                   struct hclge_pkt_buf_alloc *buf_alloc)
{
    struct hclge_rx_priv_wl_buf *req;
    struct hclge_priv_buf *priv;
    struct hclge_desc desc[2];
    int i, j;
    int ret;

    for (i = 0; i < 2; i++) {
        hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
                                   false);
        req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

        /* The first descriptor set the NEXT bit to 1 */
        if (i == 0)
            desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        else
            desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

        for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
            u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

            priv = &buf_alloc->priv_buf[idx];
            req->tc_wl[j].high =
                cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
            req->tc_wl[j].high |=
                cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
            req->tc_wl[j].low =
                cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
            req->tc_wl[j].low |=
                cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
        }
    }

    /* Send 2 descriptors at one time */
    ret = hclge_cmd_send(&hdev->hw, desc, 2);
    if (ret)
        dev_err(&hdev->pdev->dev,
                "rx private waterline config cmd failed %d\n",
                ret);
    return ret;
}

2170 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2171 struct hclge_pkt_buf_alloc *buf_alloc)
2173 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2174 struct hclge_rx_com_thrd *req;
2175 struct hclge_desc desc[2];
2176 struct hclge_tc_thrd *tc;
2180 for (i = 0; i < 2; i++) {
2181 hclge_cmd_setup_basic_desc(&desc[i],
2182 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2183 req = (struct hclge_rx_com_thrd *)&desc[i].data;
		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2191 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2192 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2194 req->com_thrd[j].high =
2195 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2196 req->com_thrd[j].high |=
2197 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2198 req->com_thrd[j].low =
2199 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2200 req->com_thrd[j].low |=
2201 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);

	return ret;
}
2213 static int hclge_common_wl_config(struct hclge_dev *hdev,
2214 struct hclge_pkt_buf_alloc *buf_alloc)
2216 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2217 struct hclge_rx_com_wl *req;
2218 struct hclge_desc desc;
2221 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2223 req = (struct hclge_rx_com_wl *)desc.data;
2224 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2225 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2227 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2228 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
2238 int hclge_buffer_alloc(struct hclge_dev *hdev)
2240 struct hclge_pkt_buf_alloc *pkt_buf;
2243 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2247 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2249 dev_err(&hdev->pdev->dev,
2250 "could not calc tx buffer size for all TCs %d\n", ret);
2254 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2256 dev_err(&hdev->pdev->dev,
2257 "could not alloc tx buffers %d\n", ret);
2261 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2263 dev_err(&hdev->pdev->dev,
2264 "could not calc rx priv buffer size for all TCs %d\n",
2269 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2271 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2276 if (hnae3_dev_dcb_supported(hdev)) {
2277 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2279 dev_err(&hdev->pdev->dev,
2280 "could not configure rx private waterline %d\n",
2285 ret = hclge_common_thrd_config(hdev, pkt_buf);
2287 dev_err(&hdev->pdev->dev,
2288 "could not configure common threshold %d\n",
2294 ret = hclge_common_wl_config(hdev, pkt_buf);
2296 dev_err(&hdev->pdev->dev,
2297 "could not configure common waterline %d\n", ret);
2304 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2306 struct hnae3_handle *roce = &vport->roce;
2307 struct hnae3_handle *nic = &vport->nic;
2309 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2311 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2312 vport->back->num_msi_left == 0)
2315 roce->rinfo.base_vector = vport->back->roce_base_vector;
2317 roce->rinfo.netdev = nic->kinfo.netdev;
2318 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2320 roce->pdev = nic->pdev;
2321 roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
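/* hclge_init_msi: allocate MSI/MSI-X vectors for the PF and set up the
 * bookkeeping arrays that map each vector to its owning vport and irq.
 */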
2327 static int hclge_init_msi(struct hclge_dev *hdev)
2329 struct pci_dev *pdev = hdev->pdev;
	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
					hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
2342 if (vectors < hdev->num_msi)
2343 dev_warn(&hdev->pdev->dev,
2344 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2345 hdev->num_msi, vectors);
2347 hdev->num_msi = vectors;
2348 hdev->num_msi_left = vectors;
2350 hdev->base_msi_vector = pdev->irq;
2351 hdev->roce_base_vector = hdev->base_msi_vector +
2352 hdev->roce_base_msix_offset;
2354 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2355 sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}
2361 for (i = 0; i < hdev->num_msi; i++)
2362 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2364 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2365 sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
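/* hclge_check_speed_dup: only 10M and 100M links may run half duplex;
 * force full duplex for every other speed.
 */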
2374 static u8 hclge_check_speed_dup(u8 duplex, int speed)
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}
2382 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2385 struct hclge_config_mac_speed_dup_cmd *req;
2386 struct hclge_desc desc;
2389 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2391 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
	if (duplex)
		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}
	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}
2447 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2449 struct hclge_mac *mac = &hdev->hw.mac;
2452 duplex = hclge_check_speed_dup(duplex, speed);
2453 if (!mac->support_autoneg && mac->speed == speed &&
2454 mac->duplex == duplex)
2457 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2461 hdev->hw.mac.speed = speed;
2462 hdev->hw.mac.duplex = duplex;
2467 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2470 struct hclge_vport *vport = hclge_get_vport(handle);
2471 struct hclge_dev *hdev = vport->back;
2473 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2476 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2478 struct hclge_config_auto_neg_cmd *req;
2479 struct hclge_desc desc;
2483 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2485 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2487 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2488 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2490 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2492 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2498 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2500 struct hclge_vport *vport = hclge_get_vport(handle);
2501 struct hclge_dev *hdev = vport->back;
2503 if (!hdev->hw.mac.support_autoneg) {
2505 dev_err(&hdev->pdev->dev,
2506 "autoneg is not supported by current port\n");
2513 return hclge_set_autoneg_en(hdev, enable);
2516 static int hclge_get_autoneg(struct hnae3_handle *handle)
2518 struct hclge_vport *vport = hclge_get_vport(handle);
2519 struct hclge_dev *hdev = vport->back;
2520 struct phy_device *phydev = hdev->hw.mac.phydev;
2523 return phydev->autoneg;
2525 return hdev->hw.mac.autoneg;
2528 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2530 struct hclge_vport *vport = hclge_get_vport(handle);
2531 struct hclge_dev *hdev = vport->back;
2534 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2536 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2539 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2542 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2544 struct hclge_vport *vport = hclge_get_vport(handle);
2545 struct hclge_dev *hdev = vport->back;
2547 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2548 return hclge_set_autoneg_en(hdev, !halt);
2553 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2555 struct hclge_config_fec_cmd *req;
2556 struct hclge_desc desc;
2559 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2561 req = (struct hclge_config_fec_cmd *)desc.data;
2562 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2563 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2564 if (fec_mode & BIT(HNAE3_FEC_RS))
2565 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2566 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2567 if (fec_mode & BIT(HNAE3_FEC_BASER))
2568 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2569 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2571 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2573 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2578 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2580 struct hclge_vport *vport = hclge_get_vport(handle);
2581 struct hclge_dev *hdev = vport->back;
2582 struct hclge_mac *mac = &hdev->hw.mac;
2585 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2586 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2590 ret = hclge_set_fec_hw(hdev, fec_mode);
2594 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2598 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2601 struct hclge_vport *vport = hclge_get_vport(handle);
2602 struct hclge_dev *hdev = vport->back;
2603 struct hclge_mac *mac = &hdev->hw.mac;
2606 *fec_ability = mac->fec_ability;
2608 *fec_mode = mac->fec_mode;
2611 static int hclge_mac_init(struct hclge_dev *hdev)
2613 struct hclge_mac *mac = &hdev->hw.mac;
2616 hdev->support_sfp_query = true;
2617 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2618 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2619 hdev->hw.mac.duplex);
2623 if (hdev->hw.mac.support_autoneg) {
2624 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2631 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2632 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2637 ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}
2643 ret = hclge_set_default_loopback(hdev);
	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}
2655 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2657 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2658 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2659 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2660 hclge_wq, &hdev->service_task, 0);
2663 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2665 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2666 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2667 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2668 hclge_wq, &hdev->service_task, 0);
2671 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2673 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2674 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2675 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2676 hclge_wq, &hdev->service_task,
2680 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2682 struct hclge_link_status_cmd *req;
2683 struct hclge_desc desc;
2687 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2688 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2690 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2695 req = (struct hclge_link_status_cmd *)desc.data;
2696 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2698 return !!link_status;
2701 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2703 unsigned int mac_state;
2706 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2709 mac_state = hclge_get_mac_link_status(hdev);
2711 if (hdev->hw.mac.phydev) {
2712 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2713 link_stat = mac_state &
2714 hdev->hw.mac.phydev->link;
2719 link_stat = mac_state;
2725 static void hclge_update_link_status(struct hclge_dev *hdev)
2727 struct hnae3_client *rclient = hdev->roce_client;
2728 struct hnae3_client *client = hdev->nic_client;
2729 struct hnae3_handle *rhandle;
2730 struct hnae3_handle *handle;
2737 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2740 state = hclge_get_mac_phy_link(hdev);
2741 if (state != hdev->hw.mac.link) {
2742 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2743 handle = &hdev->vport[i].nic;
2744 client->ops->link_status_change(handle, state);
2745 hclge_config_mac_tnl_int(hdev, state);
2746 rhandle = &hdev->vport[i].roce;
2747 if (rclient && rclient->ops->link_status_change)
2748 rclient->ops->link_status_change(rhandle,
2751 hdev->hw.mac.link = state;
2754 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2757 static void hclge_update_port_capability(struct hclge_mac *mac)
2759 /* update fec ability by speed */
2760 hclge_convert_setting_fec(mac);
	/* firmware cannot identify the backplane type; the media type
	 * read from the configuration can help to handle it
	 */
2765 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2766 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2767 mac->module_type = HNAE3_MODULE_TYPE_KR;
2768 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2769 mac->module_type = HNAE3_MODULE_TYPE_TP;
2771 if (mac->support_autoneg) {
2772 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2773 linkmode_copy(mac->advertising, mac->supported);
2775 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2777 linkmode_zero(mac->advertising);
2781 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2783 struct hclge_sfp_info_cmd *resp;
2784 struct hclge_desc desc;
2787 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2788 resp = (struct hclge_sfp_info_cmd *)desc.data;
2789 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}
2799 *speed = le32_to_cpu(resp->speed);
2804 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2806 struct hclge_sfp_info_cmd *resp;
2807 struct hclge_desc desc;
2810 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2811 resp = (struct hclge_sfp_info_cmd *)desc.data;
2813 resp->query_type = QUERY_ACTIVE_SPEED;
2815 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP info %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
		return ret;
	}
	/* In some cases, the speed obtained from the IMP may be 0; it should
	 * not be set to mac->speed.
	 */
	if (!le32_to_cpu(resp->speed))
		return 0;
2831 mac->speed = le32_to_cpu(resp->speed);
2832 /* if resp->speed_ability is 0, it means it's an old version
2833 * firmware, do not update these params
2835 if (resp->speed_ability) {
2836 mac->module_type = le32_to_cpu(resp->module_type);
2837 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2838 mac->autoneg = resp->autoneg;
2839 mac->support_autoneg = resp->autoneg_ability;
2840 mac->speed_type = QUERY_ACTIVE_SPEED;
		if (!resp->active_fec)
			mac->fec_mode = 0;
		else
			mac->fec_mode = BIT(resp->active_fec);
	} else {
		mac->speed_type = QUERY_SFP_SPEED;
	}

	return 0;
}
2852 static int hclge_update_port_info(struct hclge_dev *hdev)
2854 struct hclge_mac *mac = &hdev->hw.mac;
2855 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2858 /* get the port info from SFP cmd if not copper port */
2859 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2862 /* if IMP does not support get SFP/qSFP info, return directly */
2863 if (!hdev->support_sfp_query)
2866 if (hdev->pdev->revision >= 0x21)
2867 ret = hclge_get_sfp_info(hdev, mac);
2869 ret = hclge_get_sfp_speed(hdev, &speed);
2871 if (ret == -EOPNOTSUPP) {
2872 hdev->support_sfp_query = false;
2878 if (hdev->pdev->revision >= 0x21) {
2879 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2880 hclge_update_port_capability(mac);
2883 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2886 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2887 return 0; /* do nothing if no SFP */
2889 /* must config full duplex for SFP */
2890 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2894 static int hclge_get_status(struct hnae3_handle *handle)
2896 struct hclge_vport *vport = hclge_get_vport(handle);
2897 struct hclge_dev *hdev = vport->back;
2899 hclge_update_link_status(hdev);
2901 return hdev->hw.mac.link;
2904 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2906 if (!pci_num_vf(hdev->pdev)) {
2907 dev_err(&hdev->pdev->dev,
2908 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2912 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2913 dev_err(&hdev->pdev->dev,
2914 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2915 vf, pci_num_vf(hdev->pdev));
2919 /* VF start from 1 in vport */
2920 vf += HCLGE_VF_VPORT_START_NUM;
2921 return &hdev->vport[vf];
2924 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2925 struct ifla_vf_info *ivf)
2927 struct hclge_vport *vport = hclge_get_vport(handle);
2928 struct hclge_dev *hdev = vport->back;
2930 vport = hclge_get_vf_vport(hdev, vf);
2935 ivf->linkstate = vport->vf_info.link_state;
2936 ivf->spoofchk = vport->vf_info.spoofchk;
2937 ivf->trusted = vport->vf_info.trusted;
2938 ivf->min_tx_rate = 0;
2939 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2940 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2941 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2942 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2943 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2948 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2951 struct hclge_vport *vport = hclge_get_vport(handle);
2952 struct hclge_dev *hdev = vport->back;
2954 vport = hclge_get_vf_vport(hdev, vf);
	vport->vf_info.link_state = link_state;

	return 0;
}
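/* hclge_check_event_cause: decode the vector0 interrupt sources in priority
 * order (reset first, then MSI-X errors, then mailbox) and return the event
 * type to be handled.
 */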
2963 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2965 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2967 /* fetch the events from their corresponding regs */
2968 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2969 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2970 msix_src_reg = hclge_read_dev(&hdev->hw,
2971 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
	/* Assumption: if by any chance reset and mailbox events are reported
	 * together then we will only process the reset event in this round
	 * and defer the processing of the mailbox events. Since we have not
	 * cleared the RX CMDQ event this time, we will receive another
	 * interrupt from H/W just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
2981 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2982 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2983 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2984 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2985 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2986 hdev->rst_stats.imp_rst_cnt++;
2987 return HCLGE_VECTOR0_EVENT_RST;
2990 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2991 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2992 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2993 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2994 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2995 hdev->rst_stats.global_rst_cnt++;
2996 return HCLGE_VECTOR0_EVENT_RST;
2999 /* check for vector0 msix event source */
3000 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3001 *clearval = msix_src_reg;
3002 return HCLGE_VECTOR0_EVENT_ERR;
3005 /* check for vector0 mailbox(=CMDQ RX) event source */
3006 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3007 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3008 *clearval = cmdq_src_reg;
3009 return HCLGE_VECTOR0_EVENT_MBX;
3012 /* print other vector0 event source */
3013 dev_info(&hdev->pdev->dev,
3014 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3015 cmdq_src_reg, msix_src_reg);
3016 *clearval = msix_src_reg;
3018 return HCLGE_VECTOR0_EVENT_OTHER;
3021 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}
3036 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3038 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3039 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3040 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3041 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3042 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3045 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3047 writel(enable ? 1 : 0, vector->addr);
3050 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3052 struct hclge_dev *hdev = data;
3056 hclge_enable_vector(&hdev->misc_vector, false);
3057 event_cause = hclge_check_event_cause(hdev, &clearval);
	/* vector 0 interrupt is shared with reset and mailbox source events. */
3060 switch (event_cause) {
3061 case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This could
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do below for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have a deferred type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type
		 *    it will fetch the correct type of reset by first decoding
		 *    the types of errors that caused it.
		 */
3072 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3074 case HCLGE_VECTOR0_EVENT_RST:
3075 hclge_reset_task_schedule(hdev);
3077 case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
3087 hclge_mbx_task_schedule(hdev);
3090 dev_warn(&hdev->pdev->dev,
3091 "received unknown or unhandled event of vector0\n");
3095 hclge_clear_event_cause(hdev, event_cause, clearval);
	/* Enable interrupt if it is not caused by reset. And when clearval
	 * equals 0, it means the interrupt status may have been cleared by
	 * hardware before the driver read the status register. In this case,
	 * vector0 interrupt should also be enabled.
	 */
	if (!clearval ||
	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}
3110 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3112 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3113 dev_warn(&hdev->pdev->dev,
3114 "vector(vector_id %d) has been freed.\n", vector_id);
3118 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3119 hdev->num_msi_left += 1;
3120 hdev->num_msi_used -= 1;
3123 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3125 struct hclge_misc_vector *vector = &hdev->misc_vector;
3127 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3129 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3130 hdev->vector_status[0] = 0;
3132 hdev->num_msi_left -= 1;
3133 hdev->num_msi_used += 1;
3136 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3137 const cpumask_t *mask)
3139 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3142 cpumask_copy(&hdev->affinity_mask, mask);
3145 static void hclge_irq_affinity_release(struct kref *ref)
3149 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3151 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3152 &hdev->affinity_mask);
3154 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3155 hdev->affinity_notify.release = hclge_irq_affinity_release;
3156 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3157 &hdev->affinity_notify);
3160 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3162 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3163 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3166 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3170 hclge_get_misc_vector(hdev);
3172 /* this would be explicitly freed in the end */
3173 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3174 HCLGE_NAME, pci_name(hdev->pdev));
3175 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3176 0, hdev->misc_vector.name, hdev);
3178 hclge_free_vector(hdev, 0);
3179 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3180 hdev->misc_vector.vector_irq);
3186 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3188 free_irq(hdev->misc_vector.vector_irq, hdev);
3189 hclge_free_vector(hdev, 0);
3192 int hclge_notify_client(struct hclge_dev *hdev,
3193 enum hnae3_reset_notify_type type)
3195 struct hnae3_client *client = hdev->nic_client;
3198 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3201 if (!client->ops->reset_notify)
3204 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3205 struct hnae3_handle *handle = &hdev->vport[i].nic;
3208 ret = client->ops->reset_notify(handle, type);
3210 dev_err(&hdev->pdev->dev,
3211 "notify nic client failed %d(%d)\n", type, ret);
3219 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3220 enum hnae3_reset_notify_type type)
3222 struct hnae3_client *client = hdev->roce_client;
3226 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3229 if (!client->ops->reset_notify)
3232 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3233 struct hnae3_handle *handle = &hdev->vport[i].roce;
		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)",
				type, ret);
			return ret;
		}
	}

	return ret;
}
3247 static int hclge_reset_wait(struct hclge_dev *hdev)
#define HCLGE_RESET_WAIT_MS	100
3250 #define HCLGE_RESET_WAIT_CNT 350
3252 u32 val, reg, reg_bit;
	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}
3275 val = hclge_read_dev(&hdev->hw, reg);
3276 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}
	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
3291 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3293 struct hclge_vf_rst_cmd *req;
3294 struct hclge_desc desc;
3296 req = (struct hclge_vf_rst_cmd *)desc.data;
3297 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3298 req->dest_vfid = func_id;
3303 return hclge_cmd_send(&hdev->hw, &desc, 1);
3306 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3310 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3311 struct hclge_vport *vport = &hdev->vport[i];
3314 /* Send cmd to set/clear VF's FUNC_RST_ING */
3315 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3317 dev_err(&hdev->pdev->dev,
3318 "set vf(%u) rst failed %d!\n",
3319 vport->vport_id, ret);
3323 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3326 /* Inform VF to process the reset.
3327 * hclge_inform_reset_assert_to_vf may fail if VF
3328 * driver is not loaded.
3330 ret = hclge_inform_reset_assert_to_vf(vport);
3332 dev_warn(&hdev->pdev->dev,
3333 "inform reset to vf(%u) failed %d!\n",
3334 vport->vport_id, ret);
3340 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3342 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3343 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3344 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3347 hclge_mbx_handler(hdev);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
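/* hclge_func_reset_sync_vf: before asserting a PF or FLR reset, poll
 * firmware until all VFs report that they have stopped IO, handling mailbox
 * requests in between so VFs can bring their netdevs down.
 */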
3352 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3354 struct hclge_pf_rst_sync_cmd *req;
3355 struct hclge_desc desc;
3359 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3360 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3363 /* vf need to down netdev by mbx during PF or FLR reset */
3364 hclge_mailbox_service_task(hdev);
3366 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* for compatibility with old firmware, wait for 100 ms for
	 * the VF to stop IO
	 */
3370 if (ret == -EOPNOTSUPP) {
3371 msleep(HCLGE_RESET_SYNC_TIME);
3374 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3377 } else if (req->all_vf_ready) {
3380 msleep(HCLGE_PF_RESET_SYNC_TIME);
3381 hclge_cmd_reuse_desc(&desc, true);
3382 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3384 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3387 void hclge_report_hw_error(struct hclge_dev *hdev,
3388 enum hnae3_hw_error_type type)
3390 struct hnae3_client *client = hdev->nic_client;
3393 if (!client || !client->ops->process_hw_error ||
3394 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3397 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3398 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3401 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3405 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3406 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3407 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3408 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3409 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3412 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3413 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3414 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3415 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3419 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3421 struct hclge_desc desc;
3422 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3425 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3426 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3427 req->fun_reset_vfid = func_id;
3429 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3431 dev_err(&hdev->pdev->dev,
3432 "send function reset cmd fail, status =%d\n", ret);
3437 static void hclge_do_reset(struct hclge_dev *hdev)
3439 struct hnae3_handle *handle = &hdev->vport[0].nic;
3440 struct pci_dev *pdev = hdev->pdev;
3443 if (hclge_get_hw_reset_stat(handle)) {
3444 dev_info(&pdev->dev, "Hardware reset not finish\n");
3445 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3446 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3447 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3451 switch (hdev->reset_type) {
3452 case HNAE3_GLOBAL_RESET:
3453 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3454 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3455 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3456 dev_info(&pdev->dev, "Global Reset requested\n");
3458 case HNAE3_FUNC_RESET:
3459 dev_info(&pdev->dev, "PF Reset requested\n");
3460 /* schedule again to check later */
3461 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3462 hclge_reset_task_schedule(hdev);
3465 dev_warn(&pdev->dev,
3466 "Unsupported reset type: %d\n", hdev->reset_type);
3471 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3472 unsigned long *addr)
3474 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3475 struct hclge_dev *hdev = ae_dev->priv;
3477 /* first, resolve any unknown reset type to the known type(s) */
3478 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3479 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3480 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3481 /* we will intentionally ignore any errors from this function
3482 * as we will end up in *some* reset request in any case
3484 if (hclge_handle_hw_msix_error(hdev, addr))
3485 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3488 clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused the
		 * interrupt since it was not possible to do that in interrupt
		 * context (and this is the reason we introduced a new UNKNOWN
		 * reset type). Now that the errors have been handled and
		 * cleared in hardware, we can safely enable interrupts. This
		 * is an exception to the norm.
		 */
3496 hclge_enable_vector(&hdev->misc_vector, true);
3499 /* return the highest priority reset level amongst all */
3500 if (test_bit(HNAE3_IMP_RESET, addr)) {
3501 rst_level = HNAE3_IMP_RESET;
3502 clear_bit(HNAE3_IMP_RESET, addr);
3503 clear_bit(HNAE3_GLOBAL_RESET, addr);
3504 clear_bit(HNAE3_FUNC_RESET, addr);
3505 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3506 rst_level = HNAE3_GLOBAL_RESET;
3507 clear_bit(HNAE3_GLOBAL_RESET, addr);
3508 clear_bit(HNAE3_FUNC_RESET, addr);
3509 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3510 rst_level = HNAE3_FUNC_RESET;
3511 clear_bit(HNAE3_FUNC_RESET, addr);
3512 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3513 rst_level = HNAE3_FLR_RESET;
3514 clear_bit(HNAE3_FLR_RESET, addr);
3517 if (hdev->reset_type != HNAE3_NONE_RESET &&
3518 rst_level < hdev->reset_type)
3519 return HNAE3_NONE_RESET;
3524 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3528 switch (hdev->reset_type) {
3529 case HNAE3_IMP_RESET:
3530 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3532 case HNAE3_GLOBAL_RESET:
3533 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3542 /* For revision 0x20, the reset interrupt source
3543 * can only be cleared after hardware reset done
3545 if (hdev->pdev->revision == 0x20)
3546 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3549 hclge_enable_vector(&hdev->misc_vector, true);
3552 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3556 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3558 reg_val |= HCLGE_NIC_SW_RST_RDY;
3560 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3562 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3565 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3569 ret = hclge_set_all_vf_rst(hdev, true);
3573 hclge_func_reset_sync_vf(hdev);
3578 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3583 switch (hdev->reset_type) {
3584 case HNAE3_FUNC_RESET:
3585 ret = hclge_func_reset_notify_vf(hdev);
3589 ret = hclge_func_reset_cmd(hdev, 0);
3591 dev_err(&hdev->pdev->dev,
3592 "asserting function reset fail %d!\n", ret);
		/* After performing PF reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
3601 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3602 hdev->rst_stats.pf_rst_cnt++;
3604 case HNAE3_FLR_RESET:
3605 ret = hclge_func_reset_notify_vf(hdev);
3609 case HNAE3_IMP_RESET:
3610 hclge_handle_imp_error(hdev);
3611 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3612 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3613 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3619 /* inform hardware that preparatory work is done */
3620 msleep(HCLGE_RESET_SYNC_TIME);
3621 hclge_reset_handshake(hdev, true);
3622 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3627 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3629 #define MAX_RESET_FAIL_CNT 5
3631 if (hdev->reset_pending) {
3632 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3633 hdev->reset_pending);
3635 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3636 HCLGE_RESET_INT_M) {
3637 dev_info(&hdev->pdev->dev,
3638 "reset failed because new reset interrupt\n");
3639 hclge_clear_reset_cause(hdev);
3641 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3642 hdev->rst_stats.reset_fail_cnt++;
3643 set_bit(hdev->reset_type, &hdev->reset_pending);
3644 dev_info(&hdev->pdev->dev,
3645 "re-schedule reset task(%u)\n",
3646 hdev->rst_stats.reset_fail_cnt);
3650 hclge_clear_reset_cause(hdev);
3652 /* recover the handshake status when reset fail */
3653 hclge_reset_handshake(hdev, true);
3655 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3657 hclge_dbg_dump_rst_info(hdev);
3659 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3664 static int hclge_set_rst_done(struct hclge_dev *hdev)
3666 struct hclge_pf_rst_done_cmd *req;
3667 struct hclge_desc desc;
3670 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3671 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3672 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3674 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* To be compatible with the old firmware, which does not support
	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
	 * return success.
	 */
3680 dev_warn(&hdev->pdev->dev,
3681 "current firmware does not support command(0x%x)!\n",
3682 HCLGE_OPC_PF_RST_DONE);
3685 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3692 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3696 switch (hdev->reset_type) {
3697 case HNAE3_FUNC_RESET:
3699 case HNAE3_FLR_RESET:
3700 ret = hclge_set_all_vf_rst(hdev, false);
3702 case HNAE3_GLOBAL_RESET:
3704 case HNAE3_IMP_RESET:
3705 ret = hclge_set_rst_done(hdev);
3711 /* clear up the handshake status after re-initialize done */
3712 hclge_reset_handshake(hdev, false);
3717 static int hclge_reset_stack(struct hclge_dev *hdev)
3721 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3725 ret = hclge_reset_ae_dev(hdev->ae_dev);
3729 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3733 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3736 static int hclge_reset_prepare(struct hclge_dev *hdev)
3738 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3741 /* Initialize ae_dev reset status as well, in case enet layer wants to
3742 * know if device is undergoing reset
3744 ae_dev->reset_type = hdev->reset_type;
3745 hdev->rst_stats.reset_cnt++;
3746 /* perform reset of the stack & ae device for a client */
3747 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3752 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3757 return hclge_reset_prepare_wait(hdev);
3760 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3762 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3763 enum hnae3_reset_type reset_level;
3766 hdev->rst_stats.hw_reset_done_cnt++;
3768 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3773 ret = hclge_reset_stack(hdev);
3778 hclge_clear_reset_cause(hdev);
3780 ret = hclge_reset_prepare_up(hdev);
3785 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3786 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3790 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3794 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3799 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3803 hdev->last_reset_time = jiffies;
3804 hdev->rst_stats.reset_fail_cnt = 0;
3805 hdev->rst_stats.reset_done_cnt++;
3806 ae_dev->reset_type = HNAE3_NONE_RESET;
3807 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to fix them.
	 */
3813 reset_level = hclge_get_reset_level(ae_dev,
3814 &hdev->default_reset_request);
3815 if (reset_level != HNAE3_NONE_RESET)
3816 set_bit(reset_level, &hdev->reset_request);
3821 static void hclge_reset(struct hclge_dev *hdev)
3823 if (hclge_reset_prepare(hdev))
3826 if (hclge_reset_wait(hdev))
3829 if (hclge_reset_rebuild(hdev))
3835 if (hclge_reset_err_handle(hdev))
3836 hclge_reset_task_schedule(hdev);
3839 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3841 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3842 struct hclge_dev *hdev = ae_dev->priv;
	/* We might end up getting called broadly because of 2 below cases:
	 * 1. Recoverable error was conveyed through APEI and only way to bring
	 *    normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check if this is a new reset request and we are not here
	 * just because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, let us check after a
	 * sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. And if it is a repeat reset
	 * request of the most recent one then we want to make sure we
	 * throttle the reset request. Therefore, we will not allow it again
	 * before 3*HZ times.
	 */
3860 handle = &hdev->vport[0].nic;
3862 if (time_before(jiffies, (hdev->last_reset_time +
3863 HCLGE_RESET_INTERVAL))) {
3864 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3866 } else if (hdev->default_reset_request) {
3868 hclge_get_reset_level(ae_dev,
3869 &hdev->default_reset_request);
3870 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3871 hdev->reset_level = HNAE3_FUNC_RESET;
3874 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3877 /* request reset & schedule reset task */
3878 set_bit(hdev->reset_level, &hdev->reset_request);
3879 hclge_reset_task_schedule(hdev);
3881 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3882 hdev->reset_level++;
3885 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3886 enum hnae3_reset_type rst_type)
3888 struct hclge_dev *hdev = ae_dev->priv;
3890 set_bit(rst_type, &hdev->default_reset_request);
3893 static void hclge_reset_timer(struct timer_list *t)
3895 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
3900 if (!hdev->default_reset_request)
3903 dev_info(&hdev->pdev->dev,
3904 "triggering reset in reset timer\n");
3905 hclge_reset_event(hdev->pdev, NULL);
3908 static void hclge_reset_subtask(struct hclge_dev *hdev)
3910 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 * a. If we are able to figure out in reasonable time that hardware
	 *    has fully reset then, we can proceed with driver, client
	 *    initialization.
	 * b. else, we can come back later to check this status so re-sched
	 *    now.
	 */
3921 hdev->last_reset_time = jiffies;
3922 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3923 if (hdev->reset_type != HNAE3_NONE_RESET)
3926 /* check if we got any *new* reset requests to be honored */
3927 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3928 if (hdev->reset_type != HNAE3_NONE_RESET)
3929 hclge_do_reset(hdev);
3931 hdev->reset_type = HNAE3_NONE_RESET;
3934 static void hclge_reset_service_task(struct hclge_dev *hdev)
3936 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3939 down(&hdev->reset_sem);
3940 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3942 hclge_reset_subtask(hdev);
3944 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3945 up(&hdev->reset_sem);
3948 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3952 /* start from vport 1 for PF is always alive */
3953 for (i = 1; i < hdev->num_alloc_vport; i++) {
3954 struct hclge_vport *vport = &hdev->vport[i];
3956 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3957 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3959 /* If vf is not alive, set to default value */
3960 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3961 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3965 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3967 unsigned long delta = round_jiffies_relative(HZ);
3969 /* Always handle the link updating to make sure link state is
3970 * updated when it is triggered by mbx.
3972 hclge_update_link_status(hdev);
3974 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3975 delta = jiffies - hdev->last_serv_processed;
3977 if (delta < round_jiffies_relative(HZ)) {
3978 delta = round_jiffies_relative(HZ) - delta;
3983 hdev->serv_processed_cnt++;
3984 hclge_update_vport_alive(hdev);
3986 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3987 hdev->last_serv_processed = jiffies;
3991 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3992 hclge_update_stats_for_all(hdev);
3994 hclge_update_port_info(hdev);
3995 hclge_sync_vlan_filter(hdev);
3997 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3998 hclge_rfs_filter_expire(hdev);
4000 hdev->last_serv_processed = jiffies;
4003 hclge_task_schedule(hdev, delta);
4006 static void hclge_service_task(struct work_struct *work)
4008 struct hclge_dev *hdev =
4009 container_of(work, struct hclge_dev, service_task.work);
4011 hclge_reset_service_task(hdev);
4012 hclge_mailbox_service_task(hdev);
4013 hclge_periodic_service_task(hdev);
4015 /* Handle reset and mbx again in case periodical task delays the
4016 * handling by calling hclge_task_schedule() in
4017 * hclge_periodic_service_task().
4019 hclge_reset_service_task(hdev);
4020 hclge_mailbox_service_task(hdev);
4023 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4025 /* VF handle has no client */
4026 if (!handle->client)
4027 return container_of(handle, struct hclge_vport, nic);
4028 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4029 return container_of(handle, struct hclge_vport, roce);
4031 return container_of(handle, struct hclge_vport, nic);
4034 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4035 struct hnae3_vector_info *vector_info)
4037 struct hclge_vport *vport = hclge_get_vport(handle);
4038 struct hnae3_vector_info *vector = vector_info;
4039 struct hclge_dev *hdev = vport->back;
4043 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4044 vector_num = min(hdev->num_msi_left, vector_num);
4046 for (j = 0; j < vector_num; j++) {
4047 for (i = 1; i < hdev->num_msi; i++) {
4048 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4049 vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
4055 hdev->vector_status[i] = vport->vport_id;
4056 hdev->vector_irq[i] = vector->vector;
4065 hdev->num_msi_left -= alloc;
4066 hdev->num_msi_used += alloc;
4071 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4075 for (i = 0; i < hdev->num_msi; i++)
4076 if (vector == hdev->vector_irq[i])
4082 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4084 struct hclge_vport *vport = hclge_get_vport(handle);
4085 struct hclge_dev *hdev = vport->back;
4088 vector_id = hclge_get_vector_index(hdev, vector);
4089 if (vector_id < 0) {
4090 dev_err(&hdev->pdev->dev,
4091 "Get vector index fail. vector = %d\n", vector);
4095 hclge_free_vector(hdev, vector_id);
4100 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4102 return HCLGE_RSS_KEY_SIZE;
4105 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
	return HCLGE_RSS_IND_TBL_SIZE;
}
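/* hclge_set_rss_algo_key: write the RSS hash algorithm and hash key to
 * hardware; the key is split across several command descriptors, each
 * carrying HCLGE_RSS_HASH_KEY_NUM bytes.
 */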
4110 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4111 const u8 hfunc, const u8 *key)
4113 struct hclge_rss_config_cmd *req;
4114 unsigned int key_offset = 0;
4115 struct hclge_desc desc;
4120 key_counts = HCLGE_RSS_KEY_SIZE;
4121 req = (struct hclge_rss_config_cmd *)desc.data;
4123 while (key_counts) {
4124 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4127 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4128 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4130 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4131 memcpy(req->hash_key,
4132 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4134 key_counts -= key_size;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}
4147 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4149 struct hclge_rss_indirection_table_cmd *req;
4150 struct hclge_desc desc;
4154 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4156 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4157 hclge_cmd_setup_basic_desc
4158 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4160 req->start_table_index =
4161 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4162 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4164 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4165 req->rss_result[j] =
4166 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}
4179 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4180 u16 *tc_size, u16 *tc_offset)
4182 struct hclge_rss_tc_mode_cmd *req;
4183 struct hclge_desc desc;
4187 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4188 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4190 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4193 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4194 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4195 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4196 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4197 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4199 req->rss_tc_mode[i] = cpu_to_le16(mode);
4202 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4204 dev_err(&hdev->pdev->dev,
4205 "Configure rss tc mode fail, status = %d\n", ret);
4210 static void hclge_get_rss_type(struct hclge_vport *vport)
4212 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4213 vport->rss_tuple_sets.ipv4_udp_en ||
4214 vport->rss_tuple_sets.ipv4_sctp_en ||
4215 vport->rss_tuple_sets.ipv6_tcp_en ||
4216 vport->rss_tuple_sets.ipv6_udp_en ||
4217 vport->rss_tuple_sets.ipv6_sctp_en)
4218 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4219 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4220 vport->rss_tuple_sets.ipv6_fragment_en)
4221 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4223 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4226 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4228 struct hclge_rss_input_tuple_cmd *req;
4229 struct hclge_desc desc;
4232 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4234 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4236 /* Get the tuple cfg from pf */
4237 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4238 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4239 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4240 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4241 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4242 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4243 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4244 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4245 hclge_get_rss_type(&hdev->vport[0]);
4246 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4248 dev_err(&hdev->pdev->dev,
4249 "Configure rss input fail, status = %d\n", ret);
4253 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4256 struct hclge_vport *vport = hclge_get_vport(handle);
4259 /* Get hash algorithm */
4261 switch (vport->rss_algo) {
4262 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4263 *hfunc = ETH_RSS_HASH_TOP;
4265 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4266 *hfunc = ETH_RSS_HASH_XOR;
4269 *hfunc = ETH_RSS_HASH_UNKNOWN;
4274 /* Get the RSS Key required by the user */
4276 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4278 /* Get indirect table */
4280 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4281 indir[i] = vport->rss_indirection_tbl[i];
4286 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4287 const u8 *key, const u8 hfunc)
4289 struct hclge_vport *vport = hclge_get_vport(handle);
4290 struct hclge_dev *hdev = vport->back;
	/* Set the RSS Hash Key if specified by the user */
	switch (hfunc) {
	case ETH_RSS_HASH_TOP:
		hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		break;
	case ETH_RSS_HASH_XOR:
		hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
		break;
	case ETH_RSS_HASH_NO_CHANGE:
		hash_algo = vport->rss_algo;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (key) {
		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}
4319 /* Update the shadow RSS table with user specified qids */
4320 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4321 vport->rss_indirection_tbl[i] = indir[i];
4323 /* Update the hardware */
4324 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4327 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4329 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4331 if (nfc->data & RXH_L4_B_2_3)
4332 hash_sets |= HCLGE_D_PORT_BIT;
4334 hash_sets &= ~HCLGE_D_PORT_BIT;
4336 if (nfc->data & RXH_IP_SRC)
4337 hash_sets |= HCLGE_S_IP_BIT;
4339 hash_sets &= ~HCLGE_S_IP_BIT;
4341 if (nfc->data & RXH_IP_DST)
4342 hash_sets |= HCLGE_D_IP_BIT;
4344 hash_sets &= ~HCLGE_D_IP_BIT;
4346 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4347 hash_sets |= HCLGE_V_TAG_BIT;
4352 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4353 struct ethtool_rxnfc *nfc)
4355 struct hclge_vport *vport = hclge_get_vport(handle);
4356 struct hclge_dev *hdev = vport->back;
4357 struct hclge_rss_input_tuple_cmd *req;
4358 struct hclge_desc desc;
4362 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4363 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4366 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4367 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4369 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4370 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4371 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4372 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4373 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4374 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4375 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4376 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4378 tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}
4412 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4414 dev_err(&hdev->pdev->dev,
4415 "Set rss tuple fail, status = %d\n", ret);
4419 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4420 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4421 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4422 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4423 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4424 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4425 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4426 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4427 hclge_get_rss_type(vport);
4431 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4432 struct ethtool_rxnfc *nfc)
4434 struct hclge_vport *vport = hclge_get_vport(handle);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EOPNOTSUPP;
	}
4469 if (tuple_sets & HCLGE_D_PORT_BIT)
4470 nfc->data |= RXH_L4_B_2_3;
4471 if (tuple_sets & HCLGE_S_PORT_BIT)
4472 nfc->data |= RXH_L4_B_0_1;
4473 if (tuple_sets & HCLGE_D_IP_BIT)
4474 nfc->data |= RXH_IP_DST;
4475 if (tuple_sets & HCLGE_S_IP_BIT)
4476 nfc->data |= RXH_IP_SRC;
static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}
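
/* Example of the tc_size encoding used below: with rss_size = 40,
 * roundup_pow_of_two(40) = 64 and ilog2(64) = 6, so tc_size is written
 * as 6 and tc_offset for TC i is 40 * i.
 */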
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	unsigned int i;
	int ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the rounded-up power of two of rss_size; the actual
	 * queue size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}
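
/* The default indirection table spreads queues round-robin: with
 * alloc_rss_size = 4 the table becomes 0, 1, 2, 3, 0, 1, 2, 3, ...
 */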
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}
static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;

	if (hdev->pdev->revision >= 0x21)
		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = rss_algo;

		memcpy(vport[i].rss_hash_key, hclge_hash_key,
		       HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}
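
/* A vector may serve more rings than one command descriptor can carry
 * (HCLGE_VECTOR_ELEMENTS_PER_CMD entries), so the ring chain below is
 * flushed to firmware in batches: whenever the descriptor fills up it is
 * sent and reinitialized, and any remainder is sent after the loop.
 */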
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req =
		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}
static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"failed to get vector index. vector=%d\n", vector);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}
static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}
static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
				      struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
	 * on pdev revision 0x20; newer revisions support them. Setting these
	 * two fields does not make the firmware return an error on
	 * revision 0x20.
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}
static void hclge_promisc_param_init(struct hclge_promisc_param *param,
				     bool en_uc, bool en_mc, bool en_bc,
				     int vport_id)
{
	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
				 bool en_mc_pmc, bool en_bc_pmc)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
}
static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	bool en_bc_pmc = true;

	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should stay disabled until
	 * the user enables promisc mode.
	 */
	if (handle->pdev->revision == 0x20)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}
static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}
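
/* Note the inversions below (~tuple_active, ~meta_data_active): the mask
 * registers are presumably interpreted by firmware as "set bit = ignore
 * this field", so active fields must have their mask bits cleared.
 */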
static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}
static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %u\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	hdev->fd_cfg.proto_support =
		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If we use the max 400bit key, we can support tuples for ether type */
	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
		hdev->fd_cfg.proto_support |= ETHER_FLOW;
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}
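
/* A TCAM entry is wider than a single command descriptor, so the key is
 * written with three chained descriptors (linked via HCLGE_CMD_FLAG_NEXT);
 * req1/req2/req3 each carry a consecutive slice of the key bytes.
 */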
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}
static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->next_input_key);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}
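
/* Build the per-tuple TCAM x/y pattern from a rule's value/mask pair via
 * the calc_x()/calc_y() helpers defined elsewhere in this file. Assuming
 * the usual TCAM x/y convention (x = ~value & mask, y = value & mask),
 * a fully-masked field yields x = y = 0, i.e. "don't care". Note that
 * MAC addresses are written byte-reversed (key_x[ETH_ALEN - 1 - i]).
 */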
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}
static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	unsigned int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}
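
/* Worked layout example, assuming the 400 bit (50 byte) stage-1 key mode:
 * the meta data words start at byte offset 50 - MAX_META_DATA_LENGTH / 8
 * from the key base, while the tuple bytes grow upwards from offset 0.
 */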
/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region; unused bits are filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	unsigned int i;
	int ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
	return ret;
}
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs, u32 *unused)
{
	struct ethtool_tcpip4_spec *tcp_ip4_spec;
	struct ethtool_usrip4_spec *usr_ip4_spec;
	struct ethtool_tcpip6_spec *tcp_ip6_spec;
	struct ethtool_usrip6_spec *usr_ip6_spec;
	struct ethhdr *ether_spec;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
		return -EOPNOTSUPP;

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

		if (!tcp_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip4_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip4_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (!tcp_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		break;
	case IP_USER_FLOW:
		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

		if (!usr_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		if (!usr_ip4_spec->proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip4_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
			return -EOPNOTSUPP;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);

		/* check whether src/dst ip address is used */
		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip6_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip6_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (tcp_ip6_spec->tclass)
			return -EOPNOTSUPP;

		break;
	case IPV6_USER_FLOW:
		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
			BIT(INNER_DST_PORT);

		/* check whether src/dst ip address is used */
		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip6_spec->l4_proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip6_spec->tclass)
			return -EOPNOTSUPP;

		if (usr_ip6_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		break;
	case ETHER_FLOW:
		ether_spec = &fs->h_u.ether_spec;
		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

		if (is_zero_ether_addr(ether_spec->h_source))
			*unused |= BIT(INNER_SRC_MAC);

		if (is_zero_ether_addr(ether_spec->h_dest))
			*unused |= BIT(INNER_DST_MAC);

		if (!ether_spec->h_proto)
			*unused |= BIT(INNER_ETH_TYPE);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		if (fs->h_ext.vlan_etype)
			return -EOPNOTSUPP;
		if (!fs->h_ext.vlan_tci)
			*unused |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci) {
			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	} else {
		*unused |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
			return -EOPNOTSUPP;

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused |= BIT(INNER_DST_MAC);
		else
			*unused &= ~(BIT(INNER_DST_MAC));
	}

	return 0;
}
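
/* The rule list is kept sorted by ascending location (see how
 * hclge_fd_update_rule_list() inserts behind "parent"), so lookups can
 * stop at the first rule whose location is >= the target.
 */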
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return rule && rule->location == location;
}
/* make sure it is called after locking with fd_rule_lock */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add) {
			if (!hdev->hclge_fd_rule_num)
				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
			clear_bit(location, hdev->fd_bmap);

			return 0;
		}
	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %u does not exist\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	set_bit(location, hdev->fd_bmap);
	hdev->hclge_fd_rule_num++;
	hdev->fd_active_type = new_rule->rule_type;

	return 0;
}
static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}
/* make sure it is called after locking with fd_rule_lock */
static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	if (!rule) {
		dev_err(&hdev->pdev->dev,
			"The flow director rule is NULL\n");
		return -EINVAL;
	}

	/* it never fails here, so there is no need to check the return value */
	hclge_fd_update_rule_list(hdev, rule, rule->location, true);

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	return 0;

clear_rule:
	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
	return ret;
}
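
/* ethtool entry point for adding a flow director rule. The flow is:
 * validate the spec, decode the ring cookie into a drop/queue action,
 * drop any aRFS-installed rules to avoid conflicts, then program the
 * rule under fd_rule_lock.
 */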
static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	if (!hdev->fd_en) {
		dev_warn(&hdev->pdev->dev,
			 "Please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
		return ret;
	}

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%u) > max vf num (%u)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%u) > max tqp num (%u)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;

	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	/* to avoid rule conflict, when the user configures a rule by ethtool,
	 * we need to clear all arfs rules
	 */
	hclge_clear_arfs_rules(handle);

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}
static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u does not exist\n", fs->location);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		return ret;

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
				     bool clear_list)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	spin_lock_bh(&hdev->fd_rule_lock);
	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	/* Return ok here, because reset error handling will check this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	/* if fd is disabled, it should not be restored during reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
		if (!ret)
			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);

		if (ret) {
			dev_warn(&hdev->pdev->dev,
				 "Restore rule %u failed, remove it\n",
				 rule->location);
			clear_bit(rule->location, hdev->fd_bmap);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	}

	if (hdev->hclge_fd_rule_num)
		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		fs->h_u.tcp_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip4_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip4_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.tcp_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		break;
	case IP_USER_FLOW:
		fs->h_u.usr_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.usr_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip4_spec.proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip6_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip6_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		break;
	case IPV6_USER_FLOW:
		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip6_spec.l4_proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		break;
	case ETHER_FLOW:
		ether_addr_copy(fs->h_u.ether_spec.h_source,
				rule->tuples.src_mac);
		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_source);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_source,
					rule->tuples_mask.src_mac);

		ether_addr_copy(fs->h_u.ether_spec.h_dest,
				rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);

		fs->h_u.ether_spec.h_proto =
				cpu_to_be16(rule->tuples.ether_proto);
		fs->m_u.ether_spec.h_proto =
				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
				0 : cpu_to_be16(rule->tuples_mask.ether_proto);

		break;
	default:
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
			rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
			cpu_to_be16(VLAN_VID_MASK) :
			cpu_to_be16(rule->tuples_mask.vlan_tag1);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
	}

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32

	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		int i;

		for (i = 0; i < IPV6_SIZE; i++) {
			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
		}
	}
}
/* traverse all rules, check whether an existing rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}
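
/* aRFS rules only match on the tuples the flow dissector provides
 * (IP addresses, L4 protocol and destination port); everything else,
 * including the source port, is marked unused and therefore masked out
 * of the TCAM key.
 */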
static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}
static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples;
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 tmp_queue_id;
	u16 bit_id;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	memset(&new_tuples, 0, sizeof(new_tuples));
	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	spin_lock_bh(&hdev->fd_rule_lock);

	/* when there is already an fd rule added by the user,
	 * arfs should not work
	 */
	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -EOPNOTSUPP;
	}

	/* check whether a flow director filter exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOSPC;
		}

		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOMEM;
		}

		set_bit(bit_id, hdev->fd_bmap);
		rule->location = bit_id;
		rule->flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		ret = hclge_fd_config_rule(hdev, rule);

		spin_unlock_bh(&hdev->fd_rule_lock);

		if (ret)
			return ret;

		return rule->location;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (rule->queue_id == queue_id)
		return rule->location;

	tmp_queue_id = rule->queue_id;
	rule->queue_id = queue_id;
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret) {
		rule->queue_id = tmp_queue_id;
		return ret;
	}

	return rule->location;
}
static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
	if (!enable)
		hclge_del_all_fd_entries(handle, clear);
	else
		hclge_restore_fd_entries(handle);
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);

	if (enable) {
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
	}

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
				     u8 switch_param, u8 param_mask)
{
	struct hclge_mac_vlan_switch_cmd *req;
	struct hclge_desc desc;
	u32 func_id;
	int ret;

	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;

	/* read current config parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
				   true);
	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
	req->func_id = cpu_to_le32(func_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"read mac vlan switch parameter fail, ret = %d\n",
			ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_cmd_reuse_desc(&desc, false);
	req->switch_param = (req->switch_param & param_mask) | switch_param;
	req->param_mask = param_mask;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set mac vlan switch parameter fail, ret = %d\n", ret);
	return ret;
}
static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
				       int link_ret)
{
#define HCLGE_PHY_LINK_STATUS_NUM  200

	struct phy_device *phydev = hdev->hw.mac.phydev;
	int i = 0;
	int ret;

	do {
		ret = phy_read_status(phydev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"phy update link status fail, ret = %d\n",
				ret);
			return;
		}

		if (phydev->link == link_ret)
			break;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
}
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
{
#define HCLGE_MAC_LINK_STATUS_NUM  100

	int i = 0;
	int ret;

	do {
		ret = hclge_get_mac_link_status(hdev);
		if (ret < 0)
			return ret;
		else if (ret == link_ret)
			return 0;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);

	return -EBUSY;
}
static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
					  bool is_phy)
{
#define HCLGE_LINK_STATUS_DOWN 0
#define HCLGE_LINK_STATUS_UP   1

	int link_ret;

	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	if (is_phy)
		hclge_phy_link_status_wait(hdev, link_ret);

	return hclge_mac_link_status_wait(hdev, link_ret);
}
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100

	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_serdes_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported serdes loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
	} else {
		req->mask = loop_mode_b;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get fail, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	return ret;
}
static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
	if (ret)
		return ret;

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"serdes loopback config mac mode timeout\n");

	return ret;
}
static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
				     struct phy_device *phydev)
{
	int ret;

	if (!phydev->suspended) {
		ret = phy_suspend(phydev);
		if (ret)
			return ret;
	}

	ret = phy_resume(phydev);
	if (ret)
		return ret;

	return phy_loopback(phydev, true);
}

static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
				      struct phy_device *phydev)
{
	int ret;

	ret = phy_loopback(phydev, false);
	if (ret)
		return ret;

	return phy_suspend(phydev);
}
static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret;

	if (!phydev)
		return -ENOTSUPP;

	if (en)
		ret = hclge_enable_phy_loopback(hdev, phydev);
	else
		ret = hclge_disable_phy_loopback(hdev, phydev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set phy loopback fail, ret = %d\n", ret);
		return ret;
	}

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"phy loopback config mac mode timeout\n");

	return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);

	return ret;
}
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
	 * the same, the packets are looped back in the SSU. If SSU loopback
	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
	 */
	if (hdev->pdev->revision >= 0x21) {
		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);

		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
						HCLGE_SWITCH_ALW_LPBK_MASK);
		if (ret)
			return ret;
	}

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	case HNAE3_LOOP_PHY:
		ret = hclge_set_phy_loopback(hdev, en);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_set_default_loopback(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_app_loopback(hdev, false);
	if (ret)
		return ret;

	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
	if (ret)
		return ret;

	return hclge_cfg_serdes_loopback(hdev, false,
					 HNAE3_LOOP_PARALLEL_SERDES);
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

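/* Wait (bounded by HCLGE_FLUSH_LINK_TIMEOUT iterations) for a concurrent
 * link-status update in the service task to finish. The loop also exits
 * once serv_processed_cnt changes, because the service task has then run
 * a full round and is guaranteed to have seen the DOWN flag.
 */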
static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
	} else {
		/* Set the DOWN flag here to disable link updating */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclge_flush_link_update(hdev);
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_clear_arfs_rules(handle);

	/* If it is not PF reset, the firmware will disable the MAC,
	 * so it only needs to stop the PHY here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		hclge_update_link_status(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	hclge_config_mac_tnl_int(hdev, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}

int hclge_vport_start(struct hclge_vport *vport)
{
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}

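/* Translate a MAC-VLAN table command completion into an errno. cmdq_resp
 * is the command queue status: non-zero means the command itself failed
 * and maps to -EIO. resp_code is the table-operation result, and its
 * meaning depends on the opcode: for ADD, 0/1 mean success while the
 * overflow codes mean the table is full (-ENOSPC); for REMOVE and
 * LOOKUP, 0 means hit and 1 means miss (-ENOENT).
 */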
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if (!resp_code || resp_code == 1)
			return 0;

		if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
			return -ENOSPC;
		}

		if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
			return -ENOSPC;
		}

		dev_err(&hdev->pdev->dev,
			"add mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code)
			return 0;

		if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"remove mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code)
			return 0;

		if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	}

	dev_err(&hdev->pdev->dev,
		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);

	return -EINVAL;
}

static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}

static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
				  true);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "Alloc umv space failed, want %u, get %u\n",
			 hdev->wanted_umv_size, allocated_size);

	mutex_init(&hdev->umv_mutex);
	hdev->max_umv_size = allocated_size;
	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
	 * preserve some unicast mac vlan table entries shared by pf
	 * and its vfs.
	 */
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);

	return 0;
}

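/* Worked example of the split above (numbers purely illustrative): with
 * max_umv_size = 256 and num_req_vfs = 2, the PF and each VF get a
 * private quota of 256 / (2 + 2) = 64 entries, and the shared pool is
 * 64 + 256 % 4 = 64 entries, available to any function that has used up
 * its private quota.
 */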
static int hclge_uninit_umv_space(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->max_umv_size > 0) {
		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
					  false);
		if (ret)
			return ret;
		hdev->max_umv_size = 0;
	}
	mutex_destroy(&hdev->umv_mutex);

	return 0;
}

static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
	if (!is_alloc)
		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"%s umv space failed for cmd_send, ret =%d\n",
			is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->umv_mutex);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);
	mutex_unlock(&hdev->umv_mutex);
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	mutex_lock(&hdev->umv_mutex);
	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);
	mutex_unlock(&hdev->umv_mutex);

	return is_full;
}

static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->umv_mutex);
	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
	mutex_unlock(&hdev->umv_mutex);
}

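/* Accounting policy implemented above: an added address consumes the
 * vport's private quota first; only allocations beyond priv_umv_size
 * take from the shared pool, and frees return entries to the shared
 * pool while usage is still above the private quota.
 */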
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr, is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is nonexistent. Repeated unicast entries
	 * are not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		if (!hclge_is_umv_space_full(vport)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			return ret;
		}

		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
			hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}

static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret)
		hclge_update_umv_space(vport, true);

	return ret;
}

static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	if (status == -ENOSPC)
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}

static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfids are zero, update the vfids */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* Maybe this mac address is in the mta table, but it cannot
		 * be deleted here because an entry of mta represents an
		 * address range rather than a specific address. The delete
		 * action to all entries will take effect in update_mta_status
		 * called by hns3_nic_set_rx_mode.
		 */
		status = 0;
	}

	return status;
}

7430 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7431 enum HCLGE_MAC_ADDR_TYPE mac_type)
7433 struct hclge_vport_mac_addr_cfg *mac_cfg;
7434 struct list_head *list;
7436 if (!vport->vport_id)
7439 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7443 mac_cfg->hd_tbl_status = true;
7444 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7446 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7447 &vport->uc_mac_list : &vport->mc_mac_list;
7449 list_add_tail(&mac_cfg->node, list);
7452 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7454 enum HCLGE_MAC_ADDR_TYPE mac_type)
7456 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7457 struct list_head *list;
7458 bool uc_flag, mc_flag;
7460 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7461 &vport->uc_mac_list : &vport->mc_mac_list;
7463 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7464 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7466 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7467 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7468 if (uc_flag && mac_cfg->hd_tbl_status)
7469 hclge_rm_uc_addr_common(vport, mac_addr);
7471 if (mc_flag && mac_cfg->hd_tbl_status)
7472 hclge_rm_mc_addr_common(vport, mac_addr);
7474 list_del(&mac_cfg->node);
7481 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7482 enum HCLGE_MAC_ADDR_TYPE mac_type)
7484 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7485 struct list_head *list;
7487 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7488 &vport->uc_mac_list : &vport->mc_mac_list;
7490 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7491 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7492 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7494 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7495 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7497 mac_cfg->hd_tbl_status = false;
7499 list_del(&mac_cfg->node);
7505 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7507 struct hclge_vport_mac_addr_cfg *mac, *tmp;
7508 struct hclge_vport *vport;
7511 for (i = 0; i < hdev->num_alloc_vport; i++) {
7512 vport = &hdev->vport[i];
7513 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7514 list_del(&mac->node);
7518 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7519 list_del(&mac->node);
7525 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7526 u16 cmdq_resp, u8 resp_code)
7528 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7529 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
7530 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7531 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7536 dev_err(&hdev->pdev->dev,
7537 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7542 switch (resp_code) {
7543 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7544 case HCLGE_ETHERTYPE_ALREADY_ADD:
7547 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7548 dev_err(&hdev->pdev->dev,
7549 "add mac ethertype failed for manager table overflow.\n");
7550 return_status = -EIO;
7552 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7553 dev_err(&hdev->pdev->dev,
7554 "add mac ethertype failed for key conflict.\n");
7555 return_status = -EIO;
7558 dev_err(&hdev->pdev->dev,
7559 "add mac ethertype failed for undefined, code=%u.\n",
7561 return_status = -EIO;
7564 return return_status;
7567 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7570 struct hclge_mac_vlan_tbl_entry_cmd req;
7571 struct hclge_dev *hdev = vport->back;
7572 struct hclge_desc desc;
7573 u16 egress_port = 0;
7576 if (is_zero_ether_addr(mac_addr))
7579 memset(&req, 0, sizeof(req));
7580 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7581 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7582 req.egress_port = cpu_to_le16(egress_port);
7583 hclge_prepare_mac_addr(&req, mac_addr, false);
7585 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7588 vf_idx += HCLGE_VF_VPORT_START_NUM;
7589 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7591 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7597 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7600 struct hclge_vport *vport = hclge_get_vport(handle);
7601 struct hclge_dev *hdev = vport->back;
7603 vport = hclge_get_vf_vport(hdev, vf);
7607 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7608 dev_info(&hdev->pdev->dev,
7609 "Specified MAC(=%pM) is same as before, no change committed!\n",
7614 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7615 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7620 ether_addr_copy(vport->vf_info.mac, mac_addr);
7621 dev_info(&hdev->pdev->dev,
7622 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7625 return hclge_inform_reset_assert_to_vf(vport);
7628 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7629 const struct hclge_mac_mgr_tbl_entry_cmd *req)
7631 struct hclge_desc desc;
7636 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7637 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7639 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7641 dev_err(&hdev->pdev->dev,
7642 "add mac ethertype failed for cmd_send, ret =%d.\n",
7647 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7648 retval = le16_to_cpu(desc.retval);
7650 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7653 static int init_mgr_tbl(struct hclge_dev *hdev)
7658 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7659 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7661 dev_err(&hdev->pdev->dev,
7662 "add mac ethertype failed, ret =%d.\n",
7671 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7673 struct hclge_vport *vport = hclge_get_vport(handle);
7674 struct hclge_dev *hdev = vport->back;
7676 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7679 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7682 const unsigned char *new_addr = (const unsigned char *)p;
7683 struct hclge_vport *vport = hclge_get_vport(handle);
7684 struct hclge_dev *hdev = vport->back;
7687 /* mac addr check */
7688 if (is_zero_ether_addr(new_addr) ||
7689 is_broadcast_ether_addr(new_addr) ||
7690 is_multicast_ether_addr(new_addr)) {
7691 dev_err(&hdev->pdev->dev,
7692 "Change uc mac err! invalid mac:%pM.\n",
7697 if ((!is_first || is_kdump_kernel()) &&
7698 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7699 dev_warn(&hdev->pdev->dev,
7700 "remove old uc mac address fail.\n");
7702 ret = hclge_add_uc_addr(handle, new_addr);
7704 dev_err(&hdev->pdev->dev,
7705 "add uc mac address fail, ret =%d.\n",
7709 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7710 dev_err(&hdev->pdev->dev,
7711 "restore uc mac address fail.\n");
7716 ret = hclge_pause_addr_cfg(hdev, new_addr);
7718 dev_err(&hdev->pdev->dev,
7719 "configure mac pause address fail, ret =%d.\n",
7724 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7729 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7732 struct hclge_vport *vport = hclge_get_vport(handle);
7733 struct hclge_dev *hdev = vport->back;
7735 if (!hdev->hw.mac.phydev)
7738 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}

#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)

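/* On revision 0x21 and later the VF table carries the egress enables and
 * the port table carries the ingress enables, with separate bits for the
 * NIC and RoCE paths; revision 0x20 only has the single
 * HCLGE_FILTER_FE_EGRESS_V1_B knob.
 */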
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->pdev->revision >= 0x21) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable, 0);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable, 0);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
					   0);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}

static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
				    bool is_kill, u16 vlan,
				    __be16 proto)
{
	struct hclge_vport *vport = &hdev->vport[vfid];
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	/* If the vf vlan table is full, firmware closes the vf vlan filter,
	 * so it is impossible and unnecessary to add a new vlan id to the
	 * vf vlan filter. If spoof check is enabled and the vf vlan table
	 * is full, no new vlan should be added either, because tx packets
	 * with these vlan ids would be dropped.
	 */
	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
		if (vport->vf_info.spoofchk && vlan) {
			dev_err(&hdev->pdev->dev,
				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
			return -EPERM;
		}
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%u.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		/* The vf vlan filter is disabled when the vf vlan table is
		 * full, so a new vlan id is never added to the table in that
		 * state. Just return 0 without warning to avoid massive
		 * verbose logs on unload.
		 */
		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%u.\n",
			req0->resp_code);
	}

	return -EIO;
}

static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
			   HCLGE_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);

	return ret;
}

7916 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7917 u16 vport_id, u16 vlan_id,
7920 u16 vport_idx, vport_num = 0;
7923 if (is_kill && !vlan_id)
7926 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7929 dev_err(&hdev->pdev->dev,
7930 "Set %u vport vlan filter config fail, ret =%d.\n",
7935 /* vlan 0 may be added twice when 8021q module is enabled */
7936 if (!is_kill && !vlan_id &&
7937 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7940 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7941 dev_err(&hdev->pdev->dev,
7942 "Add port vlan failed, vport %u is already in vlan %u\n",
7948 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7949 dev_err(&hdev->pdev->dev,
7950 "Delete port vlan failed, vport %u is not in vlan %u\n",
7955 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7958 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7959 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7965 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7967 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7968 struct hclge_vport_vtag_tx_cfg_cmd *req;
7969 struct hclge_dev *hdev = vport->back;
7970 struct hclge_desc desc;
7974 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7976 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7977 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7978 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7979 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7980 vcfg->accept_tag1 ? 1 : 0);
7981 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7982 vcfg->accept_untag1 ? 1 : 0);
7983 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7984 vcfg->accept_tag2 ? 1 : 0);
7985 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7986 vcfg->accept_untag2 ? 1 : 0);
7987 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7988 vcfg->insert_tag1_en ? 1 : 0);
7989 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7990 vcfg->insert_tag2_en ? 1 : 0);
7991 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7993 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7994 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7995 HCLGE_VF_NUM_PER_BYTE;
7996 req->vf_bitmap[bmap_index] =
7997 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7999 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8001 dev_err(&hdev->pdev->dev,
8002 "Send port txvlan cfg command fail, ret =%d\n",
8008 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8010 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8011 struct hclge_vport_vtag_rx_cfg_cmd *req;
8012 struct hclge_dev *hdev = vport->back;
8013 struct hclge_desc desc;
8017 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8019 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8020 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8021 vcfg->strip_tag1_en ? 1 : 0);
8022 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8023 vcfg->strip_tag2_en ? 1 : 0);
8024 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8025 vcfg->vlan1_vlan_prionly ? 1 : 0);
8026 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8027 vcfg->vlan2_vlan_prionly ? 1 : 0);
8029 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8030 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8031 HCLGE_VF_NUM_PER_BYTE;
8032 req->vf_bitmap[bmap_index] =
8033 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8035 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8037 dev_err(&hdev->pdev->dev,
8038 "Send port rxvlan cfg command fail, ret =%d\n",
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		vport->txvlan_cfg.accept_tag1 = false;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20); newer revisions support them,
	 * but these two fields cannot be configured by the user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

8090 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8092 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8093 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8094 struct hclge_desc desc;
8097 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8098 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8099 rx_req->ot_fst_vlan_type =
8100 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8101 rx_req->ot_sec_vlan_type =
8102 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8103 rx_req->in_fst_vlan_type =
8104 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8105 rx_req->in_sec_vlan_type =
8106 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8108 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8110 dev_err(&hdev->pdev->dev,
8111 "Send rxvlan protocol type command fail, ret =%d\n",
8116 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8118 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8119 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8120 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8122 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8124 dev_err(&hdev->pdev->dev,
8125 "Send txvlan protocol type command fail, ret =%d\n",
8131 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8133 #define HCLGE_DEF_VLAN_TYPE 0x8100
8135 struct hnae3_handle *handle = &hdev->vport[0].nic;
8136 struct hclge_vport *vport;
8140 if (hdev->pdev->revision >= 0x21) {
8141 /* for revision 0x21, vf vlan filter is per function */
8142 for (i = 0; i < hdev->num_alloc_vport; i++) {
8143 vport = &hdev->vport[i];
8144 ret = hclge_set_vlan_filter_ctrl(hdev,
8145 HCLGE_FILTER_TYPE_VF,
8146 HCLGE_FILTER_FE_EGRESS,
8153 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8154 HCLGE_FILTER_FE_INGRESS, true,
8159 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8160 HCLGE_FILTER_FE_EGRESS_V1_B,
8166 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8168 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8169 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8170 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8171 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8172 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8173 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8175 ret = hclge_set_vlan_protocol_type(hdev);
8179 for (i = 0; i < hdev->num_alloc_vport; i++) {
8182 vport = &hdev->vport[i];
8183 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8185 ret = hclge_vlan_offload_cfg(vport,
8186 vport->port_base_vlan_cfg.state,
8192 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8195 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8198 struct hclge_vport_vlan_cfg *vlan;
8200 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8204 vlan->hd_tbl_status = writen_to_tbl;
8205 vlan->vlan_id = vlan_id;
8207 list_add_tail(&vlan->node, &vport->vlan_list);
8210 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8212 struct hclge_vport_vlan_cfg *vlan, *tmp;
8213 struct hclge_dev *hdev = vport->back;
8216 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8217 if (!vlan->hd_tbl_status) {
8218 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8220 vlan->vlan_id, false);
8222 dev_err(&hdev->pdev->dev,
8223 "restore vport vlan list failed, ret=%d\n",
8228 vlan->hd_tbl_status = true;
8234 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8237 struct hclge_vport_vlan_cfg *vlan, *tmp;
8238 struct hclge_dev *hdev = vport->back;
8240 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8241 if (vlan->vlan_id == vlan_id) {
8242 if (is_write_tbl && vlan->hd_tbl_status)
8243 hclge_set_vlan_filter_hw(hdev,
8249 list_del(&vlan->node);
8256 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8258 struct hclge_vport_vlan_cfg *vlan, *tmp;
8259 struct hclge_dev *hdev = vport->back;
8261 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8262 if (vlan->hd_tbl_status)
8263 hclge_set_vlan_filter_hw(hdev,
8269 vlan->hd_tbl_status = false;
8271 list_del(&vlan->node);
8275 clear_bit(vport->vport_id, hdev->vf_vlan_full);
8278 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8280 struct hclge_vport_vlan_cfg *vlan, *tmp;
8281 struct hclge_vport *vport;
8284 for (i = 0; i < hdev->num_alloc_vport; i++) {
8285 vport = &hdev->vport[i];
8286 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8287 list_del(&vlan->node);
8293 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8295 struct hclge_vport *vport = hclge_get_vport(handle);
8296 struct hclge_vport_vlan_cfg *vlan, *tmp;
8297 struct hclge_dev *hdev = vport->back;
8302 for (i = 0; i < hdev->num_alloc_vport; i++) {
8303 vport = &hdev->vport[i];
8304 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8305 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8306 state = vport->port_base_vlan_cfg.state;
8308 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8309 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8310 vport->vport_id, vlan_id,
8315 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8318 if (!vlan->hd_tbl_status)
8320 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8322 vlan->vlan_id, false);
8329 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8331 struct hclge_vport *vport = hclge_get_vport(handle);
8333 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8334 vport->rxvlan_cfg.strip_tag1_en = false;
8335 vport->rxvlan_cfg.strip_tag2_en = enable;
8337 vport->rxvlan_cfg.strip_tag1_en = enable;
8338 vport->rxvlan_cfg.strip_tag2_en = true;
8340 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8341 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8342 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8344 return hclge_set_vlan_rx_offload_cfg(vport);
8347 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8348 u16 port_base_vlan_state,
8349 struct hclge_vlan_info *new_info,
8350 struct hclge_vlan_info *old_info)
8352 struct hclge_dev *hdev = vport->back;
8355 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8356 hclge_rm_vport_all_vlan_table(vport, false);
8357 return hclge_set_vlan_filter_hw(hdev,
8358 htons(new_info->vlan_proto),
8364 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8365 vport->vport_id, old_info->vlan_tag,
8370 return hclge_add_vport_all_vlan_table(vport);
8373 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8374 struct hclge_vlan_info *vlan_info)
8376 struct hnae3_handle *nic = &vport->nic;
8377 struct hclge_vlan_info *old_vlan_info;
8378 struct hclge_dev *hdev = vport->back;
8381 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8383 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8387 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8388 /* add new VLAN tag */
8389 ret = hclge_set_vlan_filter_hw(hdev,
8390 htons(vlan_info->vlan_proto),
8392 vlan_info->vlan_tag,
8397 /* remove old VLAN tag */
8398 ret = hclge_set_vlan_filter_hw(hdev,
8399 htons(old_vlan_info->vlan_proto),
8401 old_vlan_info->vlan_tag,
8409 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8414 /* update state only when disable/enable port based VLAN */
8415 vport->port_base_vlan_cfg.state = state;
8416 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8417 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8419 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8422 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8423 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8424 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_ENABLE;
	} else {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_DISABLE;
		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_MODIFY;
	}
}

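/* Decision table implemented above (current state, requested vlan):
 *   DISABLE + vlan == 0       -> NOCHANGE
 *   DISABLE + vlan != 0       -> ENABLE
 *   ENABLE  + vlan == 0       -> DISABLE
 *   ENABLE  + vlan == current -> NOCHANGE
 *   ENABLE  + other vlan      -> MODIFY
 */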
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vfid);
	if (!vport)
		return -EINVAL;

	/* qos is a 3-bit value, so it cannot be bigger than 7 */
	if (vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);
	} else {
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							vport->vport_id, state,
							vlan, qos,
							ntohs(proto));
		return ret;
	}
}

int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When the device is resetting, firmware is unable to handle
	 * mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
		return -EBUSY;
	}

	/* When port based vlan is enabled, we use the port based vlan as the
	 * vlan filter entry. In this case, we don't update the vlan filter
	 * table when the user adds a new vlan or removes an existing vlan;
	 * only the vport vlan list is updated. The vlan ids in the vlan list
	 * are written to the vlan filter table once port based vlan is
	 * disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, is_kill);
		writen_to_tbl = true;
	}

	if (!ret) {
		if (is_kill)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
		else
			hclge_add_vport_vlan_table(vport, vlan_id,
						   writen_to_tbl);
	} else if (is_kill) {
		/* When removing a hw vlan filter failed, record the vlan id,
		 * and try to remove it from hw later, to stay consistent
		 * with the stack.
		 */
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
	}
	return ret;
}

8537 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8539 #define HCLGE_MAX_SYNC_COUNT 60
8541 int i, ret, sync_cnt = 0;
8544 /* start from vport 1 for PF is always alive */
8545 for (i = 0; i < hdev->num_alloc_vport; i++) {
8546 struct hclge_vport *vport = &hdev->vport[i];
8548 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8550 while (vlan_id != VLAN_N_VID) {
8551 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8552 vport->vport_id, vlan_id,
8554 if (ret && ret != -EINVAL)
8557 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8558 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8561 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8564 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}

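/* The checks below work on the full frame size rather than the MTU:
 * with ETH_HLEN (14), ETH_FCS_LEN (4) and room for two VLAN tags
 * (2 * VLAN_HLEN = 8), a requested MTU of 1500 corresponds to a
 * max_frm_size of 1526 bytes.
 */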
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

	/* HW supports 2 layers of vlan tags */
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than or equal to each VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}

8644 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8647 struct hclge_reset_tqp_queue_cmd *req;
8648 struct hclge_desc desc;
8651 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8653 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8654 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8656 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8658 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8660 dev_err(&hdev->pdev->dev,
8661 "Send tqp reset cmd error, status =%d\n", ret);
8668 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8670 struct hclge_reset_tqp_queue_cmd *req;
8671 struct hclge_desc desc;
8674 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8676 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8677 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8679 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8681 dev_err(&hdev->pdev->dev,
8682 "Get reset status error, status =%d\n", ret);
8686 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

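/* Per-queue reset sequence: disable the TQP, ask the firmware to assert
 * the queue reset, poll the ready bit up to HCLGE_TQP_RESET_TRY_TIMES
 * times with a ~1ms sleep in between, then deassert the soft reset.
 */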
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;

		/* Wait for tqp hw reset */
		usleep_range(1000, 1200);
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}

8746 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8748 struct hclge_dev *hdev = vport->back;
8749 int reset_try_times = 0;
8754 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8756 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8758 dev_warn(&hdev->pdev->dev,
8759 "Send reset tqp cmd fail, ret = %d\n", ret);
8763 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8764 reset_status = hclge_get_reset_status(hdev, queue_gid);
8768 /* Wait for tqp hw reset */
8769 usleep_range(1000, 1200);
8772 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8773 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8777 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8779 dev_warn(&hdev->pdev->dev,
8780 "Deassert the soft reset fail, ret = %d\n", ret);
8783 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8785 struct hclge_vport *vport = hclge_get_vport(handle);
8786 struct hclge_dev *hdev = vport->back;
8788 return hdev->fw_version;
8791 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8793 struct phy_device *phydev = hdev->hw.mac.phydev;
8798 phy_set_asym_pause(phydev, rx_en, tx_en);
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);

	return ret;
}

int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

8848 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8849 u32 *rx_en, u32 *tx_en)
8851 struct hclge_vport *vport = hclge_get_vport(handle);
8852 struct hclge_dev *hdev = vport->back;
8853 struct phy_device *phydev = hdev->hw.mac.phydev;
8855 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8857 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8863 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8866 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8869 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8878 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8879 u32 rx_en, u32 tx_en)
8882 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8883 else if (rx_en && !tx_en)
8884 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8885 else if (!rx_en && tx_en)
8886 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8888 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8890 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8893 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8894 u32 rx_en, u32 tx_en)
8896 struct hclge_vport *vport = hclge_get_vport(handle);
8897 struct hclge_dev *hdev = vport->back;
8898 struct phy_device *phydev = hdev->hw.mac.phydev;
8902 fc_autoneg = hclge_get_autoneg(handle);
8903 if (auto_neg != fc_autoneg) {
8904 dev_info(&hdev->pdev->dev,
8905 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8910 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8911 dev_info(&hdev->pdev->dev,
8912 "Priority flow control enabled. Cannot set link flow control.\n");
8916 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8918 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8921 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8924 return phy_start_aneg(phydev);
8929 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8930 u8 *auto_neg, u32 *speed, u8 *duplex)
8932 struct hclge_vport *vport = hclge_get_vport(handle);
8933 struct hclge_dev *hdev = vport->back;
8936 *speed = hdev->hw.mac.speed;
8938 *duplex = hdev->hw.mac.duplex;
8940 *auto_neg = hdev->hw.mac.autoneg;
8943 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8946 struct hclge_vport *vport = hclge_get_vport(handle);
8947 struct hclge_dev *hdev = vport->back;
8950 *media_type = hdev->hw.mac.media_type;
8953 *module_type = hdev->hw.mac.module_type;
8956 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8957 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8959 struct hclge_vport *vport = hclge_get_vport(handle);
8960 struct hclge_dev *hdev = vport->back;
8961 struct phy_device *phydev = hdev->hw.mac.phydev;
8962 int mdix_ctrl, mdix, is_resolved;
8963 unsigned int retval;
8966 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8967 *tp_mdix = ETH_TP_MDI_INVALID;
8971 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8973 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8974 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8975 HCLGE_PHY_MDIX_CTRL_S);
8977 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8978 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8979 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8981 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8983 switch (mdix_ctrl) {
8985 *tp_mdix_ctrl = ETH_TP_MDI;
8988 *tp_mdix_ctrl = ETH_TP_MDI_X;
8991 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8994 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8999 *tp_mdix = ETH_TP_MDI_INVALID;
9001 *tp_mdix = ETH_TP_MDI_X;
9003 *tp_mdix = ETH_TP_MDI;
static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");

	dev_info(dev, "PF info end.\n");
}
static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.reset_cnt;
	int ret;

	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_nic_err;
	}

	/* Enable nic hw error interrupts */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto init_nic_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return ret;

init_nic_err:
	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	client->ops->uninit_instance(&vport->nic, 0);

	return ret;
}
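/* Pattern note (added comment): rst_cnt snapshots rst_stats.reset_cnt
 * before init_instance() runs and is compared afterwards. If a reset
 * started or completed while the client was initializing, the instance
 * is torn down again and -EBUSY is returned, so the caller retries on a
 * consistent state instead of keeping a half-initialized client.
 */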
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->roce.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = hclge_init_nic_client_instance(ae_dev, vport);
			if (ret)
				goto clear_nic;

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}
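/* Note (added comment): the DMA setup above is the usual 64-then-32 bit
 * fallback. dma_set_mask_and_coherent(DMA_BIT_MASK(64)) is tried first;
 * only if the platform cannot do 64-bit DMA does the driver fall back to
 * a 32-bit mask, warning that addressing is now restricted.
 */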
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
}
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_RETRY_WAIT_MS	500
#define HCLGE_FLR_RETRY_CNT	5

	struct hclge_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

retry:
	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	hdev->reset_type = HNAE3_FLR_RESET;
	ret = hclge_reset_prepare(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
			ret);
		if (hdev->reset_pending ||
		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
			dev_err(&hdev->pdev->dev,
				"reset_pending:0x%lx, retry_cnt:%d\n",
				hdev->reset_pending, retry_cnt);
			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
			up(&hdev->reset_sem);
			msleep(HCLGE_FLR_RETRY_WAIT_MS);
			goto retry;
		}
	}

	/* disable misc vector before FLR done */
	hclge_enable_vector(&hdev->misc_vector, false);
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
	hdev->rst_stats.flr_rst_cnt++;
}
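/* Note (added comment): FLR preparation may race with an in-flight reset.
 * On failure, if a reset is still pending (or the HCLGE_FLR_RETRY_CNT
 * budget is not exhausted), the reset semaphore is released and the whole
 * prepare sequence restarts after HCLGE_FLR_RETRY_WAIT_MS, instead of
 * proceeding with FLR on an inconsistent device.
 */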
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	int ret;

	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_reset_rebuild(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	u16 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vf(%u) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* HW supports 2-layer VLAN */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	spin_lock_init(&hdev->fd_rule_lock);
	sema_init(&hdev->reset_sem, 1);

	ret = hclge_pci_init(hdev);
	if (ret)
		goto out;

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret)
		goto err_pci_uninit;

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret)
		goto err_msi_uninit;

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	ret = hclge_map_tqp(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret)
			goto err_msi_irq_uninit;
	}

	ret = hclge_init_umv_space(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);

	/* Setup affinity after service timer setup because add_timer_on
	 * is called in affinity notify.
	 */
	hclge_misc_affinity_setup(hdev);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors that have already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* Request a delayed reset for error recovery, because an immediate
	 * global reset on this PF would affect the pending initialization
	 * of other PFs.
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	hclge_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
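/* Note (added comment): the error unwinding above follows the usual
 * kernel goto-ladder idiom: each failure jumps to the label that undoes
 * everything initialized so far, in reverse order (MDIO bus, misc IRQ,
 * MSI vectors, command queue, PCI resources), so no partial state is
 * leaked on a failed probe.
 */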
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
}

static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_config_switch_param(hdev, vf, enable,
					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
}

static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					  HCLGE_FILTER_FE_NIC_INGRESS_B,
					  enable, vf);
}

static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
{
	int ret;

	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set vf %d mac spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);
		return ret;
	}

	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set vf %d vlan spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);

	return ret;
}
static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
				 bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_spoofchk = enable ? 1 : 0;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.spoofchk == new_spoofchk)
		return 0;

	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
			 vf);
	else if (enable && hclge_is_umv_space_full(vport))
		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
			 vf);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
	if (ret)
		return ret;

	vport->vf_info.spoofchk = new_spoofchk;
	return 0;
}
static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	int i;

	if (hdev->pdev->revision == 0x20)
		return 0;

	/* resume the vf spoof check state after reset */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
					       vport->vf_info.spoofchk);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_trusted = enable ? 1 : 0;
	bool en_bc_pmc;
	int ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.trusted == new_trusted)
		return 0;

	/* Disable promisc mode for VF if it is not trusted any more. */
	if (!enable && vport->vf_info.promisc_enable) {
		en_bc_pmc = hdev->pdev->revision != 0x20;
		ret = hclge_set_vport_promisc_mode(vport, false, false,
						   en_bc_pmc);
		if (ret)
			return ret;
		vport->vf_info.promisc_enable = 0;
		hclge_inform_vf_promisc_info(vport);
	}

	vport->vf_info.trusted = new_trusted;

	return 0;
}
static void hclge_reset_vf_rate(struct hclge_dev *hdev)
{
	int ret;
	int vf;

	/* reset vf rate to default value */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		struct hclge_vport *vport = &hdev->vport[vf];

		vport->vf_info.max_tx_rate = 0;
		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"vf%d failed to reset to default, ret=%d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}
static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
				     int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate != 0 ||
	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
		dev_err(&hdev->pdev->dev,
			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
		return -EINVAL;
	}

	return 0;
}
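/* Usage note (illustrative, added comment): this check backs the
 * ndo_set_vf_rate path, reached from userspace via e.g.
 *   ip link set <dev> vf 0 max_tx_rate 1000
 * The hardware only supports a per-VF maximum rate, hence min_tx_rate
 * must be 0 and max_tx_rate is bounded by the MAC's maximum speed.
 */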
static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
			     int min_tx_rate, int max_tx_rate, bool force)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
	if (ret)
		return ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
		return 0;

	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
	if (ret)
		return ret;

	vport->vf_info.max_tx_rate = max_tx_rate;

	return 0;
}
static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* zero means max rate; after reset the firmware has already
		 * set it to max rate, so just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret=%d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to reinit manager table, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Log and clear the hw errors that have already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);
	ret = hclge_reset_vport_spoofchk(hdev);
	if (ret)
		return ret;

	ret = hclge_resume_vf_rate(hdev);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_misc_affinity_teardown(hdev);
	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by the user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the RSS indirection table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
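/* Note (added comment): the first descriptor of the query response
 * carries HCLGE_32_BIT_DESC_NODATA_LEN header words before the register
 * data, which is why the i == 0 iteration copies fewer than
 * HCLGE_32_BIT_REG_RTN_DATANUM entries, while continuation descriptors
 * are consumed in full from their start. The 64-bit variant below
 * follows the same layout with its own header length.
 */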
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
#define BD_LIST_MAX_NUM		30
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	/* prepare 4 commands to query DFX BD number */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, 4);
}
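/* Note (added comment): this builds one chained command out of four
 * buffer descriptors. Every BD except the last sets HCLGE_CMD_FLAG_NEXT,
 * telling the firmware the request continues in the following
 * descriptor; the reply spreads the per-type DFX BD counts across the
 * descriptors' data words, which hclge_get_dfx_reg_bd_num() below then
 * indexes via hclge_dfx_bd_offset_list.
 */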
static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}
static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}
static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, data_len, bd_num, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	data_len_per_desc = sizeof_field(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

	return ret;
}
static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	struct hclge_desc *desc_src;
	u32 *reg = data;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src)
		return -ENOMEM;

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
	return ret;
}
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	int data_num_sum;
	u32 *reg = data;

	/* fetching per-PF register values from the PF PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
	int regs_lines_32_bit, regs_lines_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}
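/* Note (added comment): this is the "ethtool -d <dev>" sizing path. The
 * ethtool core calls get_regs_len() first to allocate the dump buffer,
 * then get_regs() below to fill it, so both must agree on the layout:
 * fixed register lists, per-queue and per-vector blocks, the 32/64-bit
 * firmware register ranges and the DFX area, each padded out to whole
 * REG_LEN_PER_LINE lines with separator words.
 */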
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
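/* Usage note (illustrative, added comment): this implements the ethtool
 * LED identify operation, e.g. "ethtool -p eth0 5" to blink the port LED
 * for five seconds; the core translates that into
 * ETHTOOL_ID_ACTIVE/ETHTOOL_ID_INACTIVE transitions, which map onto the
 * LED-status firmware command above.
 */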
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
};
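/* Note (added comment): hclge_ops is the PF-side implementation of the
 * hnae3 abstraction layer. The hns3 netdev driver does not call these
 * functions directly; it goes through this hnae3_ae_ops vtable, which is
 * what the ae_algo registration below hands to the framework.
 */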
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);