// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

#define HCLGE_VF_VPORT_START_NUM	1
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_EN_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App      Loopback test",
	"Serdes   serial Loopback test",
	"Serdes   parallel Loopback test",
	"Phy      Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};
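/* Read the MAC statistics the legacy way: HCLGE_OPC_STATS_MAC returns a
 * fixed number of descriptors whose payload is accumulated field by field
 * into hdev->mac_stats.
 */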
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
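/* Read the MAC statistics with the newer HCLGE_OPC_STATS_MAC_ALL opcode.
 * The number of descriptors (desc_num) is queried from the firmware first,
 * so the descriptor array is allocated dynamically here.
 */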
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
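/* Query how many MAC statistics registers the firmware reports and derive
 * the number of command descriptors needed to read them all; per the
 * arithmetic below, the first descriptor carries three statistics and each
 * following descriptor carries four.
 */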
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
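/* Accumulate the per-queue RX and TX packet counters into each TQP's
 * software stats, issuing one query command per queue and direction.
 */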
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has TX & RX two queues */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}

	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
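/* Interpret the function status response: fail until the PF reset is done,
 * then record whether this PF is the main PF.
 */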
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}
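/* Query the TQP count, packet buffer sizes and MSI-X resources assigned to
 * this PF, and derive the NIC/RoCE vector split from them.
 */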
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* nic's msix number always equals the roce's. */
		hdev->num_nic_msi = hdev->num_roce_msi;

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		hdev->num_nic_msi = hdev->num_msi;
	}

	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
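/* The hclge_convert_setting_*() helpers below translate the firmware
 * speed-ability bitmap into ethtool link modes for SR/LR/CR/KR media,
 * plus the FEC modes the current speed supports.
 */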
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
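/* Unpack the two-descriptor configuration response into struct hclge_cfg.
 * The MAC address is split across param[2] (low 32 bits) and a field in
 * param[3] (high bits), which is why it is reassembled with shifts below.
 */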
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len is in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* use the minimal number of queue pairs: one per vport */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}
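/* Program the TSO MSS bounds into hardware via a generic config command. */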
static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
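/* Hand out up to num_tqps unused TQPs to a vport and size its RSS so each
 * queue can keep a dedicated NIC interrupt vector.
 */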
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure one to one mapping between irq and queue at default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport;
	int ret;
	int i;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
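/* Check whether the remaining RX buffer is large enough to hold a shared
 * buffer on top of the private buffers; if so, compute the shared buffer's
 * high/low waterlines and per-TC thresholds.
 */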
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				      struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
		      COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
2093 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2094 struct hclge_pkt_buf_alloc *buf_alloc)
2096 struct hclge_rx_priv_buff_cmd *req;
2097 struct hclge_desc desc;
2101 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2102 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2104 /* Alloc private buffer TCs */
2105 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2106 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2109 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2111 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2115 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2116 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2118 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2120 dev_err(&hdev->pdev->dev,
2121 "rx private buffer alloc cmd failed %d\n", ret);
2126 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2127 struct hclge_pkt_buf_alloc *buf_alloc)
2129 struct hclge_rx_priv_wl_buf *req;
2130 struct hclge_priv_buf *priv;
2131 struct hclge_desc desc[2];
2135 for (i = 0; i < 2; i++) {
2136 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2138 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2140 /* The first descriptor set the NEXT bit to 1 */
2142 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2144 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2146 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2147 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2149 priv = &buf_alloc->priv_buf[idx];
2150 req->tc_wl[j].high =
2151 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2152 req->tc_wl[j].high |=
2153 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2155 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2156 req->tc_wl[j].low |=
2157 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2161 /* Send 2 descriptor at one time */
2162 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2164 dev_err(&hdev->pdev->dev,
2165 "rx private waterline config cmd failed %d\n",
2170 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2171 struct hclge_pkt_buf_alloc *buf_alloc)
2173 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2174 struct hclge_rx_com_thrd *req;
2175 struct hclge_desc desc[2];
2176 struct hclge_tc_thrd *tc;
2180 for (i = 0; i < 2; i++) {
2181 hclge_cmd_setup_basic_desc(&desc[i],
2182 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2183 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2185 /* The first descriptor sets the NEXT bit to 1 */
2187 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2189 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2191 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2192 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2194 req->com_thrd[j].high =
2195 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2196 req->com_thrd[j].high |=
2197 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2198 req->com_thrd[j].low =
2199 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2200 req->com_thrd[j].low |=
2201 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2205 /* Send 2 descriptors at one time */
2206 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2208 dev_err(&hdev->pdev->dev,
2209 "common threshold config cmd failed %d\n", ret);
2213 static int hclge_common_wl_config(struct hclge_dev *hdev,
2214 struct hclge_pkt_buf_alloc *buf_alloc)
2216 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2217 struct hclge_rx_com_wl *req;
2218 struct hclge_desc desc;
2221 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2223 req = (struct hclge_rx_com_wl *)desc.data;
2224 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2225 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2227 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2228 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2230 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2232 dev_err(&hdev->pdev->dev,
2233 "common waterline config cmd failed %d\n", ret);
2238 int hclge_buffer_alloc(struct hclge_dev *hdev)
2240 struct hclge_pkt_buf_alloc *pkt_buf;
2243 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2247 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2249 dev_err(&hdev->pdev->dev,
2250 "could not calc tx buffer size for all TCs %d\n", ret);
2254 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2256 dev_err(&hdev->pdev->dev,
2257 "could not alloc tx buffers %d\n", ret);
2261 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2263 dev_err(&hdev->pdev->dev,
2264 "could not calc rx priv buffer size for all TCs %d\n",
2269 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2271 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2276 if (hnae3_dev_dcb_supported(hdev)) {
2277 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2279 dev_err(&hdev->pdev->dev,
2280 "could not configure rx private waterline %d\n",
2285 ret = hclge_common_thrd_config(hdev, pkt_buf);
2287 dev_err(&hdev->pdev->dev,
2288 "could not configure common threshold %d\n",
2294 ret = hclge_common_wl_config(hdev, pkt_buf);
2296 dev_err(&hdev->pdev->dev,
2297 "could not configure common waterline %d\n", ret);
2304 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2306 struct hnae3_handle *roce = &vport->roce;
2307 struct hnae3_handle *nic = &vport->nic;
2309 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2311 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2312 vport->back->num_msi_left == 0)
2315 roce->rinfo.base_vector = vport->back->roce_base_vector;
2317 roce->rinfo.netdev = nic->kinfo.netdev;
2318 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2320 roce->pdev = nic->pdev;
2321 roce->ae_algo = nic->ae_algo;
2322 roce->numa_node_mask = nic->numa_node_mask;
2327 static int hclge_init_msi(struct hclge_dev *hdev)
2329 struct pci_dev *pdev = hdev->pdev;
2333 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2335 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2338 "failed(%d) to allocate MSI/MSI-X vectors\n",
2342 if (vectors < hdev->num_msi)
2343 dev_warn(&hdev->pdev->dev,
2344 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2345 hdev->num_msi, vectors);
2347 hdev->num_msi = vectors;
2348 hdev->num_msi_left = vectors;
2350 hdev->base_msi_vector = pdev->irq;
2351 hdev->roce_base_vector = hdev->base_msi_vector +
2352 hdev->roce_base_msix_offset;
2354 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2355 sizeof(u16), GFP_KERNEL);
2356 if (!hdev->vector_status) {
2357 pci_free_irq_vectors(pdev);
2361 for (i = 0; i < hdev->num_msi; i++)
2362 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2364 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2365 sizeof(int), GFP_KERNEL);
2366 if (!hdev->vector_irq) {
2367 pci_free_irq_vectors(pdev);
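/* Only 10M and 100M links may run at half duplex; for any other speed
 * only full duplex is valid, so force it here.
 */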
2374 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2376 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2377 duplex = HCLGE_MAC_FULL;
2382 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2385 struct hclge_config_mac_speed_dup_cmd *req;
2386 struct hclge_desc desc;
2389 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2391 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2394 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2397 case HCLGE_MAC_SPEED_10M:
2398 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2399 HCLGE_CFG_SPEED_S, 6);
2401 case HCLGE_MAC_SPEED_100M:
2402 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403 HCLGE_CFG_SPEED_S, 7);
2405 case HCLGE_MAC_SPEED_1G:
2406 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407 HCLGE_CFG_SPEED_S, 0);
2409 case HCLGE_MAC_SPEED_10G:
2410 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411 HCLGE_CFG_SPEED_S, 1);
2413 case HCLGE_MAC_SPEED_25G:
2414 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415 HCLGE_CFG_SPEED_S, 2);
2417 case HCLGE_MAC_SPEED_40G:
2418 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419 HCLGE_CFG_SPEED_S, 3);
2421 case HCLGE_MAC_SPEED_50G:
2422 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423 HCLGE_CFG_SPEED_S, 4);
2425 case HCLGE_MAC_SPEED_100G:
2426 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427 HCLGE_CFG_SPEED_S, 5);
2430 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2434 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2437 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2439 dev_err(&hdev->pdev->dev,
2440 "mac speed/duplex config cmd failed %d.\n", ret);
2447 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2451 duplex = hclge_check_speed_dup(duplex, speed);
2452 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2455 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2459 hdev->hw.mac.speed = speed;
2460 hdev->hw.mac.duplex = duplex;
2465 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2468 struct hclge_vport *vport = hclge_get_vport(handle);
2469 struct hclge_dev *hdev = vport->back;
2471 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2474 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2476 struct hclge_config_auto_neg_cmd *req;
2477 struct hclge_desc desc;
2481 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2483 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2485 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2486 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2488 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2490 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2496 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2498 struct hclge_vport *vport = hclge_get_vport(handle);
2499 struct hclge_dev *hdev = vport->back;
2501 if (!hdev->hw.mac.support_autoneg) {
2503 dev_err(&hdev->pdev->dev,
2504 "autoneg is not supported by current port\n");
2511 return hclge_set_autoneg_en(hdev, enable);
2514 static int hclge_get_autoneg(struct hnae3_handle *handle)
2516 struct hclge_vport *vport = hclge_get_vport(handle);
2517 struct hclge_dev *hdev = vport->back;
2518 struct phy_device *phydev = hdev->hw.mac.phydev;
2521 return phydev->autoneg;
2523 return hdev->hw.mac.autoneg;
2526 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2528 struct hclge_vport *vport = hclge_get_vport(handle);
2529 struct hclge_dev *hdev = vport->back;
2532 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2534 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2537 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2540 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2542 struct hclge_vport *vport = hclge_get_vport(handle);
2543 struct hclge_dev *hdev = vport->back;
2545 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2546 return hclge_set_autoneg_en(hdev, !halt);
2551 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2553 struct hclge_config_fec_cmd *req;
2554 struct hclge_desc desc;
2557 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2559 req = (struct hclge_config_fec_cmd *)desc.data;
2560 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2561 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2562 if (fec_mode & BIT(HNAE3_FEC_RS))
2563 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2564 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2565 if (fec_mode & BIT(HNAE3_FEC_BASER))
2566 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2567 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2569 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2571 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2576 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2578 struct hclge_vport *vport = hclge_get_vport(handle);
2579 struct hclge_dev *hdev = vport->back;
2580 struct hclge_mac *mac = &hdev->hw.mac;
2583 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2584 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2588 ret = hclge_set_fec_hw(hdev, fec_mode);
2592 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2596 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2599 struct hclge_vport *vport = hclge_get_vport(handle);
2600 struct hclge_dev *hdev = vport->back;
2601 struct hclge_mac *mac = &hdev->hw.mac;
2604 *fec_ability = mac->fec_ability;
2606 *fec_mode = mac->fec_mode;
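/* hclge_mac_init: restore the MAC to a known state from the values cached
 * in hdev: speed/duplex, autoneg, user FEC mode, MTU, default loopback and
 * the packet buffers are all reprogrammed here.
 */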
2609 static int hclge_mac_init(struct hclge_dev *hdev)
2611 struct hclge_mac *mac = &hdev->hw.mac;
2614 hdev->support_sfp_query = true;
2615 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2616 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2617 hdev->hw.mac.duplex);
2621 if (hdev->hw.mac.support_autoneg) {
2622 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2629 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2630 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2635 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2637 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2641 ret = hclge_set_default_loopback(hdev);
2645 ret = hclge_buffer_alloc(hdev);
2647 dev_err(&hdev->pdev->dev,
2648 "allocate buffer fail, ret=%d\n", ret);
2653 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2655 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2656 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2657 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2658 hclge_wq, &hdev->service_task, 0);
2661 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2663 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2664 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2665 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2666 hclge_wq, &hdev->service_task, 0);
2669 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2671 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2672 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2673 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2674 hclge_wq, &hdev->service_task,
2678 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2680 struct hclge_link_status_cmd *req;
2681 struct hclge_desc desc;
2685 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2686 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2688 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2693 req = (struct hclge_link_status_cmd *)desc.data;
2694 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2696 return !!link_status;
2699 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2701 unsigned int mac_state;
2704 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2707 mac_state = hclge_get_mac_link_status(hdev);
2709 if (hdev->hw.mac.phydev) {
2710 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2711 link_stat = mac_state &
2712 hdev->hw.mac.phydev->link;
2717 link_stat = mac_state;
2723 static void hclge_update_link_status(struct hclge_dev *hdev)
2725 struct hnae3_client *rclient = hdev->roce_client;
2726 struct hnae3_client *client = hdev->nic_client;
2727 struct hnae3_handle *rhandle;
2728 struct hnae3_handle *handle;
2735 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2738 state = hclge_get_mac_phy_link(hdev);
2739 if (state != hdev->hw.mac.link) {
2740 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2741 handle = &hdev->vport[i].nic;
2742 client->ops->link_status_change(handle, state);
2743 hclge_config_mac_tnl_int(hdev, state);
2744 rhandle = &hdev->vport[i].roce;
2745 if (rclient && rclient->ops->link_status_change)
2746 rclient->ops->link_status_change(rhandle,
2749 hdev->hw.mac.link = state;
2752 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2755 static void hclge_update_port_capability(struct hclge_mac *mac)
2757 /* update fec ability by speed */
2758 hclge_convert_setting_fec(mac);
2760 /* firmware cannot identify the backplane type; the media type
2761 * read from the configuration can help deal with it
2763 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2764 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2765 mac->module_type = HNAE3_MODULE_TYPE_KR;
2766 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2767 mac->module_type = HNAE3_MODULE_TYPE_TP;
2769 if (mac->support_autoneg) {
2770 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2771 linkmode_copy(mac->advertising, mac->supported);
2773 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2775 linkmode_zero(mac->advertising);
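/* hclge_get_sfp_speed: legacy query that returns only the SFP speed; as
 * hclge_update_port_info() below shows, it is used when the device
 * revision is below 0x21.
 */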
2779 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2781 struct hclge_sfp_info_cmd *resp;
2782 struct hclge_desc desc;
2785 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2786 resp = (struct hclge_sfp_info_cmd *)desc.data;
2787 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2788 if (ret == -EOPNOTSUPP) {
2789 dev_warn(&hdev->pdev->dev,
2790 "IMP do not support get SFP speed %d\n", ret);
2793 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2797 *speed = le32_to_cpu(resp->speed);
2802 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2804 struct hclge_sfp_info_cmd *resp;
2805 struct hclge_desc desc;
2808 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2809 resp = (struct hclge_sfp_info_cmd *)desc.data;
2811 resp->query_type = QUERY_ACTIVE_SPEED;
2813 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2814 if (ret == -EOPNOTSUPP) {
2815 dev_warn(&hdev->pdev->dev,
2816 "IMP does not support get SFP info %d\n", ret);
2819 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2823 /* In some cases, the MAC speed got from IMP may be 0; it should not
2824 * be set to mac->speed.
2826 if (!le32_to_cpu(resp->speed))
2829 mac->speed = le32_to_cpu(resp->speed);
2830 /* if resp->speed_ability is 0, it means an old firmware version,
2831 * so do not update these params
2833 if (resp->speed_ability) {
2834 mac->module_type = le32_to_cpu(resp->module_type);
2835 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2836 mac->autoneg = resp->autoneg;
2837 mac->support_autoneg = resp->autoneg_ability;
2838 mac->speed_type = QUERY_ACTIVE_SPEED;
2839 if (!resp->active_fec)
2842 mac->fec_mode = BIT(resp->active_fec);
2844 mac->speed_type = QUERY_SFP_SPEED;
2850 static int hclge_update_port_info(struct hclge_dev *hdev)
2852 struct hclge_mac *mac = &hdev->hw.mac;
2853 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2856 /* get the port info from SFP cmd if not copper port */
2857 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2860 /* if IMP does not support getting SFP/qSFP info, return directly */
2861 if (!hdev->support_sfp_query)
2864 if (hdev->pdev->revision >= 0x21)
2865 ret = hclge_get_sfp_info(hdev, mac);
2867 ret = hclge_get_sfp_speed(hdev, &speed);
2869 if (ret == -EOPNOTSUPP) {
2870 hdev->support_sfp_query = false;
2876 if (hdev->pdev->revision >= 0x21) {
2877 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2878 hclge_update_port_capability(mac);
2881 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2884 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2885 return 0; /* do nothing if no SFP */
2887 /* must configure full duplex for SFP */
2888 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2892 static int hclge_get_status(struct hnae3_handle *handle)
2894 struct hclge_vport *vport = hclge_get_vport(handle);
2895 struct hclge_dev *hdev = vport->back;
2897 hclge_update_link_status(hdev);
2899 return hdev->hw.mac.link;
2902 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2904 if (!pci_num_vf(hdev->pdev)) {
2905 dev_err(&hdev->pdev->dev,
2906 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2910 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2911 dev_err(&hdev->pdev->dev,
2912 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2913 vf, pci_num_vf(hdev->pdev));
2917 /* VFs start from 1 in vport */
2918 vf += HCLGE_VF_VPORT_START_NUM;
2919 return &hdev->vport[vf];
2922 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2923 struct ifla_vf_info *ivf)
2925 struct hclge_vport *vport = hclge_get_vport(handle);
2926 struct hclge_dev *hdev = vport->back;
2928 vport = hclge_get_vf_vport(hdev, vf);
2933 ivf->linkstate = vport->vf_info.link_state;
2934 ivf->spoofchk = vport->vf_info.spoofchk;
2935 ivf->trusted = vport->vf_info.trusted;
2936 ivf->min_tx_rate = 0;
2937 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2938 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2939 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2940 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2941 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2946 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2949 struct hclge_vport *vport = hclge_get_vport(handle);
2950 struct hclge_dev *hdev = vport->back;
2952 vport = hclge_get_vf_vport(hdev, vf);
2956 vport->vf_info.link_state = link_state;
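/* hclge_check_event_cause: decode the vector0 interrupt into one event
 * type, in priority order: IMP/global reset first, then MSI-X errors,
 * then mailbox (CMDQ RX), and finally "other" sources.
 */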
2961 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2963 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2965 /* fetch the events from their corresponding regs */
2966 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2967 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2968 msix_src_reg = hclge_read_dev(&hdev->hw,
2969 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2971 /* Assumption: if by any chance reset and mailbox events are reported
2972 * together then we will only process the reset event in this go and will
2973 * defer the processing of the mailbox events. Since we would not have
2974 * cleared the RX CMDQ event this time, we would receive another
2975 * interrupt from H/W just for the mailbox.
2977 * check for vector0 reset event sources
2979 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2980 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2981 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2982 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2983 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2984 hdev->rst_stats.imp_rst_cnt++;
2985 return HCLGE_VECTOR0_EVENT_RST;
2988 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2989 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2990 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2991 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2992 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2993 hdev->rst_stats.global_rst_cnt++;
2994 return HCLGE_VECTOR0_EVENT_RST;
2997 /* check for vector0 msix event source */
2998 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2999 *clearval = msix_src_reg;
3000 return HCLGE_VECTOR0_EVENT_ERR;
3003 /* check for vector0 mailbox(=CMDQ RX) event source */
3004 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3005 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3006 *clearval = cmdq_src_reg;
3007 return HCLGE_VECTOR0_EVENT_MBX;
3010 /* print other vector0 event source */
3011 dev_info(&hdev->pdev->dev,
3012 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3013 cmdq_src_reg, msix_src_reg);
3014 *clearval = msix_src_reg;
3016 return HCLGE_VECTOR0_EVENT_OTHER;
3019 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3022 switch (event_type) {
3023 case HCLGE_VECTOR0_EVENT_RST:
3024 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3026 case HCLGE_VECTOR0_EVENT_MBX:
3027 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3034 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3036 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3037 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3038 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3039 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3040 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3043 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3045 writel(enable ? 1 : 0, vector->addr);
3048 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3050 struct hclge_dev *hdev = data;
3054 hclge_enable_vector(&hdev->misc_vector, false);
3055 event_cause = hclge_check_event_cause(hdev, &clearval);
3057 /* vector 0 interrupt is shared with reset and mailbox source events. */
3058 switch (event_cause) {
3059 case HCLGE_VECTOR0_EVENT_ERR:
3060 /* we do not know what type of reset is required now. This could
3061 * only be decided after we fetch the type of errors which
3062 * caused this event. Therefore, we will do the following for now:
3063 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3064 * have deferred the type of reset to be used.
3065 * 2. Schedule the reset service task.
3066 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3067 * will fetch the correct type of reset. This would be done
3068 * by first decoding the types of errors.
3070 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3072 case HCLGE_VECTOR0_EVENT_RST:
3073 hclge_reset_task_schedule(hdev);
3075 case HCLGE_VECTOR0_EVENT_MBX:
3076 /* If we are here then,
3077 * 1. either we are not handling any mbx task and none is scheduled, or
3080 * 2. we could be handling a mbx task but nothing more is scheduled.
3082 * In both cases, we should schedule the mbx task as there are more
3083 * mbx messages reported by this interrupt.
3085 hclge_mbx_task_schedule(hdev);
3088 dev_warn(&hdev->pdev->dev,
3089 "received unknown or unhandled event of vector0\n");
3093 hclge_clear_event_cause(hdev, event_cause, clearval);
3095 /* Enable the interrupt if it is not caused by reset. And when
3096 * clearval equals 0, it means the interrupt status may have been
3097 * cleared by hardware before the driver reads the status register.
3098 * In this case, the vector0 interrupt should also be enabled.
3101 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3102 hclge_enable_vector(&hdev->misc_vector, true);
3108 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3110 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3111 dev_warn(&hdev->pdev->dev,
3112 "vector(vector_id %d) has been freed.\n", vector_id);
3116 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3117 hdev->num_msi_left += 1;
3118 hdev->num_msi_used -= 1;
3121 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3123 struct hclge_misc_vector *vector = &hdev->misc_vector;
3125 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3127 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3128 hdev->vector_status[0] = 0;
3130 hdev->num_msi_left -= 1;
3131 hdev->num_msi_used += 1;
3134 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3135 const cpumask_t *mask)
3137 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3140 cpumask_copy(&hdev->affinity_mask, mask);
3143 static void hclge_irq_affinity_release(struct kref *ref)
3147 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3149 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3150 &hdev->affinity_mask);
3152 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3153 hdev->affinity_notify.release = hclge_irq_affinity_release;
3154 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3155 &hdev->affinity_notify);
3158 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3160 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3161 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3164 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3168 hclge_get_misc_vector(hdev);
3170 /* this would be explicitly freed in the end */
3171 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3172 HCLGE_NAME, pci_name(hdev->pdev));
3173 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3174 0, hdev->misc_vector.name, hdev);
3176 hclge_free_vector(hdev, 0);
3177 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3178 hdev->misc_vector.vector_irq);
3184 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3186 free_irq(hdev->misc_vector.vector_irq, hdev);
3187 hclge_free_vector(hdev, 0);
3190 int hclge_notify_client(struct hclge_dev *hdev,
3191 enum hnae3_reset_notify_type type)
3193 struct hnae3_client *client = hdev->nic_client;
3196 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3199 if (!client->ops->reset_notify)
3202 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3203 struct hnae3_handle *handle = &hdev->vport[i].nic;
3206 ret = client->ops->reset_notify(handle, type);
3208 dev_err(&hdev->pdev->dev,
3209 "notify nic client failed %d(%d)\n", type, ret);
3217 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3218 enum hnae3_reset_notify_type type)
3220 struct hnae3_client *client = hdev->roce_client;
3224 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3227 if (!client->ops->reset_notify)
3230 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3231 struct hnae3_handle *handle = &hdev->vport[i].roce;
3233 ret = client->ops->reset_notify(handle, type);
3235 dev_err(&hdev->pdev->dev,
3236 "notify roce client failed %d(%d)",
3245 static int hclge_reset_wait(struct hclge_dev *hdev)
3247 #define HCLGE_RESET_WAIT_MS 100
3248 #define HCLGE_RESET_WAIT_CNT 350
3250 u32 val, reg, reg_bit;
3253 switch (hdev->reset_type) {
3254 case HNAE3_IMP_RESET:
3255 reg = HCLGE_GLOBAL_RESET_REG;
3256 reg_bit = HCLGE_IMP_RESET_BIT;
3258 case HNAE3_GLOBAL_RESET:
3259 reg = HCLGE_GLOBAL_RESET_REG;
3260 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3262 case HNAE3_FUNC_RESET:
3263 reg = HCLGE_FUN_RST_ING;
3264 reg_bit = HCLGE_FUN_RST_ING_B;
3267 dev_err(&hdev->pdev->dev,
3268 "Wait for unsupported reset type: %d\n",
3273 val = hclge_read_dev(&hdev->hw, reg);
3274 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3275 msleep(HCLGE_RESET_WAIT_MS);
3276 val = hclge_read_dev(&hdev->hw, reg);
3280 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3281 dev_warn(&hdev->pdev->dev,
3282 "Wait for reset timeout: %d\n", hdev->reset_type);
3289 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3291 struct hclge_vf_rst_cmd *req;
3292 struct hclge_desc desc;
3294 req = (struct hclge_vf_rst_cmd *)desc.data;
3295 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3296 req->dest_vfid = func_id;
3301 return hclge_cmd_send(&hdev->hw, &desc, 1);
3304 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3308 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3309 struct hclge_vport *vport = &hdev->vport[i];
3312 /* Send cmd to set/clear VF's FUNC_RST_ING */
3313 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3315 dev_err(&hdev->pdev->dev,
3316 "set vf(%u) rst failed %d!\n",
3317 vport->vport_id, ret);
3321 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3324 /* Inform VF to process the reset.
3325 * hclge_inform_reset_assert_to_vf may fail if VF
3326 * driver is not loaded.
3328 ret = hclge_inform_reset_assert_to_vf(vport);
3330 dev_warn(&hdev->pdev->dev,
3331 "inform reset to vf(%u) failed %d!\n",
3332 vport->vport_id, ret);
3338 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3340 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3341 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3342 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3345 hclge_mbx_handler(hdev);
3347 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3350 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3352 struct hclge_pf_rst_sync_cmd *req;
3353 struct hclge_desc desc;
3357 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3358 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3361 /* the VF needs to bring its netdev down via mbx during PF or FLR reset */
3362 hclge_mailbox_service_task(hdev);
3364 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3365 /* for compatibility with old firmware, wait
3366 * 100 ms for the VF to stop IO
3368 if (ret == -EOPNOTSUPP) {
3369 msleep(HCLGE_RESET_SYNC_TIME);
3372 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3375 } else if (req->all_vf_ready) {
3378 msleep(HCLGE_PF_RESET_SYNC_TIME);
3379 hclge_cmd_reuse_desc(&desc, true);
3380 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3382 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3385 void hclge_report_hw_error(struct hclge_dev *hdev,
3386 enum hnae3_hw_error_type type)
3388 struct hnae3_client *client = hdev->nic_client;
3391 if (!client || !client->ops->process_hw_error ||
3392 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3395 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3396 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3399 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3403 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3404 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3405 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3406 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3407 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3410 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3411 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3412 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3413 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3417 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3419 struct hclge_desc desc;
3420 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3423 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3424 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3425 req->fun_reset_vfid = func_id;
3427 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3429 dev_err(&hdev->pdev->dev,
3430 "send function reset cmd fail, status =%d\n", ret);
3435 static void hclge_do_reset(struct hclge_dev *hdev)
3437 struct hnae3_handle *handle = &hdev->vport[0].nic;
3438 struct pci_dev *pdev = hdev->pdev;
3441 if (hclge_get_hw_reset_stat(handle)) {
3442 dev_info(&pdev->dev, "Hardware reset not finish\n");
3443 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3444 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3445 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3449 switch (hdev->reset_type) {
3450 case HNAE3_GLOBAL_RESET:
3451 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3452 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3453 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3454 dev_info(&pdev->dev, "Global Reset requested\n");
3456 case HNAE3_FUNC_RESET:
3457 dev_info(&pdev->dev, "PF Reset requested\n");
3458 /* schedule again to check later */
3459 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3460 hclge_reset_task_schedule(hdev);
3463 dev_warn(&pdev->dev,
3464 "Unsupported reset type: %d\n", hdev->reset_type);
3469 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3470 unsigned long *addr)
3472 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3473 struct hclge_dev *hdev = ae_dev->priv;
3475 /* first, resolve any unknown reset type to the known type(s) */
3476 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3477 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3478 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3479 /* we will intentionally ignore any errors from this function
3480 * as we will end up in *some* reset request in any case
3482 if (hclge_handle_hw_msix_error(hdev, addr))
3483 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3486 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3487 /* We deferred the clearing of the error event which caused the
3488 * interrupt since it was not possible to do that in
3489 * interrupt context (and this is the reason we introduced the
3490 * new UNKNOWN reset type). Now that the errors have been
3491 * handled and cleared in hardware, we can safely enable
3492 * interrupts. This is an exception to the norm.
3494 hclge_enable_vector(&hdev->misc_vector, true);
3497 /* return the highest priority reset level amongst all */
3498 if (test_bit(HNAE3_IMP_RESET, addr)) {
3499 rst_level = HNAE3_IMP_RESET;
3500 clear_bit(HNAE3_IMP_RESET, addr);
3501 clear_bit(HNAE3_GLOBAL_RESET, addr);
3502 clear_bit(HNAE3_FUNC_RESET, addr);
3503 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3504 rst_level = HNAE3_GLOBAL_RESET;
3505 clear_bit(HNAE3_GLOBAL_RESET, addr);
3506 clear_bit(HNAE3_FUNC_RESET, addr);
3507 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3508 rst_level = HNAE3_FUNC_RESET;
3509 clear_bit(HNAE3_FUNC_RESET, addr);
3510 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3511 rst_level = HNAE3_FLR_RESET;
3512 clear_bit(HNAE3_FLR_RESET, addr);
3515 if (hdev->reset_type != HNAE3_NONE_RESET &&
3516 rst_level < hdev->reset_type)
3517 return HNAE3_NONE_RESET;
3522 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3526 switch (hdev->reset_type) {
3527 case HNAE3_IMP_RESET:
3528 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3530 case HNAE3_GLOBAL_RESET:
3531 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3540 /* For revision 0x20, the reset interrupt source
3541 * can only be cleared after the hardware reset is done
3543 if (hdev->pdev->revision == 0x20)
3544 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3547 hclge_enable_vector(&hdev->misc_vector, true);
3550 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3554 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3556 reg_val |= HCLGE_NIC_SW_RST_RDY;
3558 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3560 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3563 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3567 ret = hclge_set_all_vf_rst(hdev, true);
3571 hclge_func_reset_sync_vf(hdev);
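/* hclge_reset_prepare_wait: per-type preparation before the hardware reset
 * proper: FUNC and FLR resets notify the VFs first, an IMP reset clears
 * pending IMP errors, and every path ends with the ready handshake.
 */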
3576 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3581 switch (hdev->reset_type) {
3582 case HNAE3_FUNC_RESET:
3583 ret = hclge_func_reset_notify_vf(hdev);
3587 ret = hclge_func_reset_cmd(hdev, 0);
3589 dev_err(&hdev->pdev->dev,
3590 "asserting function reset fail %d!\n", ret);
3594 /* After performing the PF reset, it is not necessary to do the
3595 * mailbox handling or send any command to firmware, because
3596 * any mailbox handling or command to firmware is only valid
3597 * after hclge_cmd_init is called.
3599 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3600 hdev->rst_stats.pf_rst_cnt++;
3602 case HNAE3_FLR_RESET:
3603 ret = hclge_func_reset_notify_vf(hdev);
3607 case HNAE3_IMP_RESET:
3608 hclge_handle_imp_error(hdev);
3609 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3610 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3611 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3617 /* inform hardware that preparatory work is done */
3618 msleep(HCLGE_RESET_SYNC_TIME);
3619 hclge_reset_handshake(hdev, true);
3620 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3625 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3627 #define MAX_RESET_FAIL_CNT 5
3629 if (hdev->reset_pending) {
3630 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3631 hdev->reset_pending);
3633 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3634 HCLGE_RESET_INT_M) {
3635 dev_info(&hdev->pdev->dev,
3636 "reset failed because new reset interrupt\n");
3637 hclge_clear_reset_cause(hdev);
3639 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3640 hdev->rst_stats.reset_fail_cnt++;
3641 set_bit(hdev->reset_type, &hdev->reset_pending);
3642 dev_info(&hdev->pdev->dev,
3643 "re-schedule reset task(%u)\n",
3644 hdev->rst_stats.reset_fail_cnt);
3648 hclge_clear_reset_cause(hdev);
3650 /* recover the handshake status when the reset fails */
3651 hclge_reset_handshake(hdev, true);
3653 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3655 hclge_dbg_dump_rst_info(hdev);
3657 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3662 static int hclge_set_rst_done(struct hclge_dev *hdev)
3664 struct hclge_pf_rst_done_cmd *req;
3665 struct hclge_desc desc;
3668 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3669 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3670 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3672 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3673 /* To be compatible with the old firmware, which does not support
3674 * command HCLGE_OPC_PF_RST_DONE, just print a warning and return success
3677 if (ret == -EOPNOTSUPP) {
3678 dev_warn(&hdev->pdev->dev,
3679 "current firmware does not support command(0x%x)!\n",
3680 HCLGE_OPC_PF_RST_DONE);
3683 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3690 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3694 switch (hdev->reset_type) {
3695 case HNAE3_FUNC_RESET:
3697 case HNAE3_FLR_RESET:
3698 ret = hclge_set_all_vf_rst(hdev, false);
3700 case HNAE3_GLOBAL_RESET:
3702 case HNAE3_IMP_RESET:
3703 ret = hclge_set_rst_done(hdev);
3709 /* clear the handshake status after re-initialization is done */
3710 hclge_reset_handshake(hdev, false);
3715 static int hclge_reset_stack(struct hclge_dev *hdev)
3719 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3723 ret = hclge_reset_ae_dev(hdev->ae_dev);
3727 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3731 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3734 static int hclge_reset_prepare(struct hclge_dev *hdev)
3736 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3739 /* Initialize ae_dev reset status as well, in case enet layer wants to
3740 * know if the device is undergoing reset
3742 ae_dev->reset_type = hdev->reset_type;
3743 hdev->rst_stats.reset_cnt++;
3744 /* perform reset of the stack & ae device for a client */
3745 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3750 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3755 return hclge_reset_prepare_wait(hdev);
3758 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3760 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3761 enum hnae3_reset_type reset_level;
3764 hdev->rst_stats.hw_reset_done_cnt++;
3766 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3771 ret = hclge_reset_stack(hdev);
3776 hclge_clear_reset_cause(hdev);
3778 ret = hclge_reset_prepare_up(hdev);
3783 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3784 /* ignore the RoCE notify error if reset has already failed HCLGE_RESET_MAX_FAIL_CNT - 1 times */
3788 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3792 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3797 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3801 hdev->last_reset_time = jiffies;
3802 hdev->rst_stats.reset_fail_cnt = 0;
3803 hdev->rst_stats.reset_done_cnt++;
3804 ae_dev->reset_type = HNAE3_NONE_RESET;
3805 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3807 /* if default_reset_request has a higher level reset request,
3808 * it should be handled as soon as possible, since some errors
3809 * need this kind of reset to be fixed.
3811 reset_level = hclge_get_reset_level(ae_dev,
3812 &hdev->default_reset_request);
3813 if (reset_level != HNAE3_NONE_RESET)
3814 set_bit(reset_level, &hdev->reset_request);
3819 static void hclge_reset(struct hclge_dev *hdev)
3821 if (hclge_reset_prepare(hdev))
3824 if (hclge_reset_wait(hdev))
3827 if (hclge_reset_rebuild(hdev))
3833 if (hclge_reset_err_handle(hdev))
3834 hclge_reset_task_schedule(hdev);
3837 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3839 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3840 struct hclge_dev *hdev = ae_dev->priv;
3842 /* We might end up getting called broadly because of the 2 cases below:
3843 * 1. A recoverable error was conveyed through APEI and the only way to
3844 * bring back normalcy is to reset.
3845 * 2. A new reset request from the stack due to timeout
3847 * For the first case, the error event might not have an ae handle
3848 * available. Check if this is a new reset request and we are not here
3849 * just because the last reset attempt did not succeed and the watchdog
3850 * hit us again. We will know this if the last reset request did not occur
3851 * very recently (watchdog timer = 5*HZ, let us check after a sufficiently
3852 * large time, say 4*5*HZ). In case of a new request we reset the
3853 * "reset level" to PF reset. And if it is a repeat reset request of the
3854 * most recent one then we want to make sure we throttle the reset
3855 * request. Therefore, we will not allow it again before 3*HZ times.
3858 handle = &hdev->vport[0].nic;
3860 if (time_before(jiffies, (hdev->last_reset_time +
3861 HCLGE_RESET_INTERVAL))) {
3862 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3864 } else if (hdev->default_reset_request) {
3866 hclge_get_reset_level(ae_dev,
3867 &hdev->default_reset_request);
3868 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3869 hdev->reset_level = HNAE3_FUNC_RESET;
3872 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3875 /* request reset & schedule reset task */
3876 set_bit(hdev->reset_level, &hdev->reset_request);
3877 hclge_reset_task_schedule(hdev);
3879 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3880 hdev->reset_level++;
3883 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3884 enum hnae3_reset_type rst_type)
3886 struct hclge_dev *hdev = ae_dev->priv;
3888 set_bit(rst_type, &hdev->default_reset_request);
3891 static void hclge_reset_timer(struct timer_list *t)
3893 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3895 /* if default_reset_request has no value, it means that this reset
3896 * request has already been handled, so just return here
3898 if (!hdev->default_reset_request)
3901 dev_info(&hdev->pdev->dev,
3902 "triggering reset in reset timer\n");
3903 hclge_reset_event(hdev->pdev, NULL);
3906 static void hclge_reset_subtask(struct hclge_dev *hdev)
3908 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3910 /* check if there is any ongoing reset in the hardware. This status can
3911 * be checked from reset_pending. If there is, then we need to wait for
3912 * the hardware to complete the reset.
3913 * a. If we are able to figure out in reasonable time that the hardware
3914 * has fully reset, then we can proceed with driver and client init.
3916 * b. Else, we can come back later to check this status and re-schedule now.
3919 hdev->last_reset_time = jiffies;
3920 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3921 if (hdev->reset_type != HNAE3_NONE_RESET)
3924 /* check if we got any *new* reset requests to be honored */
3925 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3926 if (hdev->reset_type != HNAE3_NONE_RESET)
3927 hclge_do_reset(hdev);
3929 hdev->reset_type = HNAE3_NONE_RESET;
3932 static void hclge_reset_service_task(struct hclge_dev *hdev)
3934 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3937 down(&hdev->reset_sem);
3938 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3940 hclge_reset_subtask(hdev);
3942 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3943 up(&hdev->reset_sem);
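/* hclge_update_vport_alive: a vport whose last_active_jiffies has not been
 * refreshed for 8 * HZ (presumably by VF keep-alive mailbox messages) is
 * marked not alive and its mps falls back to the default frame size.
 */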
3946 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3950 /* start from vport 1 since the PF is always alive */
3951 for (i = 1; i < hdev->num_alloc_vport; i++) {
3952 struct hclge_vport *vport = &hdev->vport[i];
3954 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3955 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3957 /* If the VF is not alive, set mps to the default value */
3958 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3959 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3963 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3965 unsigned long delta = round_jiffies_relative(HZ);
3967 /* Always handle the link updating to make sure link state is
3968 * updated when it is triggered by mbx.
3970 hclge_update_link_status(hdev);
3972 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3973 delta = jiffies - hdev->last_serv_processed;
3975 if (delta < round_jiffies_relative(HZ)) {
3976 delta = round_jiffies_relative(HZ) - delta;
3981 hdev->serv_processed_cnt++;
3982 hclge_update_vport_alive(hdev);
3984 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3985 hdev->last_serv_processed = jiffies;
3989 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3990 hclge_update_stats_for_all(hdev);
3992 hclge_update_port_info(hdev);
3993 hclge_sync_vlan_filter(hdev);
3995 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3996 hclge_rfs_filter_expire(hdev);
3998 hdev->last_serv_processed = jiffies;
4001 hclge_task_schedule(hdev, delta);
4004 static void hclge_service_task(struct work_struct *work)
4006 struct hclge_dev *hdev =
4007 container_of(work, struct hclge_dev, service_task.work);
4009 hclge_reset_service_task(hdev);
4010 hclge_mailbox_service_task(hdev);
4011 hclge_periodic_service_task(hdev);
4013 /* Handle reset and mbx again in case periodical task delays the
4014 * handling by calling hclge_task_schedule() in
4015 * hclge_periodic_service_task().
4017 hclge_reset_service_task(hdev);
4018 hclge_mailbox_service_task(hdev);
4021 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4023 /* VF handle has no client */
4024 if (!handle->client)
4025 return container_of(handle, struct hclge_vport, nic);
4026 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4027 return container_of(handle, struct hclge_vport, roce);
4029 return container_of(handle, struct hclge_vport, nic);
4032 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4033 struct hnae3_vector_info *vector_info)
4035 struct hclge_vport *vport = hclge_get_vport(handle);
4036 struct hnae3_vector_info *vector = vector_info;
4037 struct hclge_dev *hdev = vport->back;
4041 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4042 vector_num = min(hdev->num_msi_left, vector_num);
4044 for (j = 0; j < vector_num; j++) {
4045 for (i = 1; i < hdev->num_msi; i++) {
4046 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4047 vector->vector = pci_irq_vector(hdev->pdev, i);
4048 vector->io_addr = hdev->hw.io_base +
4049 HCLGE_VECTOR_REG_BASE +
4050 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4052 HCLGE_VECTOR_VF_OFFSET;
4053 hdev->vector_status[i] = vport->vport_id;
4054 hdev->vector_irq[i] = vector->vector;
4063 hdev->num_msi_left -= alloc;
4064 hdev->num_msi_used += alloc;
4069 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4073 for (i = 0; i < hdev->num_msi; i++)
4074 if (vector == hdev->vector_irq[i])
4080 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4082 struct hclge_vport *vport = hclge_get_vport(handle);
4083 struct hclge_dev *hdev = vport->back;
4086 vector_id = hclge_get_vector_index(hdev, vector);
4087 if (vector_id < 0) {
4088 dev_err(&hdev->pdev->dev,
4089 "Get vector index fail. vector = %d\n", vector);
4093 hclge_free_vector(hdev, vector_id);
4098 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4100 return HCLGE_RSS_KEY_SIZE;
4103 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4105 return HCLGE_RSS_IND_TBL_SIZE;
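/* hclge_set_rss_algo_key: the RSS hash key does not fit in one descriptor,
 * so it is written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes, each command
 * carrying the hash algorithm plus the chunk's key offset.
 */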
4108 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4109 const u8 hfunc, const u8 *key)
4111 struct hclge_rss_config_cmd *req;
4112 unsigned int key_offset = 0;
4113 struct hclge_desc desc;
4118 key_counts = HCLGE_RSS_KEY_SIZE;
4119 req = (struct hclge_rss_config_cmd *)desc.data;
4121 while (key_counts) {
4122 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4125 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4126 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4128 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4129 memcpy(req->hash_key,
4130 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4132 key_counts -= key_size;
4134 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4136 dev_err(&hdev->pdev->dev,
4137 "Configure RSS config fail, status = %d\n",
4145 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4147 struct hclge_rss_indirection_table_cmd *req;
4148 struct hclge_desc desc;
4152 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4154 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4155 hclge_cmd_setup_basic_desc
4156 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4158 req->start_table_index =
4159 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4160 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4162 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4163 req->rss_result[j] =
4164 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4166 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4168 dev_err(&hdev->pdev->dev,
4169 "Configure rss indir table fail,status = %d\n",
4177 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4178 u16 *tc_size, u16 *tc_offset)
4180 struct hclge_rss_tc_mode_cmd *req;
4181 struct hclge_desc desc;
4185 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4186 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4188 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4191 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4192 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4193 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4194 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4195 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4197 req->rss_tc_mode[i] = cpu_to_le16(mode);
4200 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4202 dev_err(&hdev->pdev->dev,
4203 "Configure rss tc mode fail, status = %d\n", ret);
4208 static void hclge_get_rss_type(struct hclge_vport *vport)
4210 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4211 vport->rss_tuple_sets.ipv4_udp_en ||
4212 vport->rss_tuple_sets.ipv4_sctp_en ||
4213 vport->rss_tuple_sets.ipv6_tcp_en ||
4214 vport->rss_tuple_sets.ipv6_udp_en ||
4215 vport->rss_tuple_sets.ipv6_sctp_en)
4216 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4217 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4218 vport->rss_tuple_sets.ipv6_fragment_en)
4219 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4221 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4224 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4226 struct hclge_rss_input_tuple_cmd *req;
4227 struct hclge_desc desc;
4230 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4232 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4234 /* Get the tuple cfg from the PF */
4235 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4236 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4237 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4238 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4239 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4240 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4241 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4242 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4243 hclge_get_rss_type(&hdev->vport[0]);
4244 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4246 dev_err(&hdev->pdev->dev,
4247 "Configure rss input fail, status = %d\n", ret);
4251 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4254 struct hclge_vport *vport = hclge_get_vport(handle);
4257 /* Get hash algorithm */
4259 switch (vport->rss_algo) {
4260 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4261 *hfunc = ETH_RSS_HASH_TOP;
4263 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4264 *hfunc = ETH_RSS_HASH_XOR;
4267 *hfunc = ETH_RSS_HASH_UNKNOWN;
4272 /* Get the RSS Key required by the user */
4274 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4276 /* Get indirect table */
4278 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4279 indir[i] = vport->rss_indirection_tbl[i];
4284 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4285 const u8 *key, const u8 hfunc)
4287 struct hclge_vport *vport = hclge_get_vport(handle);
4288 struct hclge_dev *hdev = vport->back;
4292 /* Set the RSS hash key if specified by the user */
4295 case ETH_RSS_HASH_TOP:
4296 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4298 case ETH_RSS_HASH_XOR:
4299 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4301 case ETH_RSS_HASH_NO_CHANGE:
4302 hash_algo = vport->rss_algo;
4308 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4312 /* Update the shadow RSS key with the user specified key */
4313 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4314 vport->rss_algo = hash_algo;
4317 /* Update the shadow RSS table with user specified qids */
4318 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4319 vport->rss_indirection_tbl[i] = indir[i];
4321 /* Update the hardware */
4322 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
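/* hclge_get_rss_hash_bits: translate the ethtool RXH_* flags into the
 * hardware's per-flow-type tuple bits; SCTP flows additionally hash on
 * HCLGE_V_TAG_BIT (presumably the SCTP verification tag).
 */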
4325 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4327 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4329 if (nfc->data & RXH_L4_B_2_3)
4330 hash_sets |= HCLGE_D_PORT_BIT;
4332 hash_sets &= ~HCLGE_D_PORT_BIT;
4334 if (nfc->data & RXH_IP_SRC)
4335 hash_sets |= HCLGE_S_IP_BIT;
4337 hash_sets &= ~HCLGE_S_IP_BIT;
4339 if (nfc->data & RXH_IP_DST)
4340 hash_sets |= HCLGE_D_IP_BIT;
4342 hash_sets &= ~HCLGE_D_IP_BIT;
4344 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4345 hash_sets |= HCLGE_V_TAG_BIT;
4350 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4351 struct ethtool_rxnfc *nfc)
4353 struct hclge_vport *vport = hclge_get_vport(handle);
4354 struct hclge_dev *hdev = vport->back;
4355 struct hclge_rss_input_tuple_cmd *req;
4356 struct hclge_desc desc;
4360 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4361 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4364 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4365 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4367 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4368 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4369 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4370 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4371 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4372 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4373 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4374 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4376 tuple_sets = hclge_get_rss_hash_bits(nfc);
4377 switch (nfc->flow_type) {
4379 req->ipv4_tcp_en = tuple_sets;
4382 req->ipv6_tcp_en = tuple_sets;
4385 req->ipv4_udp_en = tuple_sets;
4388 req->ipv6_udp_en = tuple_sets;
4391 req->ipv4_sctp_en = tuple_sets;
4394 if ((nfc->data & RXH_L4_B_0_1) ||
4395 (nfc->data & RXH_L4_B_2_3))
4398 req->ipv6_sctp_en = tuple_sets;
4401 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4404 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4410 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4412 dev_err(&hdev->pdev->dev,
4413 "Set rss tuple fail, status = %d\n", ret);
4417 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4418 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4419 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4420 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4421 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4422 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4423 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4424 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4425 hclge_get_rss_type(vport);
4429 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4430 struct ethtool_rxnfc *nfc)
4432 struct hclge_vport *vport = hclge_get_vport(handle);
4437 switch (nfc->flow_type) {
4439 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4442 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4445 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4448 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4451 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4454 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4458 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4467 if (tuple_sets & HCLGE_D_PORT_BIT)
4468 nfc->data |= RXH_L4_B_2_3;
4469 if (tuple_sets & HCLGE_S_PORT_BIT)
4470 nfc->data |= RXH_L4_B_0_1;
4471 if (tuple_sets & HCLGE_D_IP_BIT)
4472 nfc->data |= RXH_IP_DST;
4473 if (tuple_sets & HCLGE_S_IP_BIT)
4474 nfc->data |= RXH_IP_SRC;
4479 static int hclge_get_tc_size(struct hnae3_handle *handle)
4481 struct hclge_vport *vport = hclge_get_vport(handle);
4482 struct hclge_dev *hdev = vport->back;
4484 return hdev->rss_size_max;
4487 int hclge_rss_init_hw(struct hclge_dev *hdev)
4489 struct hclge_vport *vport = hdev->vport;
4490 u8 *rss_indir = vport[0].rss_indirection_tbl;
4491 u16 rss_size = vport[0].alloc_rss_size;
4492 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4493 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4494 u8 *key = vport[0].rss_hash_key;
4495 u8 hfunc = vport[0].rss_algo;
4496 u16 tc_valid[HCLGE_MAX_TC_NUM];
4501 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4505 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4509 ret = hclge_set_rss_input_tuple(hdev);
4513 /* Each TC has the same queue size, and the tc_size set to hardware is
4514 * the log2 of the roundup power of two of rss_size; the actual queue
4515 * size is limited by the indirection table.
4516 */
4517 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4518 dev_err(&hdev->pdev->dev,
4519 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4524 roundup_size = roundup_pow_of_two(rss_size);
4525 roundup_size = ilog2(roundup_size);
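/* Worked example: with rss_size = 6, roundup_pow_of_two(6) = 8 and
 * ilog2(8) = 3, so the hardware is given tc_size = 3 (a span of 8
 * queues), while the indirection table entries (0..5) keep the number
 * of queues actually used at 6.
 */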
4527 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4528 tc_valid[i] = 0;
4530 if (!(hdev->hw_tc_map & BIT(i)))
4531 continue;
4533 tc_valid[i] = 1;
4534 tc_size[i] = roundup_size;
4535 tc_offset[i] = rss_size * i;
4538 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4541 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4543 struct hclge_vport *vport = hdev->vport;
4546 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4547 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4548 vport[j].rss_indirection_tbl[i] =
4549 i % vport[j].alloc_rss_size;
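/* Example: with alloc_rss_size = 8, the table holds the repeating
 * pattern 0, 1, ..., 7, 0, 1, ... across all HCLGE_RSS_IND_TBL_SIZE
 * entries, spreading hash buckets evenly over the vport's RSS queues.
 */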
4553 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4555 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4556 struct hclge_vport *vport = hdev->vport;
4558 if (hdev->pdev->revision >= 0x21)
4559 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4561 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4562 vport[i].rss_tuple_sets.ipv4_tcp_en =
4563 HCLGE_RSS_INPUT_TUPLE_OTHER;
4564 vport[i].rss_tuple_sets.ipv4_udp_en =
4565 HCLGE_RSS_INPUT_TUPLE_OTHER;
4566 vport[i].rss_tuple_sets.ipv4_sctp_en =
4567 HCLGE_RSS_INPUT_TUPLE_SCTP;
4568 vport[i].rss_tuple_sets.ipv4_fragment_en =
4569 HCLGE_RSS_INPUT_TUPLE_OTHER;
4570 vport[i].rss_tuple_sets.ipv6_tcp_en =
4571 HCLGE_RSS_INPUT_TUPLE_OTHER;
4572 vport[i].rss_tuple_sets.ipv6_udp_en =
4573 HCLGE_RSS_INPUT_TUPLE_OTHER;
4574 vport[i].rss_tuple_sets.ipv6_sctp_en =
4575 HCLGE_RSS_INPUT_TUPLE_SCTP;
4576 vport[i].rss_tuple_sets.ipv6_fragment_en =
4577 HCLGE_RSS_INPUT_TUPLE_OTHER;
4579 vport[i].rss_algo = rss_algo;
4581 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4582 HCLGE_RSS_KEY_SIZE);
4585 hclge_rss_indir_init_cfg(hdev);
4588 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4589 int vector_id, bool en,
4590 struct hnae3_ring_chain_node *ring_chain)
4592 struct hclge_dev *hdev = vport->back;
4593 struct hnae3_ring_chain_node *node;
4594 struct hclge_desc desc;
4595 struct hclge_ctrl_vector_chain_cmd *req =
4596 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4597 enum hclge_cmd_status status;
4598 enum hclge_opcode_type op;
4599 u16 tqp_type_and_id;
4602 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4603 hclge_cmd_setup_basic_desc(&desc, op, false);
4604 req->int_vector_id = vector_id;
4607 for (node = ring_chain; node; node = node->next) {
4608 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4609 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4610 HCLGE_INT_TYPE_S,
4611 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4612 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4613 HCLGE_TQP_ID_S, node->tqp_index);
4614 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4615 HCLGE_INT_GL_IDX_S,
4616 hnae3_get_field(node->int_gl_idx,
4617 HNAE3_RING_GL_IDX_M,
4618 HNAE3_RING_GL_IDX_S));
4619 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4620 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4621 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4622 req->vfid = vport->vport_id;
4624 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4626 dev_err(&hdev->pdev->dev,
4627 "Map TQP fail, status is %d.\n",
4633 hclge_cmd_setup_basic_desc(&desc,
4634 op,
4635 false);
4636 req->int_vector_id = vector_id;
4641 req->int_cause_num = i;
4642 req->vfid = vport->vport_id;
4643 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4645 dev_err(&hdev->pdev->dev,
4646 "Map TQP fail, status is %d.\n", status);
4654 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4655 struct hnae3_ring_chain_node *ring_chain)
4657 struct hclge_vport *vport = hclge_get_vport(handle);
4658 struct hclge_dev *hdev = vport->back;
4661 vector_id = hclge_get_vector_index(hdev, vector);
4662 if (vector_id < 0) {
4663 dev_err(&hdev->pdev->dev,
4664 "failed to get vector index. vector=%d\n", vector);
4668 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4671 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4672 struct hnae3_ring_chain_node *ring_chain)
4674 struct hclge_vport *vport = hclge_get_vport(handle);
4675 struct hclge_dev *hdev = vport->back;
4678 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4679 return 0;
4681 vector_id = hclge_get_vector_index(hdev, vector);
4682 if (vector_id < 0) {
4683 dev_err(&handle->pdev->dev,
4684 "Get vector index fail. ret =%d\n", vector_id);
4688 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4690 dev_err(&handle->pdev->dev,
4691 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4697 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4698 struct hclge_promisc_param *param)
4700 struct hclge_promisc_cfg_cmd *req;
4701 struct hclge_desc desc;
4704 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4706 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4707 req->vf_id = param->vf_id;
4709 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4710 * pdev revision 0x20; newer revisions support them. Setting these two
4711 * fields does not cause an error when the driver sends the command to
4712 * the firmware on revision 0x20.
4713 */
4714 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4715 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4717 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4719 dev_err(&hdev->pdev->dev,
4720 "Set promisc mode fail, status is %d.\n", ret);
4725 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4726 bool en_uc, bool en_mc, bool en_bc,
4732 memset(param, 0, sizeof(struct hclge_promisc_param));
4734 param->enable = HCLGE_PROMISC_EN_UC;
4736 param->enable |= HCLGE_PROMISC_EN_MC;
4738 param->enable |= HCLGE_PROMISC_EN_BC;
4739 param->vf_id = vport_id;
4742 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4743 bool en_mc_pmc, bool en_bc_pmc)
4745 struct hclge_dev *hdev = vport->back;
4746 struct hclge_promisc_param param;
4748 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4749 vport->vport_id);
4750 return hclge_cmd_set_promisc_mode(hdev, &param);
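/* Usage sketch (illustrative):
 *
 *	ret = hclge_set_vport_promisc_mode(vport, true, false, true);
 *
 * would enable unicast and broadcast promiscuity while leaving
 * multicast filtering in place for this vport.
 */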
4753 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4756 struct hclge_vport *vport = hclge_get_vport(handle);
4757 bool en_bc_pmc = true;
4759 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4760 * is always bypassed. So broadcast promisc should be disabled until
4761 * the user enables promisc mode.
4762 */
4763 if (handle->pdev->revision == 0x20)
4764 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4766 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4770 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4772 struct hclge_get_fd_mode_cmd *req;
4773 struct hclge_desc desc;
4776 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4778 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4780 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4782 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4786 *fd_mode = req->mode;
4791 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4792 u32 *stage1_entry_num,
4793 u32 *stage2_entry_num,
4794 u16 *stage1_counter_num,
4795 u16 *stage2_counter_num)
4797 struct hclge_get_fd_allocation_cmd *req;
4798 struct hclge_desc desc;
4801 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4803 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4805 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4807 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4812 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4813 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4814 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4815 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4820 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4822 struct hclge_set_fd_key_config_cmd *req;
4823 struct hclge_fd_key_cfg *stage;
4824 struct hclge_desc desc;
4827 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4829 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4830 stage = &hdev->fd_cfg.key_cfg[stage_num];
4831 req->stage = stage_num;
4832 req->key_select = stage->key_sel;
4833 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4834 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4835 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4836 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4837 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4838 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4840 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4842 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4847 static int hclge_init_fd_config(struct hclge_dev *hdev)
4849 #define LOW_2_WORDS 0x03
4850 struct hclge_fd_key_cfg *key_cfg;
4853 if (!hnae3_dev_fd_supported(hdev))
4856 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4860 switch (hdev->fd_cfg.fd_mode) {
4861 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4862 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4863 break;
4864 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4865 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4866 break;
4868 dev_err(&hdev->pdev->dev,
4869 "Unsupported flow director mode %u\n",
4870 hdev->fd_cfg.fd_mode);
4871 return -EOPNOTSUPP;
4872 }
4874 hdev->fd_cfg.proto_support =
4875 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4876 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4877 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4878 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4879 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4880 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4881 key_cfg->outer_sipv6_word_en = 0;
4882 key_cfg->outer_dipv6_word_en = 0;
4884 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4885 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4886 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4887 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4889 /* If the max 400-bit key is used, tuples for ether type are supported */
4890 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4891 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4892 key_cfg->tuple_active |=
4893 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4896 /* roce_type is used to filter RoCE frames;
4897 * dst_vport is used to specify the rule.
4898 */
4899 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
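/* Example: hclge_fd_convert_meta_data() below fills ROCE_TYPE with
 * NIC_PACKET, so RoCE frames never match NIC rules, and fills DST_VPORT
 * with the port number derived from the rule's vf_id, which keeps the
 * rules per function.
 */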
4901 ret = hclge_get_fd_allocation(hdev,
4902 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4903 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4904 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4905 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4909 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4912 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4913 int loc, u8 *key, bool is_add)
4915 struct hclge_fd_tcam_config_1_cmd *req1;
4916 struct hclge_fd_tcam_config_2_cmd *req2;
4917 struct hclge_fd_tcam_config_3_cmd *req3;
4918 struct hclge_desc desc[3];
4921 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4922 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4923 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4924 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4925 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4927 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4928 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4929 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4931 req1->stage = stage;
4932 req1->xy_sel = sel_x ? 1 : 0;
4933 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4934 req1->index = cpu_to_le32(loc);
4935 req1->entry_vld = sel_x ? is_add : 0;
4938 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4939 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4940 sizeof(req2->tcam_data));
4941 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4942 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4945 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4947 dev_err(&hdev->pdev->dev,
4948 "config tcam key fail, ret=%d\n",
4954 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4955 struct hclge_fd_ad_data *action)
4957 struct hclge_fd_ad_config_cmd *req;
4958 struct hclge_desc desc;
4959 u64 ad_data = 0;
4960 int ret;
4962 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4964 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4965 req->index = cpu_to_le32(loc);
4968 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4969 action->write_rule_id_to_bd);
4970 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4973 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4974 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4975 action->forward_to_direct_queue);
4976 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4978 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4979 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4980 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4981 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4982 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4983 action->next_input_key);
4985 req->ad_data = cpu_to_le64(ad_data);
4986 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4988 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4993 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4994 struct hclge_fd_rule *rule)
4996 u16 tmp_x_s, tmp_y_s;
4997 u32 tmp_x_l, tmp_y_l;
5000 if (rule->unused_tuple & tuple_bit)
5001 return true;
5003 switch (tuple_bit) {
5004 case 0:
5005 return false;
5006 case BIT(INNER_DST_MAC):
5007 for (i = 0; i < ETH_ALEN; i++) {
5008 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5009 rule->tuples_mask.dst_mac[i]);
5010 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5011 rule->tuples_mask.dst_mac[i]);
5015 case BIT(INNER_SRC_MAC):
5016 for (i = 0; i < ETH_ALEN; i++) {
5017 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5018 rule->tuples_mask.src_mac[i]);
5019 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5020 rule->tuples_mask.src_mac[i]);
5024 case BIT(INNER_VLAN_TAG_FST):
5025 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5026 rule->tuples_mask.vlan_tag1);
5027 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5028 rule->tuples_mask.vlan_tag1);
5029 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5030 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5033 case BIT(INNER_ETH_TYPE):
5034 calc_x(tmp_x_s, rule->tuples.ether_proto,
5035 rule->tuples_mask.ether_proto);
5036 calc_y(tmp_y_s, rule->tuples.ether_proto,
5037 rule->tuples_mask.ether_proto);
5038 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5039 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5042 case BIT(INNER_IP_TOS):
5043 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5044 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5047 case BIT(INNER_IP_PROTO):
5048 calc_x(*key_x, rule->tuples.ip_proto,
5049 rule->tuples_mask.ip_proto);
5050 calc_y(*key_y, rule->tuples.ip_proto,
5051 rule->tuples_mask.ip_proto);
5054 case BIT(INNER_SRC_IP):
5055 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5056 rule->tuples_mask.src_ip[IPV4_INDEX]);
5057 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5058 rule->tuples_mask.src_ip[IPV4_INDEX]);
5059 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5060 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5063 case BIT(INNER_DST_IP):
5064 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5065 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5066 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5067 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5068 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5069 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5072 case BIT(INNER_SRC_PORT):
5073 calc_x(tmp_x_s, rule->tuples.src_port,
5074 rule->tuples_mask.src_port);
5075 calc_y(tmp_y_s, rule->tuples.src_port,
5076 rule->tuples_mask.src_port);
5077 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5078 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5081 case BIT(INNER_DST_PORT):
5082 calc_x(tmp_x_s, rule->tuples.dst_port,
5083 rule->tuples_mask.dst_port);
5084 calc_y(tmp_y_s, rule->tuples.dst_port,
5085 rule->tuples_mask.dst_port);
5086 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5087 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5095 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5096 u8 vf_id, u8 network_port_id)
5098 u32 port_number = 0;
5100 if (port_type == HOST_PORT) {
5101 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5102 pf_id);
5103 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5104 vf_id);
5105 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5106 } else {
5107 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5108 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5109 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5110 }
5112 return port_number;
5113 }
5115 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5116 __le32 *key_x, __le32 *key_y,
5117 struct hclge_fd_rule *rule)
5119 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5120 u8 cur_pos = 0, tuple_size, shift_bits;
5123 for (i = 0; i < MAX_META_DATA; i++) {
5124 tuple_size = meta_data_key_info[i].key_length;
5125 tuple_bit = key_cfg->meta_data_active & BIT(i);
5127 switch (tuple_bit) {
5128 case BIT(ROCE_TYPE):
5129 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5130 cur_pos += tuple_size;
5132 case BIT(DST_VPORT):
5133 port_number = hclge_get_port_number(HOST_PORT, 0,
5134 rule->vf_id, 0);
5135 hnae3_set_field(meta_data,
5136 GENMASK(cur_pos + tuple_size - 1, cur_pos),
5137 cur_pos, port_number);
5138 cur_pos += tuple_size;
5145 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5146 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5147 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5149 *key_x = cpu_to_le32(tmp_x << shift_bits);
5150 *key_y = cpu_to_le32(tmp_y << shift_bits);
5153 /* A complete key is the combination of the meta data key and the tuple
5154 * key. The meta data key is stored in the MSB region, the tuple key in
5155 * the LSB region; unused bits are filled with 0.
5156 */
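/* Byte layout sketch (assuming the maximum 400-bit key with 32 bits of
 * meta data supported by this driver):
 *
 *	key byte:   0 ............ 45 | 46 ......... 49
 *	            tuple key         | meta data key
 *
 * meta_data_region below evaluates to max_key_length / 8 -
 * MAX_META_DATA_LENGTH / 8, the byte offset where the meta data starts.
 */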
5157 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5158 struct hclge_fd_rule *rule)
5160 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5161 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5162 u8 *cur_key_x, *cur_key_y;
5164 int ret, tuple_size;
5165 u8 meta_data_region;
5167 memset(key_x, 0, sizeof(key_x));
5168 memset(key_y, 0, sizeof(key_y));
5172 for (i = 0; i < MAX_TUPLE; i++) {
5176 tuple_size = tuple_key_info[i].key_length / 8;
5177 check_tuple = key_cfg->tuple_active & BIT(i);
5179 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5182 cur_key_x += tuple_size;
5183 cur_key_y += tuple_size;
5187 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5188 MAX_META_DATA_LENGTH / 8;
5190 hclge_fd_convert_meta_data(key_cfg,
5191 (__le32 *)(key_x + meta_data_region),
5192 (__le32 *)(key_y + meta_data_region),
5195 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5198 dev_err(&hdev->pdev->dev,
5199 "fd key_y config fail, loc=%u, ret=%d\n",
5200 rule->location, ret);
5204 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5207 dev_err(&hdev->pdev->dev,
5208 "fd key_x config fail, loc=%u, ret=%d\n",
5209 rule->location, ret);
5213 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5214 struct hclge_fd_rule *rule)
5216 struct hclge_fd_ad_data ad_data;
5218 ad_data.ad_id = rule->location;
5220 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5221 ad_data.drop_packet = true;
5222 ad_data.forward_to_direct_queue = false;
5223 ad_data.queue_id = 0;
5225 ad_data.drop_packet = false;
5226 ad_data.forward_to_direct_queue = true;
5227 ad_data.queue_id = rule->queue_id;
5230 ad_data.use_counter = false;
5231 ad_data.counter_id = 0;
5233 ad_data.use_next_stage = false;
5234 ad_data.next_input_key = 0;
5236 ad_data.write_rule_id_to_bd = true;
5237 ad_data.rule_id = rule->location;
5239 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
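/* Example (illustrative): an ethtool rule with ring_cookie ==
 * RX_CLS_FLOW_DISC becomes { .drop_packet = true,
 * .forward_to_direct_queue = false }, while "... action 5" becomes
 * { .forward_to_direct_queue = true, .queue_id = 5 }.
 */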
5242 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5243 struct ethtool_rx_flow_spec *fs, u32 *unused)
5245 struct ethtool_tcpip4_spec *tcp_ip4_spec;
5246 struct ethtool_usrip4_spec *usr_ip4_spec;
5247 struct ethtool_tcpip6_spec *tcp_ip6_spec;
5248 struct ethtool_usrip6_spec *usr_ip6_spec;
5249 struct ethhdr *ether_spec;
5251 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5254 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5257 if ((fs->flow_type & FLOW_EXT) &&
5258 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5259 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5263 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5267 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5268 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5270 if (!tcp_ip4_spec->ip4src)
5271 *unused |= BIT(INNER_SRC_IP);
5273 if (!tcp_ip4_spec->ip4dst)
5274 *unused |= BIT(INNER_DST_IP);
5276 if (!tcp_ip4_spec->psrc)
5277 *unused |= BIT(INNER_SRC_PORT);
5279 if (!tcp_ip4_spec->pdst)
5280 *unused |= BIT(INNER_DST_PORT);
5282 if (!tcp_ip4_spec->tos)
5283 *unused |= BIT(INNER_IP_TOS);
5287 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5288 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5289 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5291 if (!usr_ip4_spec->ip4src)
5292 *unused |= BIT(INNER_SRC_IP);
5294 if (!usr_ip4_spec->ip4dst)
5295 *unused |= BIT(INNER_DST_IP);
5297 if (!usr_ip4_spec->tos)
5298 *unused |= BIT(INNER_IP_TOS);
5300 if (!usr_ip4_spec->proto)
5301 *unused |= BIT(INNER_IP_PROTO);
5303 if (usr_ip4_spec->l4_4_bytes)
5306 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5313 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5314 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5317 /* check whether the src/dst ip addresses are used */
5318 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5319 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5320 *unused |= BIT(INNER_SRC_IP);
5322 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5323 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5324 *unused |= BIT(INNER_DST_IP);
5326 if (!tcp_ip6_spec->psrc)
5327 *unused |= BIT(INNER_SRC_PORT);
5329 if (!tcp_ip6_spec->pdst)
5330 *unused |= BIT(INNER_DST_PORT);
5332 if (tcp_ip6_spec->tclass)
5336 case IPV6_USER_FLOW:
5337 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5338 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5339 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5340 BIT(INNER_DST_PORT);
5342 /* check whether the src/dst ip addresses are used */
5343 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5344 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5345 *unused |= BIT(INNER_SRC_IP);
5347 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5348 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5349 *unused |= BIT(INNER_DST_IP);
5351 if (!usr_ip6_spec->l4_proto)
5352 *unused |= BIT(INNER_IP_PROTO);
5354 if (usr_ip6_spec->tclass)
5357 if (usr_ip6_spec->l4_4_bytes)
5362 ether_spec = &fs->h_u.ether_spec;
5363 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5364 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5365 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5367 if (is_zero_ether_addr(ether_spec->h_source))
5368 *unused |= BIT(INNER_SRC_MAC);
5370 if (is_zero_ether_addr(ether_spec->h_dest))
5371 *unused |= BIT(INNER_DST_MAC);
5373 if (!ether_spec->h_proto)
5374 *unused |= BIT(INNER_ETH_TYPE);
5381 if ((fs->flow_type & FLOW_EXT)) {
5382 if (fs->h_ext.vlan_etype)
5384 if (!fs->h_ext.vlan_tci)
5385 *unused |= BIT(INNER_VLAN_TAG_FST);
5387 if (fs->m_ext.vlan_tci) {
5388 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5392 *unused |= BIT(INNER_VLAN_TAG_FST);
5395 if (fs->flow_type & FLOW_MAC_EXT) {
5396 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5399 if (is_zero_ether_addr(fs->h_ext.h_dest))
5400 *unused |= BIT(INNER_DST_MAC);
5402 *unused &= ~(BIT(INNER_DST_MAC));
5408 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5410 struct hclge_fd_rule *rule = NULL;
5411 struct hlist_node *node2;
5413 spin_lock_bh(&hdev->fd_rule_lock);
5414 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5415 if (rule->location >= location)
5419 spin_unlock_bh(&hdev->fd_rule_lock);
5421 return rule && rule->location == location;
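/* Note: hclge_fd_update_rule_list() keeps the list sorted by ascending
 * location, which is why the walk above may stop at the first node
 * whose location is greater than or equal to the searched one.
 */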
5424 /* must be called with fd_rule_lock held */
5425 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5426 struct hclge_fd_rule *new_rule,
5430 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5431 struct hlist_node *node2;
5433 if (is_add && !new_rule)
5436 hlist_for_each_entry_safe(rule, node2,
5437 &hdev->fd_rule_list, rule_node) {
5438 if (rule->location >= location)
5443 if (rule && rule->location == location) {
5444 hlist_del(&rule->rule_node);
5446 hdev->hclge_fd_rule_num--;
5449 if (!hdev->hclge_fd_rule_num)
5450 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5451 clear_bit(location, hdev->fd_bmap);
5455 } else if (!is_add) {
5456 dev_err(&hdev->pdev->dev,
5457 "delete fail, rule %u is inexistent\n",
5462 INIT_HLIST_NODE(&new_rule->rule_node);
5465 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5467 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5469 set_bit(location, hdev->fd_bmap);
5470 hdev->hclge_fd_rule_num++;
5471 hdev->fd_active_type = new_rule->rule_type;
5476 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5477 struct ethtool_rx_flow_spec *fs,
5478 struct hclge_fd_rule *rule)
5480 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5482 switch (flow_type) {
5486 rule->tuples.src_ip[IPV4_INDEX] =
5487 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5488 rule->tuples_mask.src_ip[IPV4_INDEX] =
5489 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5491 rule->tuples.dst_ip[IPV4_INDEX] =
5492 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5493 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5494 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5496 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5497 rule->tuples_mask.src_port =
5498 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5500 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5501 rule->tuples_mask.dst_port =
5502 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5504 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5505 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5507 rule->tuples.ether_proto = ETH_P_IP;
5508 rule->tuples_mask.ether_proto = 0xFFFF;
5512 rule->tuples.src_ip[IPV4_INDEX] =
5513 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5514 rule->tuples_mask.src_ip[IPV4_INDEX] =
5515 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5517 rule->tuples.dst_ip[IPV4_INDEX] =
5518 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5519 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5520 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5522 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5523 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5525 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5526 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5528 rule->tuples.ether_proto = ETH_P_IP;
5529 rule->tuples_mask.ether_proto = 0xFFFF;
5535 be32_to_cpu_array(rule->tuples.src_ip,
5536 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5537 be32_to_cpu_array(rule->tuples_mask.src_ip,
5538 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5540 be32_to_cpu_array(rule->tuples.dst_ip,
5541 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5542 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5543 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5545 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5546 rule->tuples_mask.src_port =
5547 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5549 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5550 rule->tuples_mask.dst_port =
5551 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5553 rule->tuples.ether_proto = ETH_P_IPV6;
5554 rule->tuples_mask.ether_proto = 0xFFFF;
5557 case IPV6_USER_FLOW:
5558 be32_to_cpu_array(rule->tuples.src_ip,
5559 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5560 be32_to_cpu_array(rule->tuples_mask.src_ip,
5561 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5563 be32_to_cpu_array(rule->tuples.dst_ip,
5564 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5565 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5566 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5568 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5569 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5571 rule->tuples.ether_proto = ETH_P_IPV6;
5572 rule->tuples_mask.ether_proto = 0xFFFF;
5576 ether_addr_copy(rule->tuples.src_mac,
5577 fs->h_u.ether_spec.h_source);
5578 ether_addr_copy(rule->tuples_mask.src_mac,
5579 fs->m_u.ether_spec.h_source);
5581 ether_addr_copy(rule->tuples.dst_mac,
5582 fs->h_u.ether_spec.h_dest);
5583 ether_addr_copy(rule->tuples_mask.dst_mac,
5584 fs->m_u.ether_spec.h_dest);
5586 rule->tuples.ether_proto =
5587 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5588 rule->tuples_mask.ether_proto =
5589 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5596 switch (flow_type) {
5599 rule->tuples.ip_proto = IPPROTO_SCTP;
5600 rule->tuples_mask.ip_proto = 0xFF;
5604 rule->tuples.ip_proto = IPPROTO_TCP;
5605 rule->tuples_mask.ip_proto = 0xFF;
5609 rule->tuples.ip_proto = IPPROTO_UDP;
5610 rule->tuples_mask.ip_proto = 0xFF;
5616 if ((fs->flow_type & FLOW_EXT)) {
5617 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5618 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5621 if (fs->flow_type & FLOW_MAC_EXT) {
5622 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5623 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5629 /* must be called with fd_rule_lock held */
5630 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5631 struct hclge_fd_rule *rule)
5632 {
5633 int ret;
5635 if (!rule) {
5636 dev_err(&hdev->pdev->dev,
5637 "The flow director rule is NULL\n");
5638 return -EINVAL;
5639 }
5641 /* it never fails here, so there is no need to check the return value */
5642 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5644 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5648 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5655 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5659 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5660 struct ethtool_rxnfc *cmd)
5662 struct hclge_vport *vport = hclge_get_vport(handle);
5663 struct hclge_dev *hdev = vport->back;
5664 u16 dst_vport_id = 0, q_index = 0;
5665 struct ethtool_rx_flow_spec *fs;
5666 struct hclge_fd_rule *rule;
5671 if (!hnae3_dev_fd_supported(hdev))
5672 return -EOPNOTSUPP;
5674 if (!hdev->fd_en) {
5675 dev_warn(&hdev->pdev->dev,
5676 "Please enable flow director first\n");
5677 return -EOPNOTSUPP;
5678 }
5680 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5682 ret = hclge_fd_check_spec(hdev, fs, &unused);
5684 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5688 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5689 action = HCLGE_FD_ACTION_DROP_PACKET;
5691 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5692 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5695 if (vf > hdev->num_req_vfs) {
5696 dev_err(&hdev->pdev->dev,
5697 "Error: vf id (%u) > max vf num (%u)\n",
5698 vf, hdev->num_req_vfs);
5702 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5703 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5706 dev_err(&hdev->pdev->dev,
5707 "Error: queue id (%u) > max tqp num (%u)\n",
5712 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5716 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5720 ret = hclge_fd_get_tuple(hdev, fs, rule);
5726 rule->flow_type = fs->flow_type;
5728 rule->location = fs->location;
5729 rule->unused_tuple = unused;
5730 rule->vf_id = dst_vport_id;
5731 rule->queue_id = q_index;
5732 rule->action = action;
5733 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5735 /* to avoid rule conflict, when user configure rule by ethtool,
5736 * we need to clear all arfs rules
5738 hclge_clear_arfs_rules(handle);
5740 spin_lock_bh(&hdev->fd_rule_lock);
5741 ret = hclge_fd_config_rule(hdev, rule);
5743 spin_unlock_bh(&hdev->fd_rule_lock);
5748 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5749 struct ethtool_rxnfc *cmd)
5751 struct hclge_vport *vport = hclge_get_vport(handle);
5752 struct hclge_dev *hdev = vport->back;
5753 struct ethtool_rx_flow_spec *fs;
5756 if (!hnae3_dev_fd_supported(hdev))
5759 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5761 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5764 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5765 dev_err(&hdev->pdev->dev,
5766 "Delete fail, rule %u is inexistent\n", fs->location);
5770 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5775 spin_lock_bh(&hdev->fd_rule_lock);
5776 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5778 spin_unlock_bh(&hdev->fd_rule_lock);
5783 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5786 struct hclge_vport *vport = hclge_get_vport(handle);
5787 struct hclge_dev *hdev = vport->back;
5788 struct hclge_fd_rule *rule;
5789 struct hlist_node *node;
5792 if (!hnae3_dev_fd_supported(hdev))
5795 spin_lock_bh(&hdev->fd_rule_lock);
5796 for_each_set_bit(location, hdev->fd_bmap,
5797 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5798 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5802 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5804 hlist_del(&rule->rule_node);
5807 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5808 hdev->hclge_fd_rule_num = 0;
5809 bitmap_zero(hdev->fd_bmap,
5810 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5813 spin_unlock_bh(&hdev->fd_rule_lock);
5816 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5818 struct hclge_vport *vport = hclge_get_vport(handle);
5819 struct hclge_dev *hdev = vport->back;
5820 struct hclge_fd_rule *rule;
5821 struct hlist_node *node;
5824 /* Return ok here, because reset error handling will check this
5825 * return value. If an error is returned here, the reset process will
5826 * fail.
5827 */
5828 if (!hnae3_dev_fd_supported(hdev))
5829 return 0;
5831 /* if fd is disabled, it should not be restored during reset */
5832 if (!hdev->fd_en)
5833 return 0;
5835 spin_lock_bh(&hdev->fd_rule_lock);
5836 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5837 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5839 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5842 dev_warn(&hdev->pdev->dev,
5843 "Restore rule %u failed, remove it\n",
5845 clear_bit(rule->location, hdev->fd_bmap);
5846 hlist_del(&rule->rule_node);
5848 hdev->hclge_fd_rule_num--;
5852 if (hdev->hclge_fd_rule_num)
5853 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5855 spin_unlock_bh(&hdev->fd_rule_lock);
5860 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5861 struct ethtool_rxnfc *cmd)
5863 struct hclge_vport *vport = hclge_get_vport(handle);
5864 struct hclge_dev *hdev = vport->back;
5866 if (!hnae3_dev_fd_supported(hdev))
5869 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5870 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5875 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5876 struct ethtool_rxnfc *cmd)
5878 struct hclge_vport *vport = hclge_get_vport(handle);
5879 struct hclge_fd_rule *rule = NULL;
5880 struct hclge_dev *hdev = vport->back;
5881 struct ethtool_rx_flow_spec *fs;
5882 struct hlist_node *node2;
5884 if (!hnae3_dev_fd_supported(hdev))
5887 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5889 spin_lock_bh(&hdev->fd_rule_lock);
5891 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5892 if (rule->location >= fs->location)
5896 if (!rule || fs->location != rule->location) {
5897 spin_unlock_bh(&hdev->fd_rule_lock);
5898 return -ENOENT;
5899 }
5902 fs->flow_type = rule->flow_type;
5903 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5907 fs->h_u.tcp_ip4_spec.ip4src =
5908 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5909 fs->m_u.tcp_ip4_spec.ip4src =
5910 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5911 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5913 fs->h_u.tcp_ip4_spec.ip4dst =
5914 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5915 fs->m_u.tcp_ip4_spec.ip4dst =
5916 rule->unused_tuple & BIT(INNER_DST_IP) ?
5917 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5919 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5920 fs->m_u.tcp_ip4_spec.psrc =
5921 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5922 0 : cpu_to_be16(rule->tuples_mask.src_port);
5924 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5925 fs->m_u.tcp_ip4_spec.pdst =
5926 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5927 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5929 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5930 fs->m_u.tcp_ip4_spec.tos =
5931 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5932 0 : rule->tuples_mask.ip_tos;
5936 fs->h_u.usr_ip4_spec.ip4src =
5937 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5938 fs->m_u.usr_ip4_spec.ip4src =
5939 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5940 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5942 fs->h_u.usr_ip4_spec.ip4dst =
5943 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5944 fs->m_u.usr_ip4_spec.ip4dst =
5945 rule->unused_tuple & BIT(INNER_DST_IP) ?
5946 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5948 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5949 fs->m_u.usr_ip4_spec.tos =
5950 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5951 0 : rule->tuples_mask.ip_tos;
5953 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5954 fs->m_u.usr_ip4_spec.proto =
5955 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5956 0 : rule->tuples_mask.ip_proto;
5958 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5964 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5965 rule->tuples.src_ip, IPV6_SIZE);
5966 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5967 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5968 sizeof(int) * IPV6_SIZE);
5969 else
5970 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5971 rule->tuples_mask.src_ip, IPV6_SIZE);
5973 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5974 rule->tuples.dst_ip, IPV6_SIZE);
5975 if (rule->unused_tuple & BIT(INNER_DST_IP))
5976 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5977 sizeof(int) * IPV6_SIZE);
5978 else
5979 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5980 rule->tuples_mask.dst_ip, IPV6_SIZE);
5982 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5983 fs->m_u.tcp_ip6_spec.psrc =
5984 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5985 0 : cpu_to_be16(rule->tuples_mask.src_port);
5987 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5988 fs->m_u.tcp_ip6_spec.pdst =
5989 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5990 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5993 case IPV6_USER_FLOW:
5994 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5995 rule->tuples.src_ip, IPV6_SIZE);
5996 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5997 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5998 sizeof(int) * IPV6_SIZE);
5999 else
6000 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6001 rule->tuples_mask.src_ip, IPV6_SIZE);
6003 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6004 rule->tuples.dst_ip, IPV6_SIZE);
6005 if (rule->unused_tuple & BIT(INNER_DST_IP))
6006 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6007 sizeof(int) * IPV6_SIZE);
6008 else
6009 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6010 rule->tuples_mask.dst_ip, IPV6_SIZE);
6012 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6013 fs->m_u.usr_ip6_spec.l4_proto =
6014 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6015 0 : rule->tuples_mask.ip_proto;
6019 ether_addr_copy(fs->h_u.ether_spec.h_source,
6020 rule->tuples.src_mac);
6021 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6022 eth_zero_addr(fs->m_u.ether_spec.h_source);
6023 else
6024 ether_addr_copy(fs->m_u.ether_spec.h_source,
6025 rule->tuples_mask.src_mac);
6027 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6028 rule->tuples.dst_mac);
6029 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6030 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6031 else
6032 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6033 rule->tuples_mask.dst_mac);
6035 fs->h_u.ether_spec.h_proto =
6036 cpu_to_be16(rule->tuples.ether_proto);
6037 fs->m_u.ether_spec.h_proto =
6038 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6039 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6040 break;
6042 default:
6043 spin_unlock_bh(&hdev->fd_rule_lock);
6044 return -EOPNOTSUPP;
6045 }
6047 if (fs->flow_type & FLOW_EXT) {
6048 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6049 fs->m_ext.vlan_tci =
6050 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6051 cpu_to_be16(VLAN_VID_MASK) :
6052 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6055 if (fs->flow_type & FLOW_MAC_EXT) {
6056 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6057 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6058 eth_zero_addr(fs->m_ext.h_dest);
6059 else
6060 ether_addr_copy(fs->m_ext.h_dest,
6061 rule->tuples_mask.dst_mac);
6064 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6065 fs->ring_cookie = RX_CLS_FLOW_DISC;
6069 fs->ring_cookie = rule->queue_id;
6070 vf_id = rule->vf_id;
6071 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6072 fs->ring_cookie |= vf_id;
6075 spin_unlock_bh(&hdev->fd_rule_lock);
6080 static int hclge_get_all_rules(struct hnae3_handle *handle,
6081 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6083 struct hclge_vport *vport = hclge_get_vport(handle);
6084 struct hclge_dev *hdev = vport->back;
6085 struct hclge_fd_rule *rule;
6086 struct hlist_node *node2;
6089 if (!hnae3_dev_fd_supported(hdev))
6092 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6094 spin_lock_bh(&hdev->fd_rule_lock);
6095 hlist_for_each_entry_safe(rule, node2,
6096 &hdev->fd_rule_list, rule_node) {
6097 if (cnt == cmd->rule_cnt) {
6098 spin_unlock_bh(&hdev->fd_rule_lock);
6099 return -EMSGSIZE;
6100 }
6102 rule_locs[cnt] = rule->location;
6103 cnt++;
6106 spin_unlock_bh(&hdev->fd_rule_lock);
6108 cmd->rule_cnt = cnt;
6113 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6114 struct hclge_fd_rule_tuples *tuples)
6116 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6117 tuples->ip_proto = fkeys->basic.ip_proto;
6118 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6120 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6121 tuples->src_ip[IPV4_INDEX] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6122 tuples->dst_ip[IPV4_INDEX] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6123 } else {
6124 memcpy(tuples->src_ip,
6125 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6126 sizeof(tuples->src_ip));
6127 memcpy(tuples->dst_ip,
6128 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6129 sizeof(tuples->dst_ip));
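/* Note: src_ip/dst_ip are arrays of IPV6_SIZE (4) u32 words; an IPv4
 * address occupies only the last word (IPV4_INDEX, i.e. index 3), so
 * IPv4 and IPv6 flows share one tuple layout.
 */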
6133 /* traverse all rules, check whether an existing rule has the same tuples */
6134 static struct hclge_fd_rule *
6135 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6136 const struct hclge_fd_rule_tuples *tuples)
6138 struct hclge_fd_rule *rule = NULL;
6139 struct hlist_node *node;
6141 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6142 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6143 return rule;
6144 }
6146 return NULL;
6147 }
6149 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6150 struct hclge_fd_rule *rule)
6152 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6153 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6154 BIT(INNER_SRC_PORT);
6157 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6158 if (tuples->ether_proto == ETH_P_IP) {
6159 if (tuples->ip_proto == IPPROTO_TCP)
6160 rule->flow_type = TCP_V4_FLOW;
6162 rule->flow_type = UDP_V4_FLOW;
6164 if (tuples->ip_proto == IPPROTO_TCP)
6165 rule->flow_type = TCP_V6_FLOW;
6167 rule->flow_type = UDP_V6_FLOW;
6169 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6170 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
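/* Although tuples_mask is set to all ones here, the fields named in
 * unused_tuple above (MACs, vlan tag, tos, src port) are skipped when
 * the TCAM key is built, so an aRFS rule is effectively an exact match
 * on the src/dst ip, dst port and ip proto of the flow.
 */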
6173 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6174 u16 flow_id, struct flow_keys *fkeys)
6176 struct hclge_vport *vport = hclge_get_vport(handle);
6177 struct hclge_fd_rule_tuples new_tuples;
6178 struct hclge_dev *hdev = vport->back;
6179 struct hclge_fd_rule *rule;
6184 if (!hnae3_dev_fd_supported(hdev))
6187 memset(&new_tuples, 0, sizeof(new_tuples));
6188 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6190 spin_lock_bh(&hdev->fd_rule_lock);
6192 /* when there is already an fd rule added by the user,
6193 * arfs should not work
6194 */
6195 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6196 spin_unlock_bh(&hdev->fd_rule_lock);
6201 /* check whether a flow director filter exists for this flow;
6202 * if not, create a new filter for it;
6203 * if a filter exists with a different queue id, modify the filter;
6204 * if a filter exists with the same queue id, do nothing
6205 */
6206 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6208 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6209 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6210 spin_unlock_bh(&hdev->fd_rule_lock);
6215 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6217 spin_unlock_bh(&hdev->fd_rule_lock);
6222 set_bit(bit_id, hdev->fd_bmap);
6223 rule->location = bit_id;
6224 rule->flow_id = flow_id;
6225 rule->queue_id = queue_id;
6226 hclge_fd_build_arfs_rule(&new_tuples, rule);
6227 ret = hclge_fd_config_rule(hdev, rule);
6229 spin_unlock_bh(&hdev->fd_rule_lock);
6234 return rule->location;
6237 spin_unlock_bh(&hdev->fd_rule_lock);
6239 if (rule->queue_id == queue_id)
6240 return rule->location;
6242 tmp_queue_id = rule->queue_id;
6243 rule->queue_id = queue_id;
6244 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6246 rule->queue_id = tmp_queue_id;
6250 return rule->location;
6253 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6255 #ifdef CONFIG_RFS_ACCEL
6256 struct hnae3_handle *handle = &hdev->vport[0].nic;
6257 struct hclge_fd_rule *rule;
6258 struct hlist_node *node;
6259 HLIST_HEAD(del_list);
6261 spin_lock_bh(&hdev->fd_rule_lock);
6262 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6263 spin_unlock_bh(&hdev->fd_rule_lock);
6266 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6267 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6268 rule->flow_id, rule->location)) {
6269 hlist_del_init(&rule->rule_node);
6270 hlist_add_head(&rule->rule_node, &del_list);
6271 hdev->hclge_fd_rule_num--;
6272 clear_bit(rule->location, hdev->fd_bmap);
6275 spin_unlock_bh(&hdev->fd_rule_lock);
6277 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6278 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6279 rule->location, NULL, false);
6285 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6287 #ifdef CONFIG_RFS_ACCEL
6288 struct hclge_vport *vport = hclge_get_vport(handle);
6289 struct hclge_dev *hdev = vport->back;
6291 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6292 hclge_del_all_fd_entries(handle, true);
6296 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6298 struct hclge_vport *vport = hclge_get_vport(handle);
6299 struct hclge_dev *hdev = vport->back;
6301 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6302 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6305 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6307 struct hclge_vport *vport = hclge_get_vport(handle);
6308 struct hclge_dev *hdev = vport->back;
6310 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6313 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6315 struct hclge_vport *vport = hclge_get_vport(handle);
6316 struct hclge_dev *hdev = vport->back;
6318 return hdev->rst_stats.hw_reset_done_cnt;
6321 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6323 struct hclge_vport *vport = hclge_get_vport(handle);
6324 struct hclge_dev *hdev = vport->back;
6327 hdev->fd_en = enable;
6328 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6330 hclge_del_all_fd_entries(handle, clear);
6332 hclge_restore_fd_entries(handle);
6335 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6337 struct hclge_desc desc;
6338 struct hclge_config_mac_mode_cmd *req =
6339 (struct hclge_config_mac_mode_cmd *)desc.data;
6343 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6346 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6347 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6348 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6349 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6350 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6351 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6352 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6353 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6354 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6355 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6358 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6360 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6362 dev_err(&hdev->pdev->dev,
6363 "mac enable fail, ret =%d.\n", ret);
6366 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6367 u8 switch_param, u8 param_mask)
6369 struct hclge_mac_vlan_switch_cmd *req;
6370 struct hclge_desc desc;
6374 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6375 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6377 /* read current config parameter */
6378 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6380 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6381 req->func_id = cpu_to_le32(func_id);
6383 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6385 dev_err(&hdev->pdev->dev,
6386 "read mac vlan switch parameter fail, ret = %d\n", ret);
6390 /* modify and write new config parameter */
6391 hclge_cmd_reuse_desc(&desc, false);
6392 req->switch_param = (req->switch_param & param_mask) | switch_param;
6393 req->param_mask = param_mask;
6395 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6397 dev_err(&hdev->pdev->dev,
6398 "set mac vlan switch parameter fail, ret = %d\n", ret);
6402 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6405 #define HCLGE_PHY_LINK_STATUS_NUM 200
6407 struct phy_device *phydev = hdev->hw.mac.phydev;
6412 ret = phy_read_status(phydev);
6414 dev_err(&hdev->pdev->dev,
6415 "phy update link status fail, ret = %d\n", ret);
6419 if (phydev->link == link_ret)
6422 msleep(HCLGE_LINK_STATUS_MS);
6423 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6426 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6428 #define HCLGE_MAC_LINK_STATUS_NUM 100
6434 ret = hclge_get_mac_link_status(hdev);
6435 if (ret < 0)
6436 return ret;
6437 else if (ret == link_ret)
6438 return 0;
6440 msleep(HCLGE_LINK_STATUS_MS);
6441 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6443 return -EBUSY;
6444 }
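/* Worst case the loop above polls HCLGE_MAC_LINK_STATUS_NUM times with
 * HCLGE_LINK_STATUS_MS sleeps between polls, i.e. 100 * 10 ms, about
 * one second before giving up.
 */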
6445 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6448 #define HCLGE_LINK_STATUS_DOWN 0
6449 #define HCLGE_LINK_STATUS_UP 1
6453 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6456 hclge_phy_link_status_wait(hdev, link_ret);
6458 return hclge_mac_link_status_wait(hdev, link_ret);
6461 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6463 struct hclge_config_mac_mode_cmd *req;
6464 struct hclge_desc desc;
6468 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6469 /* 1 Read out the MAC mode config at first */
6470 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6471 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6473 dev_err(&hdev->pdev->dev,
6474 "mac loopback get fail, ret =%d.\n", ret);
6478 /* 2 Then setup the loopback flag */
6479 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6480 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6481 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6482 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6484 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6486 /* 3 Config mac work mode with loopback flag
6487 * and its original configure parameters
6489 hclge_cmd_reuse_desc(&desc, false);
6490 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6492 dev_err(&hdev->pdev->dev,
6493 "mac loopback set fail, ret =%d.\n", ret);
6497 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6498 enum hnae3_loop loop_mode)
6500 #define HCLGE_SERDES_RETRY_MS 10
6501 #define HCLGE_SERDES_RETRY_NUM 100
6503 struct hclge_serdes_lb_cmd *req;
6504 struct hclge_desc desc;
6508 req = (struct hclge_serdes_lb_cmd *)desc.data;
6509 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6511 switch (loop_mode) {
6512 case HNAE3_LOOP_SERIAL_SERDES:
6513 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6515 case HNAE3_LOOP_PARALLEL_SERDES:
6516 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6519 dev_err(&hdev->pdev->dev,
6520 "unsupported serdes loopback mode %d\n", loop_mode);
6525 req->enable = loop_mode_b;
6526 req->mask = loop_mode_b;
6528 req->mask = loop_mode_b;
6531 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6533 dev_err(&hdev->pdev->dev,
6534 "serdes loopback set fail, ret = %d\n", ret);
6539 msleep(HCLGE_SERDES_RETRY_MS);
6540 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6542 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6544 dev_err(&hdev->pdev->dev,
6545 "serdes loopback get, ret = %d\n", ret);
6548 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6549 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6551 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6552 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6554 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6555 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6561 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6562 enum hnae3_loop loop_mode)
6566 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6570 hclge_cfg_mac_mode(hdev, en);
6572 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6574 dev_err(&hdev->pdev->dev,
6575 "serdes loopback config mac mode timeout\n");
6580 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6581 struct phy_device *phydev)
6585 if (!phydev->suspended) {
6586 ret = phy_suspend(phydev);
6587 if (ret)
6588 return ret;
6589 }
6591 ret = phy_resume(phydev);
6592 if (ret)
6593 return ret;
6595 return phy_loopback(phydev, true);
6598 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6599 struct phy_device *phydev)
6603 ret = phy_loopback(phydev, false);
6604 if (ret)
6605 return ret;
6607 return phy_suspend(phydev);
6610 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6612 struct phy_device *phydev = hdev->hw.mac.phydev;
6613 int ret;
6615 if (!phydev)
6616 return -ENOTSUPP;
6618 if (en)
6619 ret = hclge_enable_phy_loopback(hdev, phydev);
6620 else
6621 ret = hclge_disable_phy_loopback(hdev, phydev);
6622 if (ret) {
6623 dev_err(&hdev->pdev->dev,
6624 "set phy loopback fail, ret = %d\n", ret);
6625 return ret;
6626 }
6628 hclge_cfg_mac_mode(hdev, en);
6630 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6632 dev_err(&hdev->pdev->dev,
6633 "phy loopback config mac mode timeout\n");
6638 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6639 int stream_id, bool enable)
6641 struct hclge_desc desc;
6642 struct hclge_cfg_com_tqp_queue_cmd *req =
6643 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6646 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6647 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6648 req->stream_id = cpu_to_le16(stream_id);
6650 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6652 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6654 dev_err(&hdev->pdev->dev,
6655 "Tqp enable fail, status =%d.\n", ret);
6659 static int hclge_set_loopback(struct hnae3_handle *handle,
6660 enum hnae3_loop loop_mode, bool en)
6662 struct hclge_vport *vport = hclge_get_vport(handle);
6663 struct hnae3_knic_private_info *kinfo;
6664 struct hclge_dev *hdev = vport->back;
6667 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6668 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6669 * the same, the packets are looped back in the SSU. If SSU loopback
6670 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6671 */
6672 if (hdev->pdev->revision >= 0x21) {
6673 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6675 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6676 HCLGE_SWITCH_ALW_LPBK_MASK);
6681 switch (loop_mode) {
6682 case HNAE3_LOOP_APP:
6683 ret = hclge_set_app_loopback(hdev, en);
6685 case HNAE3_LOOP_SERIAL_SERDES:
6686 case HNAE3_LOOP_PARALLEL_SERDES:
6687 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6689 case HNAE3_LOOP_PHY:
6690 ret = hclge_set_phy_loopback(hdev, en);
6694 dev_err(&hdev->pdev->dev,
6695 "loop_mode %d is not supported\n", loop_mode);
6702 kinfo = &vport->nic.kinfo;
6703 for (i = 0; i < kinfo->num_tqps; i++) {
6704 ret = hclge_tqp_enable(hdev, i, 0, en);
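/* Illustrative usage (hypothetical caller, not driver code): a loopback
 * self-test would enable one mode through the dispatcher above, push
 * test frames, then restore the previous state. send_test_frames() is
 * an assumed helper for this example.
 */
#if 0 /* example only */
static int run_loopback_selftest(struct hnae3_handle *h)
{
	int ret;

	ret = hclge_set_loopback(h, HNAE3_LOOP_SERIAL_SERDES, true);
	if (ret)
		return ret;

	ret = send_test_frames(h);	/* assumed helper */

	hclge_set_loopback(h, HNAE3_LOOP_SERIAL_SERDES, false);
	return ret;
}
#endif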
6712 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6716 ret = hclge_set_app_loopback(hdev, false);
6720 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6724 return hclge_cfg_serdes_loopback(hdev, false,
6725 HNAE3_LOOP_PARALLEL_SERDES);
6728 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6730 struct hclge_vport *vport = hclge_get_vport(handle);
6731 struct hnae3_knic_private_info *kinfo;
6732 struct hnae3_queue *queue;
6733 struct hclge_tqp *tqp;
6736 kinfo = &vport->nic.kinfo;
6737 for (i = 0; i < kinfo->num_tqps; i++) {
6738 queue = handle->kinfo.tqp[i];
6739 tqp = container_of(queue, struct hclge_tqp, q);
6740 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6744 static void hclge_flush_link_update(struct hclge_dev *hdev)
6746 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
6748 unsigned long last = hdev->serv_processed_cnt;
6751 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6752 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6753 last == hdev->serv_processed_cnt)
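/* Illustrative sketch (not driver code): the flush above is a bounded
 * busy-wait with two early exits -- it stops once the LINK_UPDATING flag
 * clears, or once the service task is seen to have run (the processed
 * counter moved), whichever comes first. Reduced form, with a 1us poll
 * interval assumed:
 */
#if 0 /* example only */
static void bounded_flush(const unsigned long *processed_cnt, int max_iters,
			  bool (*still_updating)(void))
{
	unsigned long seen = READ_ONCE(*processed_cnt);
	int i = 0;

	while (still_updating() && i++ < max_iters &&
	       seen == READ_ONCE(*processed_cnt))
		udelay(1);
}
#endif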
6757 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6759 struct hclge_vport *vport = hclge_get_vport(handle);
6760 struct hclge_dev *hdev = vport->back;
6763 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6765 /* Set the DOWN flag here to disable link updating */
6766 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6768 /* flush memory to make sure DOWN is seen by service task */
6769 smp_mb__before_atomic();
6770 hclge_flush_link_update(hdev);
6774 static int hclge_ae_start(struct hnae3_handle *handle)
6776 struct hclge_vport *vport = hclge_get_vport(handle);
6777 struct hclge_dev *hdev = vport->back;
6780 hclge_cfg_mac_mode(hdev, true);
6781 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6782 hdev->hw.mac.link = 0;
6784 /* reset tqp stats */
6785 hclge_reset_tqp_stats(handle);
6787 hclge_mac_start_phy(hdev);
6792 static void hclge_ae_stop(struct hnae3_handle *handle)
6794 struct hclge_vport *vport = hclge_get_vport(handle);
6795 struct hclge_dev *hdev = vport->back;
6798 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6800 hclge_clear_arfs_rules(handle);
6802 /* If it is not a PF reset, the firmware will disable the MAC,
6803 * so we only need to stop the PHY here.
6805 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6806 hdev->reset_type != HNAE3_FUNC_RESET) {
6807 hclge_mac_stop_phy(hdev);
6808 hclge_update_link_status(hdev);
6812 for (i = 0; i < handle->kinfo.num_tqps; i++)
6813 hclge_reset_tqp(handle, i);
6815 hclge_config_mac_tnl_int(hdev, false);
6818 hclge_cfg_mac_mode(hdev, false);
6820 hclge_mac_stop_phy(hdev);
6822 /* reset tqp stats */
6823 hclge_reset_tqp_stats(handle);
6824 hclge_update_link_status(hdev);
6827 int hclge_vport_start(struct hclge_vport *vport)
6829 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6830 vport->last_active_jiffies = jiffies;
6834 void hclge_vport_stop(struct hclge_vport *vport)
6836 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6839 static int hclge_client_start(struct hnae3_handle *handle)
6841 struct hclge_vport *vport = hclge_get_vport(handle);
6843 return hclge_vport_start(vport);
6846 static void hclge_client_stop(struct hnae3_handle *handle)
6848 struct hclge_vport *vport = hclge_get_vport(handle);
6850 hclge_vport_stop(vport);
6853 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6854 u16 cmdq_resp, u8 resp_code,
6855 enum hclge_mac_vlan_tbl_opcode op)
6857 struct hclge_dev *hdev = vport->back;
6860 dev_err(&hdev->pdev->dev,
6861 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6866 if (op == HCLGE_MAC_VLAN_ADD) {
6867 if ((!resp_code) || (resp_code == 1)) {
6869 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6870 dev_err(&hdev->pdev->dev,
6871 "add mac addr failed for uc_overflow.\n");
6873 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6874 dev_err(&hdev->pdev->dev,
6875 "add mac addr failed for mc_overflow.\n");
6879 dev_err(&hdev->pdev->dev,
6880 "add mac addr failed for undefined, code=%u.\n",
6883 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6886 } else if (resp_code == 1) {
6887 dev_dbg(&hdev->pdev->dev,
6888 "remove mac addr failed for miss.\n");
6892 dev_err(&hdev->pdev->dev,
6893 "remove mac addr failed for undefined, code=%u.\n",
6896 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6899 } else if (resp_code == 1) {
6900 dev_dbg(&hdev->pdev->dev,
6901 "lookup mac addr failed for miss.\n");
6905 dev_err(&hdev->pdev->dev,
6906 "lookup mac addr failed for undefined, code=%u.\n",
6911 dev_err(&hdev->pdev->dev,
6912 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6917 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6919 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6921 unsigned int word_num;
6922 unsigned int bit_num;
6924 if (vfid > 255 || vfid < 0)
6927 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6928 word_num = vfid / 32;
6929 bit_num = vfid % 32;
6931 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6933 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6935 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6936 bit_num = vfid % 32;
6938 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6940 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
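/* Worked example (not driver code): function ids 0..191 live in the
 * data words of desc[1] and ids 192..255 in desc[2], 32 ids per 32-bit
 * word. For vfid 200: desc index 2, word (200 - 192) / 32 = 0,
 * bit 200 % 32 = 8.
 */
#if 0 /* example only */
static void locate_vfid_bit(int vfid, int *desc_idx, int *word, int *bit)
{
	if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		*desc_idx = 1;
		*word = vfid / 32;
	} else {
		*desc_idx = 2;
		*word = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
	}
	*bit = vfid % 32;
}
#endif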
6946 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6948 #define HCLGE_DESC_NUMBER 3
6949 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6952 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6953 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6954 if (desc[i].data[j])
6960 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6961 const u8 *addr, bool is_mc)
6963 const unsigned char *mac_addr = addr;
6964 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6965 (mac_addr[0]) | (mac_addr[1] << 8);
6966 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6968 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6970 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6971 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6974 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6975 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
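/* Worked example (not driver code): the 6-byte MAC is packed with byte 0
 * in the least significant position of the 32-bit word and bytes 4..5 in
 * the 16-bit word. For 00:11:22:33:44:55 this yields hi32 = 0x33221100
 * and lo16 = 0x5544.
 */
#if 0 /* example only */
static void pack_mac(const u8 *mac, u32 *hi32, u16 *lo16)
{
	*hi32 = mac[0] | mac[1] << 8 | mac[2] << 16 | (u32)mac[3] << 24;
	*lo16 = mac[4] | mac[5] << 8;
}
#endif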
6978 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6979 struct hclge_mac_vlan_tbl_entry_cmd *req)
6981 struct hclge_dev *hdev = vport->back;
6982 struct hclge_desc desc;
6987 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6989 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6991 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6993 dev_err(&hdev->pdev->dev,
6994 "del mac addr failed for cmd_send, ret =%d.\n",
6998 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6999 retval = le16_to_cpu(desc.retval);
7001 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7002 HCLGE_MAC_VLAN_REMOVE);
7005 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7006 struct hclge_mac_vlan_tbl_entry_cmd *req,
7007 struct hclge_desc *desc,
7010 struct hclge_dev *hdev = vport->back;
7015 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7017 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7018 memcpy(desc[0].data,
7020 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7021 hclge_cmd_setup_basic_desc(&desc[1],
7022 HCLGE_OPC_MAC_VLAN_ADD,
7024 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7025 hclge_cmd_setup_basic_desc(&desc[2],
7026 HCLGE_OPC_MAC_VLAN_ADD,
7028 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7030 memcpy(desc[0].data,
7032 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7033 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7036 dev_err(&hdev->pdev->dev,
7037 "lookup mac addr failed for cmd_send, ret =%d.\n",
7041 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7042 retval = le16_to_cpu(desc[0].retval);
7044 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7045 HCLGE_MAC_VLAN_LKUP);
7048 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7049 struct hclge_mac_vlan_tbl_entry_cmd *req,
7050 struct hclge_desc *mc_desc)
7052 struct hclge_dev *hdev = vport->back;
7059 struct hclge_desc desc;
7061 hclge_cmd_setup_basic_desc(&desc,
7062 HCLGE_OPC_MAC_VLAN_ADD,
7064 memcpy(desc.data, req,
7065 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7066 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7067 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7068 retval = le16_to_cpu(desc.retval);
7070 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7072 HCLGE_MAC_VLAN_ADD);
7074 hclge_cmd_reuse_desc(&mc_desc[0], false);
7075 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7076 hclge_cmd_reuse_desc(&mc_desc[1], false);
7077 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7078 hclge_cmd_reuse_desc(&mc_desc[2], false);
7079 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7080 memcpy(mc_desc[0].data, req,
7081 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7082 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7083 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7084 retval = le16_to_cpu(mc_desc[0].retval);
7086 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7088 HCLGE_MAC_VLAN_ADD);
7092 dev_err(&hdev->pdev->dev,
7093 "add mac addr failed for cmd_send, ret =%d.\n",
7101 static int hclge_init_umv_space(struct hclge_dev *hdev)
7103 u16 allocated_size = 0;
7106 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7111 if (allocated_size < hdev->wanted_umv_size)
7112 dev_warn(&hdev->pdev->dev,
7113 "Alloc umv space failed, want %u, get %u\n",
7114 hdev->wanted_umv_size, allocated_size);
7116 mutex_init(&hdev->umv_mutex);
7117 hdev->max_umv_size = allocated_size;
7118 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7119 * preserve some unicast mac vlan table entries shared by the pf and its vfs.
7122 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7123 hdev->share_umv_size = hdev->priv_umv_size +
7124 hdev->max_umv_size % (hdev->num_req_vfs + 2);
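/* Worked example (not driver code): the pool is cut into
 * (num_req_vfs + 2) slices -- one per VF, one for the PF, and one
 * (plus any remainder) shared. With max_umv_size = 256 and
 * num_req_vfs = 6: priv = 256 / 8 = 32 and share = 32 + 256 % 8 = 32.
 */
#if 0 /* example only */
static void split_umv_space(u16 max, u16 num_vfs, u16 *priv, u16 *share)
{
	u16 div = num_vfs + 2;

	*priv = max / div;
	*share = *priv + max % div;
}
#endif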
7129 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7133 if (hdev->max_umv_size > 0) {
7134 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7138 hdev->max_umv_size = 0;
7140 mutex_destroy(&hdev->umv_mutex);
7145 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7146 u16 *allocated_size, bool is_alloc)
7148 struct hclge_umv_spc_alc_cmd *req;
7149 struct hclge_desc desc;
7152 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7153 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7155 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7157 req->space_size = cpu_to_le32(space_size);
7159 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7161 dev_err(&hdev->pdev->dev,
7162 "%s umv space failed for cmd_send, ret =%d\n",
7163 is_alloc ? "allocate" : "free", ret);
7167 if (is_alloc && allocated_size)
7168 *allocated_size = le32_to_cpu(desc.data[1]);
7173 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7175 struct hclge_vport *vport;
7178 for (i = 0; i < hdev->num_alloc_vport; i++) {
7179 vport = &hdev->vport[i];
7180 vport->used_umv_num = 0;
7183 mutex_lock(&hdev->umv_mutex);
7184 hdev->share_umv_size = hdev->priv_umv_size +
7185 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7186 mutex_unlock(&hdev->umv_mutex);
7189 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7191 struct hclge_dev *hdev = vport->back;
7194 mutex_lock(&hdev->umv_mutex);
7195 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7196 hdev->share_umv_size == 0);
7197 mutex_unlock(&hdev->umv_mutex);
7202 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7204 struct hclge_dev *hdev = vport->back;
7206 mutex_lock(&hdev->umv_mutex);
7208 if (vport->used_umv_num > hdev->priv_umv_size)
7209 hdev->share_umv_size++;
7211 if (vport->used_umv_num > 0)
7212 vport->used_umv_num--;
7214 if (vport->used_umv_num >= hdev->priv_umv_size &&
7215 hdev->share_umv_size > 0)
7216 hdev->share_umv_size--;
7217 vport->used_umv_num++;
7219 mutex_unlock(&hdev->umv_mutex);
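/* Illustrative sketch (not driver code): the accounting above in reduced
 * form -- a vport consumes its private quota first and only then dips
 * into the shared pool; freeing returns the entry to the pool first.
 */
#if 0 /* example only */
static void umv_take(u16 *used, u16 priv, u16 *share)
{
	if (*used >= priv && *share > 0)
		(*share)--;	/* private quota exhausted: borrow shared */
	(*used)++;
}

static void umv_free(u16 *used, u16 priv, u16 *share)
{
	if (*used > priv)
		(*share)++;	/* this entry had come from the shared pool */
	if (*used > 0)
		(*used)--;
}
#endif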
7222 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7223 const unsigned char *addr)
7225 struct hclge_vport *vport = hclge_get_vport(handle);
7227 return hclge_add_uc_addr_common(vport, addr);
7230 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7231 const unsigned char *addr)
7233 struct hclge_dev *hdev = vport->back;
7234 struct hclge_mac_vlan_tbl_entry_cmd req;
7235 struct hclge_desc desc;
7236 u16 egress_port = 0;
7239 /* mac addr check */
7240 if (is_zero_ether_addr(addr) ||
7241 is_broadcast_ether_addr(addr) ||
7242 is_multicast_ether_addr(addr)) {
7243 dev_err(&hdev->pdev->dev,
7244 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7245 addr, is_zero_ether_addr(addr),
7246 is_broadcast_ether_addr(addr),
7247 is_multicast_ether_addr(addr));
7251 memset(&req, 0, sizeof(req));
7253 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7254 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7256 req.egress_port = cpu_to_le16(egress_port);
7258 hclge_prepare_mac_addr(&req, addr, false);
7260 /* Look up the mac address in the mac_vlan table, and add
7261 * it if the entry does not exist. Duplicate unicast entries
7262 * are not allowed in the mac vlan table.
7264 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7265 if (ret == -ENOENT) {
7266 if (!hclge_is_umv_space_full(vport)) {
7267 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7269 hclge_update_umv_space(vport, false);
7273 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7274 hdev->priv_umv_size);
7279 /* check if we just hit the duplicate */
7281 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7282 vport->vport_id, addr);
7286 dev_err(&hdev->pdev->dev,
7287 "PF failed to add unicast entry(%pM) in the MAC table\n",
7293 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7294 const unsigned char *addr)
7296 struct hclge_vport *vport = hclge_get_vport(handle);
7298 return hclge_rm_uc_addr_common(vport, addr);
7301 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7302 const unsigned char *addr)
7304 struct hclge_dev *hdev = vport->back;
7305 struct hclge_mac_vlan_tbl_entry_cmd req;
7308 /* mac addr check */
7309 if (is_zero_ether_addr(addr) ||
7310 is_broadcast_ether_addr(addr) ||
7311 is_multicast_ether_addr(addr)) {
7312 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7317 memset(&req, 0, sizeof(req));
7318 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7319 hclge_prepare_mac_addr(&req, addr, false);
7320 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7322 hclge_update_umv_space(vport, true);
7327 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7328 const unsigned char *addr)
7330 struct hclge_vport *vport = hclge_get_vport(handle);
7332 return hclge_add_mc_addr_common(vport, addr);
7335 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7336 const unsigned char *addr)
7338 struct hclge_dev *hdev = vport->back;
7339 struct hclge_mac_vlan_tbl_entry_cmd req;
7340 struct hclge_desc desc[3];
7343 /* mac addr check */
7344 if (!is_multicast_ether_addr(addr)) {
7345 dev_err(&hdev->pdev->dev,
7346 "Add mc mac err! invalid mac:%pM.\n",
7350 memset(&req, 0, sizeof(req));
7351 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7352 hclge_prepare_mac_addr(&req, addr, true);
7353 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7355 /* This mac addr does not exist, add a new entry for it */
7356 memset(desc[0].data, 0, sizeof(desc[0].data));
7357 memset(desc[1].data, 0, sizeof(desc[0].data));
7358 memset(desc[2].data, 0, sizeof(desc[0].data));
7360 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7363 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7365 if (status == -ENOSPC)
7366 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7371 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7372 const unsigned char *addr)
7374 struct hclge_vport *vport = hclge_get_vport(handle);
7376 return hclge_rm_mc_addr_common(vport, addr);
7379 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7380 const unsigned char *addr)
7382 struct hclge_dev *hdev = vport->back;
7383 struct hclge_mac_vlan_tbl_entry_cmd req;
7384 enum hclge_cmd_status status;
7385 struct hclge_desc desc[3];
7387 /* mac addr check */
7388 if (!is_multicast_ether_addr(addr)) {
7389 dev_dbg(&hdev->pdev->dev,
7390 "Remove mc mac err! invalid mac:%pM.\n",
7395 memset(&req, 0, sizeof(req));
7396 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7397 hclge_prepare_mac_addr(&req, addr, true);
7398 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7400 /* This mac addr exists, remove this handle's VFID for it */
7401 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7405 if (hclge_is_all_function_id_zero(desc))
7406 /* All the vfids are zero, so delete this entry */
7407 status = hclge_remove_mac_vlan_tbl(vport, &req);
7409 /* Not all the vfids are zero, update the vfid */
7410 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7413 /* This mac address may be in the mta table, but it cannot be
7414 * deleted here because an mta entry represents an address
7415 * range rather than a specific address. The delete action for
7416 * all entries will take effect in update_mta_status, called by
7417 * hns3_nic_set_rx_mode.
7425 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7426 enum HCLGE_MAC_ADDR_TYPE mac_type)
7428 struct hclge_vport_mac_addr_cfg *mac_cfg;
7429 struct list_head *list;
7431 if (!vport->vport_id)
7434 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7438 mac_cfg->hd_tbl_status = true;
7439 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7441 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7442 &vport->uc_mac_list : &vport->mc_mac_list;
7444 list_add_tail(&mac_cfg->node, list);
7447 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7449 enum HCLGE_MAC_ADDR_TYPE mac_type)
7451 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7452 struct list_head *list;
7453 bool uc_flag, mc_flag;
7455 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7456 &vport->uc_mac_list : &vport->mc_mac_list;
7458 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7459 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7461 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7462 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7463 if (uc_flag && mac_cfg->hd_tbl_status)
7464 hclge_rm_uc_addr_common(vport, mac_addr);
7466 if (mc_flag && mac_cfg->hd_tbl_status)
7467 hclge_rm_mc_addr_common(vport, mac_addr);
7469 list_del(&mac_cfg->node);
7476 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7477 enum HCLGE_MAC_ADDR_TYPE mac_type)
7479 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7480 struct list_head *list;
7482 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7483 &vport->uc_mac_list : &vport->mc_mac_list;
7485 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7486 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7487 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7489 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7490 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7492 mac_cfg->hd_tbl_status = false;
7494 list_del(&mac_cfg->node);
7500 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7502 struct hclge_vport_mac_addr_cfg *mac, *tmp;
7503 struct hclge_vport *vport;
7506 for (i = 0; i < hdev->num_alloc_vport; i++) {
7507 vport = &hdev->vport[i];
7508 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7509 list_del(&mac->node);
7513 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7514 list_del(&mac->node);
7520 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7521 u16 cmdq_resp, u8 resp_code)
7523 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7524 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
7525 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7526 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7531 dev_err(&hdev->pdev->dev,
7532 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7537 switch (resp_code) {
7538 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7539 case HCLGE_ETHERTYPE_ALREADY_ADD:
7542 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7543 dev_err(&hdev->pdev->dev,
7544 "add mac ethertype failed for manager table overflow.\n");
7545 return_status = -EIO;
7547 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7548 dev_err(&hdev->pdev->dev,
7549 "add mac ethertype failed for key conflict.\n");
7550 return_status = -EIO;
7553 dev_err(&hdev->pdev->dev,
7554 "add mac ethertype failed for undefined, code=%u.\n",
7556 return_status = -EIO;
7559 return return_status;
7562 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7565 struct hclge_mac_vlan_tbl_entry_cmd req;
7566 struct hclge_dev *hdev = vport->back;
7567 struct hclge_desc desc;
7568 u16 egress_port = 0;
7571 if (is_zero_ether_addr(mac_addr))
7574 memset(&req, 0, sizeof(req));
7575 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7576 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7577 req.egress_port = cpu_to_le16(egress_port);
7578 hclge_prepare_mac_addr(&req, mac_addr, false);
7580 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7583 vf_idx += HCLGE_VF_VPORT_START_NUM;
7584 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7586 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7592 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7595 struct hclge_vport *vport = hclge_get_vport(handle);
7596 struct hclge_dev *hdev = vport->back;
7598 vport = hclge_get_vf_vport(hdev, vf);
7602 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7603 dev_info(&hdev->pdev->dev,
7604 "Specified MAC(=%pM) is same as before, no change committed!\n",
7609 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7610 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7615 ether_addr_copy(vport->vf_info.mac, mac_addr);
7616 dev_info(&hdev->pdev->dev,
7617 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7620 return hclge_inform_reset_assert_to_vf(vport);
7623 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7624 const struct hclge_mac_mgr_tbl_entry_cmd *req)
7626 struct hclge_desc desc;
7631 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7632 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7634 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7636 dev_err(&hdev->pdev->dev,
7637 "add mac ethertype failed for cmd_send, ret =%d.\n",
7642 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7643 retval = le16_to_cpu(desc.retval);
7645 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7648 static int init_mgr_tbl(struct hclge_dev *hdev)
7653 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7654 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7656 dev_err(&hdev->pdev->dev,
7657 "add mac ethertype failed, ret =%d.\n",
7666 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7668 struct hclge_vport *vport = hclge_get_vport(handle);
7669 struct hclge_dev *hdev = vport->back;
7671 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7674 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7677 const unsigned char *new_addr = (const unsigned char *)p;
7678 struct hclge_vport *vport = hclge_get_vport(handle);
7679 struct hclge_dev *hdev = vport->back;
7682 /* mac addr check */
7683 if (is_zero_ether_addr(new_addr) ||
7684 is_broadcast_ether_addr(new_addr) ||
7685 is_multicast_ether_addr(new_addr)) {
7686 dev_err(&hdev->pdev->dev,
7687 "Change uc mac err! invalid mac:%pM.\n",
7692 if ((!is_first || is_kdump_kernel()) &&
7693 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7694 dev_warn(&hdev->pdev->dev,
7695 "remove old uc mac address fail.\n");
7697 ret = hclge_add_uc_addr(handle, new_addr);
7699 dev_err(&hdev->pdev->dev,
7700 "add uc mac address fail, ret =%d.\n",
7704 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7705 dev_err(&hdev->pdev->dev,
7706 "restore uc mac address fail.\n");
7711 ret = hclge_pause_addr_cfg(hdev, new_addr);
7713 dev_err(&hdev->pdev->dev,
7714 "configure mac pause address fail, ret =%d.\n",
7719 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7724 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7727 struct hclge_vport *vport = hclge_get_vport(handle);
7728 struct hclge_dev *hdev = vport->back;
7730 if (!hdev->hw.mac.phydev)
7733 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7736 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7737 u8 fe_type, bool filter_en, u8 vf_id)
7739 struct hclge_vlan_filter_ctrl_cmd *req;
7740 struct hclge_desc desc;
7743 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7745 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7746 req->vlan_type = vlan_type;
7747 req->vlan_fe = filter_en ? fe_type : 0;
7750 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7752 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7758 #define HCLGE_FILTER_TYPE_VF 0
7759 #define HCLGE_FILTER_TYPE_PORT 1
7760 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7761 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7762 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7763 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7764 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7765 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7766 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7767 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7768 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7770 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7772 struct hclge_vport *vport = hclge_get_vport(handle);
7773 struct hclge_dev *hdev = vport->back;
7775 if (hdev->pdev->revision >= 0x21) {
7776 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7777 HCLGE_FILTER_FE_EGRESS, enable, 0);
7778 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7779 HCLGE_FILTER_FE_INGRESS, enable, 0);
7781 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7782 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7786 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7788 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7791 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7792 bool is_kill, u16 vlan,
7795 struct hclge_vport *vport = &hdev->vport[vfid];
7796 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7797 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7798 struct hclge_desc desc[2];
7803 /* If the vf vlan table is full, the firmware closes the vf vlan filter,
7804 * so it is neither possible nor necessary to add a new vlan id to it.
7805 * If spoof check is enabled and the vf vlan table is full, a new vlan
7806 * must not be added, because tx packets with that vlan id would be dropped.
7808 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7809 if (vport->vf_info.spoofchk && vlan) {
7810 dev_err(&hdev->pdev->dev,
7811 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7817 hclge_cmd_setup_basic_desc(&desc[0],
7818 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7819 hclge_cmd_setup_basic_desc(&desc[1],
7820 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7822 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7824 vf_byte_off = vfid / 8;
7825 vf_byte_val = 1 << (vfid % 8);
7827 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7828 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7830 req0->vlan_id = cpu_to_le16(vlan);
7831 req0->vlan_cfg = is_kill;
7833 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7834 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7836 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7838 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7840 dev_err(&hdev->pdev->dev,
7841 "Send vf vlan command fail, ret =%d.\n",
7847 #define HCLGE_VF_VLAN_NO_ENTRY 2
7848 if (!req0->resp_code || req0->resp_code == 1)
7851 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7852 set_bit(vfid, hdev->vf_vlan_full);
7853 dev_warn(&hdev->pdev->dev,
7854 "vf vlan table is full, vf vlan filter is disabled\n");
7858 dev_err(&hdev->pdev->dev,
7859 "Add vf vlan filter fail, ret =%u.\n",
7862 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7863 if (!req0->resp_code)
7866 /* The vf vlan filter is disabled when the vf vlan table is full,
7867 * so new vlan ids are not added to the vf vlan table. Just
7868 * return 0 without a warning, to avoid massive verbose logs
7869 * at unload time.
7871 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7874 dev_err(&hdev->pdev->dev,
7875 "Kill vf vlan filter fail, ret =%u.\n",
7882 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7883 u16 vlan_id, bool is_kill)
7885 struct hclge_vlan_filter_pf_cfg_cmd *req;
7886 struct hclge_desc desc;
7887 u8 vlan_offset_byte_val;
7888 u8 vlan_offset_byte;
7892 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7894 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7895 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7896 HCLGE_VLAN_BYTE_SIZE;
7897 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7899 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7900 req->vlan_offset = vlan_offset_160;
7901 req->vlan_cfg = is_kill;
7902 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7904 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7906 dev_err(&hdev->pdev->dev,
7907 "port vlan command, send fail, ret =%d.\n", ret);
7911 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7912 u16 vport_id, u16 vlan_id,
7915 u16 vport_idx, vport_num = 0;
7918 if (is_kill && !vlan_id)
7921 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7924 dev_err(&hdev->pdev->dev,
7925 "Set %u vport vlan filter config fail, ret =%d.\n",
7930 /* vlan 0 may be added twice when 8021q module is enabled */
7931 if (!is_kill && !vlan_id &&
7932 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7935 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7936 dev_err(&hdev->pdev->dev,
7937 "Add port vlan failed, vport %u is already in vlan %u\n",
7943 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7944 dev_err(&hdev->pdev->dev,
7945 "Delete port vlan failed, vport %u is not in vlan %u\n",
7950 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7953 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7954 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7960 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7962 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7963 struct hclge_vport_vtag_tx_cfg_cmd *req;
7964 struct hclge_dev *hdev = vport->back;
7965 struct hclge_desc desc;
7969 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7971 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7972 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7973 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7974 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7975 vcfg->accept_tag1 ? 1 : 0);
7976 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7977 vcfg->accept_untag1 ? 1 : 0);
7978 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7979 vcfg->accept_tag2 ? 1 : 0);
7980 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7981 vcfg->accept_untag2 ? 1 : 0);
7982 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7983 vcfg->insert_tag1_en ? 1 : 0);
7984 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7985 vcfg->insert_tag2_en ? 1 : 0);
7986 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7988 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7989 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7990 HCLGE_VF_NUM_PER_BYTE;
7991 req->vf_bitmap[bmap_index] =
7992 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7994 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7996 dev_err(&hdev->pdev->dev,
7997 "Send port txvlan cfg command fail, ret =%d\n",
8003 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8005 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8006 struct hclge_vport_vtag_rx_cfg_cmd *req;
8007 struct hclge_dev *hdev = vport->back;
8008 struct hclge_desc desc;
8012 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8014 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8015 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8016 vcfg->strip_tag1_en ? 1 : 0);
8017 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8018 vcfg->strip_tag2_en ? 1 : 0);
8019 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8020 vcfg->vlan1_vlan_prionly ? 1 : 0);
8021 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8022 vcfg->vlan2_vlan_prionly ? 1 : 0);
8024 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8025 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8026 HCLGE_VF_NUM_PER_BYTE;
8027 req->vf_bitmap[bmap_index] =
8028 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8030 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8032 dev_err(&hdev->pdev->dev,
8033 "Send port rxvlan cfg command fail, ret =%d\n",
8039 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8040 u16 port_base_vlan_state,
8045 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8046 vport->txvlan_cfg.accept_tag1 = true;
8047 vport->txvlan_cfg.insert_tag1_en = false;
8048 vport->txvlan_cfg.default_tag1 = 0;
8050 vport->txvlan_cfg.accept_tag1 = false;
8051 vport->txvlan_cfg.insert_tag1_en = true;
8052 vport->txvlan_cfg.default_tag1 = vlan_tag;
8055 vport->txvlan_cfg.accept_untag1 = true;
8057 /* accept_tag2 and accept_untag2 are not supported on
8058 * pdev revision 0x20; newer revisions support them, and
8059 * these two fields cannot be configured by the user.
8061 vport->txvlan_cfg.accept_tag2 = true;
8062 vport->txvlan_cfg.accept_untag2 = true;
8063 vport->txvlan_cfg.insert_tag2_en = false;
8064 vport->txvlan_cfg.default_tag2 = 0;
8066 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8067 vport->rxvlan_cfg.strip_tag1_en = false;
8068 vport->rxvlan_cfg.strip_tag2_en =
8069 vport->rxvlan_cfg.rx_vlan_offload_en;
8071 vport->rxvlan_cfg.strip_tag1_en =
8072 vport->rxvlan_cfg.rx_vlan_offload_en;
8073 vport->rxvlan_cfg.strip_tag2_en = true;
8075 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8076 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8078 ret = hclge_set_vlan_tx_offload_cfg(vport);
8082 return hclge_set_vlan_rx_offload_cfg(vport);
8085 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8087 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8088 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8089 struct hclge_desc desc;
8092 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8093 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8094 rx_req->ot_fst_vlan_type =
8095 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8096 rx_req->ot_sec_vlan_type =
8097 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8098 rx_req->in_fst_vlan_type =
8099 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8100 rx_req->in_sec_vlan_type =
8101 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8103 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8105 dev_err(&hdev->pdev->dev,
8106 "Send rxvlan protocol type command fail, ret =%d\n",
8111 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8113 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8114 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8115 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8117 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8119 dev_err(&hdev->pdev->dev,
8120 "Send txvlan protocol type command fail, ret =%d\n",
8126 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8128 #define HCLGE_DEF_VLAN_TYPE 0x8100
8130 struct hnae3_handle *handle = &hdev->vport[0].nic;
8131 struct hclge_vport *vport;
8135 if (hdev->pdev->revision >= 0x21) {
8136 /* for revision 0x21, vf vlan filter is per function */
8137 for (i = 0; i < hdev->num_alloc_vport; i++) {
8138 vport = &hdev->vport[i];
8139 ret = hclge_set_vlan_filter_ctrl(hdev,
8140 HCLGE_FILTER_TYPE_VF,
8141 HCLGE_FILTER_FE_EGRESS,
8148 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8149 HCLGE_FILTER_FE_INGRESS, true,
8154 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8155 HCLGE_FILTER_FE_EGRESS_V1_B,
8161 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8163 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8164 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8165 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8166 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8167 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8168 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8170 ret = hclge_set_vlan_protocol_type(hdev);
8174 for (i = 0; i < hdev->num_alloc_vport; i++) {
8177 vport = &hdev->vport[i];
8178 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8180 ret = hclge_vlan_offload_cfg(vport,
8181 vport->port_base_vlan_cfg.state,
8187 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8190 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8193 struct hclge_vport_vlan_cfg *vlan;
8195 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8199 vlan->hd_tbl_status = writen_to_tbl;
8200 vlan->vlan_id = vlan_id;
8202 list_add_tail(&vlan->node, &vport->vlan_list);
8205 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8207 struct hclge_vport_vlan_cfg *vlan, *tmp;
8208 struct hclge_dev *hdev = vport->back;
8211 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8212 if (!vlan->hd_tbl_status) {
8213 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8215 vlan->vlan_id, false);
8217 dev_err(&hdev->pdev->dev,
8218 "restore vport vlan list failed, ret=%d\n",
8223 vlan->hd_tbl_status = true;
8229 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8232 struct hclge_vport_vlan_cfg *vlan, *tmp;
8233 struct hclge_dev *hdev = vport->back;
8235 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8236 if (vlan->vlan_id == vlan_id) {
8237 if (is_write_tbl && vlan->hd_tbl_status)
8238 hclge_set_vlan_filter_hw(hdev,
8244 list_del(&vlan->node);
8251 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8253 struct hclge_vport_vlan_cfg *vlan, *tmp;
8254 struct hclge_dev *hdev = vport->back;
8256 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8257 if (vlan->hd_tbl_status)
8258 hclge_set_vlan_filter_hw(hdev,
8264 vlan->hd_tbl_status = false;
8266 list_del(&vlan->node);
8272 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8274 struct hclge_vport_vlan_cfg *vlan, *tmp;
8275 struct hclge_vport *vport;
8278 for (i = 0; i < hdev->num_alloc_vport; i++) {
8279 vport = &hdev->vport[i];
8280 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8281 list_del(&vlan->node);
8287 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8289 struct hclge_vport *vport = hclge_get_vport(handle);
8290 struct hclge_vport_vlan_cfg *vlan, *tmp;
8291 struct hclge_dev *hdev = vport->back;
8296 for (i = 0; i < hdev->num_alloc_vport; i++) {
8297 vport = &hdev->vport[i];
8298 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8299 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8300 state = vport->port_base_vlan_cfg.state;
8302 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8303 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8304 vport->vport_id, vlan_id,
8309 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8312 if (!vlan->hd_tbl_status)
8314 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8316 vlan->vlan_id, false);
8323 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8325 struct hclge_vport *vport = hclge_get_vport(handle);
8327 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8328 vport->rxvlan_cfg.strip_tag1_en = false;
8329 vport->rxvlan_cfg.strip_tag2_en = enable;
8331 vport->rxvlan_cfg.strip_tag1_en = enable;
8332 vport->rxvlan_cfg.strip_tag2_en = true;
8334 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8335 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8336 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8338 return hclge_set_vlan_rx_offload_cfg(vport);
8341 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8342 u16 port_base_vlan_state,
8343 struct hclge_vlan_info *new_info,
8344 struct hclge_vlan_info *old_info)
8346 struct hclge_dev *hdev = vport->back;
8349 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8350 hclge_rm_vport_all_vlan_table(vport, false);
8351 return hclge_set_vlan_filter_hw(hdev,
8352 htons(new_info->vlan_proto),
8358 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8359 vport->vport_id, old_info->vlan_tag,
8364 return hclge_add_vport_all_vlan_table(vport);
8367 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8368 struct hclge_vlan_info *vlan_info)
8370 struct hnae3_handle *nic = &vport->nic;
8371 struct hclge_vlan_info *old_vlan_info;
8372 struct hclge_dev *hdev = vport->back;
8375 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8377 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8381 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8382 /* add new VLAN tag */
8383 ret = hclge_set_vlan_filter_hw(hdev,
8384 htons(vlan_info->vlan_proto),
8386 vlan_info->vlan_tag,
8391 /* remove old VLAN tag */
8392 ret = hclge_set_vlan_filter_hw(hdev,
8393 htons(old_vlan_info->vlan_proto),
8395 old_vlan_info->vlan_tag,
8403 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8408 /* update state only when disabling/enabling port-based VLAN */
8409 vport->port_base_vlan_cfg.state = state;
8410 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8411 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8413 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8416 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8417 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8418 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8423 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8424 enum hnae3_port_base_vlan_state state,
8427 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8429 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8431 return HNAE3_PORT_BASE_VLAN_ENABLE;
8434 return HNAE3_PORT_BASE_VLAN_DISABLE;
8435 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8436 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8438 return HNAE3_PORT_BASE_VLAN_MODIFY;
8442 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8443 u16 vlan, u8 qos, __be16 proto)
8445 struct hclge_vport *vport = hclge_get_vport(handle);
8446 struct hclge_dev *hdev = vport->back;
8447 struct hclge_vlan_info vlan_info;
8451 if (hdev->pdev->revision == 0x20)
8454 vport = hclge_get_vf_vport(hdev, vfid);
8458 /* qos is a 3-bit value, so it cannot be bigger than 7 */
8459 if (vlan > VLAN_N_VID - 1 || qos > 7)
8461 if (proto != htons(ETH_P_8021Q))
8462 return -EPROTONOSUPPORT;
8464 state = hclge_get_port_base_vlan_state(vport,
8465 vport->port_base_vlan_cfg.state,
8467 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8470 vlan_info.vlan_tag = vlan;
8471 vlan_info.qos = qos;
8472 vlan_info.vlan_proto = ntohs(proto);
8474 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8475 return hclge_update_port_base_vlan_cfg(vport, state,
8478 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8479 vport->vport_id, state,
8486 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8487 u16 vlan_id, bool is_kill)
8489 struct hclge_vport *vport = hclge_get_vport(handle);
8490 struct hclge_dev *hdev = vport->back;
8491 bool writen_to_tbl = false;
8494 /* When the device is resetting, the firmware is unable to handle the
8495 * mailbox. Just record the vlan id, and remove it after the reset finishes.
8498 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8499 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8503 /* When port-based vlan is enabled, we use the port-based vlan as the
8504 * vlan filter entry. In this case, we don't update the vlan filter table
8505 * when the user adds a new vlan or removes an existing one, just the vport
8506 * vlan list. The vlan ids in the vlan list are not written to the vlan
8507 * filter table until port-based vlan is disabled.
8509 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8510 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8512 writen_to_tbl = true;
8517 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8519 hclge_add_vport_vlan_table(vport, vlan_id,
8521 } else if (is_kill) {
8522 /* when removing the hw vlan filter failed, record the vlan id,
8523 * and try to remove it from hw later, to be consistent with the stack.
8526 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8531 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8533 #define HCLGE_MAX_SYNC_COUNT 60
8535 int i, ret, sync_cnt = 0;
8538 /* start from vport 1, since the PF is always alive */
8539 for (i = 0; i < hdev->num_alloc_vport; i++) {
8540 struct hclge_vport *vport = &hdev->vport[i];
8542 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8544 while (vlan_id != VLAN_N_VID) {
8545 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8546 vport->vport_id, vlan_id,
8548 if (ret && ret != -EINVAL)
8551 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8552 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8555 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8558 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8564 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8566 struct hclge_config_max_frm_size_cmd *req;
8567 struct hclge_desc desc;
8569 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8571 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8572 req->max_frm_size = cpu_to_le16(new_mps);
8573 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8575 return hclge_cmd_send(&hdev->hw, &desc, 1);
8578 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8580 struct hclge_vport *vport = hclge_get_vport(handle);
8582 return hclge_set_vport_mtu(vport, new_mtu);
8585 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8587 struct hclge_dev *hdev = vport->back;
8588 int i, max_frm_size, ret;
8590 /* HW supports 2 layers of vlan */
8591 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8592 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8593 max_frm_size > HCLGE_MAC_MAX_FRAME)
8596 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8597 mutex_lock(&hdev->vport_lock);
8598 /* VF's mps must fit within hdev->mps */
8599 if (vport->vport_id && max_frm_size > hdev->mps) {
8600 mutex_unlock(&hdev->vport_lock);
8602 } else if (vport->vport_id) {
8603 vport->mps = max_frm_size;
8604 mutex_unlock(&hdev->vport_lock);
8608 /* PF's mps must be no smaller than any VF's mps */
8609 for (i = 1; i < hdev->num_alloc_vport; i++)
8610 if (max_frm_size < hdev->vport[i].mps) {
8611 mutex_unlock(&hdev->vport_lock);
8615 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8617 ret = hclge_set_mac_mtu(hdev, max_frm_size);
8619 dev_err(&hdev->pdev->dev,
8620 "Change mtu fail, ret =%d\n", ret);
8624 hdev->mps = max_frm_size;
8625 vport->mps = max_frm_size;
8627 ret = hclge_buffer_alloc(hdev);
8629 dev_err(&hdev->pdev->dev,
8630 "Allocate buffer fail, ret =%d\n", ret);
8633 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8634 mutex_unlock(&hdev->vport_lock);
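/* Worked example (not driver code): the hardware limit applies to the
 * whole frame, so the MTU is padded with the Ethernet header, FCS and
 * two VLAN tags: 1500 + 14 + 4 + 2 * 4 = 1526 bytes.
 */
#if 0 /* example only */
static int mtu_to_max_frame_size(int mtu)
{
	return mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
}
#endif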
8638 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8641 struct hclge_reset_tqp_queue_cmd *req;
8642 struct hclge_desc desc;
8645 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8647 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8648 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8650 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8652 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8654 dev_err(&hdev->pdev->dev,
8655 "Send tqp reset cmd error, status =%d\n", ret);
8662 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8664 struct hclge_reset_tqp_queue_cmd *req;
8665 struct hclge_desc desc;
8668 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8670 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8671 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8673 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8675 dev_err(&hdev->pdev->dev,
8676 "Get reset status error, status =%d\n", ret);
8680 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8683 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8685 struct hnae3_queue *queue;
8686 struct hclge_tqp *tqp;
8688 queue = handle->kinfo.tqp[queue_id];
8689 tqp = container_of(queue, struct hclge_tqp, q);
8694 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8696 struct hclge_vport *vport = hclge_get_vport(handle);
8697 struct hclge_dev *hdev = vport->back;
8698 int reset_try_times = 0;
8703 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8705 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8707 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8711 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8713 dev_err(&hdev->pdev->dev,
8714 "Send reset tqp cmd fail, ret = %d\n", ret);
8718 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8719 reset_status = hclge_get_reset_status(hdev, queue_gid);
8723 /* Wait for tqp hw reset */
8724 usleep_range(1000, 1200);
8727 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8728 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8732 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8734 dev_err(&hdev->pdev->dev,
8735 "Deassert the soft reset fail, ret = %d\n", ret);
8740 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8742 struct hclge_dev *hdev = vport->back;
8743 int reset_try_times = 0;
8748 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8750 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8752 dev_warn(&hdev->pdev->dev,
8753 "Send reset tqp cmd fail, ret = %d\n", ret);
8757 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8758 reset_status = hclge_get_reset_status(hdev, queue_gid);
8762 /* Wait for tqp hw reset */
8763 usleep_range(1000, 1200);
8766 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8767 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8771 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8773 dev_warn(&hdev->pdev->dev,
8774 "Deassert the soft reset fail, ret = %d\n", ret);
8777 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8779 struct hclge_vport *vport = hclge_get_vport(handle);
8780 struct hclge_dev *hdev = vport->back;
8782 return hdev->fw_version;
8785 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8787 struct phy_device *phydev = hdev->hw.mac.phydev;
8792 phy_set_asym_pause(phydev, rx_en, tx_en);
8795 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8799 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8802 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8804 dev_err(&hdev->pdev->dev,
8805 "configure pauseparam error, ret = %d.\n", ret);
8810 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8812 struct phy_device *phydev = hdev->hw.mac.phydev;
8813 u16 remote_advertising = 0;
8814 u16 local_advertising;
8815 u32 rx_pause, tx_pause;
8818 if (!phydev->link || !phydev->autoneg)
8821 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8824 remote_advertising = LPA_PAUSE_CAP;
8826 if (phydev->asym_pause)
8827 remote_advertising |= LPA_PAUSE_ASYM;
8829 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8830 remote_advertising);
8831 tx_pause = flowctl & FLOW_CTRL_TX;
8832 rx_pause = flowctl & FLOW_CTRL_RX;
8834 if (phydev->duplex == HCLGE_MAC_HALF) {
8839 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
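/* Illustrative sketch (not driver code): the IEEE 802.3 Annex 28B pause
 * resolution performed by mii_resolve_flowctrl_fdx(). Symmetric pause on
 * both sides yields TX | RX; asymmetric advertisements can resolve to
 * TX-only or RX-only.
 */
#if 0 /* example only */
static u8 resolve_pause(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
		if (lcladv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_RX;
		else if (rmtadv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
#endif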
8842 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8843 u32 *rx_en, u32 *tx_en)
8845 struct hclge_vport *vport = hclge_get_vport(handle);
8846 struct hclge_dev *hdev = vport->back;
8847 struct phy_device *phydev = hdev->hw.mac.phydev;
8849 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8851 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8857 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8860 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8863 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8872 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8873 u32 rx_en, u32 tx_en)
8876 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8877 else if (rx_en && !tx_en)
8878 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8879 else if (!rx_en && tx_en)
8880 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8882 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8884 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8887 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8888 u32 rx_en, u32 tx_en)
8890 struct hclge_vport *vport = hclge_get_vport(handle);
8891 struct hclge_dev *hdev = vport->back;
8892 struct phy_device *phydev = hdev->hw.mac.phydev;
8896 fc_autoneg = hclge_get_autoneg(handle);
8897 if (auto_neg != fc_autoneg) {
8898 dev_info(&hdev->pdev->dev,
8899 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8904 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8905 dev_info(&hdev->pdev->dev,
8906 "Priority flow control enabled. Cannot set link flow control.\n");
8910 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8912 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8915 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8918 return phy_start_aneg(phydev);
8923 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8924 u8 *auto_neg, u32 *speed, u8 *duplex)
8926 struct hclge_vport *vport = hclge_get_vport(handle);
8927 struct hclge_dev *hdev = vport->back;
8930 *speed = hdev->hw.mac.speed;
8932 *duplex = hdev->hw.mac.duplex;
8934 *auto_neg = hdev->hw.mac.autoneg;
8937 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8940 struct hclge_vport *vport = hclge_get_vport(handle);
8941 struct hclge_dev *hdev = vport->back;
8944 *media_type = hdev->hw.mac.media_type;
8947 *module_type = hdev->hw.mac.module_type;
8950 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8951 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8953 struct hclge_vport *vport = hclge_get_vport(handle);
8954 struct hclge_dev *hdev = vport->back;
8955 struct phy_device *phydev = hdev->hw.mac.phydev;
8956 int mdix_ctrl, mdix, is_resolved;
8957 unsigned int retval;
8960 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8961 *tp_mdix = ETH_TP_MDI_INVALID;
8965 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8967 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8968 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8969 HCLGE_PHY_MDIX_CTRL_S);
8971 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8972 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8973 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8975 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8977 switch (mdix_ctrl) {
8979 *tp_mdix_ctrl = ETH_TP_MDI;
8982 *tp_mdix_ctrl = ETH_TP_MDI_X;
8985 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8988 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8993 *tp_mdix = ETH_TP_MDI_INVALID;
8995 *tp_mdix = ETH_TP_MDI_X;
8997 *tp_mdix = ETH_TP_MDI;
9000 static void hclge_info_show(struct hclge_dev *hdev)
9002 struct device *dev = &hdev->pdev->dev;
9004 dev_info(dev, "PF info begin:\n");
9006 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9007 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9008 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9009 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9010 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9011 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9012 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9013 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9014 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9015 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9016 dev_info(dev, "This is %s PF\n",
9017 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9018 dev_info(dev, "DCB %s\n",
9019 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9020 dev_info(dev, "MQPRIO %s\n",
9021 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9023 dev_info(dev, "PF info end.\n");
static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.reset_cnt;
	int ret;

	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_nic_err;
	}

	/* Enable nic hw error interrupts */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto init_nic_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return ret;

init_nic_err:
	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	client->ops->uninit_instance(&vport->nic, 0);

	return ret;
}

static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->roce.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}

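/* hnae3 entry point for client registration: walk the base vport plus any
 * VMDq vports and bring up the KNIC and/or RoCE instance on each,
 * unwinding the client pointers if any step fails.
 */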
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = hclge_init_nic_client_instance(ae_dev, vport);
			if (ret)
				goto clear_nic;

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}

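/* PCI bring-up for the PF: enable the device, prefer a 64-bit DMA mask
 * with a 32-bit fallback, map BAR2 for the command/doorbell register
 * space, and record how many VFs the part can expose via SR-IOV.
 */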
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
}

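/* Quiesce the function ahead of an FLR. If the prepare step fails while a
 * reset is still pending, release the reset semaphore, wait, and retry up
 * to HCLGE_FLR_RETRY_CNT times before carrying on with the FLR anyway.
 */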
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_RETRY_WAIT_MS	500
#define HCLGE_FLR_RETRY_CNT	5

	struct hclge_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

retry:
	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	hdev->reset_type = HNAE3_FLR_RESET;
	ret = hclge_reset_prepare(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
			ret);
		if (hdev->reset_pending ||
		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
			dev_err(&hdev->pdev->dev,
				"reset_pending:0x%lx, retry_cnt:%d\n",
				hdev->reset_pending, retry_cnt);
			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
			up(&hdev->reset_sem);
			msleep(HCLGE_FLR_RETRY_WAIT_MS);
			goto retry;
		}
	}

	/* disable misc vector before FLR done */
	hclge_enable_vector(&hdev->misc_vector, false);
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
	hdev->rst_stats.flr_rst_cnt++;
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	int ret;

	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_reset_rebuild(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vf(%u) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}

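/* Main PF probe path. Ordering matters: PCI and the firmware command
 * channel come up first, then interrupt vectors and queue/vport
 * resources, then MAC/VLAN/TM/RSS configuration, and finally the service
 * task and the misc (vector 0) interrupt that drives reset and mailbox
 * handling.
 */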
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* HW supports two layers of VLAN tags */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	spin_lock_init(&hdev->fd_rule_lock);
	sema_init(&hdev->reset_sem, 1);

	ret = hclge_pci_init(hdev);
	if (ret)
		goto out;

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret)
		goto err_pci_uninit;

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret)
		goto err_msi_uninit;

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	ret = hclge_map_tqp(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret)
			goto err_msi_irq_uninit;
	}

	ret = hclge_init_umv_space(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);

	/* Setup affinity after service timer setup because add_timer_on
	 * is called in affinity notify.
	 */
	hclge_misc_affinity_setup(hdev);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors that have already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* Request a delayed reset for the error recovery, because an
	 * immediate global reset on this PF would affect the pending
	 * initialization of other PFs.
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	hclge_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
}

static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_config_switch_param(hdev, vf, enable,
					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
}

static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					  HCLGE_FILTER_FE_NIC_INGRESS_B,
					  enable, vf);
}

static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
{
	int ret;

	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set vf %d mac spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);
		return ret;
	}

	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set vf %d vlan spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);

	return ret;
}

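/* ndo_set_vf_spoofchk handler. Spoof checking needs hardware support that
 * the 0x20 revision lacks. The warnings below flag the case where the
 * VF's VLAN or MAC table is already full, since packets from entries the
 * tables could not hold would then fail the spoof check on transmit.
 */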
static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
				 bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_spoofchk = enable ? 1 : 0;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.spoofchk == new_spoofchk)
		return 0;

	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enabling spoof check may cause its packet send to fail\n",
			 vf);
	else if (enable && hclge_is_umv_space_full(vport))
		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enabling spoof check may cause its packet send to fail\n",
			 vf);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
	if (ret)
		return ret;

	vport->vf_info.spoofchk = new_spoofchk;
	return 0;
}

static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	int i;

	if (hdev->pdev->revision == 0x20)
		return 0;

	/* resume the vf spoof check state after reset */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
					       vport->vf_info.spoofchk);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_trusted = enable ? 1 : 0;
	bool en_bc_pmc;
	int ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.trusted == new_trusted)
		return 0;

	/* Disable promisc mode for VF if it is not trusted any more. */
	if (!enable && vport->vf_info.promisc_enable) {
		en_bc_pmc = hdev->pdev->revision != 0x20;
		ret = hclge_set_vport_promisc_mode(vport, false, false,
						   en_bc_pmc);
		if (ret)
			return ret;
		vport->vf_info.promisc_enable = 0;
		hclge_inform_vf_promisc_info(vport);
	}

	vport->vf_info.trusted = new_trusted;

	return 0;
}

static void hclge_reset_vf_rate(struct hclge_dev *hdev)
{
	int ret;
	int vf;

	/* reset vf rate to default value */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		struct hclge_vport *vport = &hdev->vport[vf];

		vport->vf_info.max_tx_rate = 0;
		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"vf%d failed to reset to default, ret=%d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}

static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
				     int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate != 0 ||
	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
		dev_err(&hdev->pdev->dev,
			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
		return -EINVAL;
	}

	return 0;
}

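/* ndo_set_vf_rate handler; only a max TX rate is supported, so
 * min_tx_rate must be 0. From user space this is reached via iproute2,
 * e.g. (hypothetical device name, rate in Mbit/s):
 *
 *	ip link set dev eth0 vf 0 max_tx_rate 1000
 */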
static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
			     int min_tx_rate, int max_tx_rate, bool force)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
	if (ret)
		return ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
		return 0;

	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
	if (ret)
		return ret;

	vport->vf_info.max_tx_rate = max_tx_rate;

	return 0;
}

static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* zero means max rate; after reset the firmware has already
		 * set it to max rate, so just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret=%d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Log and clear the hw errors that have already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);
	ret = hclge_reset_vport_spoofchk(hdev);
	if (ret)
		return ret;

	ret = hclge_resume_vf_rate(hdev);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_misc_affinity_teardown(hdev);
	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}

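/* Handle a channel-count change (ethtool -L). The new queue count feeds
 * the TM vport mapping, the RSS TC mode is recomputed from the resulting
 * rss_size, and the RSS indirection table is rebuilt unless the user has
 * configured one explicitly. Example (hypothetical device name):
 *
 *	ethtool -L eth0 combined 8
 */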
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the RSS indirection table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

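/* Layout note for the two query helpers below: firmware returns the dump
 * in a chain of descriptors. Only the first descriptor reserves
 * *_DESC_NODATA_LEN head words for command metadata, so the first
 * iteration reads DATANUM minus that head count, while follow-on
 * descriptors are consumed as raw DATANUM-word buffers.
 */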
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
#define BD_LIST_MAX_NUM		30

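/* Everything from here to hclge_get_regs() assembles the ethtool register
 * dump in REG_LEN_PER_LINE lines: each block of values is padded to a
 * line boundary with SEPARATOR_VALUE words, and the length math below
 * reserves REG_SEPARATOR_LINE extra per block, so user space can split
 * the blob back into its sections.
 */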
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	/* prepare 4 commands to query DFX BD number */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, 4);
}

static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}

static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}

static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}

static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, data_len, bd_num, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	data_len_per_desc = sizeof_field(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

	return ret;
}

static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	struct hclge_desc *desc_src;
	u32 *reg = data;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src)
		return -ENOMEM;

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
	return ret;
}

static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	u32 data_num_sum;
	u32 *reg = data;

	/* fetch per-PF register values from the PF PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}

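/* ethtool register-dump support (ethtool -d). hclge_get_regs_len() sizes
 * the buffer from the fixed address lists, the per-queue and per-vector
 * blocks, the firmware-reported 32/64-bit register counts and the DFX
 * registers; hclge_get_regs() then fills it in the same order.
 */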
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
	int regs_lines_32_bit, regs_lines_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}

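/* Locate-LED control for "ethtool -p": ETHTOOL_ID_ACTIVE maps to
 * HCLGE_LED_ON and ETHTOOL_ID_INACTIVE back to HCLGE_LED_OFF, e.g. (with
 * a hypothetical device name, blinking for 5 seconds):
 *
 *	ethtool -p eth0 5
 */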
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);