/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
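/* Together these macros form a small offset-driven stats reader: the
 * *_STATS_FIELD_OFF() macros record where a counter lives inside its stats
 * struct, and HCLGE_STATS_READ() fetches a u64 from that byte offset.  As an
 * illustrative sketch (not a call that appears in this driver), reading one
 * MAC counter out of hdev->hw_stats would look like:
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * hclge_comm_get_stats() below walks whole tables of such offsets this way.
 */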
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_update_led_status(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac    Loopback test",
	"Serdes Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};
static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
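/* Note on the accumulation loop above: the firmware packs the 64-bit
 * counters across HCLGE_64_BIT_CMD_NUM descriptors.  Descriptor 0 keeps its
 * normal command header, so only its data area carries payload
 * (HCLGE_64_BIT_RTN_DATANUM - 1 words), while each following descriptor is
 * reused in its entirety as raw payload, which is why it is cast from
 * &desc[i] and contributes the full HCLGE_64_BIT_RTN_DATANUM words.
 */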
static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}
static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev)
{
	struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats;
	struct hclge_desc desc;
	__le64 *desc_data;
	int ret;

	/* For a fiber port we need to query the total rx/tx packet
	 * statistics, which are used for data transfer checking.
	 */
	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return 0;

	if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC total pkt stats fail, ret = %d\n", ret);
		return ret;
	}

	desc_data = (__le64 *)(&desc.data[0]);
	mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++);
	mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data);

	return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
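/* The MAC counters use the same packing scheme as the 64-bit statistics
 * above: descriptor 0 contributes only HCLGE_RTN_DATA_NUM - 2 payload words
 * from its data area, while every later descriptor is consumed whole
 * (HCLGE_RTN_DATA_NUM words) by casting from &desc[i].
 */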
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return (u8 *)buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
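/* The HCLGE_STATE_STATISTICS_UPDATING bit makes the update above
 * self-serializing: test_and_set_bit() lets a racing caller bail out
 * immediately instead of issuing duplicate firmware queries, and
 * hclge_mac_get_traffic_stats() also backs off while the bit is held.
 */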
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
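/* The case values above are the firmware's speed encoding and are the exact
 * inverse of the mapping programmed by hclge_cfg_mac_speed_dup() further
 * below: 0 -> 1G, 1 -> 10G, 2 -> 25G, 3 -> 40G, 4 -> 50G, 5 -> 100G,
 * 6 -> 10M, 7 -> 100M.
 */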
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_RSS_SIZE_M,
					   HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_M,
					    HCLGE_CFG_SPEED_ABILITY_S);
}
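/* The MAC address above arrives split across two config words: param[2]
 * carries the low 32 bits and a field of param[3] the high 16 bits.  The
 * split shift ((high << 31) << 1) is equivalent to high << 32 but stays
 * well-defined even if the operand were ever narrowed to 32 bits, and the
 * byte loop then unpacks the assembled 48-bit value LSB-first into
 * cfg->mac_addr[].
 */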
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The length is expressed in units of 4 bytes when sent to
		 * hardware.
		 */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
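/* Note that both writes above use the HCLGE_TSO_MSS_MIN_M/_S mask pair;
 * presumably the min and max MSS fields share the same bit layout within
 * their respective __le16 words, so one mask serves both.
 */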
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to the default queue if the TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}
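/* A worked example of the sizing above (hypothetical numbers): with
 * num_tqps = 16, four enabled TCs and rss_size_max = 16, kinfo->num_tc = 4,
 * kinfo->rss_size = min(16, 16 / 4) = 4 and kinfo->num_tqps = 4 * 4 = 16,
 * so TC i owns the contiguous queue range starting at tqp_offset = i * 4
 * with tqp_count = 4.
 */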
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* The TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}
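/* Illustrative numbers for the check above: with mps = 256, tc_num = 4 and
 * pfc_enable_num = 2, shared_buf_tc = 2 * 256 + 2 * 256 / 2 + 256 = 1024
 * bytes, shared_buf_min = 2 * 256 + HCLGE_DEFAULT_DV, and the standard
 * requirement is the larger of the two.  Only when rx_all exceeds the
 * private allocation plus that standard does the remainder become the
 * shared buffer, with per-TC thresholds of low = mps, high = 2 * mps for
 * PFC-enabled TCs and low = 0, high = mps otherwise.
 */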
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
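/* HCLGE_PRIV_ENABLE() collapses a waterline value into its enable flag: any
 * non-zero threshold yields 1, which the callers below shift to
 * HCLGE_RX_PRIV_EN_B so that configuring a threshold implicitly turns the
 * corresponding enable bit on.
 */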
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
			    HCLGE_RX_PRIV_EN_B);

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
			    HCLGE_RX_PRIV_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				HCLGE_ROCE_VECTOR_OFFSET;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
		mac->duplex = (u8)duplex;
	else
		mac->duplex = HCLGE_MAC_FULL;

	mac->speed = speed;
}
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		     1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	hclge_check_speed_dup(hdev, duplex, speed);

	return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				   HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);
		return -EIO;
	}

	return 0;
}
2251 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2253 struct hclge_config_auto_neg_cmd *req;
2254 struct hclge_desc desc;
2258 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2260 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2261 hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2262 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2264 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2266 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2274 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2276 struct hclge_vport *vport = hclge_get_vport(handle);
2277 struct hclge_dev *hdev = vport->back;
2279 return hclge_set_autoneg_en(hdev, enable);
2282 static int hclge_get_autoneg(struct hnae3_handle *handle)
2284 struct hclge_vport *vport = hclge_get_vport(handle);
2285 struct hclge_dev *hdev = vport->back;
2286 struct phy_device *phydev = hdev->hw.mac.phydev;
2289 return phydev->autoneg;
2291 return hdev->hw.mac.autoneg;
2294 static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
2298 struct hclge_mac_vlan_mask_entry_cmd *req;
2299 struct hclge_desc desc;
2302 req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
2303 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
2305 hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
2307 ether_addr_copy(req->mac_mask, mac_mask);
2309 status = hclge_cmd_send(&hdev->hw, &desc, 1);
2311 dev_err(&hdev->pdev->dev,
2312 "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
2318 static int hclge_mac_init(struct hclge_dev *hdev)
2320 struct hnae3_handle *handle = &hdev->vport[0].nic;
2321 struct net_device *netdev = handle->kinfo.netdev;
2322 struct hclge_mac *mac = &hdev->hw.mac;
2323 u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
2327 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2329 dev_err(&hdev->pdev->dev,
2330 "Config mac speed dup fail ret=%d\n", ret);
2336 /* Initialize the MTA table work mode */
2337 hdev->accept_mta_mc = true;
2338 hdev->enable_mta = true;
2339 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
2341 ret = hclge_set_mta_filter_mode(hdev,
2342 hdev->mta_mac_sel_type,
2345 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2350 ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
2352 dev_err(&hdev->pdev->dev,
2353 "set mta filter mode fail ret=%d\n", ret);
2357 ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
2359 dev_err(&hdev->pdev->dev,
2360 "set default mac_vlan_mask fail ret=%d\n", ret);
2369 ret = hclge_set_mtu(handle, mtu);
2371 dev_err(&hdev->pdev->dev,
2372 "set mtu failed ret=%d\n", ret);
2379 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2381 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2382 schedule_work(&hdev->mbx_service_task);
2385 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2387 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2388 schedule_work(&hdev->rst_service_task);
2391 static void hclge_task_schedule(struct hclge_dev *hdev)
2393 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2394 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2395 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2396 (void)schedule_work(&hdev->service_task);
2399 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2401 struct hclge_link_status_cmd *req;
2402 struct hclge_desc desc;
2406 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2407 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2409 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2414 req = (struct hclge_link_status_cmd *)desc.data;
2415 link_status = req->status & HCLGE_LINK_STATUS;
2417 return !!link_status;
2420 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2425 mac_state = hclge_get_mac_link_status(hdev);
2427 if (hdev->hw.mac.phydev) {
2428 if (!genphy_read_status(hdev->hw.mac.phydev))
2429 link_stat = mac_state &
2430 hdev->hw.mac.phydev->link;
2435 link_stat = mac_state;
2441 static void hclge_update_link_status(struct hclge_dev *hdev)
2443 struct hnae3_client *client = hdev->nic_client;
2444 struct hnae3_handle *handle;
2450 state = hclge_get_mac_phy_link(hdev);
2451 if (state != hdev->hw.mac.link) {
2452 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2453 handle = &hdev->vport[i].nic;
2454 client->ops->link_status_change(handle, state);
2456 hdev->hw.mac.link = state;
2460 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2462 struct hclge_mac mac = hdev->hw.mac;
2467 /* get the speed and duplex as the autoneg result from the mac cmd
2468 * when the phy does not exist and autoneg is enabled
2469 */
2470 if (mac.phydev || !mac.autoneg)
2473 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2475 dev_err(&hdev->pdev->dev,
2476 "mac autoneg/speed/duplex query failed %d\n", ret);
2480 if ((mac.speed != speed) || (mac.duplex != duplex)) {
2481 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2483 dev_err(&hdev->pdev->dev,
2484 "mac speed/duplex config failed %d\n", ret);
2492 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2494 struct hclge_vport *vport = hclge_get_vport(handle);
2495 struct hclge_dev *hdev = vport->back;
2497 return hclge_update_speed_duplex(hdev);
2500 static int hclge_get_status(struct hnae3_handle *handle)
2502 struct hclge_vport *vport = hclge_get_vport(handle);
2503 struct hclge_dev *hdev = vport->back;
2505 hclge_update_link_status(hdev);
2507 return hdev->hw.mac.link;
2510 static void hclge_service_timer(struct timer_list *t)
2512 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2514 mod_timer(&hdev->service_timer, jiffies + HZ);
2515 hdev->hw_stats.stats_timer++;
2516 hclge_task_schedule(hdev);
2519 static void hclge_service_complete(struct hclge_dev *hdev)
2521 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2523 /* Flush memory before next watchdog */
2524 smp_mb__before_atomic();
2525 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2528 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2533 /* fetch the events from their corresponding regs */
2534 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
2535 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2537 /* Assumption: If by any chance reset and mailbox events are reported
2538 * together then we will only process reset event in this go and will
2539 * defer the processing of the mailbox events. Since we will not have
2540 * cleared the RX CMDQ event this time, we will receive another
2541 * interrupt from H/W just for the mailbox.
2544 /* check for vector0 reset event sources */
2545 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2546 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2547 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2548 return HCLGE_VECTOR0_EVENT_RST;
2551 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2552 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2553 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2554 return HCLGE_VECTOR0_EVENT_RST;
2557 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2558 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2559 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2560 return HCLGE_VECTOR0_EVENT_RST;
2563 /* check for vector0 mailbox(=CMDQ RX) event source */
2564 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2565 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2566 *clearval = cmdq_src_reg;
2567 return HCLGE_VECTOR0_EVENT_MBX;
2570 return HCLGE_VECTOR0_EVENT_OTHER;
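/* Note the priority order above: global, core and IMP reset sources are
 * checked before the mailbox source, which implements the stated
 * assumption that a pending reset defers mailbox processing.
 */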
2573 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2576 switch (event_type) {
2577 case HCLGE_VECTOR0_EVENT_RST:
2578 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2580 case HCLGE_VECTOR0_EVENT_MBX:
2581 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2586 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2588 writel(enable ? 1 : 0, vector->addr);
2591 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2593 struct hclge_dev *hdev = data;
2597 hclge_enable_vector(&hdev->misc_vector, false);
2598 event_cause = hclge_check_event_cause(hdev, &clearval);
2600 /* vector 0 interrupt is shared with reset and mailbox source events. */
2601 switch (event_cause) {
2602 case HCLGE_VECTOR0_EVENT_RST:
2603 hclge_reset_task_schedule(hdev);
2605 case HCLGE_VECTOR0_EVENT_MBX:
2606 /* If we are here then,
2607 * 1. Either we are not handling any mbx task and we are not
2608 * scheduled as well,
2609 * OR
2610 * 2. We could be handling a mbx task but nothing more is
2611 * scheduled.
2612 * In both cases, we should schedule the mbx task as there are more
2613 * mbx messages reported by this interrupt.
2614 */
2615 hclge_mbx_task_schedule(hdev);
2618 dev_dbg(&hdev->pdev->dev,
2619 "received unknown or unhandled event of vector0\n");
2623 /* we should clear the source of interrupt */
2624 hclge_clear_event_cause(hdev, event_cause, clearval);
2625 hclge_enable_vector(&hdev->misc_vector, true);
2630 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2632 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2633 hdev->num_msi_left += 1;
2634 hdev->num_msi_used -= 1;
2637 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2639 struct hclge_misc_vector *vector = &hdev->misc_vector;
2641 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2643 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2644 hdev->vector_status[0] = 0;
2646 hdev->num_msi_left -= 1;
2647 hdev->num_msi_used += 1;
2650 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2654 hclge_get_misc_vector(hdev);
2656 /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
2657 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2658 0, "hclge_misc", hdev);
2660 hclge_free_vector(hdev, 0);
2661 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2662 hdev->misc_vector.vector_irq);
2668 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2670 free_irq(hdev->misc_vector.vector_irq, hdev);
2671 hclge_free_vector(hdev, 0);
2674 static int hclge_notify_client(struct hclge_dev *hdev,
2675 enum hnae3_reset_notify_type type)
2677 struct hnae3_client *client = hdev->nic_client;
2680 if (!client->ops->reset_notify)
2683 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2684 struct hnae3_handle *handle = &hdev->vport[i].nic;
2687 ret = client->ops->reset_notify(handle, type);
2695 static int hclge_reset_wait(struct hclge_dev *hdev)
2697 #define HCLGE_RESET_WAIT_MS 100
2698 #define HCLGE_RESET_WAIT_CNT 5
2699 u32 val, reg, reg_bit;
2702 switch (hdev->reset_type) {
2703 case HNAE3_GLOBAL_RESET:
2704 reg = HCLGE_GLOBAL_RESET_REG;
2705 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2707 case HNAE3_CORE_RESET:
2708 reg = HCLGE_GLOBAL_RESET_REG;
2709 reg_bit = HCLGE_CORE_RESET_BIT;
2711 case HNAE3_FUNC_RESET:
2712 reg = HCLGE_FUN_RST_ING;
2713 reg_bit = HCLGE_FUN_RST_ING_B;
2716 dev_err(&hdev->pdev->dev,
2717 "Wait for unsupported reset type: %d\n",
2722 val = hclge_read_dev(&hdev->hw, reg);
2723 while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2724 msleep(HCLGE_RESET_WAIT_MS);
2725 val = hclge_read_dev(&hdev->hw, reg);
2729 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2730 dev_warn(&hdev->pdev->dev,
2731 "Wait for reset timeout: %d\n", hdev->reset_type);
2738 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2740 struct hclge_desc desc;
2741 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2744 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2745 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
2746 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2747 req->fun_reset_vfid = func_id;
2749 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2751 dev_err(&hdev->pdev->dev,
2752 "send function reset cmd fail, status =%d\n", ret);
2757 static void hclge_do_reset(struct hclge_dev *hdev)
2759 struct pci_dev *pdev = hdev->pdev;
2762 switch (hdev->reset_type) {
2763 case HNAE3_GLOBAL_RESET:
2764 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2765 hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2766 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2767 dev_info(&pdev->dev, "Global Reset requested\n");
2769 case HNAE3_CORE_RESET:
2770 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2771 hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2772 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2773 dev_info(&pdev->dev, "Core Reset requested\n");
2775 case HNAE3_FUNC_RESET:
2776 dev_info(&pdev->dev, "PF Reset requested\n");
2777 hclge_func_reset_cmd(hdev, 0);
2778 /* schedule again to check later */
2779 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2780 hclge_reset_task_schedule(hdev);
2783 dev_warn(&pdev->dev,
2784 "Unsupported reset type: %d\n", hdev->reset_type);
2789 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2790 unsigned long *addr)
2792 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2794 /* return the highest priority reset level amongst all */
2795 if (test_bit(HNAE3_GLOBAL_RESET, addr))
2796 rst_level = HNAE3_GLOBAL_RESET;
2797 else if (test_bit(HNAE3_CORE_RESET, addr))
2798 rst_level = HNAE3_CORE_RESET;
2799 else if (test_bit(HNAE3_IMP_RESET, addr))
2800 rst_level = HNAE3_IMP_RESET;
2801 else if (test_bit(HNAE3_FUNC_RESET, addr))
2802 rst_level = HNAE3_FUNC_RESET;
2804 /* now, clear all other resets */
2805 clear_bit(HNAE3_GLOBAL_RESET, addr);
2806 clear_bit(HNAE3_CORE_RESET, addr);
2807 clear_bit(HNAE3_IMP_RESET, addr);
2808 clear_bit(HNAE3_FUNC_RESET, addr);
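/* Any lower-priority requests cleared above are subsumed by the level
 * being returned, since a higher-level reset also resets the function.
 */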
2813 static void hclge_reset(struct hclge_dev *hdev)
2815 /* perform reset of the stack & ae device for a client */
2817 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2819 if (!hclge_reset_wait(hdev)) {
2821 hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2822 hclge_reset_ae_dev(hdev->ae_dev);
2823 hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2826 /* schedule again to check pending resets later */
2827 set_bit(hdev->reset_type, &hdev->reset_pending);
2828 hclge_reset_task_schedule(hdev);
2831 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2834 static void hclge_reset_event(struct hnae3_handle *handle)
2836 struct hclge_vport *vport = hclge_get_vport(handle);
2837 struct hclge_dev *hdev = vport->back;
2839 /* check if this is a new reset request and we are not here just because
2840 * the last reset attempt did not succeed and the watchdog hit us again. We
2841 * will know this if the last reset request did not occur very recently
2842 * (watchdog timer = 5*HZ, let us check after a sufficiently large time, say
2843 * 4*5*HZ). In case of a new request we reset the "reset level" to PF reset.
2845 if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
2846 handle->reset_level = HNAE3_FUNC_RESET;
2848 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
2849 handle->reset_level);
2851 /* request reset & schedule reset task */
2852 set_bit(handle->reset_level, &hdev->reset_request);
2853 hclge_reset_task_schedule(hdev);
2855 if (handle->reset_level < HNAE3_GLOBAL_RESET)
2856 handle->reset_level++;
2858 handle->last_reset_time = jiffies;
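/* Each request inside the time window bumps the next reset level one
 * step, up to a global reset, so a reset that keeps failing is retried
 * with a progressively wider scope.
 */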
2861 static void hclge_reset_subtask(struct hclge_dev *hdev)
2863 /* check if there is any ongoing reset in the hardware. This status can
2864 * be checked from reset_pending. If there is one, we need to wait for
2865 * hardware to complete the reset.
2866 * a. If we are able to figure out in reasonable time that the hardware
2867 * has fully reset, then we can proceed with the driver, client
2869 * b. else, we can come back later to check this status so re-sched
2872 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2873 if (hdev->reset_type != HNAE3_NONE_RESET)
2876 /* check if we got any *new* reset requests to be honored */
2877 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2878 if (hdev->reset_type != HNAE3_NONE_RESET)
2879 hclge_do_reset(hdev);
2881 hdev->reset_type = HNAE3_NONE_RESET;
2884 static void hclge_reset_service_task(struct work_struct *work)
2886 struct hclge_dev *hdev =
2887 container_of(work, struct hclge_dev, rst_service_task);
2889 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2892 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2894 hclge_reset_subtask(hdev);
2896 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
2899 static void hclge_mailbox_service_task(struct work_struct *work)
2901 struct hclge_dev *hdev =
2902 container_of(work, struct hclge_dev, mbx_service_task);
2904 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2907 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2909 hclge_mbx_handler(hdev);
2911 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2914 static void hclge_service_task(struct work_struct *work)
2916 struct hclge_dev *hdev =
2917 container_of(work, struct hclge_dev, service_task);
2919 /* The total rx/tx packet statistics should be updated once
2920 * per second. Both hclge_update_stats_for_all() and
2921 * hclge_mac_get_traffic_stats() can do it.
2923 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2924 hclge_update_stats_for_all(hdev);
2925 hdev->hw_stats.stats_timer = 0;
2927 hclge_mac_get_traffic_stats(hdev);
2930 hclge_update_speed_duplex(hdev);
2931 hclge_update_link_status(hdev);
2932 hclge_update_led_status(hdev);
2933 hclge_service_complete(hdev);
2936 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2938 /* VF handle has no client */
2939 if (!handle->client)
2940 return container_of(handle, struct hclge_vport, nic);
2941 else if (handle->client->type == HNAE3_CLIENT_ROCE)
2942 return container_of(handle, struct hclge_vport, roce);
2944 return container_of(handle, struct hclge_vport, nic);
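/* Both the nic and roce handles are members of struct hclge_vport, so
 * container_of() recovers the owning vport from whichever handle the
 * client registered with.
 */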
2947 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2948 struct hnae3_vector_info *vector_info)
2950 struct hclge_vport *vport = hclge_get_vport(handle);
2951 struct hnae3_vector_info *vector = vector_info;
2952 struct hclge_dev *hdev = vport->back;
2956 vector_num = min(hdev->num_msi_left, vector_num);
2958 for (j = 0; j < vector_num; j++) {
2959 for (i = 1; i < hdev->num_msi; i++) {
2960 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2961 vector->vector = pci_irq_vector(hdev->pdev, i);
2962 vector->io_addr = hdev->hw.io_base +
2963 HCLGE_VECTOR_REG_BASE +
2964 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2966 HCLGE_VECTOR_VF_OFFSET;
2967 hdev->vector_status[i] = vport->vport_id;
2968 hdev->vector_irq[i] = vector->vector;
2977 hdev->num_msi_left -= alloc;
2978 hdev->num_msi_used += alloc;
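/* The scan above starts at index 1 because vector 0 is reserved for the
 * misc (reset/mailbox) interrupt; each allocated ring vector gets a
 * doorbell io address derived from its index.
 */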
2983 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2987 for (i = 0; i < hdev->num_msi; i++)
2988 if (vector == hdev->vector_irq[i])
2994 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
2996 struct hclge_vport *vport = hclge_get_vport(handle);
2997 struct hclge_dev *hdev = vport->back;
3000 vector_id = hclge_get_vector_index(hdev, vector);
3001 if (vector_id < 0) {
3002 dev_err(&hdev->pdev->dev,
3003 "Get vector index fail. vector_id =%d\n", vector_id);
3007 hclge_free_vector(hdev, vector_id);
3012 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3014 return HCLGE_RSS_KEY_SIZE;
3017 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3019 return HCLGE_RSS_IND_TBL_SIZE;
3022 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3023 const u8 hfunc, const u8 *key)
3025 struct hclge_rss_config_cmd *req;
3026 struct hclge_desc desc;
3031 req = (struct hclge_rss_config_cmd *)desc.data;
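/* The hash key is longer than one descriptor can carry, so it is sent
 * in three chunks: two of HCLGE_RSS_HASH_KEY_NUM bytes each and a final
 * chunk holding the remainder of HCLGE_RSS_KEY_SIZE.
 */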
3033 for (key_offset = 0; key_offset < 3; key_offset++) {
3034 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3037 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3038 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3040 if (key_offset == 2)
3042 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3044 key_size = HCLGE_RSS_HASH_KEY_NUM;
3046 memcpy(req->hash_key,
3047 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3049 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3051 dev_err(&hdev->pdev->dev,
3052 "Configure RSS config fail, status = %d\n",
3060 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3062 struct hclge_rss_indirection_table_cmd *req;
3063 struct hclge_desc desc;
3067 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3069 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3070 hclge_cmd_setup_basic_desc
3071 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3073 req->start_table_index =
3074 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3075 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3077 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3078 req->rss_result[j] =
3079 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3081 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3083 dev_err(&hdev->pdev->dev,
3084 "Configure rss indir table fail,status = %d\n",
3092 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3093 u16 *tc_size, u16 *tc_offset)
3095 struct hclge_rss_tc_mode_cmd *req;
3096 struct hclge_desc desc;
3100 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3101 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3103 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3106 hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3107 hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3108 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3109 hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3110 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3112 req->rss_tc_mode[i] = cpu_to_le16(mode);
3115 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3117 dev_err(&hdev->pdev->dev,
3118 "Configure rss tc mode fail, status = %d\n", ret);
3125 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3127 struct hclge_rss_input_tuple_cmd *req;
3128 struct hclge_desc desc;
3131 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3133 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3135 /* Get the tuple cfg from pf */
3136 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3137 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3138 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3139 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3140 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3141 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3142 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3143 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3144 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3146 dev_err(&hdev->pdev->dev,
3147 "Configure rss input fail, status = %d\n", ret);
3154 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3157 struct hclge_vport *vport = hclge_get_vport(handle);
3160 /* Get hash algorithm */
3162 *hfunc = vport->rss_algo;
3164 /* Get the RSS Key required by the user */
3166 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3168 /* Get indirect table */
3170 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3171 indir[i] = vport->rss_indirection_tbl[i];
3176 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3177 const u8 *key, const u8 hfunc)
3179 struct hclge_vport *vport = hclge_get_vport(handle);
3180 struct hclge_dev *hdev = vport->back;
3184 /* Set the RSS Hash Key if specified by the user */
3187 if (hfunc == ETH_RSS_HASH_TOP ||
3188 hfunc == ETH_RSS_HASH_NO_CHANGE)
3189 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3192 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3196 /* Update the shadow RSS key with the user specified key */
3197 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3198 vport->rss_algo = hash_algo;
3201 /* Update the shadow RSS table with user specified qids */
3202 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3203 vport->rss_indirection_tbl[i] = indir[i];
3205 /* Update the hardware */
3206 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
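/* The helper below translates the ethtool RXH_* flags into the hardware
 * hash tuple bits: L4 source/destination ports, IP source/destination
 * addresses and, for SCTP flows, the verification tag.
 */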
3209 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3211 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3213 if (nfc->data & RXH_L4_B_2_3)
3214 hash_sets |= HCLGE_D_PORT_BIT;
3216 hash_sets &= ~HCLGE_D_PORT_BIT;
3218 if (nfc->data & RXH_IP_SRC)
3219 hash_sets |= HCLGE_S_IP_BIT;
3221 hash_sets &= ~HCLGE_S_IP_BIT;
3223 if (nfc->data & RXH_IP_DST)
3224 hash_sets |= HCLGE_D_IP_BIT;
3226 hash_sets &= ~HCLGE_D_IP_BIT;
3228 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3229 hash_sets |= HCLGE_V_TAG_BIT;
3234 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3235 struct ethtool_rxnfc *nfc)
3237 struct hclge_vport *vport = hclge_get_vport(handle);
3238 struct hclge_dev *hdev = vport->back;
3239 struct hclge_rss_input_tuple_cmd *req;
3240 struct hclge_desc desc;
3244 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3245 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3248 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3249 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3251 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3252 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3253 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3254 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3255 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3256 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3257 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3258 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3260 tuple_sets = hclge_get_rss_hash_bits(nfc);
3261 switch (nfc->flow_type) {
3263 req->ipv4_tcp_en = tuple_sets;
3266 req->ipv6_tcp_en = tuple_sets;
3269 req->ipv4_udp_en = tuple_sets;
3272 req->ipv6_udp_en = tuple_sets;
3275 req->ipv4_sctp_en = tuple_sets;
3278 if ((nfc->data & RXH_L4_B_0_1) ||
3279 (nfc->data & RXH_L4_B_2_3))
3282 req->ipv6_sctp_en = tuple_sets;
3285 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3288 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3294 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3296 dev_err(&hdev->pdev->dev,
3297 "Set rss tuple fail, status = %d\n", ret);
3301 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3302 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3303 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3304 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3305 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3306 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3307 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3308 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3312 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3313 struct ethtool_rxnfc *nfc)
3315 struct hclge_vport *vport = hclge_get_vport(handle);
3320 switch (nfc->flow_type) {
3322 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3325 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3328 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3331 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3334 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3337 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3341 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3350 if (tuple_sets & HCLGE_D_PORT_BIT)
3351 nfc->data |= RXH_L4_B_2_3;
3352 if (tuple_sets & HCLGE_S_PORT_BIT)
3353 nfc->data |= RXH_L4_B_0_1;
3354 if (tuple_sets & HCLGE_D_IP_BIT)
3355 nfc->data |= RXH_IP_DST;
3356 if (tuple_sets & HCLGE_S_IP_BIT)
3357 nfc->data |= RXH_IP_SRC;
3362 static int hclge_get_tc_size(struct hnae3_handle *handle)
3364 struct hclge_vport *vport = hclge_get_vport(handle);
3365 struct hclge_dev *hdev = vport->back;
3367 return hdev->rss_size_max;
3370 int hclge_rss_init_hw(struct hclge_dev *hdev)
3372 struct hclge_vport *vport = hdev->vport;
3373 u8 *rss_indir = vport[0].rss_indirection_tbl;
3374 u16 rss_size = vport[0].alloc_rss_size;
3375 u8 *key = vport[0].rss_hash_key;
3376 u8 hfunc = vport[0].rss_algo;
3377 u16 tc_offset[HCLGE_MAX_TC_NUM];
3378 u16 tc_valid[HCLGE_MAX_TC_NUM];
3379 u16 tc_size[HCLGE_MAX_TC_NUM];
3383 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3387 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3391 ret = hclge_set_rss_input_tuple(hdev);
3395 /* Each TC has the same queue size, and the tc_size set to hardware is
3396 * the log2 of the roundup power of two of rss_size; the actual queue
3397 * size is limited by the indirection table.
3399 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3400 dev_err(&hdev->pdev->dev,
3401 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3406 roundup_size = roundup_pow_of_two(rss_size);
3407 roundup_size = ilog2(roundup_size);
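/* e.g. rss_size = 24 rounds up to 32, so tc_size = ilog2(32) = 5 is
 * written to hardware, while tc_offset advances in steps of rss_size.
 */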
3409 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3412 if (!(hdev->hw_tc_map & BIT(i)))
3416 tc_size[i] = roundup_size;
3417 tc_offset[i] = rss_size * i;
3420 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3423 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3425 struct hclge_vport *vport = hdev->vport;
3428 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3429 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3430 vport[j].rss_indirection_tbl[i] =
3431 i % vport[j].alloc_rss_size;
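/* i.e. entry n of the table maps to queue (n % alloc_rss_size), a
 * simple round-robin spread of lookups across each vport's RSS queues.
 */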
3435 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3437 struct hclge_vport *vport = hdev->vport;
3440 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3441 vport[i].rss_tuple_sets.ipv4_tcp_en =
3442 HCLGE_RSS_INPUT_TUPLE_OTHER;
3443 vport[i].rss_tuple_sets.ipv4_udp_en =
3444 HCLGE_RSS_INPUT_TUPLE_OTHER;
3445 vport[i].rss_tuple_sets.ipv4_sctp_en =
3446 HCLGE_RSS_INPUT_TUPLE_SCTP;
3447 vport[i].rss_tuple_sets.ipv4_fragment_en =
3448 HCLGE_RSS_INPUT_TUPLE_OTHER;
3449 vport[i].rss_tuple_sets.ipv6_tcp_en =
3450 HCLGE_RSS_INPUT_TUPLE_OTHER;
3451 vport[i].rss_tuple_sets.ipv6_udp_en =
3452 HCLGE_RSS_INPUT_TUPLE_OTHER;
3453 vport[i].rss_tuple_sets.ipv6_sctp_en =
3454 HCLGE_RSS_INPUT_TUPLE_SCTP;
3455 vport[i].rss_tuple_sets.ipv6_fragment_en =
3456 HCLGE_RSS_INPUT_TUPLE_OTHER;
3458 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3460 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
3463 hclge_rss_indir_init_cfg(hdev);
3466 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3467 int vector_id, bool en,
3468 struct hnae3_ring_chain_node *ring_chain)
3470 struct hclge_dev *hdev = vport->back;
3471 struct hnae3_ring_chain_node *node;
3472 struct hclge_desc desc;
3473 struct hclge_ctrl_vector_chain_cmd *req
3474 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3475 enum hclge_cmd_status status;
3476 enum hclge_opcode_type op;
3477 u16 tqp_type_and_id;
3480 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3481 hclge_cmd_setup_basic_desc(&desc, op, false);
3482 req->int_vector_id = vector_id;
3485 for (node = ring_chain; node; node = node->next) {
3486 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3487 hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3489 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3490 hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3491 HCLGE_TQP_ID_S, node->tqp_index);
3492 hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3494 hnae_get_field(node->int_gl_idx,
3495 HNAE3_RING_GL_IDX_M,
3496 HNAE3_RING_GL_IDX_S));
3497 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3498 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3499 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3500 req->vfid = vport->vport_id;
3502 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3504 dev_err(&hdev->pdev->dev,
3505 "Map TQP fail, status is %d.\n",
3511 hclge_cmd_setup_basic_desc(&desc,
3514 req->int_vector_id = vector_id;
3519 req->int_cause_num = i;
3520 req->vfid = vport->vport_id;
3521 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3523 dev_err(&hdev->pdev->dev,
3524 "Map TQP fail, status is %d.\n", status);
3532 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3534 struct hnae3_ring_chain_node *ring_chain)
3536 struct hclge_vport *vport = hclge_get_vport(handle);
3537 struct hclge_dev *hdev = vport->back;
3540 vector_id = hclge_get_vector_index(hdev, vector);
3541 if (vector_id < 0) {
3542 dev_err(&hdev->pdev->dev,
3543 "Get vector index fail. vector_id =%d\n", vector_id);
3547 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3550 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3552 struct hnae3_ring_chain_node *ring_chain)
3554 struct hclge_vport *vport = hclge_get_vport(handle);
3555 struct hclge_dev *hdev = vport->back;
3558 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3561 vector_id = hclge_get_vector_index(hdev, vector);
3562 if (vector_id < 0) {
3563 dev_err(&handle->pdev->dev,
3564 "Get vector index fail. ret =%d\n", vector_id);
3568 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3570 dev_err(&handle->pdev->dev,
3571 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3578 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3579 struct hclge_promisc_param *param)
3581 struct hclge_promisc_cfg_cmd *req;
3582 struct hclge_desc desc;
3585 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3587 req = (struct hclge_promisc_cfg_cmd *)desc.data;
3588 req->vf_id = param->vf_id;
3589 req->flag = (param->enable << HCLGE_PROMISC_EN_B);
3591 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3593 dev_err(&hdev->pdev->dev,
3594 "Set promisc mode fail, status is %d.\n", ret);
3600 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3601 bool en_mc, bool en_bc, int vport_id)
3606 memset(param, 0, sizeof(struct hclge_promisc_param));
3608 param->enable = HCLGE_PROMISC_EN_UC;
3610 param->enable |= HCLGE_PROMISC_EN_MC;
3612 param->enable |= HCLGE_PROMISC_EN_BC;
3613 param->vf_id = vport_id;
3616 static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
3618 struct hclge_vport *vport = hclge_get_vport(handle);
3619 struct hclge_dev *hdev = vport->back;
3620 struct hclge_promisc_param param;
3622 hclge_promisc_param_init(¶m, en, en, true, vport->vport_id);
3623 hclge_cmd_set_promisc_mode(hdev, ¶m);
3626 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
3628 struct hclge_desc desc;
3629 struct hclge_config_mac_mode_cmd *req =
3630 (struct hclge_config_mac_mode_cmd *)desc.data;
3634 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
3635 hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
3636 hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
3637 hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
3638 hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
3639 hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
3640 hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
3641 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3642 hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
3643 hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
3644 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
3645 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
3646 hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
3647 hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
3648 hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
3649 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
3651 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3653 dev_err(&hdev->pdev->dev,
3654 "mac enable fail, ret =%d.\n", ret);
3657 static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
3659 struct hclge_config_mac_mode_cmd *req;
3660 struct hclge_desc desc;
3664 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
3665 /* 1 Read out the MAC mode config at first */
3666 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
3667 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3669 dev_err(&hdev->pdev->dev,
3670 "mac loopback get fail, ret =%d.\n", ret);
3674 /* 2 Then setup the loopback flag */
3675 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
3676 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
3678 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
3680 /* 3 Config mac work mode with the loopback flag
3681 * and its original configuration parameters
3683 hclge_cmd_reuse_desc(&desc, false);
3684 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3686 dev_err(&hdev->pdev->dev,
3687 "mac loopback set fail, ret =%d.\n", ret);
3691 static int hclge_set_loopback(struct hnae3_handle *handle,
3692 enum hnae3_loop loop_mode, bool en)
3694 struct hclge_vport *vport = hclge_get_vport(handle);
3695 struct hclge_dev *hdev = vport->back;
3698 switch (loop_mode) {
3699 case HNAE3_MAC_INTER_LOOP_MAC:
3700 ret = hclge_set_mac_loopback(hdev, en);
3704 dev_err(&hdev->pdev->dev,
3705 "loop_mode %d is not supported\n", loop_mode);
3712 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
3713 int stream_id, bool enable)
3715 struct hclge_desc desc;
3716 struct hclge_cfg_com_tqp_queue_cmd *req =
3717 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
3720 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
3721 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
3722 req->stream_id = cpu_to_le16(stream_id);
3723 req->enable |= enable << HCLGE_TQP_ENABLE_B;
3725 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3727 dev_err(&hdev->pdev->dev,
3728 "Tqp enable fail, status =%d.\n", ret);
3732 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
3734 struct hclge_vport *vport = hclge_get_vport(handle);
3735 struct hnae3_queue *queue;
3736 struct hclge_tqp *tqp;
3739 for (i = 0; i < vport->alloc_tqps; i++) {
3740 queue = handle->kinfo.tqp[i];
3741 tqp = container_of(queue, struct hclge_tqp, q);
3742 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
3746 static int hclge_ae_start(struct hnae3_handle *handle)
3748 struct hclge_vport *vport = hclge_get_vport(handle);
3749 struct hclge_dev *hdev = vport->back;
3752 for (i = 0; i < vport->alloc_tqps; i++)
3753 hclge_tqp_enable(hdev, i, 0, true);
3756 hclge_cfg_mac_mode(hdev, true);
3757 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
3758 mod_timer(&hdev->service_timer, jiffies + HZ);
3759 hdev->hw.mac.link = 0;
3761 /* reset tqp stats */
3762 hclge_reset_tqp_stats(handle);
3764 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3767 ret = hclge_mac_start_phy(hdev);
3774 static void hclge_ae_stop(struct hnae3_handle *handle)
3776 struct hclge_vport *vport = hclge_get_vport(handle);
3777 struct hclge_dev *hdev = vport->back;
3780 del_timer_sync(&hdev->service_timer);
3781 cancel_work_sync(&hdev->service_task);
3783 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3786 for (i = 0; i < vport->alloc_tqps; i++)
3787 hclge_tqp_enable(hdev, i, 0, false);
3790 hclge_cfg_mac_mode(hdev, false);
3792 hclge_mac_stop_phy(hdev);
3794 /* reset tqp stats */
3795 hclge_reset_tqp_stats(handle);
3798 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
3799 u16 cmdq_resp, u8 resp_code,
3800 enum hclge_mac_vlan_tbl_opcode op)
3802 struct hclge_dev *hdev = vport->back;
3803 int return_status = -EIO;
3806 dev_err(&hdev->pdev->dev,
3807 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
3812 if (op == HCLGE_MAC_VLAN_ADD) {
3813 if ((!resp_code) || (resp_code == 1)) {
3815 } else if (resp_code == 2) {
3816 return_status = -ENOSPC;
3817 dev_err(&hdev->pdev->dev,
3818 "add mac addr failed for uc_overflow.\n");
3819 } else if (resp_code == 3) {
3820 return_status = -ENOSPC;
3821 dev_err(&hdev->pdev->dev,
3822 "add mac addr failed for mc_overflow.\n");
3824 dev_err(&hdev->pdev->dev,
3825 "add mac addr failed for undefined, code=%d.\n",
3828 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
3831 } else if (resp_code == 1) {
3832 return_status = -ENOENT;
3833 dev_dbg(&hdev->pdev->dev,
3834 "remove mac addr failed for miss.\n");
3836 dev_err(&hdev->pdev->dev,
3837 "remove mac addr failed for undefined, code=%d.\n",
3840 } else if (op == HCLGE_MAC_VLAN_LKUP) {
3843 } else if (resp_code == 1) {
3844 return_status = -ENOENT;
3845 dev_dbg(&hdev->pdev->dev,
3846 "lookup mac addr failed for miss.\n");
3848 dev_err(&hdev->pdev->dev,
3849 "lookup mac addr failed for undefined, code=%d.\n",
3853 return_status = -EINVAL;
3854 dev_err(&hdev->pdev->dev,
3855 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3859 return return_status;
3862 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
3867 if (vfid > 255 || vfid < 0)
3870 if (vfid >= 0 && vfid <= 191) {
3871 word_num = vfid / 32;
3872 bit_num = vfid % 32;
3874 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
3876 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
3878 word_num = (vfid - 192) / 32;
3879 bit_num = vfid % 32;
3881 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
3883 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
3889 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
3891 #define HCLGE_DESC_NUMBER 3
3892 #define HCLGE_FUNC_NUMBER_PER_DESC 6
3895 for (i = 0; i < HCLGE_DESC_NUMBER; i++)
3896 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
3897 if (desc[i].data[j])
3903 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3906 const unsigned char *mac_addr = addr;
3907 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
3908 (mac_addr[0]) | (mac_addr[1] << 8);
3909 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
3911 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
3912 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
3915 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
3918 u16 high_val = addr[1] | (addr[0] << 8);
3919 struct hclge_dev *hdev = vport->back;
3920 u32 rsh = 4 - hdev->mta_mac_sel_type;
3921 u16 ret_val = (high_val >> rsh) & 0xfff;
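/* The MTA index is a 12-bit window taken from the top of the MAC
 * address; mta_mac_sel_type selects the window, e.g.
 * HCLGE_MAC_ADDR_47_36 uses bits 47:36 of the destination address.
 */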
3926 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
3927 enum hclge_mta_dmac_sel_type mta_mac_sel,
3930 struct hclge_mta_filter_mode_cmd *req;
3931 struct hclge_desc desc;
3934 req = (struct hclge_mta_filter_mode_cmd *)desc.data;
3935 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
3937 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
3939 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
3940 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
3942 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3944 dev_err(&hdev->pdev->dev,
3945 "Config mat filter mode failed for cmd_send, ret =%d.\n",
3953 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
3957 struct hclge_cfg_func_mta_filter_cmd *req;
3958 struct hclge_desc desc;
3961 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
3962 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
3964 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
3966 req->function_id = func_id;
3968 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3970 dev_err(&hdev->pdev->dev,
3971 "Config func_id enable failed for cmd_send, ret =%d.\n",
3979 static int hclge_set_mta_table_item(struct hclge_vport *vport,
3983 struct hclge_dev *hdev = vport->back;
3984 struct hclge_cfg_func_mta_item_cmd *req;
3985 struct hclge_desc desc;
3989 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
3990 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
3991 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
3993 hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
3994 HCLGE_CFG_MTA_ITEM_IDX_S, idx);
3995 req->item_idx = cpu_to_le16(item_idx);
3997 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3999 dev_err(&hdev->pdev->dev,
4000 "Config mta table item failed for cmd_send, ret =%d.\n",
4008 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
4009 struct hclge_mac_vlan_tbl_entry_cmd *req)
4011 struct hclge_dev *hdev = vport->back;
4012 struct hclge_desc desc;
4017 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
4019 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4021 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4023 dev_err(&hdev->pdev->dev,
4024 "del mac addr failed for cmd_send, ret =%d.\n",
4028 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
4029 retval = le16_to_cpu(desc.retval);
4031 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
4032 HCLGE_MAC_VLAN_REMOVE);
4035 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
4036 struct hclge_mac_vlan_tbl_entry_cmd *req,
4037 struct hclge_desc *desc,
4040 struct hclge_dev *hdev = vport->back;
4045 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
4047 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4048 memcpy(desc[0].data,
4050 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4051 hclge_cmd_setup_basic_desc(&desc[1],
4052 HCLGE_OPC_MAC_VLAN_ADD,
4054 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4055 hclge_cmd_setup_basic_desc(&desc[2],
4056 HCLGE_OPC_MAC_VLAN_ADD,
4058 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4060 memcpy(desc[0].data,
4062 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4063 ret = hclge_cmd_send(&hdev->hw, desc, 1);
4066 dev_err(&hdev->pdev->dev,
4067 "lookup mac addr failed for cmd_send, ret =%d.\n",
4071 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
4072 retval = le16_to_cpu(desc[0].retval);
4074 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
4075 HCLGE_MAC_VLAN_LKUP);
4078 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
4079 struct hclge_mac_vlan_tbl_entry_cmd *req,
4080 struct hclge_desc *mc_desc)
4082 struct hclge_dev *hdev = vport->back;
4089 struct hclge_desc desc;
4091 hclge_cmd_setup_basic_desc(&desc,
4092 HCLGE_OPC_MAC_VLAN_ADD,
4094 memcpy(desc.data, req,
4095 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4096 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4097 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
4098 retval = le16_to_cpu(desc.retval);
4100 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
4102 HCLGE_MAC_VLAN_ADD);
4104 hclge_cmd_reuse_desc(&mc_desc[0], false);
4105 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4106 hclge_cmd_reuse_desc(&mc_desc[1], false);
4107 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4108 hclge_cmd_reuse_desc(&mc_desc[2], false);
4109 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
4110 memcpy(mc_desc[0].data, req,
4111 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4112 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
4113 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
4114 retval = le16_to_cpu(mc_desc[0].retval);
4116 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
4118 HCLGE_MAC_VLAN_ADD);
4122 dev_err(&hdev->pdev->dev,
4123 "add mac addr failed for cmd_send, ret =%d.\n",
4131 static int hclge_add_uc_addr(struct hnae3_handle *handle,
4132 const unsigned char *addr)
4134 struct hclge_vport *vport = hclge_get_vport(handle);
4136 return hclge_add_uc_addr_common(vport, addr);
4139 int hclge_add_uc_addr_common(struct hclge_vport *vport,
4140 const unsigned char *addr)
4142 struct hclge_dev *hdev = vport->back;
4143 struct hclge_mac_vlan_tbl_entry_cmd req;
4144 struct hclge_desc desc;
4145 u16 egress_port = 0;
4148 /* mac addr check */
4149 if (is_zero_ether_addr(addr) ||
4150 is_broadcast_ether_addr(addr) ||
4151 is_multicast_ether_addr(addr)) {
4152 dev_err(&hdev->pdev->dev,
4153 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
4155 is_zero_ether_addr(addr),
4156 is_broadcast_ether_addr(addr),
4157 is_multicast_ether_addr(addr));
4161 memset(&req, 0, sizeof(req));
4162 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4163 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4164 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
4165 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4167 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
4168 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
4169 hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
4170 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
4171 hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
4172 HCLGE_MAC_EPORT_PFID_S, 0);
4174 req.egress_port = cpu_to_le16(egress_port);
4176 hclge_prepare_mac_addr(&req, addr);
4178 /* Lookup the mac address in the mac_vlan table, and add
4179 * it if the entry does not exist. A duplicate unicast entry
4180 * is not allowed in the mac vlan table.
4182 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
4184 return hclge_add_mac_vlan_tbl(vport, &req, NULL);
4186 /* check if we just hit the duplicate */
4190 dev_err(&hdev->pdev->dev,
4191 "PF failed to add unicast entry(%pM) in the MAC table\n",
4197 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
4198 const unsigned char *addr)
4200 struct hclge_vport *vport = hclge_get_vport(handle);
4202 return hclge_rm_uc_addr_common(vport, addr);
4205 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
4206 const unsigned char *addr)
4208 struct hclge_dev *hdev = vport->back;
4209 struct hclge_mac_vlan_tbl_entry_cmd req;
4212 /* mac addr check */
4213 if (is_zero_ether_addr(addr) ||
4214 is_broadcast_ether_addr(addr) ||
4215 is_multicast_ether_addr(addr)) {
4216 dev_dbg(&hdev->pdev->dev,
4217 "Remove mac err! invalid mac:%pM.\n",
4222 memset(&req, 0, sizeof(req));
4223 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4224 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4225 hclge_prepare_mac_addr(&req, addr);
4226 ret = hclge_remove_mac_vlan_tbl(vport, &req);
4231 static int hclge_add_mc_addr(struct hnae3_handle *handle,
4232 const unsigned char *addr)
4234 struct hclge_vport *vport = hclge_get_vport(handle);
4236 return hclge_add_mc_addr_common(vport, addr);
4239 int hclge_add_mc_addr_common(struct hclge_vport *vport,
4240 const unsigned char *addr)
4242 struct hclge_dev *hdev = vport->back;
4243 struct hclge_mac_vlan_tbl_entry_cmd req;
4244 struct hclge_desc desc[3];
4248 /* mac addr check */
4249 if (!is_multicast_ether_addr(addr)) {
4250 dev_err(&hdev->pdev->dev,
4251 "Add mc mac err! invalid mac:%pM.\n",
4255 memset(&req, 0, sizeof(req));
4256 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4257 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4258 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
4259 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4260 hclge_prepare_mac_addr(&req, addr);
4261 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
4263 /* This mac addr exists, update the VFID for it */
4264 hclge_update_desc_vfid(desc, vport->vport_id, false);
4265 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4267 /* This mac addr does not exist, add a new entry for it */
4268 memset(desc[0].data, 0, sizeof(desc[0].data));
4269 memset(desc[1].data, 0, sizeof(desc[0].data));
4270 memset(desc[2].data, 0, sizeof(desc[0].data));
4271 hclge_update_desc_vfid(desc, vport->vport_id, false);
4272 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4275 /* Set MTA table for this MAC address */
4276 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
4277 status = hclge_set_mta_table_item(vport, tbl_idx, true);
4282 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
4283 const unsigned char *addr)
4285 struct hclge_vport *vport = hclge_get_vport(handle);
4287 return hclge_rm_mc_addr_common(vport, addr);
4290 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
4291 const unsigned char *addr)
4293 struct hclge_dev *hdev = vport->back;
4294 struct hclge_mac_vlan_tbl_entry_cmd req;
4295 enum hclge_cmd_status status;
4296 struct hclge_desc desc[3];
4299 /* mac addr check */
4300 if (!is_multicast_ether_addr(addr)) {
4301 dev_dbg(&hdev->pdev->dev,
4302 "Remove mc mac err! invalid mac:%pM.\n",
4307 memset(&req, 0, sizeof(req));
4308 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4309 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4310 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
4311 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4312 hclge_prepare_mac_addr(&req, addr);
4313 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
4315 /* This mac addr exists, remove this handle's VFID for it */
4316 hclge_update_desc_vfid(desc, vport->vport_id, true);
4318 if (hclge_is_all_function_id_zero(desc))
4319 /* All the vfids are zero, so we need to delete this entry */
4320 status = hclge_remove_mac_vlan_tbl(vport, &req);
4322 /* Not all the vfids are zero, so only update the vfid */
4323 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4327 /* This mac addr does not exist, so it cannot be deleted */
4327 dev_err(&hdev->pdev->dev,
4328 "Rm multicast mac addr failed, ret = %d.\n",
4333 /* Set MTA table for this MAC address */
4334 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
4335 status = hclge_set_mta_table_item(vport, tbl_idx, false);
4340 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
4341 u16 cmdq_resp, u8 resp_code)
4343 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
4344 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
4345 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
4346 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
4351 dev_err(&hdev->pdev->dev,
4352 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
4357 switch (resp_code) {
4358 case HCLGE_ETHERTYPE_SUCCESS_ADD:
4359 case HCLGE_ETHERTYPE_ALREADY_ADD:
4362 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
4363 dev_err(&hdev->pdev->dev,
4364 "add mac ethertype failed for manager table overflow.\n");
4365 return_status = -EIO;
4367 case HCLGE_ETHERTYPE_KEY_CONFLICT:
4368 dev_err(&hdev->pdev->dev,
4369 "add mac ethertype failed for key conflict.\n");
4370 return_status = -EIO;
4373 dev_err(&hdev->pdev->dev,
4374 "add mac ethertype failed for undefined, code=%d.\n",
4376 return_status = -EIO;
4379 return return_status;
4382 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
4383 const struct hclge_mac_mgr_tbl_entry_cmd *req)
4385 struct hclge_desc desc;
4390 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
4391 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
4393 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4395 dev_err(&hdev->pdev->dev,
4396 "add mac ethertype failed for cmd_send, ret =%d.\n",
4401 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
4402 retval = le16_to_cpu(desc.retval);
4404 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
4407 static int init_mgr_tbl(struct hclge_dev *hdev)
4412 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
4413 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
4415 dev_err(&hdev->pdev->dev,
4416 "add mac ethertype failed, ret =%d.\n",
4425 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
4427 struct hclge_vport *vport = hclge_get_vport(handle);
4428 struct hclge_dev *hdev = vport->back;
4430 ether_addr_copy(p, hdev->hw.mac.mac_addr);
4433 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
4436 const unsigned char *new_addr = (const unsigned char *)p;
4437 struct hclge_vport *vport = hclge_get_vport(handle);
4438 struct hclge_dev *hdev = vport->back;
4441 /* mac addr check */
4442 if (is_zero_ether_addr(new_addr) ||
4443 is_broadcast_ether_addr(new_addr) ||
4444 is_multicast_ether_addr(new_addr)) {
4445 dev_err(&hdev->pdev->dev,
4446 "Change uc mac err! invalid mac:%p.\n",
4451 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
4452 dev_warn(&hdev->pdev->dev,
4453 "remove old uc mac address fail.\n");
4455 ret = hclge_add_uc_addr(handle, new_addr);
4457 dev_err(&hdev->pdev->dev,
4458 "add uc mac address fail, ret =%d.\n",
4462 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
4463 dev_err(&hdev->pdev->dev,
4464 "restore uc mac address fail.\n");
4469 ret = hclge_pause_addr_cfg(hdev, new_addr);
4471 dev_err(&hdev->pdev->dev,
4472 "configure mac pause address fail, ret =%d.\n",
4477 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
4482 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
4485 struct hclge_vlan_filter_ctrl_cmd *req;
4486 struct hclge_desc desc;
4489 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
4491 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
4492 req->vlan_type = vlan_type;
4493 req->vlan_fe = filter_en;
4495 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4497 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1

static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
}

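/* The VF vlan command carries a 16-byte VF bitmap per descriptor, so two
 * chained descriptors cover all vports: vfid selects a byte (vfid / 8) and
 * a bit within it (vfid % 8), written to the first or second descriptor
 * depending on whether the byte offset exceeds HCLGE_MAX_VF_BYTES.
 */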
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
				    bool is_kill, u16 vlan, u8 qos,
				    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
		if (!req0->resp_code)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}

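/* The PF vlan table is written 160 vlans at a time: vlan_id selects a
 * 160-entry block (req->vlan_offset), then a byte and a bit inside that
 * block's bitmap mark the vlan being added or killed.
 */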
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}

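/* Program the per-vport VF vlan table unconditionally, but track vport
 * membership in the software vlan_table so the shared port vlan table is
 * only touched when the first vport joins a vlan or the last one leaves.
 */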
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}

int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
					0, is_kill);
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
}

static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
		     vcfg->accept_tag ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
		     vcfg->accept_untag ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		     vcfg->insert_tag1_en ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		     vcfg->insert_tag2_en ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE		0x8100

	struct hnae3_handle *handle;
	struct hclge_vport *vport;
	int ret;
	int i;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
	if (ret)
		return ret;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->txvlan_cfg.accept_tag = true;
		vport->txvlan_cfg.accept_untag = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.insert_tag2_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
		vport->txvlan_cfg.default_tag2 = 0;

		ret = hclge_set_vlan_tx_offload_cfg(vport);
		if (ret)
			return ret;

		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
		vport->rxvlan_cfg.vlan2_vlan_prionly = false;

		ret = hclge_set_vlan_rx_offload_cfg(vport);
		if (ret)
			return ret;
	}

	handle = &hdev->vport[0].nic;
	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	vport->rxvlan_cfg.strip_tag1_en = false;
	vport->rxvlan_cfg.strip_tag2_en = enable;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

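/* The hardware is configured with a max frame size (MPS) rather than an
 * MTU, so the Ethernet header, FCS and one VLAN tag are added on top of
 * new_mtu before range-checking against the MAC frame limits.
 */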
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;
	int max_frm_size;
	int ret;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(max_frm_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
		return ret;
	}

	hdev->mps = max_frm_size;

	return 0;
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_set_mac_mtu(hdev, new_mtu);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

	return ret;
}

static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);

	return ret;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
					  u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

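/* TQP reset flow: disable the queue, assert the reset over the command
 * queue, poll the ready_to_reset bit until the hardware reports the queue
 * reset completed (bounded by HCLGE_TQP_RESET_TRY_TIMES), then deassert
 * the soft reset.
 */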
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}

void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
				   u32 *flowctrl_adv)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	*flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
			 (phydev->advertising & ADVERTISED_Asym_Pause);
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

	if (rx_en)
		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	if (tx_en)
		phydev->advertising ^= ADVERTISED_Asym_Pause;
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}

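/* Resolve the negotiated pause configuration from the local and link
 * partner advertisements with the standard MII helper, then push the
 * result to the MAC; pause is forced off in half duplex.
 */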
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	if (phydev->advertising & ADVERTISED_Pause)
		local_advertising = ADVERTISE_PAUSE_CAP;

	if (phydev->advertising & ADVERTISED_Asym_Pause)
		local_advertising |= ADVERTISE_PAUSE_ASYM;

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	return phy_start_aneg(phydev);
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				   HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}

static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				return ret;

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					return ret;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					return ret;
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				return ret;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					return ret;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					return ret;
			}
		}
	}

	return 0;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}

static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

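/* Main PF initialization path: bring up PCI and the firmware command
 * queue first, then query capabilities and configure MSI/MSI-X, TQPs,
 * vports, MAC/MDIO, VLAN, the TM scheduler and RSS. Each failure unwinds
 * the steps already completed via the goto chain at the bottom.
 */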
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_request = 0;
	hdev->reset_pending = 0;
	ae_dev->priv = hdev;

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_destroy_cmd_queue(&hdev->hw);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = vport->alloc_tqps;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *free_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 temp_tqps = 0;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		if (!hdev->htqp[i].alloced)
			temp_tqps++;
	}
	*free_tqps = temp_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static void hclge_release_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);

		tqp->q.handle = NULL;
		tqp->q.tqp_index = 0;
		tqp->alloced = false;
	}

	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
	kinfo->tqp = NULL;
}

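/* Changing the channel count re-creates the vport's TQPs and re-runs the
 * scheduler init, then rebuilds the RSS TC mode (rounding rss_size up to
 * a power of two) and spreads the new queues evenly across the RSS
 * indirection table.
 */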
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	hclge_release_tqp(vport);

	ret = hclge_knic_setup(vport, new_tqps_num);
	if (ret) {
		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_map_tqp_to_vport(hdev, vport);
	if (ret) {
		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

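/* The first descriptor of the register dump reply reserves two data words
 * for the command head, so it yields HCLGE_32_BIT_REG_RTN_DATANUM - 2
 * values; every following descriptor is reused in full as register data.
 */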
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	data = (u32 *)data + regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
				    data);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
				    u8 act_led_status, u8 link_led_status,
				    u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M,
		       HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status);
	hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M,
		       HCLGE_LED_ACTIVITY_STATE_S, act_led_status);
	hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M,
		       HCLGE_LED_LINK_STATE_S, link_led_status);
	hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
		       HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
#define BLINK_FREQUENCY		2
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret = 0;

	if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		ret = hclge_set_led_status_sfp(hdev,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_ON);
		break;
	case ETHTOOL_ID_INACTIVE:
		ret = hclge_set_led_status_sfp(hdev,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_OFF);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

enum hclge_led_port_speed {
	HCLGE_SPEED_LED_FOR_1G,
	HCLGE_SPEED_LED_FOR_10G,
	HCLGE_SPEED_LED_FOR_25G,
	HCLGE_SPEED_LED_FOR_40G,
	HCLGE_SPEED_LED_FOR_50G,
	HCLGE_SPEED_LED_FOR_100G,
};

static u8 hclge_led_get_speed_status(u32 speed)
{
	u8 speed_led;

	switch (speed) {
	case HCLGE_MAC_SPEED_1G:
		speed_led = HCLGE_SPEED_LED_FOR_1G;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_led = HCLGE_SPEED_LED_FOR_10G;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_led = HCLGE_SPEED_LED_FOR_25G;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_led = HCLGE_SPEED_LED_FOR_40G;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_led = HCLGE_SPEED_LED_FOR_50G;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_led = HCLGE_SPEED_LED_FOR_100G;
		break;
	default:
		speed_led = HCLGE_LED_NO_CHANGE;
		break;
	}

	return speed_led;
}

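/* Derive the activity LED from the delta of the MAC total packet counters
 * since the last invocation; link and speed LEDs mirror the current MAC
 * state. Only fiber ports drive these LEDs through the firmware command.
 */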
static int hclge_update_led_status(struct hclge_dev *hdev)
{
	u8 port_speed_status, link_status, activity_status;
	u64 rx_pkts, tx_pkts;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return 0;

	port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed);

	rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num;
	tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num;
	if (rx_pkts != hdev->rx_pkts_for_led ||
	    tx_pkts != hdev->tx_pkts_for_led)
		activity_status = HCLGE_LED_ON;
	else
		activity_status = HCLGE_LED_OFF;
	hdev->rx_pkts_for_led = rx_pkts;
	hdev->tx_pkts_for_led = tx_pkts;

	if (hdev->hw.mac.link)
		link_status = HCLGE_LED_ON;
	else
		link_status = HCLGE_LED_OFF;

	return hclge_set_led_status_sfp(hdev, port_speed_status,
					activity_status, link_status,
					HCLGE_LED_NO_CHANGE);
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static void hclge_get_port_type(struct hnae3_handle *handle,
				u8 *port_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 media_type = hdev->hw.mac.media_type;

	switch (media_type) {
	case HNAE3_MEDIA_TYPE_FIBER:
		*port_type = PORT_FIBRE;
		break;
	case HNAE3_MEDIA_TYPE_COPPER:
		*port_type = PORT_TP;
		break;
	case HNAE3_MEDIA_TYPE_UNKNOWN:
	default:
		*port_type = PORT_OTHER;
		break;
	}
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_flowctrl_adv = hclge_get_flowctrl_adv,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.get_port_type = hclge_get_port_type,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);