2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/ethtool.h>
44 #include <linux/slab.h>
45 #include <linux/device.h>
46 #include <linux/skbuff.h>
47 #include <linux/if_vlan.h>
48 #include <linux/if_bridge.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/bitops.h>
52 #include <linux/list.h>
53 #include <linux/notifier.h>
54 #include <linux/dcbnl.h>
55 #include <linux/inetdevice.h>
56 #include <net/switchdev.h>
57 #include <net/pkt_cls.h>
58 #include <net/tc_act/tc_mirred.h>
59 #include <net/netevent.h>
60 #include <net/tc_act/tc_sample.h>
69 #include "spectrum_cnt.h"
70 #include "spectrum_dpipe.h"
71 #include "../mlxfw/mlxfw.h"
73 #define MLXSW_FWREV_MAJOR 13
74 #define MLXSW_FWREV_MINOR 1420
75 #define MLXSW_FWREV_SUBMINOR 122
77 static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
78 .major = MLXSW_FWREV_MAJOR,
79 .minor = MLXSW_FWREV_MINOR,
80 .subminor = MLXSW_FWREV_SUBMINOR
83 #define MLXSW_SP_FW_FILENAME \
84 "mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
85 "." __stringify(MLXSW_FWREV_MINOR) \
86 "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
88 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
89 static const char mlxsw_sp_driver_version[] = "1.0";
95 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
98 * Packet control type.
99 * 0 - Ethernet control (e.g. EMADs, LACP)
102 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
105 * Packet protocol type. Must be set to 1 (Ethernet).
107 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
109 /* tx_hdr_rx_is_router
110 * Packet is sent from the router. Valid for data packets only.
112 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
115 * Indicates if the 'fid' field is valid and should be used for
116 * forwarding lookup. Valid for data packets only.
118 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
121 * Switch partition ID. Must be set to 0.
123 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
125 /* tx_hdr_control_tclass
126 * Indicates if the packet should use the control TClass and not one
127 * of the data TClasses.
129 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
132 * Egress TClass to be used on the egress device on the egress port.
134 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
137 * Destination local port for unicast packets.
138 * Destination multicast ID for multicast packets.
140 * Control packets are directed to a specific egress port, while data
141 * packets are transmitted through the CPU port (0) into the switch partition,
142 * where forwarding rules are applied.
144 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
147 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
148 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
149 * Valid for data packets only.
151 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
155 * 6 - Control packets
157 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
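/* The MLXSW_ITEM32(tx, hdr, ...) definitions above generate the
 * mlxsw_tx_hdr_*_set() helpers used by mlxsw_sp_txhdr_construct() below to
 * fill in the Tx header pushed in front of every transmitted packet.
 */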
159 struct mlxsw_sp_mlxfw_dev {
160 struct mlxfw_dev mlxfw_dev;
161 struct mlxsw_sp *mlxsw_sp;
164 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
165 u16 component_index, u32 *p_max_size,
166 u8 *p_align_bits, u16 *p_max_write_size)
168 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
169 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
170 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
171 char mcqi_pl[MLXSW_REG_MCQI_LEN];
174 mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
175 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
178 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
181 *p_align_bits = max_t(u8, *p_align_bits, 2);
182 *p_max_write_size = min_t(u16, *p_max_write_size,
183 MLXSW_REG_MCDA_MAX_DATA_LEN);
187 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
189 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
190 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
191 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
192 char mcc_pl[MLXSW_REG_MCC_LEN];
196 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
197 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
201 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
202 if (control_state != MLXFW_FSM_STATE_IDLE)
205 mlxsw_reg_mcc_pack(mcc_pl,
206 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
208 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
211 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
212 u32 fwhandle, u16 component_index,
215 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
216 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
217 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
218 char mcc_pl[MLXSW_REG_MCC_LEN];
220 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
221 component_index, fwhandle, component_size);
222 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
225 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
226 u32 fwhandle, u8 *data, u16 size,
229 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
230 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
231 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
232 char mcda_pl[MLXSW_REG_MCDA_LEN];
234 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
235 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
238 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
239 u32 fwhandle, u16 component_index)
241 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
242 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
243 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
244 char mcc_pl[MLXSW_REG_MCC_LEN];
246 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
247 component_index, fwhandle, 0);
248 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
251 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
253 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
254 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
255 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
256 char mcc_pl[MLXSW_REG_MCC_LEN];
258 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
260 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
263 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
264 enum mlxfw_fsm_state *fsm_state,
265 enum mlxfw_fsm_state_err *fsm_state_err)
267 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
268 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
269 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
270 char mcc_pl[MLXSW_REG_MCC_LEN];
275 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
276 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
280 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
281 *fsm_state = control_state;
282 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
283 MLXFW_FSM_STATE_ERR_MAX);
287 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
289 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
290 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
291 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
292 char mcc_pl[MLXSW_REG_MCC_LEN];
294 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
296 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
299 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
301 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
302 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
303 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
304 char mcc_pl[MLXSW_REG_MCC_LEN];
306 mlxsw_reg_mcc_pack(mcc_pl,
307 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
309 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
312 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
313 .component_query = mlxsw_sp_component_query,
314 .fsm_lock = mlxsw_sp_fsm_lock,
315 .fsm_component_update = mlxsw_sp_fsm_component_update,
316 .fsm_block_download = mlxsw_sp_fsm_block_download,
317 .fsm_component_verify = mlxsw_sp_fsm_component_verify,
318 .fsm_activate = mlxsw_sp_fsm_activate,
319 .fsm_query_state = mlxsw_sp_fsm_query_state,
320 .fsm_cancel = mlxsw_sp_fsm_cancel,
321 .fsm_release = mlxsw_sp_fsm_release
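/* Compare firmware revisions field by field: major, then minor, then
 * subminor. Returns true when 'a' is the same as or newer than 'b'.
 */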
324 static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
325 const struct mlxsw_fw_rev *b)
327 if (a->major != b->major)
328 return a->major > b->major;
329 if (a->minor != b->minor)
330 return a->minor > b->minor;
331 return a->subminor >= b->subminor;
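/* If the running firmware is older than the supported revision, request
 * MLXSW_SP_FW_FILENAME from user space and flash it using the mlxfw FSM
 * operations defined above.
 */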
334 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
336 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
337 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
339 .ops = &mlxsw_sp_mlxfw_dev_ops,
340 .psid = mlxsw_sp->bus_info->psid,
341 .psid_size = strlen(mlxsw_sp->bus_info->psid),
345 const struct firmware *firmware;
348 if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
351 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
352 rev->major, rev->minor, rev->subminor);
353 dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
354 MLXSW_SP_FW_FILENAME);
356 err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
357 mlxsw_sp->bus_info->dev);
359 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
360 MLXSW_SP_FW_FILENAME);
364 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
365 release_firmware(firmware);
369 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
370 unsigned int counter_index, u64 *packets,
373 char mgpc_pl[MLXSW_REG_MGPC_LEN];
376 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
377 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
378 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
381 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
382 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
386 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
387 unsigned int counter_index)
389 char mgpc_pl[MLXSW_REG_MGPC_LEN];
391 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
392 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
393 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
396 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
397 unsigned int *p_counter_index)
401 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
405 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
407 goto err_counter_clear;
411 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
416 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
417 unsigned int counter_index)
419 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
423 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
424 const struct mlxsw_tx_info *tx_info)
426 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
428 memset(txhdr, 0, MLXSW_TXHDR_LEN);
430 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
431 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
432 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
433 mlxsw_tx_hdr_swid_set(txhdr, 0);
434 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
435 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
436 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
439 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
442 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
443 enum mlxsw_reg_spms_state spms_state;
448 case BR_STATE_FORWARDING:
449 spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
451 case BR_STATE_LEARNING:
452 spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
454 case BR_STATE_LISTENING: /* fall-through */
455 case BR_STATE_DISABLED: /* fall-through */
456 case BR_STATE_BLOCKING:
457 spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
463 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
466 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
467 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
469 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
474 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
476 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
479 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
482 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
486 static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
490 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
493 mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
495 mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
496 sizeof(struct mlxsw_sp_span_entry),
498 if (!mlxsw_sp->span.entries)
501 for (i = 0; i < mlxsw_sp->span.entries_count; i++)
502 INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
507 static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
511 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
512 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
514 WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
516 kfree(mlxsw_sp->span.entries);
519 static struct mlxsw_sp_span_entry *
520 mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
522 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
523 struct mlxsw_sp_span_entry *span_entry;
524 char mpat_pl[MLXSW_REG_MPAT_LEN];
525 u8 local_port = port->local_port;
530 /* find a free entry to use */
532 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
533 if (!mlxsw_sp->span.entries[i].used) {
535 span_entry = &mlxsw_sp->span.entries[i];
542 /* create a new port analyzer entry for local_port */
543 mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
544 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
548 span_entry->used = true;
549 span_entry->id = index;
550 span_entry->ref_count = 1;
551 span_entry->local_port = local_port;
555 static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
556 struct mlxsw_sp_span_entry *span_entry)
558 u8 local_port = span_entry->local_port;
559 char mpat_pl[MLXSW_REG_MPAT_LEN];
560 int pa_id = span_entry->id;
562 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
563 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
564 span_entry->used = false;
567 static struct mlxsw_sp_span_entry *
568 mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
570 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
573 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
574 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
576 if (curr->used && curr->local_port == port->local_port)
582 static struct mlxsw_sp_span_entry
583 *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
585 struct mlxsw_sp_span_entry *span_entry;
587 span_entry = mlxsw_sp_span_entry_find(port);
589 /* Already exists, just take a reference */
590 span_entry->ref_count++;
594 return mlxsw_sp_span_entry_create(port);
597 static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
598 struct mlxsw_sp_span_entry *span_entry)
600 WARN_ON(!span_entry->ref_count);
601 if (--span_entry->ref_count == 0)
602 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
606 static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
608 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
609 struct mlxsw_sp_span_inspected_port *p;
612 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
613 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
615 list_for_each_entry(p, &curr->bound_ports_list, list)
616 if (p->local_port == port->local_port &&
617 p->type == MLXSW_SP_SPAN_EGRESS)
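/* Size the egress mirror buffer at 2.5 times the MTU, converted to cells,
 * plus one extra cell.
 */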
624 static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
627 return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
630 static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
632 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
633 char sbib_pl[MLXSW_REG_SBIB_LEN];
636 /* If the port is egress-mirrored, the shared buffer size should be
637 * updated according to the MTU value
639 if (mlxsw_sp_span_is_egress_mirror(port)) {
640 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
642 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
643 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
645 netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
653 static struct mlxsw_sp_span_inspected_port *
654 mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
655 struct mlxsw_sp_span_entry *span_entry)
657 struct mlxsw_sp_span_inspected_port *p;
659 list_for_each_entry(p, &span_entry->bound_ports_list, list)
660 if (port->local_port == p->local_port)
666 mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
667 struct mlxsw_sp_span_entry *span_entry,
668 enum mlxsw_sp_span_type type)
670 struct mlxsw_sp_span_inspected_port *inspected_port;
671 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
672 char mpar_pl[MLXSW_REG_MPAR_LEN];
673 char sbib_pl[MLXSW_REG_SBIB_LEN];
674 int pa_id = span_entry->id;
677 /* if it is an egress SPAN, bind a shared buffer to it */
678 if (type == MLXSW_SP_SPAN_EGRESS) {
679 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
682 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
683 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
685 netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
690 /* bind the port to the SPAN entry */
691 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
692 (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
693 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
695 goto err_mpar_reg_write;
697 inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
698 if (!inspected_port) {
700 goto err_inspected_port_alloc;
702 inspected_port->local_port = port->local_port;
703 inspected_port->type = type;
704 list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
709 err_inspected_port_alloc:
710 if (type == MLXSW_SP_SPAN_EGRESS) {
711 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
712 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
718 mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
719 struct mlxsw_sp_span_entry *span_entry,
720 enum mlxsw_sp_span_type type)
722 struct mlxsw_sp_span_inspected_port *inspected_port;
723 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
724 char mpar_pl[MLXSW_REG_MPAR_LEN];
725 char sbib_pl[MLXSW_REG_SBIB_LEN];
726 int pa_id = span_entry->id;
728 inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
732 /* remove the inspected port */
733 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
734 (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
735 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
737 /* remove the SBIB buffer if it was an egress SPAN */
738 if (type == MLXSW_SP_SPAN_EGRESS) {
739 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
740 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
743 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
745 list_del(&inspected_port->list);
746 kfree(inspected_port);
749 static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
750 struct mlxsw_sp_port *to,
751 enum mlxsw_sp_span_type type)
753 struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
754 struct mlxsw_sp_span_entry *span_entry;
757 span_entry = mlxsw_sp_span_entry_get(to);
761 netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
764 err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
771 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
775 static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
776 struct mlxsw_sp_port *to,
777 enum mlxsw_sp_span_type type)
779 struct mlxsw_sp_span_entry *span_entry;
781 span_entry = mlxsw_sp_span_entry_find(to);
783 netdev_err(from->dev, "no span entry found\n");
787 netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
789 mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
792 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
793 bool enable, u32 rate)
795 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
796 char mpsc_pl[MLXSW_REG_MPSC_LEN];
798 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
799 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
802 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
805 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
806 char paos_pl[MLXSW_REG_PAOS_LEN];
808 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
809 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
810 MLXSW_PORT_ADMIN_STATUS_DOWN);
811 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
814 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
817 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
818 char ppad_pl[MLXSW_REG_PPAD_LEN];
820 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
821 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
822 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
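/* Derive the port MAC address from the switch base MAC by adding the
 * local port number to its last byte.
 */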
825 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
827 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
828 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
830 ether_addr_copy(addr, mlxsw_sp->base_mac);
831 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
832 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
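/* The MTU programmed to the device also covers the Ethernet header and the
 * SW Tx header pushed in front of each transmitted packet.
 */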
835 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
837 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
838 char pmtu_pl[MLXSW_REG_PMTU_LEN];
842 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
843 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
844 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
847 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
852 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
853 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
856 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
859 char pspa_pl[MLXSW_REG_PSPA_LEN];
861 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
862 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
865 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
867 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
869 return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
873 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
876 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
877 char svpe_pl[MLXSW_REG_SVPE_LEN];
879 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
880 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
883 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
884 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
887 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
888 char svfa_pl[MLXSW_REG_SVFA_LEN];
890 mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
892 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
895 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
898 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
902 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
905 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
907 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
912 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
915 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
916 char spvid_pl[MLXSW_REG_SPVID_LEN];
918 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
919 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
922 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
925 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
926 char spaft_pl[MLXSW_REG_SPAFT_LEN];
928 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
929 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
932 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
937 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
941 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
944 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
946 goto err_port_allow_untagged_set;
949 mlxsw_sp_port->pvid = vid;
952 err_port_allow_untagged_set:
953 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
958 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
960 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
961 char sspr_pl[MLXSW_REG_SSPR_LEN];
963 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
964 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
967 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
968 u8 local_port, u8 *p_module,
969 u8 *p_width, u8 *p_lane)
971 char pmlp_pl[MLXSW_REG_PMLP_LEN];
974 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
975 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
978 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
979 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
980 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
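/* Map 'width' consecutive lanes, starting at 'lane', of the given module to
 * this local port.
 */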
984 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
985 u8 module, u8 width, u8 lane)
987 char pmlp_pl[MLXSW_REG_PMLP_LEN];
990 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
991 mlxsw_reg_pmlp_width_set(pmlp_pl, width);
992 for (i = 0; i < width; i++) {
993 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
994 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
997 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
1000 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1002 char pmlp_pl[MLXSW_REG_PMLP_LEN];
1004 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
1005 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
1006 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
1009 static int mlxsw_sp_port_open(struct net_device *dev)
1011 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1014 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1017 netif_start_queue(dev);
1021 static int mlxsw_sp_port_stop(struct net_device *dev)
1023 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1025 netif_stop_queue(dev);
1026 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1029 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
1030 struct net_device *dev)
1032 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1033 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1034 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
1035 const struct mlxsw_tx_info tx_info = {
1036 .local_port = mlxsw_sp_port->local_port,
1042 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
1043 return NETDEV_TX_BUSY;
1045 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
1046 struct sk_buff *skb_orig = skb;
1048 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
1050 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1051 dev_kfree_skb_any(skb_orig);
1052 return NETDEV_TX_OK;
1054 dev_consume_skb_any(skb_orig);
1057 if (eth_skb_pad(skb)) {
1058 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1059 return NETDEV_TX_OK;
1062 mlxsw_sp_txhdr_construct(skb, &tx_info);
1063 /* TX header is consumed by the HW on the way, so we shouldn't count its
1064 * bytes as being sent.
1066 len = skb->len - MLXSW_TXHDR_LEN;
1068 /* Due to a race we might fail here because of a full queue. In that
1069 * unlikely case we simply drop the packet.
1071 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
1074 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
1075 u64_stats_update_begin(&pcpu_stats->syncp);
1076 pcpu_stats->tx_packets++;
1077 pcpu_stats->tx_bytes += len;
1078 u64_stats_update_end(&pcpu_stats->syncp);
1080 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1081 dev_kfree_skb_any(skb);
1083 return NETDEV_TX_OK;
1086 static void mlxsw_sp_set_rx_mode(struct net_device *dev)
1090 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
1092 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1093 struct sockaddr *addr = p;
1096 if (!is_valid_ether_addr(addr->sa_data))
1097 return -EADDRNOTAVAIL;
1099 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
1102 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
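/* Threshold for a priority group (PG) buffer: two MTU-sized packets, in
 * cells.
 */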
1106 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
1109 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
1112 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
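/* Convert the PFC delay to cells, scale it by the cell factor and add one
 * MTU worth of cells on top.
 */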
1114 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1117 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
1119 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
1123 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
1124 * Assumes 100m cable and maximum MTU.
1126 #define MLXSW_SP_PAUSE_DELAY 58752
1128 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1129 u16 delay, bool pfc, bool pause)
1132 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
1134 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
1139 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
1143 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
1145 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
1149 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
1150 u8 *prio_tc, bool pause_en,
1151 struct ieee_pfc *my_pfc)
1153 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1154 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
1155 u16 delay = !!my_pfc ? my_pfc->delay : 0;
1156 char pbmc_pl[MLXSW_REG_PBMC_LEN];
1159 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
1160 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
1164 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1165 bool configure = false;
1170 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
1171 if (prio_tc[j] == i) {
1172 pfc = pfc_en & BIT(j);
1181 lossy = !(pfc || pause_en);
1182 thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
1183 delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
1185 mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
1188 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
1191 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
1192 int mtu, bool pause_en)
1194 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
1195 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
1196 struct ieee_pfc *my_pfc;
1199 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
1200 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
1202 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
1206 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
1208 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1209 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1212 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
1215 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
1217 goto err_span_port_mtu_update;
1218 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
1220 goto err_port_mtu_set;
1225 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
1226 err_span_port_mtu_update:
1227 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1232 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
1233 struct rtnl_link_stats64 *stats)
1235 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1236 struct mlxsw_sp_port_pcpu_stats *p;
1237 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1242 for_each_possible_cpu(i) {
1243 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
1245 start = u64_stats_fetch_begin_irq(&p->syncp);
1246 rx_packets = p->rx_packets;
1247 rx_bytes = p->rx_bytes;
1248 tx_packets = p->tx_packets;
1249 tx_bytes = p->tx_bytes;
1250 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1252 stats->rx_packets += rx_packets;
1253 stats->rx_bytes += rx_bytes;
1254 stats->tx_packets += tx_packets;
1255 stats->tx_bytes += tx_bytes;
1256 /* tx_dropped is u32, updated without syncp protection. */
1257 tx_dropped += p->tx_dropped;
1259 stats->tx_dropped = tx_dropped;
1263 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
1266 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1273 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
1277 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1278 return mlxsw_sp_port_get_sw_stats64(dev, sp);
1284 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
1285 int prio, char *ppcnt_pl)
1287 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1288 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1290 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1291 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1294 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
1295 struct rtnl_link_stats64 *stats)
1297 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1300 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1306 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1308 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1310 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1312 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1314 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1316 stats->rx_crc_errors =
1317 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1318 stats->rx_frame_errors =
1319 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1321 stats->rx_length_errors = (
1322 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1323 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1324 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1326 stats->rx_errors = (stats->rx_crc_errors +
1327 stats->rx_frame_errors + stats->rx_length_errors);
1333 static void update_stats_cache(struct work_struct *work)
1335 struct mlxsw_sp_port *mlxsw_sp_port =
1336 container_of(work, struct mlxsw_sp_port,
1337 hw_stats.update_dw.work);
1339 if (!netif_carrier_ok(mlxsw_sp_port->dev))
1342 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1343 mlxsw_sp_port->hw_stats.cache);
1346 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
1347 MLXSW_HW_STATS_UPDATE_TIME);
1350 /* Return the stats from a cache that is updated periodically,
1351 * as this function might get called in an atomic context.
1354 mlxsw_sp_port_get_stats64(struct net_device *dev,
1355 struct rtnl_link_stats64 *stats)
1357 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1359 memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
1362 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1363 u16 vid_begin, u16 vid_end,
1364 bool is_member, bool untagged)
1366 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1370 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1374 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1375 vid_end, is_member, untagged);
1376 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
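/* Program a range of VIDs in chunks of at most MLXSW_REG_SPVM_REC_MAX_COUNT
 * records per SPVM write.
 */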
1381 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1382 u16 vid_end, bool is_member, bool untagged)
1387 for (vid = vid_begin; vid <= vid_end;
1388 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1389 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1392 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1393 is_member, untagged);
1401 int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
1403 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
1404 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1405 struct mlxsw_sp_fid *fid;
1409 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
1411 fid = mlxsw_sp_port_vlan->fid;
1413 if (!fid || fid->fid >= MLXSW_SP_VFID_BASE)
1416 vid = mlxsw_sp_port_vlan->vid;
1417 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true,
1420 goto err_port_vid_to_fid_set;
1423 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
1425 goto err_port_vp_mode_set;
1429 err_port_vp_mode_set:
1430 err_port_vid_to_fid_set:
1431 list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
1432 &mlxsw_sp_port->vlans_list, list) {
1433 fid = mlxsw_sp_port_vlan->fid;
1435 if (!fid || fid->fid >= MLXSW_SP_VFID_BASE)
1438 vid = mlxsw_sp_port_vlan->vid;
1439 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid->fid,
1445 int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
1447 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
1448 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1451 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
1455 list_for_each_entry_reverse(mlxsw_sp_port_vlan,
1456 &mlxsw_sp_port->vlans_list, list) {
1457 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1458 u16 vid = mlxsw_sp_port_vlan->vid;
1460 if (!fid || fid->fid >= MLXSW_SP_VFID_BASE)
1463 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid->fid,
1470 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
1472 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1474 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1475 &mlxsw_sp_port->vlans_list, list)
1476 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1479 static struct mlxsw_sp_port_vlan *
1480 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1482 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1483 bool untagged = vid == 1;
1486 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1488 return ERR_PTR(err);
1490 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
1491 if (!mlxsw_sp_port_vlan) {
1493 goto err_port_vlan_alloc;
1496 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1497 mlxsw_sp_port_vlan->vid = vid;
1498 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1500 return mlxsw_sp_port_vlan;
1502 err_port_vlan_alloc:
1503 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1504 return ERR_PTR(err);
1508 mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1510 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1511 u16 vid = mlxsw_sp_port_vlan->vid;
1513 list_del(&mlxsw_sp_port_vlan->list);
1514 kfree(mlxsw_sp_port_vlan);
1515 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1518 struct mlxsw_sp_port_vlan *
1519 mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1521 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1523 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1524 if (mlxsw_sp_port_vlan)
1525 return mlxsw_sp_port_vlan;
1527 return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
1530 void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1532 if (mlxsw_sp_port_vlan->bridge_port)
1533 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1534 else if (mlxsw_sp_port_vlan->fid)
1535 mlxsw_sp_port_vlan->fid->leave(mlxsw_sp_port_vlan);
1537 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1540 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1541 __be16 __always_unused proto, u16 vid)
1543 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1545 /* VLAN 0 is added to the HW filter when the device goes up, but it is
1546 * reserved in our case, so simply return.
1551 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
1554 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1555 __be16 __always_unused proto, u16 vid)
1557 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1558 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1560 /* VLAN 0 is removed from the HW filter when the device goes down, but
1561 * it is reserved in our case, so simply return.
1566 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1567 if (!mlxsw_sp_port_vlan)
1569 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1574 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1577 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1578 u8 module = mlxsw_sp_port->mapping.module;
1579 u8 width = mlxsw_sp_port->mapping.width;
1580 u8 lane = mlxsw_sp_port->mapping.lane;
1583 if (!mlxsw_sp_port->split)
1584 err = snprintf(name, len, "p%d", module + 1);
1586 err = snprintf(name, len, "p%ds%d", module + 1,
1595 static struct mlxsw_sp_port_mall_tc_entry *
1596 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1597 unsigned long cookie) {
1598 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1600 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1601 if (mall_tc_entry->cookie == cookie)
1602 return mall_tc_entry;
1608 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1609 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
1610 const struct tc_action *a,
1613 struct net *net = dev_net(mlxsw_sp_port->dev);
1614 enum mlxsw_sp_span_type span_type;
1615 struct mlxsw_sp_port *to_port;
1616 struct net_device *to_dev;
1619 ifindex = tcf_mirred_ifindex(a);
1620 to_dev = __dev_get_by_index(net, ifindex);
1622 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1626 if (!mlxsw_sp_port_dev_check(to_dev)) {
1627 netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
1630 to_port = netdev_priv(to_dev);
1632 mirror->to_local_port = to_port->local_port;
1633 mirror->ingress = ingress;
1634 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1635 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
1639 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1640 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1642 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1643 enum mlxsw_sp_span_type span_type;
1644 struct mlxsw_sp_port *to_port;
1646 to_port = mlxsw_sp->ports[mirror->to_local_port];
1647 span_type = mirror->ingress ?
1648 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1649 mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
1653 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1654 struct tc_cls_matchall_offload *cls,
1655 const struct tc_action *a,
1660 if (!mlxsw_sp_port->sample)
1662 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1663 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1666 if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
1667 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1671 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1672 tcf_sample_psample_group(a));
1673 mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
1674 mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
1675 mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
1677 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
1679 goto err_port_sample_set;
1682 err_port_sample_set:
1683 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1688 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1690 if (!mlxsw_sp_port->sample)
1693 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1694 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1697 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1699 struct tc_cls_matchall_offload *cls,
1702 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1703 const struct tc_action *a;
1707 if (!tc_single_action(cls->exts)) {
1708 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
1712 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1715 mall_tc_entry->cookie = cls->cookie;
1717 tcf_exts_to_list(cls->exts, &actions);
1718 a = list_first_entry(&actions, struct tc_action, list);
1720 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1721 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1723 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1724 mirror = &mall_tc_entry->mirror;
1725 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1726 mirror, a, ingress);
1727 } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1728 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
1729 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
1736 goto err_add_action;
1738 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
1742 kfree(mall_tc_entry);
1746 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1747 struct tc_cls_matchall_offload *cls)
1749 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1751 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1753 if (!mall_tc_entry) {
1754 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1757 list_del(&mall_tc_entry->list);
1759 switch (mall_tc_entry->type) {
1760 case MLXSW_SP_PORT_MALL_MIRROR:
1761 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1762 &mall_tc_entry->mirror);
1764 case MLXSW_SP_PORT_MALL_SAMPLE:
1765 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1771 kfree(mall_tc_entry);
1774 static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
1775 __be16 proto, struct tc_to_netdev *tc)
1777 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1778 bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
1781 case TC_SETUP_MATCHALL:
1782 switch (tc->cls_mall->command) {
1783 case TC_CLSMATCHALL_REPLACE:
1784 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
1788 case TC_CLSMATCHALL_DESTROY:
1789 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
1795 case TC_SETUP_CLSFLOWER:
1796 switch (tc->cls_flower->command) {
1797 case TC_CLSFLOWER_REPLACE:
1798 return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
1799 proto, tc->cls_flower);
1800 case TC_CLSFLOWER_DESTROY:
1801 mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
1804 case TC_CLSFLOWER_STATS:
1805 return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
1815 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1816 .ndo_open = mlxsw_sp_port_open,
1817 .ndo_stop = mlxsw_sp_port_stop,
1818 .ndo_start_xmit = mlxsw_sp_port_xmit,
1819 .ndo_setup_tc = mlxsw_sp_setup_tc,
1820 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
1821 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
1822 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
1823 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
1824 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
1825 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
1826 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
1827 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
1828 .ndo_fdb_add = switchdev_port_fdb_add,
1829 .ndo_fdb_del = switchdev_port_fdb_del,
1830 .ndo_fdb_dump = switchdev_port_fdb_dump,
1831 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
1832 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
1833 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
1834 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
1837 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1838 struct ethtool_drvinfo *drvinfo)
1840 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1841 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1843 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1844 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1845 sizeof(drvinfo->version));
1846 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1848 mlxsw_sp->bus_info->fw_rev.major,
1849 mlxsw_sp->bus_info->fw_rev.minor,
1850 mlxsw_sp->bus_info->fw_rev.subminor);
1851 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1852 sizeof(drvinfo->bus_info));
1855 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1856 struct ethtool_pauseparam *pause)
1858 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1860 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1861 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1864 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1865 struct ethtool_pauseparam *pause)
1867 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1869 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1870 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1871 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1873 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1877 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1878 struct ethtool_pauseparam *pause)
1880 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1881 bool pause_en = pause->tx_pause || pause->rx_pause;
1884 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1885 netdev_err(dev, "PFC already enabled on port\n");
1889 if (pause->autoneg) {
1890 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1894 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1896 netdev_err(dev, "Failed to configure port's headroom\n");
1900 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1902 netdev_err(dev, "Failed to set PAUSE parameters\n");
1903 goto err_port_pause_configure;
1906 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1907 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1911 err_port_pause_configure:
1912 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1913 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1917 struct mlxsw_sp_port_hw_stats {
1918 char str[ETH_GSTRING_LEN];
1919 u64 (*getter)(const char *payload);
1923 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
1925 .str = "a_frames_transmitted_ok",
1926 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1929 .str = "a_frames_received_ok",
1930 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1933 .str = "a_frame_check_sequence_errors",
1934 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1937 .str = "a_alignment_errors",
1938 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1941 .str = "a_octets_transmitted_ok",
1942 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1945 .str = "a_octets_received_ok",
1946 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1949 .str = "a_multicast_frames_xmitted_ok",
1950 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1953 .str = "a_broadcast_frames_xmitted_ok",
1954 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1957 .str = "a_multicast_frames_received_ok",
1958 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1961 .str = "a_broadcast_frames_received_ok",
1962 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1965 .str = "a_in_range_length_errors",
1966 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1969 .str = "a_out_of_range_length_field",
1970 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1973 .str = "a_frame_too_long_errors",
1974 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1977 .str = "a_symbol_error_during_carrier",
1978 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1981 .str = "a_mac_control_frames_transmitted",
1982 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1985 .str = "a_mac_control_frames_received",
1986 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1989 .str = "a_unsupported_opcodes_received",
1990 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1993 .str = "a_pause_mac_ctrl_frames_received",
1994 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1997 .str = "a_pause_mac_ctrl_frames_xmitted",
1998 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
2002 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
2004 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
2006 .str = "rx_octets_prio",
2007 .getter = mlxsw_reg_ppcnt_rx_octets_get,
2010 .str = "rx_frames_prio",
2011 .getter = mlxsw_reg_ppcnt_rx_frames_get,
2014 .str = "tx_octets_prio",
2015 .getter = mlxsw_reg_ppcnt_tx_octets_get,
2018 .str = "tx_frames_prio",
2019 .getter = mlxsw_reg_ppcnt_tx_frames_get,
2022 .str = "rx_pause_prio",
2023 .getter = mlxsw_reg_ppcnt_rx_pause_get,
2026 .str = "rx_pause_duration_prio",
2027 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
2030 .str = "tx_pause_prio",
2031 .getter = mlxsw_reg_ppcnt_tx_pause_get,
2034 .str = "tx_pause_duration_prio",
2035 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
2039 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
2041 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
2043 .str = "tc_transmit_queue_tc",
2044 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
2045 .cells_bytes = true,
2048 .str = "tc_no_buffer_discard_uc_tc",
2049 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
2053 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
2055 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
2056 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
2057 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
2058 IEEE_8021QAZ_MAX_TCS)
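/* Total number of ethtool stats: the IEEE 802.3 counters plus per-priority
 * and per-TC counters for each of the eight (IEEE_8021QAZ_MAX_TCS) traffic
 * classes / priorities.
 */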
2060 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
2064 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
2065 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2066 mlxsw_sp_port_hw_prio_stats[i].str, prio);
2067 *p += ETH_GSTRING_LEN;
2071 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
2075 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
2076 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2077 mlxsw_sp_port_hw_tc_stats[i].str, tc);
2078 *p += ETH_GSTRING_LEN;
2082 static void mlxsw_sp_port_get_strings(struct net_device *dev,
2083 u32 stringset, u8 *data)
2088 switch (stringset) {
2090 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
2091 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
2093 p += ETH_GSTRING_LEN;
2096 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2097 mlxsw_sp_port_get_prio_strings(&p, i);
2099 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2100 mlxsw_sp_port_get_tc_strings(&p, i);
2106 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2107 enum ethtool_phys_id_state state)
2109 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2110 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2111 char mlcr_pl[MLXSW_REG_MLCR_LEN];
2115 case ETHTOOL_ID_ACTIVE:
2118 case ETHTOOL_ID_INACTIVE:
2125 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2126 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2130 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2131 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2134 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2135 *p_hw_stats = mlxsw_sp_port_hw_stats;
2136 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2138 case MLXSW_REG_PPCNT_PRIO_CNT:
2139 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2140 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2142 case MLXSW_REG_PPCNT_TC_CNT:
2143 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2144 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
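/* Read one PPCNT counter group for the port and copy its counters into
 * the ethtool data array starting at data_index. Counters reported by
 * the device in buffer cells are converted to bytes.
 */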
2153 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2154 enum mlxsw_reg_ppcnt_grp grp, int prio,
2155 u64 *data, int data_index)
2157 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2158 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2159 struct mlxsw_sp_port_hw_stats *hw_stats;
2160 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
2164 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2167 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
2168 for (i = 0; i < len; i++) {
2169 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
2170 if (!hw_stats[i].cells_bytes)
2171 continue;
2172 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2173 data[data_index + i]);
2177 static void mlxsw_sp_port_get_stats(struct net_device *dev,
2178 struct ethtool_stats *stats, u64 *data)
2180 int i, data_index = 0;
2182 /* IEEE 802.3 Counters */
2183 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2184 data, data_index);
2185 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2187 /* Per-Priority Counters */
2188 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2189 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2190 data, data_index);
2191 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2194 /* Per-TC Counters */
2195 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2196 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2197 data, data_index);
2198 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2202 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2206 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
2212 struct mlxsw_sp_port_link_mode {
2213 enum ethtool_link_mode_bit_indices mask_ethtool;
2218 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2220 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2221 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2225 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2226 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2227 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2228 .speed = SPEED_1000,
2231 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2232 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2233 .speed = SPEED_10000,
2236 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2237 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2238 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2239 .speed = SPEED_10000,
2242 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2243 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2244 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2245 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2246 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2247 .speed = SPEED_10000,
2250 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2251 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2252 .speed = SPEED_20000,
2255 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2256 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2257 .speed = SPEED_40000,
2260 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2261 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2262 .speed = SPEED_40000,
2265 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2266 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2267 .speed = SPEED_40000,
2270 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2271 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2272 .speed = SPEED_40000,
2275 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2276 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2277 .speed = SPEED_25000,
2280 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2281 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2282 .speed = SPEED_25000,
2285 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2286 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2287 .speed = SPEED_25000,
2295 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2296 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2297 .speed = SPEED_50000,
2300 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2301 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2302 .speed = SPEED_50000,
2305 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2306 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2307 .speed = SPEED_50000,
2310 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2311 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2312 .speed = SPEED_56000,
2315 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2316 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2317 .speed = SPEED_56000,
2320 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2321 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2322 .speed = SPEED_56000,
2325 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2326 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2327 .speed = SPEED_56000,
2330 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2331 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2332 .speed = SPEED_100000,
2335 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2336 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2337 .speed = SPEED_100000,
2340 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2341 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2342 .speed = SPEED_100000,
2345 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2346 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2347 .speed = SPEED_100000,
2351 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2354 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2355 struct ethtool_link_ksettings *cmd)
2357 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2358 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2359 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2360 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2361 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2362 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2363 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2365 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2366 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2367 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2368 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2369 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2370 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2373 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2377 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2378 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2379 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2384 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
2385 struct ethtool_link_ksettings *cmd)
2387 u32 speed = SPEED_UNKNOWN;
2388 u8 duplex = DUPLEX_UNKNOWN;
2394 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2395 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2396 speed = mlxsw_sp_port_link_mode[i].speed;
2397 duplex = DUPLEX_FULL;
2402 cmd->base.speed = speed;
2403 cmd->base.duplex = duplex;
2406 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2408 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2409 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2410 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2411 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2414 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2415 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2416 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2419 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2420 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2421 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2422 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2429 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2434 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2435 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2436 cmd->link_modes.advertising))
2437 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2442 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2447 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2448 if (speed == mlxsw_sp_port_link_mode[i].speed)
2449 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
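/* Build a PTYS protocol mask of all link modes whose speed does not
 * exceed the given upper bound.
 */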
2454 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2459 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2460 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2461 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2466 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2467 struct ethtool_link_ksettings *cmd)
2469 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2470 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2471 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2473 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2474 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2477 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2478 struct ethtool_link_ksettings *cmd)
2483 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2484 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2488 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2489 struct ethtool_link_ksettings *cmd)
2491 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2494 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2495 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
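/* ethtool get_link_ksettings handler: query the PTYS register and report
 * the supported, advertised and link-partner link modes along with the
 * operational speed, duplex and connector type.
 */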
2498 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2499 struct ethtool_link_ksettings *cmd)
2501 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2502 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2503 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2504 char ptys_pl[MLXSW_REG_PTYS_LEN];
2509 autoneg = mlxsw_sp_port->link.autoneg;
2510 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2511 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2514 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2515 &eth_proto_oper);
2517 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2519 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2521 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2522 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2523 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2525 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2526 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2527 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2534 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2535 const struct ethtool_link_ksettings *cmd)
2537 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2538 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2539 char ptys_pl[MLXSW_REG_PTYS_LEN];
2540 u32 eth_proto_cap, eth_proto_new;
2544 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2545 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2548 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2550 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2551 eth_proto_new = autoneg ?
2552 mlxsw_sp_to_ptys_advert_link(cmd) :
2553 mlxsw_sp_to_ptys_speed(cmd->base.speed);
2555 eth_proto_new = eth_proto_new & eth_proto_cap;
2556 if (!eth_proto_new) {
2557 netdev_err(dev, "No supported speed requested\n");
2561 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2562 eth_proto_new);
2563 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2567 if (!netif_running(dev))
2568 return 0;
2570 mlxsw_sp_port->link.autoneg = autoneg;
2572 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2573 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2578 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2579 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
2580 .get_link = ethtool_op_get_link,
2581 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
2582 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
2583 .get_strings = mlxsw_sp_port_get_strings,
2584 .set_phys_id = mlxsw_sp_port_set_phys_id,
2585 .get_ethtool_stats = mlxsw_sp_port_get_stats,
2586 .get_sset_count = mlxsw_sp_port_get_sset_count,
2587 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
2588 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
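/* Advertise every speed the port can support given its lane width; the
 * upper bound is the per-lane base speed multiplied by the width.
 */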
2592 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2594 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2595 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2596 char ptys_pl[MLXSW_REG_PTYS_LEN];
2597 u32 eth_proto_admin;
2599 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
2600 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2602 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2605 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2606 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2607 bool dwrr, u8 dwrr_weight)
2609 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2610 char qeec_pl[MLXSW_REG_QEEC_LEN];
2612 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2614 mlxsw_reg_qeec_de_set(qeec_pl, true);
2615 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2616 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2617 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2620 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2621 enum mlxsw_reg_qeec_hr hr, u8 index,
2622 u8 next_index, u32 maxrate)
2624 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2625 char qeec_pl[MLXSW_REG_QEEC_LEN];
2627 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2629 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2630 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2631 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2634 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2635 u8 switch_prio, u8 tclass)
2637 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2638 char qtct_pl[MLXSW_REG_QTCT_LEN];
2640 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2642 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2645 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2649 /* Set up the elements hierarchy, so that each TC is linked to
2650 * one subgroup, which are all members of the same group.
2651 */
2652 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2653 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2657 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2658 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2659 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2664 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2665 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2666 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2672 /* Make sure the max shaper is disabled in all hierarchies that
2673 * support it.
2674 */
2675 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2676 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2677 MLXSW_REG_QEEC_MAS_DIS);
2680 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2681 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2682 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2684 MLXSW_REG_QEEC_MAS_DIS);
2688 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2689 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2690 MLXSW_REG_QEEC_HIERARCY_TC,
2692 MLXSW_REG_QEEC_MAS_DIS);
2697 /* Map all priorities to traffic class 0. */
2698 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2699 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
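/* Create a port netdev and initialize its hardware resources: SWID, MAC
 * address, MTU, buffers, ETS, DCB and the default VLAN, then register
 * the netdev with the kernel.
 */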
2707 static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2708 bool split, u8 module, u8 width, u8 lane)
2710 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2711 struct mlxsw_sp_port *mlxsw_sp_port;
2712 struct net_device *dev;
2715 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2718 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2719 mlxsw_sp_port = netdev_priv(dev);
2720 mlxsw_sp_port->dev = dev;
2721 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2722 mlxsw_sp_port->local_port = local_port;
2723 mlxsw_sp_port->pvid = 1;
2724 mlxsw_sp_port->split = split;
2725 mlxsw_sp_port->mapping.module = module;
2726 mlxsw_sp_port->mapping.width = width;
2727 mlxsw_sp_port->mapping.lane = lane;
2728 mlxsw_sp_port->link.autoneg = 1;
2729 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
2730 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2732 mlxsw_sp_port->pcpu_stats =
2733 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2734 if (!mlxsw_sp_port->pcpu_stats) {
2736 goto err_alloc_stats;
2739 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2741 if (!mlxsw_sp_port->sample) {
2743 goto err_alloc_sample;
2746 mlxsw_sp_port->hw_stats.cache =
2747 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2749 if (!mlxsw_sp_port->hw_stats.cache) {
2751 goto err_alloc_hw_stats;
2753 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2754 &update_stats_cache);
2756 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2757 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2759 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2761 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2762 mlxsw_sp_port->local_port);
2763 goto err_port_swid_set;
2766 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2768 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2769 mlxsw_sp_port->local_port);
2770 goto err_dev_addr_init;
2773 netif_carrier_off(dev);
2775 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2776 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2777 dev->hw_features |= NETIF_F_HW_TC;
2780 dev->max_mtu = ETH_MAX_MTU;
2782 /* Each packet needs to have a Tx header (metadata) on top of all other
2783 * headers.
2784 */
2785 dev->needed_headroom = MLXSW_TXHDR_LEN;
2787 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2789 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2790 mlxsw_sp_port->local_port);
2791 goto err_port_system_port_mapping_set;
2794 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2796 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2797 mlxsw_sp_port->local_port);
2798 goto err_port_speed_by_width_set;
2801 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2803 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2804 mlxsw_sp_port->local_port);
2805 goto err_port_mtu_set;
2808 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2810 goto err_port_admin_status_set;
2812 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2814 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2815 mlxsw_sp_port->local_port);
2816 goto err_port_buffers_init;
2819 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2821 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2822 mlxsw_sp_port->local_port);
2823 goto err_port_ets_init;
2826 /* ETS and buffers must be initialized before DCB. */
2827 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2829 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2830 mlxsw_sp_port->local_port);
2831 goto err_port_dcb_init;
2834 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
2836 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set non-virtual mode\n",
2837 mlxsw_sp_port->local_port);
2838 goto err_port_vp_mode_set;
2841 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
2842 if (IS_ERR(mlxsw_sp_port_vlan)) {
2843 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
2844 mlxsw_sp_port->local_port);
2845 goto err_port_vlan_get;
2848 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
2849 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
2850 err = register_netdev(dev);
2852 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2853 mlxsw_sp_port->local_port);
2854 goto err_register_netdev;
2857 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2858 mlxsw_sp_port, dev, mlxsw_sp_port->split,
2860 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
2863 err_register_netdev:
2864 mlxsw_sp->ports[local_port] = NULL;
2865 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2866 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
2868 err_port_vp_mode_set:
2869 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2872 err_port_buffers_init:
2873 err_port_admin_status_set:
2875 err_port_speed_by_width_set:
2876 err_port_system_port_mapping_set:
2878 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2880 kfree(mlxsw_sp_port->hw_stats.cache);
2882 kfree(mlxsw_sp_port->sample);
2884 free_percpu(mlxsw_sp_port->pcpu_stats);
2890 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2891 bool split, u8 module, u8 width, u8 lane)
2895 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2897 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2901 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
2902 module, width, lane);
2904 goto err_port_create;
2908 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2912 static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2914 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2916 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
2917 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
2918 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
2919 mlxsw_sp->ports[local_port] = NULL;
2920 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2921 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
2922 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2923 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2924 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
2925 kfree(mlxsw_sp_port->hw_stats.cache);
2926 kfree(mlxsw_sp_port->sample);
2927 free_percpu(mlxsw_sp_port->pcpu_stats);
2928 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
2929 free_netdev(mlxsw_sp_port->dev);
2932 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2934 __mlxsw_sp_port_remove(mlxsw_sp, local_port);
2935 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2938 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2940 return mlxsw_sp->ports[local_port] != NULL;
2943 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2947 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
2948 if (mlxsw_sp_port_created(mlxsw_sp, i))
2949 mlxsw_sp_port_remove(mlxsw_sp, i);
2950 kfree(mlxsw_sp->port_to_module);
2951 kfree(mlxsw_sp->ports);
2954 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2956 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2957 u8 module, width, lane;
2962 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
2963 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2964 if (!mlxsw_sp->ports)
2967 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
2968 if (!mlxsw_sp->port_to_module) {
2970 goto err_port_to_module_alloc;
2973 for (i = 1; i < max_ports; i++) {
2974 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
2977 goto err_port_module_info_get;
2980 mlxsw_sp->port_to_module[i] = module;
2981 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
2982 module, width, lane);
2984 goto err_port_create;
2989 err_port_module_info_get:
2990 for (i--; i >= 1; i--)
2991 if (mlxsw_sp_port_created(mlxsw_sp, i))
2992 mlxsw_sp_port_remove(mlxsw_sp, i);
2993 kfree(mlxsw_sp->port_to_module);
2994 err_port_to_module_alloc:
2995 kfree(mlxsw_sp->ports);
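/* Return the first local port of the cluster to which local_port belongs.
 * Ports are grouped into clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX
 * consecutive local ports.
 */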
2999 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
3001 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
3003 return local_port - offset;
3006 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
3007 u8 module, unsigned int count)
3009 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
3012 for (i = 0; i < count; i++) {
3013 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
3016 goto err_port_module_map;
3019 for (i = 0; i < count; i++) {
3020 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
3022 goto err_port_swid_set;
3025 for (i = 0; i < count; i++) {
3026 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
3027 module, width, i * width);
3029 goto err_port_create;
3035 for (i--; i >= 0; i--)
3036 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3037 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3040 for (i--; i >= 0; i--)
3041 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
3042 MLXSW_PORT_SWID_DISABLED_PORT);
3044 err_port_module_map:
3045 for (i--; i >= 0; i--)
3046 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
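/* Re-create the original full-width ports after a split is undone:
 * re-map the modules, restore the SWID and create the ports again.
 */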
3050 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
3051 u8 base_port, unsigned int count)
3053 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
3056 /* Split by four means we need to re-create two ports, otherwise
3057 * only one.
3058 */
3059 count = count / 2;
3061 for (i = 0; i < count; i++) {
3062 local_port = base_port + i * 2;
3063 module = mlxsw_sp->port_to_module[local_port];
3065 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
3069 for (i = 0; i < count; i++)
3070 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
3072 for (i = 0; i < count; i++) {
3073 local_port = base_port + i * 2;
3074 module = mlxsw_sp->port_to_module[local_port];
3076 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
3081 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
3084 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3085 struct mlxsw_sp_port *mlxsw_sp_port;
3086 u8 module, cur_width, base_port;
3090 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3091 if (!mlxsw_sp_port) {
3092 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3097 module = mlxsw_sp_port->mapping.module;
3098 cur_width = mlxsw_sp_port->mapping.width;
3100 if (count != 2 && count != 4) {
3101 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
3105 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
3106 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
3110 /* Make sure we have enough slave (even) ports for the split. */
3112 base_port = local_port;
3113 if (mlxsw_sp->ports[base_port + 1]) {
3114 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3118 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3119 if (mlxsw_sp->ports[base_port + 1] ||
3120 mlxsw_sp->ports[base_port + 3]) {
3121 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3126 for (i = 0; i < count; i++)
3127 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3128 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3130 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
3132 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
3133 goto err_port_split_create;
3138 err_port_split_create:
3139 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
3143 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
3145 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3146 struct mlxsw_sp_port *mlxsw_sp_port;
3147 u8 cur_width, base_port;
3151 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3152 if (!mlxsw_sp_port) {
3153 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3158 if (!mlxsw_sp_port->split) {
3159 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
3163 cur_width = mlxsw_sp_port->mapping.width;
3164 count = cur_width == 1 ? 4 : 2;
3166 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3168 /* Determine which ports to remove. */
3169 if (count == 2 && local_port >= base_port + 2)
3170 base_port = base_port + 2;
3172 for (i = 0; i < count; i++)
3173 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3174 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3176 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
3181 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3182 char *pude_pl, void *priv)
3184 struct mlxsw_sp *mlxsw_sp = priv;
3185 struct mlxsw_sp_port *mlxsw_sp_port;
3186 enum mlxsw_reg_pude_oper_status status;
3189 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3190 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3194 status = mlxsw_reg_pude_oper_status_get(pude_pl);
3195 if (status == MLXSW_PORT_OPER_STATUS_UP) {
3196 netdev_info(mlxsw_sp_port->dev, "link up\n");
3197 netif_carrier_on(mlxsw_sp_port->dev);
3199 netdev_info(mlxsw_sp_port->dev, "link down\n");
3200 netif_carrier_off(mlxsw_sp_port->dev);
3204 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
3205 u8 local_port, void *priv)
3207 struct mlxsw_sp *mlxsw_sp = priv;
3208 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3209 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
3211 if (unlikely(!mlxsw_sp_port)) {
3212 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
3217 skb->dev = mlxsw_sp_port->dev;
3219 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
3220 u64_stats_update_begin(&pcpu_stats->syncp);
3221 pcpu_stats->rx_packets++;
3222 pcpu_stats->rx_bytes += skb->len;
3223 u64_stats_update_end(&pcpu_stats->syncp);
3225 skb->protocol = eth_type_trans(skb, skb->dev);
3226 netif_receive_skb(skb);
3229 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3232 skb->offload_fwd_mark = 1;
3233 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
3236 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
3239 struct mlxsw_sp *mlxsw_sp = priv;
3240 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3241 struct psample_group *psample_group;
3244 if (unlikely(!mlxsw_sp_port)) {
3245 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
3249 if (unlikely(!mlxsw_sp_port->sample)) {
3250 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
3255 size = mlxsw_sp_port->sample->truncate ?
3256 mlxsw_sp_port->sample->trunc_size : skb->len;
3259 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
3262 psample_sample_packet(psample_group, skb, size,
3263 mlxsw_sp_port->dev->ifindex, 0,
3264 mlxsw_sp_port->sample->rate);
3271 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
3272 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
3273 _is_ctrl, SP_##_trap_group, DISCARD)
3275 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
3276 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
3277 _is_ctrl, SP_##_trap_group, DISCARD)
3279 #define MLXSW_SP_EVENTL(_func, _trap_id) \
3280 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
3282 static const struct mlxsw_listener mlxsw_sp_listener[] = {
3284 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
3286 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
3287 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
3288 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
3289 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
3290 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
3291 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
3292 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
3293 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
3294 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
3295 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
3296 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
3297 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
3299 MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3300 MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3301 MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3302 MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
3303 MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
3304 MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
3305 MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
3306 MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
3307 /* PKT Sample trap */
3308 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
3309 false, SP_IP2ME, DISCARD)
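/* Configure a rate policer for each CPU trap group so that traffic
 * trapped to the CPU cannot overwhelm it.
 */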
3312 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3314 char qpcr_pl[MLXSW_REG_QPCR_LEN];
3315 enum mlxsw_reg_qpcr_ir_units ir_units;
3316 int max_cpu_policers;
3322 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3325 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3327 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3328 for (i = 0; i < max_cpu_policers; i++) {
3331 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3332 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3333 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3334 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3338 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3342 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3343 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3344 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3345 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3346 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3347 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3351 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3360 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3362 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
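/* Bind each trap group to a priority, traffic class and policer via the
 * HTGT register.
 */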
3370 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
3372 char htgt_pl[MLXSW_REG_HTGT_LEN];
3373 enum mlxsw_reg_htgt_trap_group i;
3374 int max_cpu_policers;
3375 int max_trap_groups;
3380 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3383 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
3384 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3386 for (i = 0; i < max_trap_groups; i++) {
3389 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3390 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3391 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3392 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3396 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3397 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3401 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3402 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3406 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3410 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3411 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3412 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3416 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
3417 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3418 tc = MLXSW_REG_HTGT_DEFAULT_TC;
3419 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
3425 if (max_cpu_policers <= policer_id &&
3426 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3429 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
3430 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3438 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3443 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3447 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3451 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3452 err = mlxsw_core_trap_register(mlxsw_sp->core,
3453 &mlxsw_sp_listener[i],
3456 goto err_listener_register;
3461 err_listener_register:
3462 for (i--; i >= 0; i--) {
3463 mlxsw_core_trap_unregister(mlxsw_sp->core,
3464 &mlxsw_sp_listener[i],
3470 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3474 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3475 mlxsw_core_trap_unregister(mlxsw_sp->core,
3476 &mlxsw_sp_listener[i],
3481 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
3482 enum mlxsw_reg_sfgc_type type,
3483 enum mlxsw_reg_sfgc_bridge_type bridge_type)
3485 enum mlxsw_flood_table_type table_type;
3486 enum mlxsw_sp_flood_table flood_table;
3487 char sfgc_pl[MLXSW_REG_SFGC_LEN];
3489 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
3490 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
3492 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
3495 case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
3496 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
3498 case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
3499 flood_table = MLXSW_SP_FLOOD_TABLE_MC;
3502 flood_table = MLXSW_SP_FLOOD_TABLE_BC;
3505 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
3507 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
3510 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
3514 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
3515 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
3518 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3519 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
3523 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3524 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
3532 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3534 char slcr_pl[MLXSW_REG_SLCR_LEN];
3537 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3538 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3539 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3540 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3541 MLXSW_REG_SLCR_LAG_HASH_SIP |
3542 MLXSW_REG_SLCR_LAG_HASH_DIP |
3543 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3544 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3545 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
3546 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3550 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3551 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
3554 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
3555 sizeof(struct mlxsw_sp_upper),
3557 if (!mlxsw_sp->lags)
3563 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3565 kfree(mlxsw_sp->lags);
3568 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3570 char htgt_pl[MLXSW_REG_HTGT_LEN];
3572 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3573 MLXSW_REG_HTGT_INVALID_POLICER,
3574 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3575 MLXSW_REG_HTGT_DEFAULT_TC);
3576 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3579 static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp)
3581 return mlxsw_sp_fid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
3584 static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp)
3586 mlxsw_sp_fid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
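/* Main initialization flow for the Spectrum ASIC: validate the firmware
 * revision, then bring up traps, flood tables, buffers, LAG, switchdev,
 * router, SPAN, ACL, counters, dpipe, the dummy FID and finally the ports.
 */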
3589 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3590 const struct mlxsw_bus_info *mlxsw_bus_info)
3592 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3595 mlxsw_sp->core = mlxsw_core;
3596 mlxsw_sp->bus_info = mlxsw_bus_info;
3597 INIT_LIST_HEAD(&mlxsw_sp->fids);
3598 INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
3600 err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
3602 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
3606 err = mlxsw_sp_base_mac_get(mlxsw_sp);
3608 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3612 err = mlxsw_sp_traps_init(mlxsw_sp);
3614 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3618 err = mlxsw_sp_flood_init(mlxsw_sp);
3620 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
3621 goto err_flood_init;
3624 err = mlxsw_sp_buffers_init(mlxsw_sp);
3626 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3627 goto err_buffers_init;
3630 err = mlxsw_sp_lag_init(mlxsw_sp);
3632 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3636 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3638 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3639 goto err_switchdev_init;
3642 err = mlxsw_sp_router_init(mlxsw_sp);
3644 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3645 goto err_router_init;
3648 err = mlxsw_sp_span_init(mlxsw_sp);
3650 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3654 err = mlxsw_sp_acl_init(mlxsw_sp);
3656 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3660 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3662 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3663 goto err_counter_pool_init;
3666 err = mlxsw_sp_dpipe_init(mlxsw_sp);
3668 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3669 goto err_dpipe_init;
3672 err = mlxsw_sp_dummy_fid_init(mlxsw_sp);
3674 dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n");
3675 goto err_dummy_fid_init;
3678 err = mlxsw_sp_ports_create(mlxsw_sp);
3680 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3681 goto err_ports_create;
3687 mlxsw_sp_dummy_fid_fini(mlxsw_sp);
3689 mlxsw_sp_dpipe_fini(mlxsw_sp);
3691 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3692 err_counter_pool_init:
3693 mlxsw_sp_acl_fini(mlxsw_sp);
3695 mlxsw_sp_span_fini(mlxsw_sp);
3697 mlxsw_sp_router_fini(mlxsw_sp);
3699 mlxsw_sp_switchdev_fini(mlxsw_sp);
3701 mlxsw_sp_lag_fini(mlxsw_sp);
3703 mlxsw_sp_buffers_fini(mlxsw_sp);
3706 mlxsw_sp_traps_fini(mlxsw_sp);
3710 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
3712 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3714 mlxsw_sp_ports_remove(mlxsw_sp);
3715 mlxsw_sp_dummy_fid_fini(mlxsw_sp);
3716 mlxsw_sp_dpipe_fini(mlxsw_sp);
3717 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3718 mlxsw_sp_acl_fini(mlxsw_sp);
3719 mlxsw_sp_span_fini(mlxsw_sp);
3720 mlxsw_sp_router_fini(mlxsw_sp);
3721 mlxsw_sp_switchdev_fini(mlxsw_sp);
3722 mlxsw_sp_lag_fini(mlxsw_sp);
3723 mlxsw_sp_buffers_fini(mlxsw_sp);
3724 mlxsw_sp_traps_fini(mlxsw_sp);
3725 WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
3726 WARN_ON(!list_empty(&mlxsw_sp->fids));
3729 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
3730 .used_max_vepa_channels = 1,
3731 .max_vepa_channels = 0,
3733 .max_mid = MLXSW_SP_MID_MAX,
3736 .used_flood_tables = 1,
3737 .used_flood_mode = 1,
3739 .max_fid_offset_flood_tables = 3,
3740 .fid_offset_flood_table_size = VLAN_N_VID - 1,
3741 .max_fid_flood_tables = 3,
3742 .fid_flood_table_size = MLXSW_SP_VFID_MAX,
3743 .used_max_ib_mc = 1,
3747 .used_kvd_split_data = 1,
3748 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
3749 .kvd_hash_single_parts = 2,
3750 .kvd_hash_double_parts = 1,
3751 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
3755 .type = MLXSW_PORT_SWID_TYPE_ETH,
3758 .resource_query_enable = 1,
3761 static struct mlxsw_driver mlxsw_sp_driver = {
3762 .kind = mlxsw_sp_driver_name,
3763 .priv_size = sizeof(struct mlxsw_sp),
3764 .init = mlxsw_sp_init,
3765 .fini = mlxsw_sp_fini,
3766 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
3767 .port_split = mlxsw_sp_port_split,
3768 .port_unsplit = mlxsw_sp_port_unsplit,
3769 .sb_pool_get = mlxsw_sp_sb_pool_get,
3770 .sb_pool_set = mlxsw_sp_sb_pool_set,
3771 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3772 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3773 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3774 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3775 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3776 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3777 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3778 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3779 .txhdr_construct = mlxsw_sp_txhdr_construct,
3780 .txhdr_len = MLXSW_TXHDR_LEN,
3781 .profile = &mlxsw_sp_config_profile,
3784 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3786 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3789 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
3791 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
3794 if (mlxsw_sp_port_dev_check(lower_dev)) {
3795 *p_mlxsw_sp_port = netdev_priv(lower_dev);
3802 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3804 struct mlxsw_sp_port *mlxsw_sp_port;
3806 if (mlxsw_sp_port_dev_check(dev))
3807 return netdev_priv(dev);
3809 mlxsw_sp_port = NULL;
3810 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3812 return mlxsw_sp_port;
3815 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3817 struct mlxsw_sp_port *mlxsw_sp_port;
3819 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3820 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3823 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3825 struct mlxsw_sp_port *mlxsw_sp_port;
3827 if (mlxsw_sp_port_dev_check(dev))
3828 return netdev_priv(dev);
3830 mlxsw_sp_port = NULL;
3831 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3834 return mlxsw_sp_port;
3837 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3839 struct mlxsw_sp_port *mlxsw_sp_port;
3842 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3844 dev_hold(mlxsw_sp_port->dev);
3846 return mlxsw_sp_port;
3849 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3851 dev_put(mlxsw_sp_port->dev);
3854 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3856 char sldr_pl[MLXSW_REG_SLDR_LEN];
3858 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3859 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3862 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3864 char sldr_pl[MLXSW_REG_SLDR_LEN];
3866 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3867 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3870 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3871 u16 lag_id, u8 port_index)
3873 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3874 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3876 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3877 lag_id, port_index);
3878 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3881 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3884 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3885 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3887 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3889 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3892 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3895 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3896 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3898 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3900 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3903 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3906 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3907 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3909 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3911 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
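/* Find the LAG ID already associated with lag_dev, or the first free
 * LAG ID if the device is not yet known.
 */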
3914 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3915 struct net_device *lag_dev,
3918 struct mlxsw_sp_upper *lag;
3919 int free_lag_id = -1;
3923 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3924 for (i = 0; i < max_lag; i++) {
3925 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3926 if (lag->ref_count) {
3927 if (lag->dev == lag_dev) {
3931 } else if (free_lag_id < 0) {
3935 if (free_lag_id < 0)
3937 *p_lag_id = free_lag_id;
3942 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3943 struct net_device *lag_dev,
3944 struct netdev_lag_upper_info *lag_upper_info)
3948 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3950 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3955 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3956 u16 lag_id, u8 *p_port_index)
3958 u64 max_lag_members;
3961 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3963 for (i = 0; i < max_lag_members; i++) {
3964 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3972 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3973 struct net_device *lag_dev)
3975 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3976 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
3977 struct mlxsw_sp_upper *lag;
3982 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3985 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3986 if (!lag->ref_count) {
3987 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3993 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3996 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3998 goto err_col_port_add;
3999 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
4001 goto err_col_port_enable;
4003 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4004 mlxsw_sp_port->local_port);
4005 mlxsw_sp_port->lag_id = lag_id;
4006 mlxsw_sp_port->lagged = 1;
4009 /* Port is no longer usable as a router interface */
4010 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
4011 if (mlxsw_sp_port_vlan->fid)
4012 mlxsw_sp_port_vlan->fid->leave(mlxsw_sp_port_vlan);
4016 err_col_port_enable:
4017 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4019 if (!lag->ref_count)
4020 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4024 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4025 struct net_device *lag_dev)
4027 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4028 u16 lag_id = mlxsw_sp_port->lag_id;
4029 struct mlxsw_sp_upper *lag;
4031 if (!mlxsw_sp_port->lagged)
4033 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4034 WARN_ON(lag->ref_count == 0);
4036 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
4037 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4039 /* Any VLANs configured on the port are no longer valid */
4040 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
4042 if (lag->ref_count == 1)
4043 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4045 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4046 mlxsw_sp_port->local_port);
4047 mlxsw_sp_port->lagged = 0;
4050 mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
4051 /* Make sure untagged frames are allowed to ingress */
4052 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
4055 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4058 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4059 char sldr_pl[MLXSW_REG_SLDR_LEN];
4061 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4062 mlxsw_sp_port->local_port);
4063 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4066 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4069 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4070 char sldr_pl[MLXSW_REG_SLDR_LEN];
4072 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4073 mlxsw_sp_port->local_port);
4074 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4077 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
4078 bool lag_tx_enabled)
4081 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
4082 mlxsw_sp_port->lag_id);
4084 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4085 mlxsw_sp_port->lag_id);
4088 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4089 struct netdev_lag_lower_state_info *info)
4091 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
4094 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4097 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4098 enum mlxsw_reg_spms_state spms_state;
4103 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4104 MLXSW_REG_SPMS_STATE_DISCARDING;
4106 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4109 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4111 for (vid = 0; vid < VLAN_N_VID; vid++)
4112 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4114 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
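/* Prepare a port for use as an OVS port: switch it to virtual mode, set
 * all VLANs to the forwarding STP state and make the port a member of
 * VLANs 2 through VLAN_N_VID - 1.
 */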
4119 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4123 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
4126 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
4128 goto err_port_stp_set;
4129 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4132 goto err_port_vlan_set;
4136 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4138 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4142 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4144 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4146 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4147 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
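/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for a front panel port:
 * validate the requested upper device and join or leave bridge, LAG and
 * OVS masters accordingly.
 */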
4150 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4151 struct net_device *dev,
4152 unsigned long event, void *ptr)
4154 struct netdev_notifier_changeupper_info *info;
4155 struct mlxsw_sp_port *mlxsw_sp_port;
4156 struct net_device *upper_dev;
4157 struct mlxsw_sp *mlxsw_sp;
4160 mlxsw_sp_port = netdev_priv(dev);
4161 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4165 case NETDEV_PRECHANGEUPPER:
4166 upper_dev = info->upper_dev;
4167 if (!is_vlan_dev(upper_dev) &&
4168 !netif_is_lag_master(upper_dev) &&
4169 !netif_is_bridge_master(upper_dev) &&
4170 !netif_is_ovs_master(upper_dev))
4174 if (netif_is_lag_master(upper_dev) &&
4175 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4178 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
4180 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4181 !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
4183 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
4185 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
4188 case NETDEV_CHANGEUPPER:
4189 upper_dev = info->upper_dev;
4190 if (netif_is_bridge_master(upper_dev)) {
4192 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4196 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4199 } else if (netif_is_lag_master(upper_dev)) {
4201 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4204 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4206 } else if (netif_is_ovs_master(upper_dev)) {
4208 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
4210 mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
4218 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4219 unsigned long event, void *ptr)
4221 struct netdev_notifier_changelowerstate_info *info;
4222 struct mlxsw_sp_port *mlxsw_sp_port;
4225 mlxsw_sp_port = netdev_priv(dev);
4229 case NETDEV_CHANGELOWERSTATE:
4230 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4231 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4232 info->lower_state_info);
4234 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4242 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4243 struct net_device *port_dev,
4244 unsigned long event, void *ptr)
4247 case NETDEV_PRECHANGEUPPER:
4248 case NETDEV_CHANGEUPPER:
4249 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4251 case NETDEV_CHANGELOWERSTATE:
4252 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4259 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4260 unsigned long event, void *ptr)
4262 struct net_device *dev;
4263 struct list_head *iter;
4266 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4267 if (mlxsw_sp_port_dev_check(dev)) {
4268 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4278 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4279 struct net_device *dev,
4280 unsigned long event, void *ptr,
4283 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4284 struct netdev_notifier_changeupper_info *info = ptr;
4285 struct net_device *upper_dev;
4289 case NETDEV_PRECHANGEUPPER:
4290 upper_dev = info->upper_dev;
4291 if (!netif_is_bridge_master(upper_dev))
4294 case NETDEV_CHANGEUPPER:
4295 upper_dev = info->upper_dev;
4296 if (netif_is_bridge_master(upper_dev)) {
4298 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4302 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4315 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4316 struct net_device *lag_dev,
4317 unsigned long event,
4320 struct net_device *dev;
4321 struct list_head *iter;
4324 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4325 if (mlxsw_sp_port_dev_check(dev)) {
4326 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4337 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4338 unsigned long event, void *ptr)
4340 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4341 u16 vid = vlan_dev_vlan_id(vlan_dev);
4343 if (mlxsw_sp_port_dev_check(real_dev))
4344 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4346 else if (netif_is_lag_master(real_dev))
4347 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4354 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4356 struct netdev_notifier_changeupper_info *info = ptr;
4358 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4360 return netif_is_l3_master(info->upper_dev);
4363 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
4364 unsigned long event, void *ptr)
4366 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4369 if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
4370 err = mlxsw_sp_netdevice_router_port_event(dev);
4371 else if (mlxsw_sp_is_vrf_event(event, ptr))
4372 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
4373 else if (mlxsw_sp_port_dev_check(dev))
4374 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
4375 else if (netif_is_lag_master(dev))
4376 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
4377 else if (is_vlan_dev(dev))
4378 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
4380 return notifier_from_errno(err);
4383 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
4384 .notifier_call = mlxsw_sp_netdevice_event,
4387 static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
4388 .notifier_call = mlxsw_sp_inetaddr_event,
4389 .priority = 10, /* Must be called before FIB notifier block */
4392 static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
4393 .notifier_call = mlxsw_sp_router_netevent_event,
4396 static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
4397 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
4401 static struct pci_driver mlxsw_sp_pci_driver = {
4402 .name = mlxsw_sp_driver_name,
4403 .id_table = mlxsw_sp_pci_id_table,
4406 static int __init mlxsw_sp_module_init(void)
4410 register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4411 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4412 register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4414 err = mlxsw_core_driver_register(&mlxsw_sp_driver);
4416 goto err_core_driver_register;
4418 err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
4420 goto err_pci_driver_register;
4424 err_pci_driver_register:
4425 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4426 err_core_driver_register:
4427 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4428 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4429 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4433 static void __exit mlxsw_sp_module_exit(void)
4435 mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
4436 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4437 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4438 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4439 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4442 module_init(mlxsw_sp_module_init);
4443 module_exit(mlxsw_sp_module_exit);
4445 MODULE_LICENSE("Dual BSD/GPL");
4446 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4447 MODULE_DESCRIPTION("Mellanox Spectrum driver");
4448 MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
4449 MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);