1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/device.h>
7 #include <linux/export.h>
9 #include <linux/if_link.h>
10 #include <linux/netdevice.h>
11 #include <linux/completion.h>
12 #include <linux/skbuff.h>
13 #include <linux/etherdevice.h>
14 #include <linux/types.h>
15 #include <linux/string.h>
16 #include <linux/gfp.h>
17 #include <linux/random.h>
18 #include <linux/jiffies.h>
19 #include <linux/mutex.h>
20 #include <linux/rcupdate.h>
21 #include <linux/slab.h>
22 #include <linux/workqueue.h>
23 #include <asm/byteorder.h>
24 #include <net/devlink.h>
25 #include <trace/events/devlink.h>
34 #include "resources.h"
/* Global registry of mlxsw drivers (e.g. spectrum) and the spinlock that
 * serializes additions/removals/lookups on it.
 */
36 static LIST_HEAD(mlxsw_core_driver_list);
37 static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
39 static const char mlxsw_core_driver_name[] = "mlxsw_core";
/* Module-wide workqueues; presumably mlxsw_wq is the general work queue and
 * mlxsw_owq an ordered one — allocation sites not visible in this chunk.
 */
41 static struct workqueue_struct *mlxsw_wq;
42 static struct workqueue_struct *mlxsw_owq;
/* Per-port core state: the devlink representation of the port plus an opaque
 * pointer owned by the specific mlxsw driver.
 */
44 struct mlxsw_core_port {
45 struct devlink_port devlink_port;
46 void *port_driver_priv;
/* Return the driver-private data attached to a core port. */
50 void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
52 return mlxsw_core_port->port_driver_priv;
54 EXPORT_SYMBOL(mlxsw_core_port_driver_priv);
/* A port counts as initialized iff driver-private data has been attached. */
56 static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
58 return mlxsw_core_port->port_driver_priv != NULL;
/* Fields of struct mlxsw_core (the struct header line is not visible in this
 * chunk): driver/bus bindings, EMAD transaction state, LAG mapping, sensors,
 * and the per-device port array. driver_priv[] is a flexible trailing area
 * handed to the specific driver (see mlxsw_core_driver_priv()).
 */
62 struct mlxsw_driver *driver;
63 const struct mlxsw_bus *bus;
65 const struct mlxsw_bus_info *bus_info;
66 struct workqueue_struct *emad_wq;
67 struct list_head rx_listener_list;
68 struct list_head event_listener_list;
/* In-flight EMAD register transactions; readers walk it under RCU. */
71 struct list_head trans_list;
72 spinlock_t trans_list_lock; /* protects trans_list writes */
76 u8 *mapping; /* lag_id+port_index to local_port mapping */
79 struct mlxsw_hwmon *hwmon;
80 struct mlxsw_thermal *thermal;
81 struct mlxsw_core_port *ports;
82 unsigned int max_ports;
/* When set, EMAD timeouts are stretched (see trans_timeout_schedule). */
83 bool fw_flash_in_progress;
84 unsigned long driver_priv[0];
85 /* driver_priv has to be always the last item */
/* Fallback port count when the MAX_SYSTEM_PORT resource is not reported. */
88 #define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
/* Allocate the zero-initialized per-port array; ports are 1-based, so one
 * extra slot is allocated (note the "+ 1" on the default path).
 */
90 static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
92 /* Switch ports are numbered from 1 to queried value */
93 if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
94 mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
97 mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;
99 mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
100 sizeof(struct mlxsw_core_port), GFP_KERNEL);
101 if (!mlxsw_core->ports)
/* Release the per-port array allocated by mlxsw_ports_init(). */
107 static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
109 kfree(mlxsw_core->ports);
/* Exported trivial accessors for driver code. */
112 unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
114 return mlxsw_core->max_ports;
116 EXPORT_SYMBOL(mlxsw_core_max_ports);
118 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
120 return mlxsw_core->driver_priv;
122 EXPORT_SYMBOL(mlxsw_core_driver_priv);
124 bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
126 return mlxsw_core->driver->res_query_enabled;
128 EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
/* True when (minor, subminor) of @rev is >= the required revision @req_rev;
 * major is intentionally not compared here.
 */
131 mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
132 const struct mlxsw_fw_rev *req_rev)
134 return rev->minor > req_rev->minor ||
135 (rev->minor == req_rev->minor &&
136 rev->subminor >= req_rev->subminor);
138 EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);
/* List nodes pairing a registered RX/event listener with its list linkage. */
140 struct mlxsw_rx_listener_item {
141 struct list_head list;
142 struct mlxsw_rx_listener rxl;
146 struct mlxsw_event_listener_item {
147 struct list_head list;
148 struct mlxsw_event_listener el;
/* EMAD frame field accessors, generated by the MLXSW_ITEM* macros:
 * Ethernet header, operation TLV, register TLV and end TLV bit layouts.
 */
157 * Destination MAC in EMAD's Ethernet header.
158 * Must be set to 01:02:c9:00:00:01
160 MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
163 * Source MAC in EMAD's Ethernet header.
164 * Must be set to 00:02:c9:01:02:03
166 MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
168 /* emad_eth_hdr_ethertype
169 * Ethertype in EMAD's Ethernet header.
170 * Must be set to 0x8932
172 MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
174 /* emad_eth_hdr_mlx_proto
176 * Must be set to 0x0.
178 MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
181 * Mellanox protocol version.
182 * Must be set to 0x0.
184 MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
188 * Must be set to 0x1 (operation TLV).
190 MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
193 * Length of the operation TLV in u32.
194 * Must be set to 0x4.
196 MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
199 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
200 * EMAD. DR TLV must follow.
202 * Note: Currently not supported and must not be set.
204 MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
206 /* emad_op_tlv_status
207 * Returned status in case of EMAD response. Must be set to 0 in case
210 * 0x1 - device is busy. Requester should retry
211 * 0x2 - Mellanox protocol version not supported
213 * 0x4 - register not supported
214 * 0x5 - operation class not supported
215 * 0x6 - EMAD method not supported
216 * 0x7 - bad parameter (e.g. port out of range)
217 * 0x8 - resource not available
218 * 0x9 - message receipt acknowledgment. Requester should retry
219 * 0x70 - internal error
221 MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
223 /* emad_op_tlv_register_id
224 * Register ID of register within register TLV.
226 MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
229 * Response bit. Setting to 1 indicates Response, otherwise request.
231 MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
233 /* emad_op_tlv_method
237 * 0x3 - send (currently not supported)
240 MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
243 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
245 MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
248 * EMAD transaction ID. Used for pairing request and response EMADs.
250 MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
254 * Must be set to 0x3 (register TLV).
256 MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
259 * Length of the operation TLV in u32.
261 MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
265 * Must be set to 0x0 (end TLV).
267 MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
270 * Length of the end TLV in u32.
273 MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
/* Direction of a register access: read (query) or write. */
275 enum mlxsw_core_reg_access_type {
276 MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
277 MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
/* Human-readable name of a register access type, used in debug prints. */
280 static inline const char *
281 mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
284 case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
286 case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
/* Fill the terminating end TLV of an EMAD frame. */
292 static void mlxsw_emad_pack_end_tlv(char *end_tlv)
294 mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
295 mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
/* Fill the register TLV header and copy the register payload after the
 * one-u32 TLV header (hence len = payload u32s + 1).
 */
298 static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
299 const struct mlxsw_reg_info *reg,
302 mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
303 mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
304 memcpy(reg_tlv + sizeof(u32), payload, reg->len);
/* Fill the operation TLV: request bit, method derived from access type,
 * REG_ACCESS class, and the transaction ID used to match the response.
 */
307 static void mlxsw_emad_pack_op_tlv(char *op_tlv,
308 const struct mlxsw_reg_info *reg,
309 enum mlxsw_core_reg_access_type type,
312 mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
313 mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
314 mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
315 mlxsw_emad_op_tlv_status_set(op_tlv, 0);
316 mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
317 mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
318 if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
319 mlxsw_emad_op_tlv_method_set(op_tlv,
320 MLXSW_EMAD_OP_TLV_METHOD_QUERY);
322 mlxsw_emad_op_tlv_method_set(op_tlv,
323 MLXSW_EMAD_OP_TLV_METHOD_WRITE);
324 mlxsw_emad_op_tlv_class_set(op_tlv,
325 MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
326 mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
/* Prepend the fixed EMAD Ethernet header onto the skb. */
329 static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
331 char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
333 mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
334 mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
335 mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
336 mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
337 mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
339 skb_reset_mac_header(skb);
/* Build a complete EMAD frame by pushing TLVs back-to-front (end, register,
 * op) and finally the Ethernet header, so the frame ends up in wire order.
 */
344 static void mlxsw_emad_construct(struct sk_buff *skb,
345 const struct mlxsw_reg_info *reg,
347 enum mlxsw_core_reg_access_type type,
352 buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
353 mlxsw_emad_pack_end_tlv(buf);
355 buf = skb_push(skb, reg->len + sizeof(u32));
356 mlxsw_emad_pack_reg_tlv(buf, reg, payload);
358 buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
359 mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
361 mlxsw_emad_construct_eth_hdr(skb);
/* Pointer arithmetic accessors into a received EMAD frame: op TLV follows
 * the Ethernet header, reg TLV follows the op TLV, and the register payload
 * sits one u32 (the reg TLV header) past the op TLV.
 */
364 static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
366 return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
369 static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
371 return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
372 MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
375 static char *mlxsw_emad_reg_payload(const char *op_tlv)
377 return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
/* Extract the 64-bit transaction ID from a received EMAD. */
380 static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
384 op_tlv = mlxsw_emad_op_tlv(skb);
385 return mlxsw_emad_op_tlv_tid_get(op_tlv);
/* True when the EMAD's R bit marks it as a response (vs. a request). */
388 static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
392 op_tlv = mlxsw_emad_op_tlv(skb);
393 return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
/* Translate the device status in an EMAD response into an errno-style
 * result, storing the raw status in *p_status. BUSY/RECEIPT_ACK map to a
 * retryable result (returned via -EAGAIN; see mlxsw_emad_process_response).
 */
396 static int mlxsw_emad_process_status(char *op_tlv,
397 enum mlxsw_emad_op_tlv_status *p_status)
399 *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
402 case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
404 case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
405 case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
407 case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
408 case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
409 case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
410 case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
411 case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
412 case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
413 case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
414 case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
/* Convenience wrapper: process the status of a whole response skb. */
421 mlxsw_emad_process_status_skb(struct sk_buff *skb,
422 enum mlxsw_emad_op_tlv_status *p_status)
424 return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
/* State of one in-flight EMAD register transaction: linkage on the global
 * and bulk lists, the skb kept for retransmission, timeout work, optional
 * completion callback, and the final EMAD status.
 */
427 struct mlxsw_reg_trans {
428 struct list_head list;
429 struct list_head bulk_list;
430 struct mlxsw_core *core;
431 struct sk_buff *tx_skb;
432 struct mlxsw_tx_info tx_info;
433 struct delayed_work timeout_dw;
434 unsigned int retries;
436 struct completion completion;
438 mlxsw_reg_trans_cb_t *cb;
439 unsigned long cb_priv;
440 const struct mlxsw_reg_info *reg;
441 enum mlxsw_core_reg_access_type type;
443 enum mlxsw_emad_op_tlv_status emad_status;
/* Response timeout; much longer while a firmware flash is in progress. */
447 #define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
448 #define MLXSW_EMAD_TIMEOUT_MS 200
/* Arm the per-transaction timeout, stretched during FW flash. */
450 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
452 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
454 if (trans->core->fw_flash_in_progress)
455 timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
457 queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
/* Transmit a copy of the transaction's skb (the original is kept for
 * retries), mark the transaction active, and arm the timeout. The trace
 * point logs the frame without the bus-specific TX header.
 */
460 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
461 struct mlxsw_reg_trans *trans)
466 skb = skb_copy(trans->tx_skb, GFP_KERNEL);
470 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
471 skb->data + mlxsw_core->driver->txhdr_len,
472 skb->len - mlxsw_core->driver->txhdr_len);
474 atomic_set(&trans->active, 1);
475 err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
480 mlxsw_emad_trans_timeout_schedule(trans);
/* Conclude a transaction: free the kept skb, unlink it from the RCU-managed
 * trans_list under the lock, and wake the waiter via the completion.
 */
484 static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
486 struct mlxsw_core *mlxsw_core = trans->core;
488 dev_kfree_skb(trans->tx_skb);
489 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
490 list_del_rcu(&trans->list);
491 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
493 complete(&trans->completion);
/* Retry transmission while under the retry budget; otherwise finish the
 * transaction with the accumulated error.
 */
496 static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
497 struct mlxsw_reg_trans *trans)
501 if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
503 err = mlxsw_emad_transmit(trans->core, trans);
509 mlxsw_emad_trans_finish(trans, err);
/* Timeout work: only the side that wins the atomic_dec_and_test race on
 * trans->active (vs. the response path) acts; the loser backs off.
 */
512 static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
514 struct mlxsw_reg_trans *trans = container_of(work,
515 struct mlxsw_reg_trans,
518 if (!atomic_dec_and_test(&trans->active))
521 mlxsw_emad_transmit_retry(trans->core, trans);
/* Handle a matched EMAD response: claim the transaction via trans->active,
 * retry on -EAGAIN (busy/receipt-ack statuses), otherwise deliver the
 * register payload to the optional callback and finish the transaction.
 */
524 static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
525 struct mlxsw_reg_trans *trans,
530 if (!atomic_dec_and_test(&trans->active))
533 err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
534 if (err == -EAGAIN) {
535 mlxsw_emad_transmit_retry(mlxsw_core, trans);
538 char *op_tlv = mlxsw_emad_op_tlv(skb);
541 trans->cb(mlxsw_core,
542 mlxsw_emad_reg_payload(op_tlv),
543 trans->reg->len, trans->cb_priv);
545 mlxsw_emad_trans_finish(trans, err);
549 /* called with rcu read lock held */
/* Trap handler for incoming EMADs: ignore non-responses, then walk the
 * in-flight transaction list (RCU) and dispatch to the matching TID.
 */
550 static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
553 struct mlxsw_core *mlxsw_core = priv;
554 struct mlxsw_reg_trans *trans;
556 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
557 skb->data, skb->len);
559 if (!mlxsw_emad_is_resp(skb))
562 list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
563 if (mlxsw_emad_get_tid(skb) == trans->tid) {
564 mlxsw_emad_process_response(mlxsw_core, trans, skb);
/* Static listener definition trapping ETHEMAD packets to the CPU. */
573 static const struct mlxsw_listener mlxsw_emad_rx_listener =
574 MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
/* Set up EMAD support: only for buses with TX/RX capability; allocates the
 * EMAD workqueue, seeds the upper 32 bits of the TID with random data so
 * responses meant for other hosts can be discarded, registers the RX trap
 * and the basic trap groups, then enables EMAD usage. Error paths unwind
 * the trap registration and workqueue.
 */
577 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
579 struct workqueue_struct *emad_wq;
583 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
586 emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
589 mlxsw_core->emad_wq = emad_wq;
591 /* Set the upper 32 bits of the transaction ID field to a random
592 * number. This allows us to discard EMADs addressed to other
595 get_random_bytes(&tid, 4);
597 atomic64_set(&mlxsw_core->emad.tid, tid);
599 INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
600 spin_lock_init(&mlxsw_core->emad.trans_list_lock);
602 err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
607 err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
609 goto err_emad_trap_set;
610 mlxsw_core->emad.use_emad = true;
615 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
617 destroy_workqueue(mlxsw_core->emad_wq);
/* Tear down EMAD support in reverse order of mlxsw_emad_init(). */
621 static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
624 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
627 mlxsw_core->emad.use_emad = false;
628 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
630 destroy_workqueue(mlxsw_core->emad_wq);
/* Allocate a zeroed skb big enough for header + TLVs + payload, bounded by
 * the maximum EMAD frame length. The full length is reserved so callers
 * can build the frame with skb_push (back-to-front).
 */
633 static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
639 emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
640 (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
641 sizeof(u32) + mlxsw_core->driver->txhdr_len);
642 if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
645 skb = netdev_alloc_skb(NULL, emad_len);
648 memset(skb->data, 0, emad_len);
649 skb_reserve(skb, emad_len);
/* Start an asynchronous EMAD register access: allocate and build the frame,
 * initialize the transaction (timeout work, completion, callback, bulk-list
 * linkage), insert it into the RCU trans_list and transmit. On failure the
 * transaction is unlinked from both lists and the skb freed.
 */
654 static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
655 const struct mlxsw_reg_info *reg,
657 enum mlxsw_core_reg_access_type type,
658 struct mlxsw_reg_trans *trans,
659 struct list_head *bulk_list,
660 mlxsw_reg_trans_cb_t *cb,
661 unsigned long cb_priv, u64 tid)
666 dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
667 tid, reg->id, mlxsw_reg_id_str(reg->id),
668 mlxsw_core_reg_access_type_str(type));
670 skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
674 list_add_tail(&trans->bulk_list, bulk_list);
675 trans->core = mlxsw_core;
/* EMADs are always sent via the CPU port. */
677 trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
678 trans->tx_info.is_emad = true;
679 INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
681 init_completion(&trans->completion);
683 trans->cb_priv = cb_priv;
687 mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
688 mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
690 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
691 list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
692 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
693 err = mlxsw_emad_transmit(mlxsw_core, trans);
/* Error unwind: remove from trans_list and bulk_list, free kept skb. */
699 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
700 list_del_rcu(&trans->list);
701 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
702 list_del(&trans->bulk_list);
703 dev_kfree_skb(trans->tx_skb);
/* Add a driver to the global mlxsw driver registry (spinlock-protected). */
711 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
713 spin_lock(&mlxsw_core_driver_list_lock);
714 list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
715 spin_unlock(&mlxsw_core_driver_list_lock);
718 EXPORT_SYMBOL(mlxsw_core_driver_register);
/* Remove a driver from the registry. */
720 void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
722 spin_lock(&mlxsw_core_driver_list_lock);
723 list_del(&mlxsw_driver->list);
724 spin_unlock(&mlxsw_core_driver_list_lock);
726 EXPORT_SYMBOL(mlxsw_core_driver_unregister);
/* Linear lookup by kind string; caller must hold the registry lock. */
728 static struct mlxsw_driver *__driver_find(const char *kind)
730 struct mlxsw_driver *mlxsw_driver;
732 list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
733 if (strcmp(mlxsw_driver->kind, kind) == 0)
/* Locked wrapper around __driver_find(). */
739 static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
741 struct mlxsw_driver *mlxsw_driver;
743 spin_lock(&mlxsw_core_driver_list_lock);
744 mlxsw_driver = __driver_find(kind);
745 spin_unlock(&mlxsw_core_driver_list_lock);
/* devlink port_split callback: validate the port index, then delegate to
 * the driver's port_split op if implemented.
 */
749 static int mlxsw_devlink_port_split(struct devlink *devlink,
750 unsigned int port_index,
752 struct netlink_ext_ack *extack)
754 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
756 if (port_index >= mlxsw_core->max_ports) {
757 NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
760 if (!mlxsw_core->driver->port_split)
762 return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
/* devlink port_unsplit callback: mirror of port_split. */
766 static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
767 unsigned int port_index,
768 struct netlink_ext_ack *extack)
770 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
772 if (port_index >= mlxsw_core->max_ports) {
773 NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
776 if (!mlxsw_core->driver->port_unsplit)
778 return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
/* Shared-buffer pool get/set: thin delegations to the driver ops. */
783 mlxsw_devlink_sb_pool_get(struct devlink *devlink,
784 unsigned int sb_index, u16 pool_index,
785 struct devlink_sb_pool_info *pool_info)
787 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
788 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
790 if (!mlxsw_driver->sb_pool_get)
792 return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
793 pool_index, pool_info);
797 mlxsw_devlink_sb_pool_set(struct devlink *devlink,
798 unsigned int sb_index, u16 pool_index, u32 size,
799 enum devlink_sb_threshold_type threshold_type,
800 struct netlink_ext_ack *extack)
802 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
803 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
805 if (!mlxsw_driver->sb_pool_set)
807 return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
808 pool_index, size, threshold_type,
/* Recover the core port wrapping a devlink_port (container_of). */
812 static void *__dl_port(struct devlink_port *devlink_port)
814 return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
/* devlink port type (eth/ib) setter, delegated to the driver. */
817 static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
818 enum devlink_port_type port_type)
820 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
821 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
822 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
824 if (!mlxsw_driver->port_type_set)
827 return mlxsw_driver->port_type_set(mlxsw_core,
828 mlxsw_core_port->local_port,
/* Per-port shared-buffer pool get/set; each also requires the port to be
 * initialized (mlxsw_core_port_check) before delegating.
 */
832 static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
833 unsigned int sb_index, u16 pool_index,
836 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
837 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
838 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
840 if (!mlxsw_driver->sb_port_pool_get ||
841 !mlxsw_core_port_check(mlxsw_core_port))
843 return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
844 pool_index, p_threshold);
847 static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
848 unsigned int sb_index, u16 pool_index,
850 struct netlink_ext_ack *extack)
852 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
853 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
854 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
856 if (!mlxsw_driver->sb_port_pool_set ||
857 !mlxsw_core_port_check(mlxsw_core_port))
859 return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
860 pool_index, threshold, extack);
/* Per-TC pool binding get/set, same delegation + port-check pattern. */
864 mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
865 unsigned int sb_index, u16 tc_index,
866 enum devlink_sb_pool_type pool_type,
867 u16 *p_pool_index, u32 *p_threshold)
869 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
870 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
871 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
873 if (!mlxsw_driver->sb_tc_pool_bind_get ||
874 !mlxsw_core_port_check(mlxsw_core_port))
876 return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
878 p_pool_index, p_threshold);
882 mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
883 unsigned int sb_index, u16 tc_index,
884 enum devlink_sb_pool_type pool_type,
885 u16 pool_index, u32 threshold,
886 struct netlink_ext_ack *extack)
888 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
889 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
890 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
892 if (!mlxsw_driver->sb_tc_pool_bind_set ||
893 !mlxsw_core_port_check(mlxsw_core_port))
895 return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
897 pool_index, threshold, extack);
/* Shared-buffer occupancy snapshot / max-clear, per-device delegations. */
900 static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
901 unsigned int sb_index)
903 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
904 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
906 if (!mlxsw_driver->sb_occ_snapshot)
908 return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
911 static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
912 unsigned int sb_index)
914 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
915 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
917 if (!mlxsw_driver->sb_occ_max_clear)
919 return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
/* Per-port / per-TC occupancy getters, same delegation + port check. */
923 mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
924 unsigned int sb_index, u16 pool_index,
925 u32 *p_cur, u32 *p_max)
927 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
928 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
929 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
931 if (!mlxsw_driver->sb_occ_port_pool_get ||
932 !mlxsw_core_port_check(mlxsw_core_port))
934 return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
935 pool_index, p_cur, p_max);
939 mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
940 unsigned int sb_index, u16 tc_index,
941 enum devlink_sb_pool_type pool_type,
942 u32 *p_cur, u32 *p_max)
944 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
945 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
946 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
948 if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
949 !mlxsw_core_port_check(mlxsw_core_port))
951 return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
953 pool_type, p_cur, p_max);
/* devlink info_get: report driver name, then query the MGIR register for
 * hardware revision, PSID and the running firmware version, formatting the
 * numeric values into the stack buffer for devlink.
 */
957 mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
958 struct netlink_ext_ack *extack)
960 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
961 char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
962 u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
963 char mgir_pl[MLXSW_REG_MGIR_LEN];
967 err = devlink_info_driver_name_put(req,
968 mlxsw_core->bus_info->device_kind);
972 mlxsw_reg_mgir_pack(mgir_pl);
973 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
976 mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
977 &fw_minor, &fw_sub_minor);
979 sprintf(buf, "%X", hw_rev);
980 err = devlink_info_version_fixed_put(req, "hw.revision", buf);
984 err = devlink_info_version_fixed_put(req, "fw.psid", fw_info_psid);
988 sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
989 err = devlink_info_version_running_put(req, "fw.version", buf);
/* devlink reload_down: only supported when the bus can reset; tears down
 * the device (reload=true keeps reload-persistent state).
 */
997 mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
999 struct netlink_ext_ack *extack)
1001 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1003 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
1006 mlxsw_core_bus_device_unregister(mlxsw_core, true);
/* devlink reload_up: re-register the device on the same bus binding. */
1011 mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink,
1012 struct netlink_ext_ack *extack)
1014 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1016 return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
1018 mlxsw_core->bus_priv, true,
/* devlink flash_update: delegate to the driver's flash_update op. */
1022 static int mlxsw_devlink_flash_update(struct devlink *devlink,
1023 const char *file_name,
1024 const char *component,
1025 struct netlink_ext_ack *extack)
1027 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1028 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1030 if (!mlxsw_driver->flash_update)
1032 return mlxsw_driver->flash_update(mlxsw_core, file_name,
/* devlink trap init/fini/action/group callbacks: thin delegations to the
 * corresponding driver ops when implemented.
 */
1036 static int mlxsw_devlink_trap_init(struct devlink *devlink,
1037 const struct devlink_trap *trap,
1040 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1041 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1043 if (!mlxsw_driver->trap_init)
1045 return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
1048 static void mlxsw_devlink_trap_fini(struct devlink *devlink,
1049 const struct devlink_trap *trap,
1052 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1053 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1055 if (!mlxsw_driver->trap_fini)
1057 mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
1060 static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
1061 const struct devlink_trap *trap,
1062 enum devlink_trap_action action)
1064 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1065 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1067 if (!mlxsw_driver->trap_action_set)
1069 return mlxsw_driver->trap_action_set(mlxsw_core, trap, action);
1073 mlxsw_devlink_trap_group_init(struct devlink *devlink,
1074 const struct devlink_trap_group *group)
1076 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1077 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1079 if (!mlxsw_driver->trap_group_init)
1081 return mlxsw_driver->trap_group_init(mlxsw_core, group);
/* devlink operations table wiring all the wrappers above. */
1084 static const struct devlink_ops mlxsw_devlink_ops = {
1085 .reload_down = mlxsw_devlink_core_bus_device_reload_down,
1086 .reload_up = mlxsw_devlink_core_bus_device_reload_up,
1087 .port_type_set = mlxsw_devlink_port_type_set,
1088 .port_split = mlxsw_devlink_port_split,
1089 .port_unsplit = mlxsw_devlink_port_unsplit,
1090 .sb_pool_get = mlxsw_devlink_sb_pool_get,
1091 .sb_pool_set = mlxsw_devlink_sb_pool_set,
1092 .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get,
1093 .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set,
1094 .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get,
1095 .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set,
1096 .sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot,
1097 .sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
1098 .sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
1099 .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
1100 .info_get = mlxsw_devlink_info_get,
1101 .flash_update = mlxsw_devlink_flash_update,
1102 .trap_init = mlxsw_devlink_trap_init,
1103 .trap_fini = mlxsw_devlink_trap_fini,
1104 .trap_action_set = mlxsw_devlink_trap_action_set,
1105 .trap_group_init = mlxsw_devlink_trap_group_init,
1109 __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
1110 const struct mlxsw_bus *mlxsw_bus,
1111 void *bus_priv, bool reload,
1112 struct devlink *devlink,
1113 struct netlink_ext_ack *extack)
1115 const char *device_kind = mlxsw_bus_info->device_kind;
1116 struct mlxsw_core *mlxsw_core;
1117 struct mlxsw_driver *mlxsw_driver;
1118 struct mlxsw_res *res;
1122 mlxsw_driver = mlxsw_core_driver_get(device_kind);
1127 alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
1128 devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
1131 goto err_devlink_alloc;
1135 mlxsw_core = devlink_priv(devlink);
1136 INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
1137 INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
1138 mlxsw_core->driver = mlxsw_driver;
1139 mlxsw_core->bus = mlxsw_bus;
1140 mlxsw_core->bus_priv = bus_priv;
1141 mlxsw_core->bus_info = mlxsw_bus_info;
1143 res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
1144 err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
1148 if (mlxsw_driver->resources_register && !reload) {
1149 err = mlxsw_driver->resources_register(mlxsw_core);
1151 goto err_register_resources;
1154 err = mlxsw_ports_init(mlxsw_core);
1156 goto err_ports_init;
1158 if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
1159 MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
1160 alloc_size = sizeof(u8) *
1161 MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
1162 MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
1163 mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
1164 if (!mlxsw_core->lag.mapping) {
1166 goto err_alloc_lag_mapping;
1170 err = mlxsw_emad_init(mlxsw_core);
1175 err = devlink_register(devlink, mlxsw_bus_info->dev);
1177 goto err_devlink_register;
1180 if (mlxsw_driver->params_register && !reload) {
1181 err = mlxsw_driver->params_register(mlxsw_core);
1183 goto err_register_params;
1186 if (mlxsw_driver->init) {
1187 err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
1189 goto err_driver_init;
1192 err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
1194 goto err_hwmon_init;
1196 err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
1197 &mlxsw_core->thermal);
1199 goto err_thermal_init;
1201 if (mlxsw_driver->params_register && !reload)
1202 devlink_params_publish(devlink);
1207 mlxsw_hwmon_fini(mlxsw_core->hwmon);
1209 if (mlxsw_core->driver->fini)
1210 mlxsw_core->driver->fini(mlxsw_core);
1212 if (mlxsw_driver->params_unregister && !reload)
1213 mlxsw_driver->params_unregister(mlxsw_core);
1214 err_register_params:
1216 devlink_unregister(devlink);
1217 err_devlink_register:
1218 mlxsw_emad_fini(mlxsw_core);
1220 kfree(mlxsw_core->lag.mapping);
1221 err_alloc_lag_mapping:
1222 mlxsw_ports_fini(mlxsw_core);
1225 devlink_resources_unregister(devlink, NULL);
1226 err_register_resources:
1227 mlxsw_bus->fini(bus_priv);
1230 devlink_free(devlink);
1235 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
1236 const struct mlxsw_bus *mlxsw_bus,
1237 void *bus_priv, bool reload,
1238 struct devlink *devlink,
1239 struct netlink_ext_ack *extack)
1241 bool called_again = false;
1245 err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
1248 /* -EAGAIN is returned in case the FW was updated. FW needs
1249 * a reset, so lets try to call __mlxsw_core_bus_device_register()
1252 if (err == -EAGAIN && !called_again) {
1253 called_again = true;
1259 EXPORT_SYMBOL(mlxsw_core_bus_device_register);
/* mlxsw_core_bus_device_unregister() - tear down a core bus device.
 * NOTE(review): this listing is line-sampled; the "if (!reload)" guards
 * that upstream places around devlink_unregister()/devlink_free() and the
 * "return"/"reload_fail_deinit:" label lines are not visible here --
 * confirm against the full source before relying on the flow below.
 */
1261 void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
1264 struct devlink *devlink = priv_to_devlink(mlxsw_core);
/* A failed devlink reload leaves the device only partially initialized;
 * in that case jump to the reduced teardown path. */
1266 if (devlink_is_reload_failed(devlink)) {
1268 /* Only the parts that were not de-initialized in the
1269 * failed reload attempt need to be de-initialized.
1271 goto reload_fail_deinit;
/* Normal path: undo initialization in reverse order of registration. */
1276 if (mlxsw_core->driver->params_unregister && !reload)
1277 devlink_params_unpublish(devlink);
1278 mlxsw_thermal_fini(mlxsw_core->thermal);
1279 mlxsw_hwmon_fini(mlxsw_core->hwmon);
1280 if (mlxsw_core->driver->fini)
1281 mlxsw_core->driver->fini(mlxsw_core);
1282 if (mlxsw_core->driver->params_unregister && !reload)
1283 mlxsw_core->driver->params_unregister(mlxsw_core);
1285 devlink_unregister(devlink);
1286 mlxsw_emad_fini(mlxsw_core);
1287 kfree(mlxsw_core->lag.mapping);
1288 mlxsw_ports_fini(mlxsw_core);
1290 devlink_resources_unregister(devlink, NULL);
1291 mlxsw_core->bus->fini(mlxsw_core->bus_priv);
/* reload_fail_deinit path: free only what the failed reload left behind. */
1296 if (mlxsw_core->driver->params_unregister)
1297 mlxsw_core->driver->params_unregister(mlxsw_core);
1298 devlink_unregister(devlink);
1299 devlink_resources_unregister(devlink, NULL);
1300 devlink_free(devlink);
1302 EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
1304 bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
1305 const struct mlxsw_tx_info *tx_info)
1307 return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
1310 EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
1312 int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1313 const struct mlxsw_tx_info *tx_info)
1315 return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
1318 EXPORT_SYMBOL(mlxsw_core_skb_transmit);
1320 void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
1321 struct sk_buff *skb, u8 local_port)
1323 if (mlxsw_core->driver->ptp_transmitted)
1324 mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
1327 EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);
1329 static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
1330 const struct mlxsw_rx_listener *rxl_b)
1332 return (rxl_a->func == rxl_b->func &&
1333 rxl_a->local_port == rxl_b->local_port &&
1334 rxl_a->trap_id == rxl_b->trap_id);
1337 static struct mlxsw_rx_listener_item *
1338 __find_rx_listener_item(struct mlxsw_core *mlxsw_core,
1339 const struct mlxsw_rx_listener *rxl,
1342 struct mlxsw_rx_listener_item *rxl_item;
1344 list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
1345 if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
1346 rxl_item->priv == priv)
1352 int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
1353 const struct mlxsw_rx_listener *rxl,
1356 struct mlxsw_rx_listener_item *rxl_item;
1358 rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
1361 rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
1364 rxl_item->rxl = *rxl;
1365 rxl_item->priv = priv;
1367 list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
1370 EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
1372 void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
1373 const struct mlxsw_rx_listener *rxl,
1376 struct mlxsw_rx_listener_item *rxl_item;
1378 rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
1381 list_del_rcu(&rxl_item->list);
1385 EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
1387 static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
1390 struct mlxsw_event_listener_item *event_listener_item = priv;
1391 struct mlxsw_reg_info reg;
1393 char *op_tlv = mlxsw_emad_op_tlv(skb);
1394 char *reg_tlv = mlxsw_emad_reg_tlv(skb);
1396 reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
1397 reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
1398 payload = mlxsw_emad_reg_payload(op_tlv);
1399 event_listener_item->el.func(®, payload, event_listener_item->priv);
1403 static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
1404 const struct mlxsw_event_listener *el_b)
1406 return (el_a->func == el_b->func &&
1407 el_a->trap_id == el_b->trap_id);
1410 static struct mlxsw_event_listener_item *
1411 __find_event_listener_item(struct mlxsw_core *mlxsw_core,
1412 const struct mlxsw_event_listener *el,
1415 struct mlxsw_event_listener_item *el_item;
1417 list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
1418 if (__is_event_listener_equal(&el_item->el, el) &&
1419 el_item->priv == priv)
1425 int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
1426 const struct mlxsw_event_listener *el,
1430 struct mlxsw_event_listener_item *el_item;
1431 const struct mlxsw_rx_listener rxl = {
1432 .func = mlxsw_core_event_listener_func,
1433 .local_port = MLXSW_PORT_DONT_CARE,
1434 .trap_id = el->trap_id,
1437 el_item = __find_event_listener_item(mlxsw_core, el, priv);
1440 el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
1444 el_item->priv = priv;
1446 err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
1448 goto err_rx_listener_register;
1450 /* No reason to save item if we did not manage to register an RX
1453 list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
1457 err_rx_listener_register:
1461 EXPORT_SYMBOL(mlxsw_core_event_listener_register);
1463 void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
1464 const struct mlxsw_event_listener *el,
1467 struct mlxsw_event_listener_item *el_item;
1468 const struct mlxsw_rx_listener rxl = {
1469 .func = mlxsw_core_event_listener_func,
1470 .local_port = MLXSW_PORT_DONT_CARE,
1471 .trap_id = el->trap_id,
1474 el_item = __find_event_listener_item(mlxsw_core, el, priv);
1477 mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
1478 list_del(&el_item->list);
1481 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
1483 static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
1484 const struct mlxsw_listener *listener,
1487 if (listener->is_event)
1488 return mlxsw_core_event_listener_register(mlxsw_core,
1489 &listener->u.event_listener,
1492 return mlxsw_core_rx_listener_register(mlxsw_core,
1493 &listener->u.rx_listener,
1497 static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
1498 const struct mlxsw_listener *listener,
1501 if (listener->is_event)
1502 mlxsw_core_event_listener_unregister(mlxsw_core,
1503 &listener->u.event_listener,
1506 mlxsw_core_rx_listener_unregister(mlxsw_core,
1507 &listener->u.rx_listener,
1511 int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
1512 const struct mlxsw_listener *listener, void *priv)
1514 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1517 err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
1521 mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
1522 listener->trap_group, listener->is_ctrl);
1523 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
1530 mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
1533 EXPORT_SYMBOL(mlxsw_core_trap_register);
1535 void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
1536 const struct mlxsw_listener *listener,
1539 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1541 if (!listener->is_event) {
1542 mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
1543 listener->trap_id, listener->trap_group,
1545 mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
1548 mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
1550 EXPORT_SYMBOL(mlxsw_core_trap_unregister);
1552 int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core,
1553 const struct mlxsw_listener *listener,
1554 enum mlxsw_reg_hpkt_action action)
1556 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1558 mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
1559 listener->trap_group, listener->is_ctrl);
1560 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
1562 EXPORT_SYMBOL(mlxsw_core_trap_action_set);
1564 static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
1566 return atomic64_inc_return(&mlxsw_core->emad.tid);
1569 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
1570 const struct mlxsw_reg_info *reg,
1572 enum mlxsw_core_reg_access_type type,
1573 struct list_head *bulk_list,
1574 mlxsw_reg_trans_cb_t *cb,
1575 unsigned long cb_priv)
1577 u64 tid = mlxsw_core_tid_get(mlxsw_core);
1578 struct mlxsw_reg_trans *trans;
1581 trans = kzalloc(sizeof(*trans), GFP_KERNEL);
1585 err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
1586 bulk_list, cb, cb_priv, tid);
1594 int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
1595 const struct mlxsw_reg_info *reg, char *payload,
1596 struct list_head *bulk_list,
1597 mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
1599 return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
1600 MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
1601 bulk_list, cb, cb_priv);
1603 EXPORT_SYMBOL(mlxsw_reg_trans_query);
1605 int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
1606 const struct mlxsw_reg_info *reg, char *payload,
1607 struct list_head *bulk_list,
1608 mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
1610 return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
1611 MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
1612 bulk_list, cb, cb_priv);
1614 EXPORT_SYMBOL(mlxsw_reg_trans_write);
1616 static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
1618 struct mlxsw_core *mlxsw_core = trans->core;
1621 wait_for_completion(&trans->completion);
1622 cancel_delayed_work_sync(&trans->timeout_dw);
1626 dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
1627 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
1629 dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
1630 trans->tid, trans->reg->id,
1631 mlxsw_reg_id_str(trans->reg->id),
1632 mlxsw_core_reg_access_type_str(trans->type),
1634 mlxsw_emad_op_tlv_status_str(trans->emad_status));
1635 trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
1637 mlxsw_emad_op_tlv_status_str(trans->emad_status));
1640 list_del(&trans->bulk_list);
1641 kfree_rcu(trans, rcu);
1645 int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
1647 struct mlxsw_reg_trans *trans;
1648 struct mlxsw_reg_trans *tmp;
1652 list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
1653 err = mlxsw_reg_trans_wait(trans);
1654 if (err && sum_err == 0)
1655 sum_err = err; /* first error to be returned */
1659 EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
1661 static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
1662 const struct mlxsw_reg_info *reg,
1664 enum mlxsw_core_reg_access_type type)
1666 enum mlxsw_emad_op_tlv_status status;
1669 char *in_mbox, *out_mbox, *tmp;
1671 dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
1672 reg->id, mlxsw_reg_id_str(reg->id),
1673 mlxsw_core_reg_access_type_str(type));
1675 in_mbox = mlxsw_cmd_mbox_alloc();
1679 out_mbox = mlxsw_cmd_mbox_alloc();
1685 mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
1686 mlxsw_core_tid_get(mlxsw_core));
1687 tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
1688 mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
1690 /* There is a special treatment needed for MRSR (reset) register.
1691 * The command interface will return error after the command
1692 * is executed, so tell the lower layer to expect it
1693 * and cope accordingly.
1695 reset_ok = reg->id == MLXSW_REG_MRSR_ID;
1699 err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
1701 err = mlxsw_emad_process_status(out_mbox, &status);
1703 if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
1705 dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
1706 status, mlxsw_emad_op_tlv_status_str(status));
1711 memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
1714 mlxsw_cmd_mbox_free(out_mbox);
1716 mlxsw_cmd_mbox_free(in_mbox);
1718 dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
1719 reg->id, mlxsw_reg_id_str(reg->id),
1720 mlxsw_core_reg_access_type_str(type));
1724 static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
1725 char *payload, size_t payload_len,
1726 unsigned long cb_priv)
1728 char *orig_payload = (char *) cb_priv;
1730 memcpy(orig_payload, payload, payload_len);
1733 static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
1734 const struct mlxsw_reg_info *reg,
1736 enum mlxsw_core_reg_access_type type)
1738 LIST_HEAD(bulk_list);
1741 /* During initialization EMAD interface is not available to us,
1742 * so we default to command interface. We switch to EMAD interface
1743 * after setting the appropriate traps.
1745 if (!mlxsw_core->emad.use_emad)
1746 return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
1749 err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
1750 payload, type, &bulk_list,
1751 mlxsw_core_reg_access_cb,
1752 (unsigned long) payload);
1755 return mlxsw_reg_trans_bulk_wait(&bulk_list);
1758 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
1759 const struct mlxsw_reg_info *reg, char *payload)
1761 return mlxsw_core_reg_access(mlxsw_core, reg, payload,
1762 MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
1764 EXPORT_SYMBOL(mlxsw_reg_query);
1766 int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
1767 const struct mlxsw_reg_info *reg, char *payload)
1769 return mlxsw_core_reg_access(mlxsw_core, reg, payload,
1770 MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
1772 EXPORT_SYMBOL(mlxsw_reg_write);
/* mlxsw_core_skb_receive() - deliver a received skb to the first matching
 * RX listener, keyed on (local_port, trap_id).
 * NOTE(review): this listing is line-sampled; the rcu_read_lock()/unlock()
 * pair around the listener walk, the "found" bookkeeping and the "drop:"
 * label that frees unmatched skbs are not visible here -- confirm against
 * the full source.
 */
1774 void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1775 struct mlxsw_rx_info *rx_info)
1777 struct mlxsw_rx_listener_item *rxl_item;
1778 const struct mlxsw_rx_listener *rxl;
/* For LAG traffic, translate (lag_id, lag_port_index) to a local port. */
1782 if (rx_info->is_lag) {
1783 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
1784 __func__, rx_info->u.lag_id,
1786 /* Upper layer does not care if the skb came from LAG or not,
1787 * so just get the local_port for the lag port and push it up.
1789 local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
1791 rx_info->lag_port_index);
1793 local_port = rx_info->u.sys_port;
1796 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
1797 __func__, local_port, rx_info->trap_id);
/* Sanity-check trap ID and port range before the listener lookup. */
1799 if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
1800 (local_port >= mlxsw_core->max_ports))
/* Listener list is walked under RCU; a listener matches on trap ID and
 * either a wildcard port or the exact local port. */
1804 list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
1805 rxl = &rxl_item->rxl;
1806 if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
1807 rxl->local_port == local_port) &&
1808 rxl->trap_id == rx_info->trap_id) {
/* Hand the skb to the matched listener's callback. */
1817 rxl->func(skb, local_port, rxl_item->priv);
1823 EXPORT_SYMBOL(mlxsw_core_skb_receive);
1825 static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
1826 u16 lag_id, u8 port_index)
1828 return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
1832 void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
1833 u16 lag_id, u8 port_index, u8 local_port)
1835 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1836 lag_id, port_index);
1838 mlxsw_core->lag.mapping[index] = local_port;
1840 EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
1842 u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
1843 u16 lag_id, u8 port_index)
1845 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1846 lag_id, port_index);
1848 return mlxsw_core->lag.mapping[index];
1850 EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
1852 void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
1853 u16 lag_id, u8 local_port)
1857 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
1858 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1861 if (mlxsw_core->lag.mapping[index] == local_port)
1862 mlxsw_core->lag.mapping[index] = 0;
1865 EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
1867 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
1868 enum mlxsw_res_id res_id)
1870 return mlxsw_res_valid(&mlxsw_core->res, res_id);
1872 EXPORT_SYMBOL(mlxsw_core_res_valid);
1874 u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
1875 enum mlxsw_res_id res_id)
1877 return mlxsw_res_get(&mlxsw_core->res, res_id);
1879 EXPORT_SYMBOL(mlxsw_core_res_get);
1881 static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
1882 enum devlink_port_flavour flavour,
1883 u32 port_number, bool split,
1884 u32 split_port_subnumber,
1885 const unsigned char *switch_id,
1886 unsigned char switch_id_len)
1888 struct devlink *devlink = priv_to_devlink(mlxsw_core);
1889 struct mlxsw_core_port *mlxsw_core_port =
1890 &mlxsw_core->ports[local_port];
1891 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1894 mlxsw_core_port->local_port = local_port;
1895 devlink_port_attrs_set(devlink_port, flavour, port_number,
1896 split, split_port_subnumber,
1897 switch_id, switch_id_len);
1898 err = devlink_port_register(devlink, devlink_port, local_port);
1900 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
1904 static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
1906 struct mlxsw_core_port *mlxsw_core_port =
1907 &mlxsw_core->ports[local_port];
1908 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1910 devlink_port_unregister(devlink_port);
1911 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
1914 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
1915 u32 port_number, bool split,
1916 u32 split_port_subnumber,
1917 const unsigned char *switch_id,
1918 unsigned char switch_id_len)
1920 return __mlxsw_core_port_init(mlxsw_core, local_port,
1921 DEVLINK_PORT_FLAVOUR_PHYSICAL,
1922 port_number, split, split_port_subnumber,
1923 switch_id, switch_id_len);
1925 EXPORT_SYMBOL(mlxsw_core_port_init);
1927 void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
1929 __mlxsw_core_port_fini(mlxsw_core, local_port);
1931 EXPORT_SYMBOL(mlxsw_core_port_fini);
1933 int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
1934 void *port_driver_priv,
1935 const unsigned char *switch_id,
1936 unsigned char switch_id_len)
1938 struct mlxsw_core_port *mlxsw_core_port =
1939 &mlxsw_core->ports[MLXSW_PORT_CPU_PORT];
1942 err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
1943 DEVLINK_PORT_FLAVOUR_CPU,
1945 switch_id, switch_id_len);
1949 mlxsw_core_port->port_driver_priv = port_driver_priv;
1952 EXPORT_SYMBOL(mlxsw_core_cpu_port_init);
1954 void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
1956 __mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT);
1958 EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
1960 void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
1961 void *port_driver_priv, struct net_device *dev)
1963 struct mlxsw_core_port *mlxsw_core_port =
1964 &mlxsw_core->ports[local_port];
1965 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1967 mlxsw_core_port->port_driver_priv = port_driver_priv;
1968 devlink_port_type_eth_set(devlink_port, dev);
1970 EXPORT_SYMBOL(mlxsw_core_port_eth_set);
1972 void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
1973 void *port_driver_priv)
1975 struct mlxsw_core_port *mlxsw_core_port =
1976 &mlxsw_core->ports[local_port];
1977 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1979 mlxsw_core_port->port_driver_priv = port_driver_priv;
1980 devlink_port_type_ib_set(devlink_port, NULL);
1982 EXPORT_SYMBOL(mlxsw_core_port_ib_set);
1984 void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
1985 void *port_driver_priv)
1987 struct mlxsw_core_port *mlxsw_core_port =
1988 &mlxsw_core->ports[local_port];
1989 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1991 mlxsw_core_port->port_driver_priv = port_driver_priv;
1992 devlink_port_type_clear(devlink_port);
1994 EXPORT_SYMBOL(mlxsw_core_port_clear);
1996 enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
1999 struct mlxsw_core_port *mlxsw_core_port =
2000 &mlxsw_core->ports[local_port];
2001 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2003 return devlink_port->type;
2005 EXPORT_SYMBOL(mlxsw_core_port_type_get);
2008 struct devlink_port *
2009 mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
2012 struct mlxsw_core_port *mlxsw_core_port =
2013 &mlxsw_core->ports[local_port];
2014 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2016 return devlink_port;
2018 EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
2020 static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
2021 const char *buf, size_t size)
2023 __be32 *m = (__be32 *) buf;
2025 int count = size / sizeof(__be32);
2027 for (i = count - 1; i >= 0; i--)
2032 for (i = 0; i < count; i += 4)
2033 dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
2034 i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
2035 be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
2038 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
2039 u32 in_mod, bool out_mbox_direct, bool reset_ok,
2040 char *in_mbox, size_t in_mbox_size,
2041 char *out_mbox, size_t out_mbox_size)
2046 BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
2047 if (!mlxsw_core->bus->cmd_exec)
2050 dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
2051 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
2053 dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
2054 mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
2057 err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
2058 opcode_mod, in_mod, out_mbox_direct,
2059 in_mbox, in_mbox_size,
2060 out_mbox, out_mbox_size, &status);
2062 if (!err && out_mbox) {
2063 dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
2064 mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
2067 if (reset_ok && err == -EIO &&
2068 status == MLXSW_CMD_STATUS_RUNNING_RESET) {
2070 } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
2071 dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
2072 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
2073 in_mod, status, mlxsw_cmd_status_str(status));
2074 } else if (err == -ETIMEDOUT) {
2075 dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
2076 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
2082 EXPORT_SYMBOL(mlxsw_cmd_exec);
2084 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
2086 return queue_delayed_work(mlxsw_wq, dwork, delay);
2088 EXPORT_SYMBOL(mlxsw_core_schedule_dw);
2090 bool mlxsw_core_schedule_work(struct work_struct *work)
2092 return queue_work(mlxsw_owq, work);
2094 EXPORT_SYMBOL(mlxsw_core_schedule_work);
2096 void mlxsw_core_flush_owq(void)
2098 flush_workqueue(mlxsw_owq);
2100 EXPORT_SYMBOL(mlxsw_core_flush_owq);
2102 int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
2103 const struct mlxsw_config_profile *profile,
2104 u64 *p_single_size, u64 *p_double_size,
2107 struct mlxsw_driver *driver = mlxsw_core->driver;
2109 if (!driver->kvd_sizes_get)
2112 return driver->kvd_sizes_get(mlxsw_core, profile,
2113 p_single_size, p_double_size,
2116 EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
2118 void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
2120 mlxsw_core->fw_flash_in_progress = true;
2122 EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
2124 void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
2126 mlxsw_core->fw_flash_in_progress = false;
2128 EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
2130 int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
2131 struct mlxsw_res *res)
2141 mlxsw_cmd_mbox_zero(mbox);
2143 for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
2145 err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index);
2149 for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
2150 id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
2151 data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
2153 if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
2156 mlxsw_res_parse(res, id, data);
2160 /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get
2161 * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW.
2165 EXPORT_SYMBOL(mlxsw_core_resources_query);
2167 u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core)
2169 return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv);
2171 EXPORT_SYMBOL(mlxsw_core_read_frc_h);
2173 u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
2175 return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv);
2177 EXPORT_SYMBOL(mlxsw_core_read_frc_l);
2179 static int __init mlxsw_core_module_init(void)
2183 mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
2186 mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
2187 mlxsw_core_driver_name);
2190 goto err_alloc_ordered_workqueue;
2194 err_alloc_ordered_workqueue:
2195 destroy_workqueue(mlxsw_wq);
2199 static void __exit mlxsw_core_module_exit(void)
2201 destroy_workqueue(mlxsw_owq);
2202 destroy_workqueue(mlxsw_wq);
/* Module entry/exit hookup and metadata for the mlxsw core driver. */
1205 module_init(mlxsw_core_module_init);
1206 module_exit(mlxsw_core_module_exit);
1208 MODULE_LICENSE("Dual BSD/GPL");
1209 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1210 MODULE_DESCRIPTION("Mellanox switch device core driver");