1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2017 Broadcom Limited
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
10 #include <linux/pci.h>
11 #include <linux/netdevice.h>
12 #include <net/devlink.h>
16 #include "bnxt_devlink.h"
17 #include "bnxt_ethtool.h"
/*
 * Devlink ->flash_update handler: flash a firmware package file to device
 * NVM via bnxt_flash_package_from_file(), reporting progress through the
 * devlink flash-update notification API.  Rejected with an extack message
 * on VFs.
 * NOTE(review): this fragment is truncated — the function header, braces,
 * return paths and some statements are not visible here.
 */
20 bnxt_dl_flash_update(struct devlink *dl, const char *filename,
21 const char *region, struct netlink_ext_ack *extack)
23 struct bnxt *bp = bnxt_get_bp_from_dl(dl);
/* Flashing is a PF-only operation; surface the reason to userspace. */
30 NL_SET_ERR_MSG_MOD(extack,
31 "flash update not supported from a VF");
/* Bracket the flash with begin/end notifications, with status updates in between. */
35 devlink_flash_update_begin_notify(dl);
36 devlink_flash_update_status_notify(dl, "Preparing to flash", region, 0,
38 rc = bnxt_flash_package_from_file(bp->dev, filename, 0);
40 devlink_flash_update_status_notify(dl, "Flashing done", region,
43 devlink_flash_update_status_notify(dl, "Flashing failed",
45 devlink_flash_update_end_notify(dl);
/*
 * ->diagnose callback of the FW health reporter: read the firmware health
 * register and emit a human-readable status via the devlink fmsg API.
 * (Fragment truncated — return statements and some pairs are not visible.)
 */
49 static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
50 struct devlink_fmsg *fmsg,
51 struct netlink_ext_ack *extack)
53 struct bnxt *bp = devlink_health_reporter_priv(reporter);
54 u32 val, health_status;
/* Skip diagnosis while a firmware reset is already in progress. */
57 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
60 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
/* Low 16 bits of the health register hold the status code. */
61 health_status = val & 0xffff;
/* Below HEALTHY: firmware has not finished initializing yet. */
63 if (health_status < BNXT_FW_STATUS_HEALTHY) {
64 rc = devlink_fmsg_string_pair_put(fmsg, "Description",
65 "Not yet completed initialization");
/* Above HEALTHY: fatal, unrecoverable error. */
68 } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
69 rc = devlink_fmsg_string_pair_put(fmsg, "Description",
70 "Encountered fatal error and cannot recover");
/* Upper 16 bits of the same register carry the error code. */
76 rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
/* Also report how many firmware resets have occurred. */
81 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
82 rc = devlink_fmsg_u32_pair_put(fmsg, "Reset count", val);
/* Ops table for the general FW health reporter; only ->diagnose is visible here. */
89 static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
91 .diagnose = bnxt_fw_reporter_diagnose,
/*
 * ->recover callback for the non-fatal FW reset reporter.
 * NOTE(review): the body is truncated in this view — only the priv lookup
 * is visible; the actual recovery action cannot be confirmed from here.
 */
94 static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
96 struct netlink_ext_ack *extack)
98 struct bnxt *bp = devlink_health_reporter_priv(reporter);
/* Ops table for the non-fatal FW reset health reporter. */
108 struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
110 .recover = bnxt_fw_reset_recover,
/*
 * ->recover callback for the fatal FW reporter: mark the health state fatal
 * and dispatch on the async event stashed in the reporter context
 * (fw_reporter_ctx->sp_event).  Fragment truncated: the priv_ctx parameter
 * and the BNXT_FW_RESET_NOTIFY_SP_EVENT action are not visible here.
 */
113 static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
115 struct netlink_ext_ack *extack)
117 struct bnxt *bp = devlink_health_reporter_priv(reporter);
118 struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
/* Record that the current condition is fatal before acting on it. */
124 bp->fw_health->fatal = true;
125 event = fw_reporter_ctx->sp_event;
126 if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
128 else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
129 bnxt_fw_exception(bp);
/* Ops table for the fatal FW health reporter. */
135 struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
137 .recover = bnxt_fw_fatal_recover,
/*
 * Create the devlink health reporters supported by this device.  Each
 * reporter is created only if its capability bit is set and it does not
 * already exist; on creation failure the reporter pointer is cleared and
 * the corresponding fw_cap bit is dropped so the feature is treated as
 * unsupported from then on.  (Fragment truncated — returns/braces missing.)
 */
140 void bnxt_dl_fw_reporters_create(struct bnxt *bp)
142 struct bnxt_fw_health *health = bp->fw_health;
/* Nothing to do without a devlink instance or FW health support. */
144 if (!bp->dl || !health)
/* FW reset reporter requires the hot-reset capability. */
147 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
150 health->fw_reset_reporter =
151 devlink_health_reporter_create(bp->dl,
152 &bnxt_dl_fw_reset_reporter_ops,
154 if (IS_ERR(health->fw_reset_reporter)) {
/* NOTE(review): message says "FW fatal" but this is the reset reporter —
 * looks like a copy/paste slip in the log text; confirm against upstream.
 */
155 netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
156 PTR_ERR(health->fw_reset_reporter));
157 health->fw_reset_reporter = NULL;
158 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
/* The remaining reporters require error-recovery support. */
162 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
165 if (!health->fw_reporter) {
166 health->fw_reporter =
167 devlink_health_reporter_create(bp->dl,
168 &bnxt_dl_fw_reporter_ops,
170 if (IS_ERR(health->fw_reporter)) {
171 netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
172 PTR_ERR(health->fw_reporter));
173 health->fw_reporter = NULL;
174 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
/* Fatal reporter: create once. */
179 if (health->fw_fatal_reporter)
182 health->fw_fatal_reporter =
183 devlink_health_reporter_create(bp->dl,
184 &bnxt_dl_fw_fatal_reporter_ops,
186 if (IS_ERR(health->fw_fatal_reporter)) {
187 netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
188 PTR_ERR(health->fw_fatal_reporter));
189 health->fw_fatal_reporter = NULL;
190 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
/*
 * Destroy the FW health reporters.  With @all set, everything is torn down;
 * otherwise a reporter is only destroyed when its capability bit has been
 * lost (e.g. after a firmware downgrade).  (Fragment truncated.)
 */
194 void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
196 struct bnxt_fw_health *health = bp->fw_health;
198 if (!bp->dl || !health)
/* Reset reporter goes away when asked for a full teardown or when
 * hot-reset support has disappeared.
 */
201 if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
202 health->fw_reset_reporter) {
203 devlink_health_reporter_destroy(health->fw_reset_reporter);
204 health->fw_reset_reporter = NULL;
/* Keep the remaining reporters if error recovery is still supported
 * and this is not a full teardown.
 */
207 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
210 if (health->fw_reporter) {
211 devlink_health_reporter_destroy(health->fw_reporter);
212 health->fw_reporter = NULL;
215 if (health->fw_fatal_reporter) {
216 devlink_health_reporter_destroy(health->fw_fatal_reporter);
217 health->fw_fatal_reporter = NULL;
/*
 * Route a firmware async event to the appropriate devlink health reporter.
 * A reset-notify event is reported as fatal when BNXT_STATE_FW_FATAL_COND
 * is set, otherwise as a non-fatal reset; an exception event always goes
 * to the fatal reporter.  The triggering event is passed along in
 * fw_reporter_ctx so the ->recover callback can act on it.
 * (Fragment truncated — the switch statement and returns are not visible.)
 */
221 void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
223 struct bnxt_fw_health *fw_health = bp->fw_health;
224 struct bnxt_fw_reporter_ctx fw_reporter_ctx;
226 fw_reporter_ctx.sp_event = event;
228 case BNXT_FW_RESET_NOTIFY_SP_EVENT:
229 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
/* No reporter registered — nothing to report to. */
230 if (!fw_health->fw_fatal_reporter)
233 devlink_health_report(fw_health->fw_fatal_reporter,
234 "FW fatal async event received",
238 if (!fw_health->fw_reset_reporter)
241 devlink_health_report(fw_health->fw_reset_reporter,
242 "FW non-fatal reset event received",
246 case BNXT_FW_EXCEPTION_SP_EVENT:
247 if (!fw_health->fw_fatal_reporter)
250 devlink_health_report(fw_health->fw_fatal_reporter,
251 "FW fatal error reported",
/*
 * Push a HEALTHY/ERROR state update to the active reporter.  The fatal
 * reporter is updated when the last condition was fatal, the reset reporter
 * otherwise; a healthy update clears the fatal flag.  (Fragment truncated —
 * the reporter selection branch is not fully visible.)
 */
257 void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
259 struct bnxt_fw_health *health = bp->fw_health;
263 state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
265 state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
268 devlink_health_reporter_state_update(health->fw_fatal_reporter,
271 devlink_health_reporter_state_update(health->fw_reset_reporter,
274 health->fatal = false;
/*
 * Notify devlink that a recovery completed on the reporter that initiated
 * it (fatal vs. reset — the selecting condition is truncated in this view).
 */
277 void bnxt_dl_health_recovery_done(struct bnxt *bp)
279 struct bnxt_fw_health *hlth = bp->fw_health;
282 devlink_health_reporter_recovery_done(hlth->fw_fatal_reporter);
284 devlink_health_reporter_recovery_done(hlth->fw_reset_reporter);
/* Forward declaration: bnxt_dl_info_get is defined later but referenced in
 * the ops table below.
 */
287 static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
288 struct netlink_ext_ack *extack);
/* PF devlink ops: eswitch mode control (SRIOV builds only), info and flash. */
290 static const struct devlink_ops bnxt_dl_ops = {
291 #ifdef CONFIG_BNXT_SRIOV
292 .eswitch_mode_set = bnxt_dl_eswitch_mode_set,
293 .eswitch_mode_get = bnxt_dl_eswitch_mode_get,
294 #endif /* CONFIG_BNXT_SRIOV */
295 .info_get = bnxt_dl_info_get,
296 .flash_update = bnxt_dl_flash_update,
/* VFs get an empty ops table — no devlink operations are supported. */
299 static const struct devlink_ops bnxt_vf_dl_ops;
/* Driver-specific devlink param IDs, allocated above the generic ID range. */
301 enum bnxt_dl_param_id {
302 BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
303 BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
/*
 * Devlink-param -> NVM variable mapping table.  Each entry appears to carry:
 * {param id, NVM option offset, config directory type, NVM width in bits,
 * devlink value width in bytes} — field names come from struct
 * bnxt_dl_nvm_param (declared elsewhere); confirm against bnxt_devlink.h.
 */
306 static const struct bnxt_dl_nvm_param nvm_params[] = {
307 {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
308 BNXT_NVM_SHARED_CFG, 1, 1},
309 {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
310 BNXT_NVM_SHARED_CFG, 1, 1},
311 {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
312 NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
313 {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
314 NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
315 {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
316 BNXT_NVM_SHARED_CFG, 1, 1},
/* DMA buffer for NVM get/set; members (val8/val32 per later uses) truncated. */
319 union bnxt_nvm_data {
/*
 * Convert a devlink param value into the little-endian NVM wire format.
 * 1-bit NVM variables come from the boolean member; wider ones are widened
 * from vu8/vu16/vu32 per @dl_num_bytes into a le32.  (Fragment truncated —
 * the dl_num_bytes == 4 assignment is not visible.)
 */
324 static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst,
325 union devlink_param_value *src,
326 int nvm_num_bits, int dl_num_bytes)
/* Single-bit variables are stored as a byte taken from the bool value. */
330 if (nvm_num_bits == 1) {
331 dst->val8 = src->vbool;
334 if (dl_num_bytes == 4)
336 else if (dl_num_bytes == 2)
337 val32 = (u32)src->vu16;
338 else if (dl_num_bytes == 1)
339 val32 = (u32)src->vu8;
/* NVM data is little-endian on the wire. */
340 dst->val32 = cpu_to_le32(val32);
/*
 * Inverse of bnxt_copy_to_nvm_data(): decode a little-endian NVM value into
 * the devlink param union member sized by @dl_num_bytes.  (Fragment
 * truncated — the dl_num_bytes == 4 assignment is not visible.)
 */
343 static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
344 union bnxt_nvm_data *src,
345 int nvm_num_bits, int dl_num_bytes)
/* Single-bit variables map onto the boolean member. */
349 if (nvm_num_bits == 1) {
350 dst->vbool = src->val8;
353 val32 = le32_to_cpu(src->val32);
354 if (dl_num_bytes == 4)
356 else if (dl_num_bytes == 2)
357 dst->vu16 = (u16)val32;
358 else if (dl_num_bytes == 1)
359 dst->vu8 = (u8)val32;
/*
 * Read the NVM configuration version via the HWRM_NVM_GET_VARIABLE command.
 * A coherent DMA buffer receives the raw value, which is then decoded into
 * @nvm_cfg_ver with bnxt_copy_from_nvm_data().  (Fragment truncated — the
 * allocation-failure path and final return are not visible.)
 */
362 static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
363 union devlink_param_value *nvm_cfg_ver)
365 struct hwrm_nvm_get_variable_input req = {0};
366 union bnxt_nvm_data *data;
367 dma_addr_t data_dma_addr;
370 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
/* Firmware DMAs the variable into this coherent buffer. */
371 data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
372 &data_dma_addr, GFP_KERNEL);
376 req.dest_data_addr = cpu_to_le64(data_dma_addr);
377 req.data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
378 req.option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
/* Silent variant: absence of the variable is not worth a log line. */
380 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
382 bnxt_copy_from_nvm_data(nvm_cfg_ver, data,
383 BNXT_NVM_CFG_VER_BITS,
384 BNXT_NVM_CFG_VER_BYTES);
386 dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
/*
 * Devlink ->info_get handler: report driver name, fixed versions (ASIC id
 * and revision), serial number (from the 8-byte DSN, printed in reverse
 * byte order), and running firmware versions.  Extended 16-bit version
 * fields are used when the firmware advertises EXT_VER_AVAIL, otherwise the
 * legacy 8-bit fields.  (Fragment truncated — early-return error checks and
 * the final return are not visible.)
 */
390 static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
391 struct netlink_ext_ack *extack)
393 struct bnxt *bp = bnxt_get_bp_from_dl(dl);
394 union devlink_param_value nvm_cfg_ver;
395 struct hwrm_ver_get_output *ver_resp;
396 char mgmt_ver[FW_VER_STR_LEN];
397 char roce_ver[FW_VER_STR_LEN];
398 char fw_ver[FW_VER_STR_LEN];
402 rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME);
/* Fixed (board-level) version: ASIC id, hex. */
406 sprintf(buf, "%X", bp->chip_num);
407 rc = devlink_info_version_fixed_put(req,
408 DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
412 ver_resp = &bp->ver_resp;
413 sprintf(buf, "%X", ver_resp->chip_rev);
414 rc = devlink_info_version_fixed_put(req,
415 DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
/* Serial number: DSN bytes printed most-significant first. */
420 sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
421 bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
422 bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]);
423 rc = devlink_info_serial_number_put(req, buf);
/* Running firmware package name, when one is active. */
428 if (strlen(ver_resp->active_pkg_name)) {
430 devlink_info_version_running_put(req,
431 DEVLINK_INFO_VERSION_GENERIC_FW,
432 ver_resp->active_pkg_name);
/* NVM config version (PF only), formatted as three 4-bit nibbles. */
437 if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
438 u32 ver = nvm_cfg_ver.vu32;
440 sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF,
442 rc = devlink_info_version_running_put(req,
443 DEVLINK_INFO_VERSION_GENERIC_FW_PSID, buf);
/* Prefer the extended 16-bit version fields when firmware provides them. */
448 if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
449 snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
450 ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
451 ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);
453 snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
454 ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
455 ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);
457 snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
458 ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
459 ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
/* Legacy 8-bit version fields. */
461 snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
462 ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
463 ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);
465 snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
466 ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
467 ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);
469 snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
470 ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
471 ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
473 rc = devlink_info_version_running_put(req,
474 DEVLINK_INFO_VERSION_GENERIC_FW_APP, fw_ver);
/* Management and RoCE FW versions are reported for non-P5 chips. */
478 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
479 rc = devlink_info_version_running_put(req,
480 DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
484 rc = devlink_info_version_running_put(req,
485 DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
/*
 * Common worker for devlink NVM param get/set: look up the param in the
 * nvm_params[] table, build the HWRM request around a coherent DMA buffer,
 * and convert between devlink values and NVM wire format.  Whether this is
 * a get or a set is decided by the req_type already placed in @msg by the
 * caller.  (Fragment truncated — PF check, index declarations and several
 * error paths are not visible.)
 */
492 static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
493 int msg_len, union devlink_param_value *val)
495 struct hwrm_nvm_get_variable_input *req = msg;
496 struct bnxt_dl_nvm_param nvm_param;
497 union bnxt_nvm_data *data;
498 dma_addr_t data_dma_addr;
501 /* Get/Set NVM CFG parameter is supported only on PFs */
/* Find the table entry describing this devlink param id. */
505 for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
506 if (nvm_params[i].id == param_id) {
507 nvm_param = nvm_params[i];
/* No match in the table: unsupported parameter. */
512 if (i == ARRAY_SIZE(nvm_params))
/* Per-port and per-function variables are indexed accordingly. */
515 if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
516 idx = bp->pf.port_id;
517 else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
518 idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
520 data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
521 &data_dma_addr, GFP_KERNEL);
525 req->dest_data_addr = cpu_to_le64(data_dma_addr);
526 req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
527 req->option_num = cpu_to_le16(nvm_param.offset);
528 req->index_0 = cpu_to_le16(idx);
530 req->dimensions = cpu_to_le16(1);
/* SET: encode the value first, then send (loudly). */
532 if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
533 bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
534 nvm_param.dl_num_bytes);
535 rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
/* GET: send silently, then decode the value on success. */
537 rc = hwrm_send_message_silent(bp, msg, msg_len,
540 bnxt_copy_from_nvm_data(val, data,
541 nvm_param.nvm_num_bits,
542 nvm_param.dl_num_bytes);
544 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
/* A nonexistent variable is mapped to a distinct error code. */
547 NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
551 dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
553 netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
/*
 * Devlink param ->get: issue HWRM_NVM_GET_VARIABLE through
 * bnxt_hwrm_nvm_req().  The gre_ver_check param is stored inverted in NVM
 * ("disable" flag), so its boolean is flipped after reading.
 */
557 static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
558 struct devlink_param_gset_ctx *ctx)
560 struct hwrm_nvm_get_variable_input req = {0};
561 struct bnxt *bp = bnxt_get_bp_from_dl(dl);
564 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
565 rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
567 if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
568 ctx->val.vbool = !ctx->val.vbool;
/*
 * Devlink param ->set: mirror of bnxt_dl_nvm_param_get() — flip the
 * gre_ver_check boolean before writing (NVM stores the disable sense) and
 * issue HWRM_NVM_SET_VARIABLE via bnxt_hwrm_nvm_req().
 */
573 static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
574 struct devlink_param_gset_ctx *ctx)
576 struct hwrm_nvm_set_variable_input req = {0};
577 struct bnxt *bp = bnxt_get_bp_from_dl(dl);
579 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
581 if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
582 ctx->val.vbool = !ctx->val.vbool;
584 return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
/*
 * Validate the MSIX min/max devlink params against their respective upper
 * bounds, rejecting out-of-range values with an extack message.
 * (Fragment truncated — max_val declaration and returns are not visible.)
 */
587 static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
588 union devlink_param_value val,
589 struct netlink_ext_ack *extack)
/* Pick the bound matching the parameter being validated. */
593 if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX)
594 max_val = BNXT_MSIX_VEC_MAX;
596 if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN)
597 max_val = BNXT_MSIX_VEC_MIN_MAX;
599 if (val.vu32 > max_val) {
600 NL_SET_ERR_MSG_MOD(extack, "MSIX value is exceeding the range");
/*
 * Devlink parameter tables.  All params live in the PERMANENT (NVM) config
 * mode and share the NVM get/set handlers; only the MSIX params carry a
 * validate callback.  (Fragment truncated — some trailing NULL validate
 * arguments and closing braces are not visible.)
 */
607 static const struct devlink_param bnxt_dl_params[] = {
608 DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
609 BIT(DEVLINK_PARAM_CMODE_PERMANENT),
610 bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
612 DEVLINK_PARAM_GENERIC(IGNORE_ARI,
613 BIT(DEVLINK_PARAM_CMODE_PERMANENT),
614 bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
616 DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
617 BIT(DEVLINK_PARAM_CMODE_PERMANENT),
618 bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
619 bnxt_dl_msix_validate),
620 DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
621 BIT(DEVLINK_PARAM_CMODE_PERMANENT),
622 bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
623 bnxt_dl_msix_validate),
624 DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
625 "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
626 BIT(DEVLINK_PARAM_CMODE_PERMANENT),
627 bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
/* Per-port params: currently empty. */
631 static const struct devlink_param bnxt_dl_port_params[] = {
/*
 * Register device and port devlink parameters and publish them.  NVM params
 * require HWRM spec 0x10600+; on port-param failure the device params are
 * rolled back.  (Fragment truncated — rc checks and returns not visible.)
 */
634 static int bnxt_dl_params_register(struct bnxt *bp)
/* Older firmware lacks NVM_GET/SET_VARIABLE support. */
638 if (bp->hwrm_spec_code < 0x10600)
641 rc = devlink_params_register(bp->dl, bnxt_dl_params,
642 ARRAY_SIZE(bnxt_dl_params));
644 netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
648 rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
649 ARRAY_SIZE(bnxt_dl_port_params));
651 netdev_err(bp->dev, "devlink_port_params_register failed");
/* Roll back the device params registered above. */
652 devlink_params_unregister(bp->dl, bnxt_dl_params,
653 ARRAY_SIZE(bnxt_dl_params));
/* Make the registered params visible to userspace. */
656 devlink_params_publish(bp->dl);
/*
 * Unregister device and port devlink params; a no-op on firmware older than
 * spec 0x10600, mirroring the registration-time check.
 */
661 static void bnxt_dl_params_unregister(struct bnxt *bp)
663 if (bp->hwrm_spec_code < 0x10600)
666 devlink_params_unregister(bp->dl, bnxt_dl_params,
667 ARRAY_SIZE(bnxt_dl_params));
668 devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
669 ARRAY_SIZE(bnxt_dl_port_params));
/*
 * Allocate and register the devlink instance (full ops on the PF, empty ops
 * on a VF), link it to the bnxt device, then register the physical port and
 * the devlink params, unwinding in reverse order on failure.  (Fragment
 * truncated — PF/VF branch condition, rc checks and error labels are only
 * partially visible.)
 */
672 int bnxt_dl_register(struct bnxt *bp)
678 dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
680 dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
682 netdev_warn(bp->dev, "devlink_alloc failed");
/* Cross-link devlink priv and bnxt so each can find the other. */
686 bnxt_link_bp_to_dl(bp, dl);
688 /* Add switchdev eswitch mode setting, if SRIOV supported */
689 if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
690 bp->hwrm_spec_code > 0x10803)
691 bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
693 rc = devlink_register(dl, &bp->pdev->dev);
695 netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
/* Physical port, identified by port_id and carrying the DSN. */
702 devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
703 bp->pf.port_id, false, 0, bp->dsn,
705 rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
707 netdev_err(bp->dev, "devlink_port_register failed");
711 rc = bnxt_dl_params_register(bp);
713 goto err_dl_port_unreg;
/* Error unwind: port, devlink registration, back-link, then free. */
718 devlink_port_unregister(&bp->dl_port);
720 devlink_unregister(dl);
722 bnxt_link_bp_to_dl(bp, NULL);
/*
 * Tear down in reverse of bnxt_dl_register(): params, port, then the
 * devlink instance itself.  (Fragment truncated — the NULL check on dl and
 * devlink_free are not visible.)
 */
727 void bnxt_dl_unregister(struct bnxt *bp)
729 struct devlink *dl = bp->dl;
735 bnxt_dl_params_unregister(bp);
736 devlink_port_unregister(&bp->dl_port);
738 devlink_unregister(dl);