/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"

enum {
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

struct mlx5_irq_info {
	cpumask_var_t mask;
	char name[MLX5_MAX_IRQ_NAME];
	void *context; /* dev_id provided to request_irq */
};

struct mlx5_eq_table {
	struct list_head	comp_eqs_list;
	struct mlx5_eq		pages_eq;
	struct mlx5_eq		cmd_eq;
	struct mlx5_eq		async_eq;

	struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

	/* CQ error events are reported on the async EQ */
	struct mlx5_nb		cq_err_nb;

	struct mutex		lock; /* sync async eqs creations */
	int			num_comp_vectors;
	struct mlx5_irq_info	*irq_info;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rmap;
#endif
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	spin_unlock(&table->lock);

	return cq;
}

static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr)
{
	struct mlx5_eq_comp *eq_comp = eq_ptr;
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_eqe *eqe;
	int set_ci = 0;
	u32 cqn = -1;

	while ((eqe = next_eqe_sw(eq))) {
		struct mlx5_core_cq *cq;

		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();
		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

		cq = mlx5_eq_cq_get(eq, cqn);
		if (likely(cq)) {
			++cq->arm_sn;
			cq->comp(cq);
			mlx5_cq_put(cq);
		} else {
			mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
		}

		++eq->cons_index;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq_comp->tasklet_ctx.task);

	return IRQ_HANDLED;
}
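
/* A worked example of the spare-entry sizing referred to above (assuming the
 * MLX5_NUM_SPARE_EQE value of 0x80 from lib/eq.h): an EQ requested with
 * param->nent = 1024 is allocated roundup_pow_of_two(1024 + 128) = 2048
 * entries by create_map_eq(), so a handler may consume up to 128 EQEs
 * between consumer-index updates without the HCA seeing an overflow.
 */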

/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them.  It is not recommended to use it, unless this is the
 * last resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
	u32 count_eqe;

	disable_irq(eq->core.irqn);
	count_eqe = eq->core.cons_index;
	mlx5_eq_comp_int(eq->core.irqn, eq);
	count_eqe = eq->core.cons_index - count_eqe;
	enable_irq(eq->core.irqn);

	return count_eqe;
}
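
/* Illustrative last-resort use (hypothetical caller): a recovery path that
 * suspects a lost completion interrupt can poll the EQ directly and treat
 * any consumed EQEs as evidence of the loss:
 *
 *	eqe_count = mlx5_eq_poll_irq_disabled(eq);
 *	if (eqe_count)
 *		mlx5_core_warn(dev, "Recovered %d EQEs on EQ 0x%x\n",
 *			       eqe_count, eq->core.eqn);
 */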

static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_eq_table *eqt;
	struct mlx5_core_dev *dev;
	struct mlx5_eqe *eqe;
	int set_ci = 0;

	dev = eq->dev;
	eqt = dev->priv.eq_table;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
			atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
		else
			mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);

		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

		++eq->cons_index;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	return IRQ_HANDLED;
}

static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name,
	      struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	struct mlx5_priv *priv = &dev->priv;
	u8 vecidx = param->index;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;

	if (eq_table->irq_info[vecidx].context)
		return -EEXIST;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
	eq->cons_index = 0;
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
	if (err)
		return err;

	init_eq_buf(eq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_array(&eq->buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	if (!param->mask && MLX5_CAP_GEN(dev, log_max_uctx))
		MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

	MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	snprintf(eq_table->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));
	eq_table->irq_info[vecidx].context = param->context;

	eq->vecidx = vecidx;
	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
	err = request_irq(eq->irqn, param->handler, 0,
			  eq_table->irq_info[vecidx].name, param->context);
	if (err)
		goto err_eq;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

	/* EQs are created in ARMED state */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(eq->irqn, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}

static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	struct mlx5_irq_info *irq_info;
	int err;

	irq_info = &eq_table->irq_info[eq->vecidx];

	mlx5_debug_eq_remove(dev, eq);

	free_irq(eq->irqn, irq_info->context);
	irq_info->context = NULL;

	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(eq->irqn);

	mlx5_buf_free(dev, &eq->buf);

	return err;
}

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock(&table->lock);

	return err;
}

int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock(&table->lock);

	if (!tmp) {
		mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			       cq->cqn, eq->eqn);
		return -ENOENT;
	}

	if (tmp != cq) {
		mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			       cq->cqn, eq->eqn);
		return -EINVAL;
	}

	return 0;
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table;
	int i, err;

	eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
	if (!eq_table)
		return -ENOMEM;

	dev->priv.eq_table = eq_table;

	err = mlx5_eq_debugfs_init(dev);
	if (err)
		goto kvfree_eq_table;

	mutex_init(&eq_table->lock);
	for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

	return 0;

kvfree_eq_table:
	kvfree(eq_table);
	dev->priv.eq_table = NULL;
	return err;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
	kvfree(dev->priv.eq_table);
}

static int create_async_eq(struct mlx5_core_dev *dev, const char *name,
			   struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	if (param->index >= MLX5_EQ_MAX_ASYNC_EQS) {
		err = -ENOSPC;
		goto unlock;
	}

	err = create_map_eq(dev, eq, name, param);
unlock:
	mutex_unlock(&eq_table->lock);
	return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = destroy_unmap_eq(dev, eq);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_eq_table *eqt;
	struct mlx5_core_cq *cq;
	struct mlx5_eqe *eqe;
	struct mlx5_eq *eq;
	u32 cqn;

	/* type == MLX5_EVENT_TYPE_CQ_ERROR */

	eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
	eq  = &eqt->async_eq;
	eqe = data;

	cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
	mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
		       cqn, eqe->data.cq_err.syndrome);

	cq = mlx5_eq_cq_get(eq, cqn);
	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return NOTIFY_OK;
	}

	cq->event(cq, type);

	mlx5_cq_put(cq);

	return NOTIFY_OK;
}

static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
{
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);

	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

	if (mlx5_core_is_ecpf_esw_manager(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE);

	return async_event_mask;
}

static int create_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	int err;

	MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
	mlx5_eq_notifier_register(dev, &table->cq_err_nb);

	param = (struct mlx5_eq_param) {
		.index = MLX5_EQ_CMD_IDX,
		.mask = 1ull << MLX5_EVENT_TYPE_CMD,
		.nent = MLX5_NUM_CMD_EQE,
		.context = &table->cmd_eq,
		.handler = mlx5_eq_async_int,
	};
	err = create_async_eq(dev, "mlx5_cmd_eq", &table->cmd_eq, &param);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		goto err0;
	}

	mlx5_cmd_use_events(dev);

	param = (struct mlx5_eq_param) {
		.index = MLX5_EQ_ASYNC_IDX,
		.mask = gather_async_events_mask(dev),
		.nent = MLX5_NUM_ASYNC_EQE,
		.context = &table->async_eq,
		.handler = mlx5_eq_async_int,
	};
	err = create_async_eq(dev, "mlx5_async_eq", &table->async_eq, &param);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	param = (struct mlx5_eq_param) {
		.index = MLX5_EQ_PAGEREQ_IDX,
		.mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
		.nent = /* TODO: sriov max_vf + */ 1,
		.context = &table->pages_eq,
		.handler = mlx5_eq_async_int,
	};
	err = create_async_eq(dev, "mlx5_pages_eq", &table->pages_eq, &param);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

	return err;

err2:
	destroy_async_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	destroy_async_eq(dev, &table->cmd_eq);
err0:
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	return err;
}

static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	int err;

	err = destroy_async_eq(dev, &table->pages_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
			      err);

	err = destroy_async_eq(dev, &table->async_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
			      err);

	mlx5_cmd_use_polling(dev);

	err = destroy_async_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
			      err);

	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
	return &dev->priv.eq_table->async_eq;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->async_eq.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->cmd_eq.irqn);
}

/* Generic EQ API for mlx5_core consumers.
 * Needed for RDMA ODP EQ for now.
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
		       struct mlx5_eq_param *param)
{
	struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
	int err;

	if (!eq)
		return ERR_PTR(-ENOMEM);

	err = create_async_eq(dev, name, eq, param);
	if (err) {
		kvfree(eq);
		eq = ERR_PTR(err);
	}

	return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (IS_ERR(eq))
		return -EINVAL;

	err = destroy_async_eq(dev, eq);
	if (err)
		goto out;

	kvfree(eq);
out:
	return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);
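
/* Usage sketch for the generic EQ API, loosely modeled on the RDMA ODP
 * consumer; "my_handler" and "my_ctx" are placeholders, the nent value is
 * arbitrary, and .index must name a free async EQ slot (below
 * MLX5_EQ_MAX_ASYNC_EQS):
 *
 *	struct mlx5_eq_param param = {
 *		.index = MLX5_EQ_PFAULT_IDX,
 *		.mask = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT,
 *		.nent = 256,
 *		.context = my_ctx,
 *		.handler = my_handler,
 *	};
 *	struct mlx5_eq *eq = mlx5_eq_create_generic(dev, "my_eq", &param);
 *
 *	if (IS_ERR(eq))
 *		return PTR_ERR(eq);
 *	...
 *	mlx5_eq_destroy_generic(dev, eq);
 */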

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
	u32 ci = eq->cons_index + cc;
	struct mlx5_eqe *eqe;

	eqe = get_eqe(eq, ci & (eq->nent - 1));
	eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
	/* Make sure we read EQ entry contents after we've
	 * checked the ownership bit.
	 */
	if (eqe)
		dma_rmb();

	return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val;

	eq->cons_index += cc;
	val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
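
/* Together, mlx5_eq_get_eqe() and mlx5_eq_update_ci() let a generic EQ
 * consumer poll outside of this file's IRQ handlers.  A minimal sketch
 * (the processing step is a placeholder):
 *
 *	struct mlx5_eqe *eqe;
 *	u32 cc = 0;
 *
 *	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
 *		// handle eqe->type / eqe->data here
 *		cc++;
 *	}
 *	mlx5_eq_update_ci(eq, cc, true);	// consume cc EQEs and re-arm
 */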

/* Completion EQs */

static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv = &mdev->priv;
	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
	int irq = pci_irq_vector(mdev->pdev, vecidx);
	struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];

	if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) {
		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
			irq_info->mask);

	if (IS_ENABLED(CONFIG_SMP) &&
	    irq_set_affinity_hint(irq, irq_info->mask))
		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);

	return 0;
}
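
/* cpumask_local_spread(i, node) picks the i'th online CPU, preferring CPUs
 * local to 'node'.  For example, if NUMA node 0 holds CPUs 0-3, completion
 * vectors 0..3 are hinted to CPUs 0..3 and higher vectors spill over to
 * remote CPUs (the exact layout depends on the online CPU map).
 */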

static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
	struct mlx5_priv *priv = &mdev->priv;
	int irq = pci_irq_vector(mdev->pdev, vecidx);
	struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];

	irq_set_affinity_hint(irq, NULL);
	free_cpumask_var(irq_info->mask);
}

static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
{
	int err;
	int i;

	for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) {
		err = set_comp_irq_affinity_hint(mdev, i);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	for (i--; i >= 0; i--)
		clear_comp_irq_affinity_hint(mdev, i);

	return err;
}

static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
{
	int i;

	for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++)
		clear_comp_irq_affinity_hint(mdev, i);
}

static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;

	clear_comp_irqs_affinity_hints(dev);

#ifdef CONFIG_RFS_ACCEL
	if (table->rmap) {
		free_irq_cpu_rmap(table->rmap);
		table->rmap = NULL;
	}
#endif
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		if (destroy_unmap_eq(dev, &eq->core))
			mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
				       eq->core.eqn);
		tasklet_disable(&eq->tasklet_ctx.task);
		kfree(eq);
	}
}

static int create_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq_comp *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
#ifdef CONFIG_RFS_ACCEL
	table->rmap = alloc_irq_cpu_rmap(ncomp_vec);
	if (!table->rmap)
		return -ENOMEM;
#endif
	for (i = 0; i < ncomp_vec; i++) {
		int vecidx = i + MLX5_EQ_VEC_COMP_BASE;
		struct mlx5_eq_param param = {};

		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
			     (unsigned long)&eq->tasklet_ctx);

#ifdef CONFIG_RFS_ACCEL
		irq_cpu_rmap_add(table->rmap, pci_irq_vector(dev->pdev, vecidx));
#endif
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		param = (struct mlx5_eq_param) {
			.index = vecidx,
			.mask = 0,
			.nent = nent,
			.context = &eq->core,
			.handler = mlx5_eq_comp_int,
		};
		err = create_map_eq(dev, &eq->core, name, &param);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
		/* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
		list_add_tail(&eq->list, &table->comp_eqs_list);
	}

	err = set_comp_irq_affinity_hints(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
		goto clean;
	}

	return 0;

clean:
	destroy_comp_eqs(dev);
	return err;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;
	int err = -ENOENT;
	int i = 0;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (i++ == vector) {
			*eqn = eq->core.eqn;
			*irqn = eq->core.irqn;
			err = 0;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
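
/* Typical (hypothetical) use by a consumer creating a CQ on completion
 * vector 'vec'; c_eqn is the EQ number field of the CQ context:
 *
 *	int eqn;
 *	unsigned int irqn;
 *
 *	err = mlx5_vector2eqn(dev, vec, &eqn, &irqn);
 *	if (err)
 *		return err;
 *	MLX5_SET(cqc, cqc, c_eqn, eqn);
 */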

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->num_comp_vectors;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);

struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
	/* TODO: consider irq_get_affinity_mask(irq) */
	return dev->priv.eq_table->irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->rmap;
}
#endif

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->core.eqn == eqn)
			return eq;
	}

	return ERR_PTR(-ENOENT);
}

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	int i, max_eqs;

	clear_comp_irqs_affinity_hints(dev);

#ifdef CONFIG_RFS_ACCEL
	if (table->rmap) {
		free_irq_cpu_rmap(table->rmap);
		table->rmap = NULL;
	}
#endif

	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
	max_eqs = table->num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
	for (i = max_eqs - 1; i >= 0; i--) {
		if (!table->irq_info[i].context)
			continue;
		free_irq(pci_irq_vector(dev->pdev, i), table->irq_info[i].context);
		table->irq_info[i].context = NULL;
	}
	mutex_unlock(&table->lock);
	pci_free_irq_vectors(dev->pdev);
}

static int alloc_irq_vectors(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = priv->eq_table;
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int err;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_EQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	table->irq_info = kcalloc(nvec, sizeof(*table->irq_info), GFP_KERNEL);
	if (!table->irq_info)
		return -ENOMEM;

	nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1,
				     nvec, PCI_IRQ_MSIX);
	if (nvec < 0) {
		err = nvec;
		goto err_free_irq_info;
	}

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;

err_free_irq_info:
	kfree(table->irq_info);
	return err;
}

static void free_irq_vectors(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_free_irq_vectors(dev->pdev);
	kfree(priv->eq_table->irq_info);
}

int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
	int err;

	err = alloc_irq_vectors(dev);
	if (err) {
		mlx5_core_err(dev, "alloc irq vectors failed\n");
		return err;
	}

	err = create_async_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create async EQs\n");
		goto err_async_eqs;
	}

	err = create_comp_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create completion EQs\n");
		goto err_comp_eqs;
	}

	return 0;

err_comp_eqs:
	destroy_async_eqs(dev);
err_async_eqs:
	free_irq_vectors(dev);
	return err;
}

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
	destroy_comp_eqs(dev);
	destroy_async_eqs(dev);
	free_irq_vectors(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
		return -EINVAL;

	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
		return -EINVAL;

	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
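
/* Usage sketch for the notifier API; mirrors how cq_err_nb is wired up in
 * create_async_eqs() above.  "my_port_event" and "my_nb" are placeholders:
 *
 *	static int my_port_event(struct notifier_block *nb,
 *				 unsigned long type, void *data)
 *	{
 *		struct mlx5_eqe *eqe = data;
 *
 *		// type == MLX5_EVENT_TYPE_PORT_CHANGE; inspect eqe->data.port
 *		return NOTIFY_OK;
 *	}
 *
 *	MLX5_NB_INIT(&my_nb, my_port_event, PORT_CHANGE);
 *	mlx5_eq_notifier_register(dev, &my_nb);
 *	...
 *	mlx5_eq_notifier_unregister(dev, &my_nb);
 */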