// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);
/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_mask_irq(data);
}
void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	for (i = 0; i < msixcnt; i++)
		idxd_mask_msix_vector(idxd, i);
}
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_unmask_irq(data);
}
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
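
/*
 * Descriptor bookkeeping for kernel work queues: each WQ entry is backed
 * by a software struct idxd_desc that points at a hardware descriptor
 * (struct dsa_hw_desc) the device fetches and a completion record the
 * device writes back. The helpers below allocate and free the per-WQ
 * descriptor pointer arrays on the device's NUMA node.
 */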
static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}
static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}
static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}
static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}
/* WQ control bits */
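/*
 * A kernel WQ needs three parallel sets of resources sized to the WQ
 * depth: hardware descriptors, software descriptors, and one coherent
 * DMA region holding all completion records. A sbitmap_queue hands out
 * free descriptor slots at submission time.
 */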
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	wq->num_descs = wq->size;
	num_descs = wq->size;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
	wq->compls = dma_alloc_coherent(dev, wq->compls_size,
					&wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		desc->completion = &wq->compls[i];
		desc->compl_dma = wq->compls_addr +
			sizeof(struct dsa_completion_record) * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
		dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}
void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_queue_free(&wq->sbq);
}
int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}
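
/*
 * Disable and drain take a WQ bitmask operand rather than a plain index:
 * BIT(wq->id % 16) selects a bit within a 16-bit mask and (wq->id / 16)
 * in the upper bits selects which mask. Enable, by contrast, passes the
 * WQ index directly.
 */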
int idxd_wq_disable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}
void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}
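
/*
 * Work submission to a WQ goes through an MMIO portal in the device's
 * WQ BAR; each WQ gets its own IDXD_PORTAL_SIZE window at an offset
 * derived from its id.
 */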
int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start = start + wq->id * IDXD_PORTAL_SIZE;

	wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->dportal)
		return -ENOMEM;
	dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);

	return 0;
}
void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->dportal);
}
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i, wq_offset;

	lockdep_assert_held(&idxd->dev_lock);
	memset(&wq->wqcfg, 0, sizeof(wq->wqcfg));
	wq->type = IDXD_WQT_NONE;
	wq->size = 0;
	wq->group = NULL;
	wq->threshold = 0;
	wq->priority = 0;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);

	for (i = 0; i < 8; i++) {
		wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
		iowrite32(0, idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}
}
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}
/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is setup with interrupts,
 * all commands will be done via interrupt completion.
 */
void idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;
	unsigned long flags;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}
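
/*
 * Only one device command may be outstanding at a time. idxd_cmd_exec()
 * serializes callers on IDXD_FLAG_CMD_RUNNING under dev_lock, submits
 * the command with an interrupt request, and sleeps until the command
 * interrupt handler signals idxd->cmd_done.
 */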
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->dev_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After command submitted, release lock and go to sleep until
	 * the command completes via interrupt.
	 */
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	wait_for_completion(&done);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (status)
		*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}
int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}
void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			idxd_wq_disable_cleanup(wq);
			wq->state = IDXD_WQ_DISABLED;
		}
	}
}
int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;
	unsigned long flags;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return 0;
}
void idxd_device_reset(struct idxd_device *idxd)
{
	unsigned long flags;

	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}
/* Device configuration bits */
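/*
 * Each group owns a 64-byte GRPCFG block at grpcfg_offset + id * 64:
 * a WQ bitmap spread across four u64 words, an engine bitmap at byte
 * offset 32, and the group flags at byte offset 40.
 */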
static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < 4; i++) {
		grpcfg_offset = idxd->grpcfg_offset +
			group->id * 64 + i * sizeof(u64);
		iowrite64(group->grpcfg.wqs[i],
			  idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}
static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth token limit */
	if (idxd->token_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.token_limit = idxd->token_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}
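
/*
 * WQCFG is 8 consecutive u32 words per WQ at wqcfg_offset + id * 32,
 * covering size, threshold, mode/priv/priority, and the max transfer
 * and batch shifts taken from GENCAP.
 */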
static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	if (!wq->group)
		return 0;

	memset(&wq->wqcfg, 0, sizeof(union wqcfg));

	/* byte 0-3 */
	wq->wqcfg.wq_size = wq->size;

	if (wq->size == 0) {
		dev_warn(dev, "Incorrect work queue size: 0\n");
		return -EINVAL;
	}

	/* bytes 4-7 */
	wq->wqcfg.wq_thresh = wq->threshold;

	/* byte 8-11 */
	wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
	wq->wqcfg.mode = 1;	/* dedicated mode */
	wq->wqcfg.priority = wq->priority;

	/* bytes 12-15 */
	wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
	wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < 8; i++) {
		wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
		iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}
static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}
static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_token_limit = group->use_token_limit;
		group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
		if (group->tokens_allowed)
			group->grpcfg.flags.tokens_allowed =
				group->tokens_allowed;
		else
			group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
	}
}
static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = &idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}
static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = &idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;
		if (!wq->size)
			continue;

		if (!wq_dedicated(wq)) {
			dev_warn(dev, "No shared workqueue support.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0)
		return -EINVAL;

	return 0;
}
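
/*
 * idxd_device_config() first builds the in-memory group/WQ/engine state
 * (wqs, engines, flags) and only then flushes it to hardware: WQCFG
 * writes followed by GRPCFG writes.
 */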
int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}