
drm/nouveau/intr: add nvkm_subdev_intr() compatibility
author Ben Skeggs <bskeggs@redhat.com>
Wed, 1 Jun 2022 10:46:53 +0000 (20:46 +1000)
committer Ben Skeggs <bskeggs@redhat.com>
Wed, 9 Nov 2022 00:44:35 +0000 (10:44 +1000)
It's quite a lot of tedious and error-prone work to switch over all the
subdevs at once, so allow an nvkm_intr to request new-style handlers to
be created that wrap the existing interfaces.

This will allow a more gradual transition.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
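
The opt-in is per interrupt source: a chip's nvkm_intr_data table sets .legacy on the
entries whose target subdev still only implements the old ->intr hook, and
nvkm_intr_rearm() wires a wrapper handler up for those entries on the first rearm.
A minimal sketch of such a table follows; the subdev types and mask bits shown are
illustrative assumptions, not values taken from this commit.

/* Illustrative table only: entries with .legacy = true get an auto-created
 * handler that just calls the subdev's existing nvkm_subdev_intr() path.
 * The mask bits below are placeholders, not real hardware values.
 */
static const struct nvkm_intr_data
example_intr_data[] = {
	{ .type = NVKM_ENGINE_DISP, .inst = 0, .leaf = 0, .mask = 0x04000000, .legacy = true },
	{ .type = NVKM_SUBDEV_GPIO, .inst = 0, .leaf = 0, .mask = 0x00200000, .legacy = true },
	{}	/* table is 0-terminated via .mask, as noted in intr.h */
};

Entries of type NVKM_SUBDEV_TOP are handled differently: their per-device targets are
discovered from the topology table instead, as done in nvkm_intr_subdev_add() below.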
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
drivers/gpu/drm/nouveau/include/nvkm/core/intr.h
drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
drivers/gpu/drm/nouveau/nvkm/core/intr.c

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 555e3fe..f65b500 100644
@@ -69,6 +69,7 @@ struct nvkm_device {
                int irq;
                bool alloc;
                bool armed;
+               bool legacy_done;
        } intr;
 };
 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/intr.h b/drivers/gpu/drm/nouveau/include/nvkm/core/intr.h
index ec7db13..a003d6a 100644
@@ -30,6 +30,7 @@ struct nvkm_intr {
                int inst;
                int leaf;
                u32 mask; /* 0-terminated. */
+               bool legacy; /* auto-create "legacy" nvkm_subdev_intr() handler */
        } *data;
 
        struct nvkm_subdev *subdev;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 96113c8..20e1fc9 100644
@@ -21,6 +21,8 @@ struct nvkm_subdev {
        u32 debug;
        struct list_head head;
 
+       struct nvkm_inth inth;
+
        void **pself;
        bool oneinit;
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/intr.c b/drivers/gpu/drm/nouveau/nvkm/core/intr.c
index ff95e01..3f3fe3d 100644
@@ -265,12 +265,73 @@ nvkm_intr_add(const struct nvkm_intr_func *func, const struct nvkm_intr_data *da
        return 0;
 }
 
+static irqreturn_t
+nvkm_intr_subdev(struct nvkm_inth *inth)
+{
+       struct nvkm_subdev *subdev = container_of(inth, typeof(*subdev), inth);
+
+       nvkm_subdev_intr(subdev);
+       return IRQ_HANDLED;
+}
+
+static void
+nvkm_intr_subdev_add_dev(struct nvkm_intr *intr, enum nvkm_subdev_type type, int inst)
+{
+       struct nvkm_subdev *subdev;
+       enum nvkm_intr_prio prio;
+       int ret;
+
+       subdev = nvkm_device_subdev(intr->subdev->device, type, inst);
+       if (!subdev || !subdev->func->intr)
+               return;
+
+       if (type == NVKM_ENGINE_DISP)
+               prio = NVKM_INTR_PRIO_VBLANK;
+       else
+               prio = NVKM_INTR_PRIO_NORMAL;
+
+       ret = nvkm_inth_add(intr, NVKM_INTR_SUBDEV, prio, subdev, nvkm_intr_subdev, &subdev->inth);
+       if (WARN_ON(ret))
+               return;
+
+       nvkm_inth_allow(&subdev->inth);
+}
+
+static void
+nvkm_intr_subdev_add(struct nvkm_intr *intr)
+{
+       const struct nvkm_intr_data *data;
+       struct nvkm_device *device = intr->subdev->device;
+       struct nvkm_top_device *tdev;
+
+       for (data = intr->data; data && data->mask; data++) {
+               if (data->legacy) {
+                       if (data->type == NVKM_SUBDEV_TOP) {
+                               list_for_each_entry(tdev, &device->top->device, head) {
+                                       if (tdev->intr < 0 || !(data->mask & BIT(tdev->intr)))
+                                               continue;
+
+                                       nvkm_intr_subdev_add_dev(intr, tdev->type, tdev->inst);
+                               }
+                       } else {
+                               nvkm_intr_subdev_add_dev(intr, data->type, data->inst);
+                       }
+               }
+       }
+}
+
 void
 nvkm_intr_rearm(struct nvkm_device *device)
 {
        struct nvkm_intr *intr;
        int i;
 
+       if (unlikely(!device->intr.legacy_done)) {
+               list_for_each_entry(intr, &device->intr.intr, head)
+                       nvkm_intr_subdev_add(intr);
+               device->intr.legacy_done = true;
+       }
+
        spin_lock_irq(&device->intr.lock);
        list_for_each_entry(intr, &device->intr.intr, head) {
                for (i = 0; intr->func->block && i < intr->leaves; i++) {