
drm/amd/display: fix DP link loss being handled only one time
Author:     Hersen Wu <hersenxs.wu@amd.com>
AuthorDate: Tue, 17 Jan 2023 15:58:34 +0000 (10:58 -0500)
Committer:  Alex Deucher <alexander.deucher@amd.com>
CommitDate: Tue, 31 Jan 2023 19:03:43 +0000 (14:03 -0500)
[Why]
Linux amdgpu defers handling of the link lost IRQ: DM adds a handle
request to the IRQ work queue only for the first link lost IRQ. If
DP link training fails while that link loss is being handled, the
link is never enabled again.

[How]
Allow a new link lost handle request to be added to the work queue
before running DP link training for the link loss.
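
For context, the ordering this patch establishes can be sketched in
plain user-space C. This is a minimal sketch only; the struct and
helper names below are invented stand-ins, not the kernel or DC API:

#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

struct offload_wq {
	pthread_mutex_t lock;
	bool is_handling_link_loss; /* one link-loss work item in flight */
};

/* Stand-ins for the real DC helpers (invented for this sketch). */
static bool link_status_lost(void) { return true; }
static void retrain_link(void)    { printf("retraining link\n"); }

static void link_loss_work(struct offload_wq *wq)
{
	/* Clear the flag *before* the slow retrain: if the link drops
	 * again while training, the IRQ handler can queue a new work
	 * item instead of seeing the flag set and dropping the event.
	 */
	pthread_mutex_lock(&wq->lock);
	wq->is_handling_link_loss = false;
	pthread_mutex_unlock(&wq->lock);

	/* Re-read the (simulated) DPCD status: the link may have
	 * recovered since the IRQ fired, in which case skip training.
	 */
	if (link_status_lost())
		retrain_link();
}

int main(void)
{
	struct offload_wq wq = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.is_handling_link_loss = true, /* set when work was queued */
	};
	link_loss_work(&wq);
	return 0;
}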

Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
Acked-by: Alex Hung <alex.hung@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/dc_link.h

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ffbbd50..f4451fa 100644
@@ -1301,10 +1301,28 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
        else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
                        dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
                        dc_link_dp_allow_hpd_rx_irq(dc_link)) {
-               dc_link_dp_handle_link_loss(dc_link);
+               /* offload_work->data comes from handle_hpd_rx_irq ->
+                * schedule_hpd_rx_offload_work, i.e. the deferred
+                * handling of an HPD short pulse. By the time we get
+                * here the link status may have changed, so read the
+                * latest link status from the DPCD registers; if the
+                * link status is good, skip running link training
+                * again.
+                */
+               union hpd_irq_data irq_data;
+
+               memset(&irq_data, 0, sizeof(irq_data));
+
+               /* Before calling dc_link_dp_handle_link_loss(), allow a
+                * new link lost handle request to be added to the work
+                * queue, in case the link is lost again at the end of
+                * dc_link_dp_handle_link_loss().
+                */
                spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
                offload_work->offload_wq->is_handling_link_loss = false;
                spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+
+               if ((dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
+                       dc_link_check_link_loss_status(dc_link, &irq_data))
+                       dc_link_dp_handle_link_loss(dc_link);
        }
        mutex_unlock(&adev->dm.dc_lock);
 
@@ -3237,7 +3255,7 @@ static void handle_hpd_rx_irq(void *param)
        union hpd_irq_data hpd_irq_data;
        bool link_loss = false;
        bool has_left_work = false;
-       int idx = aconnector->base.index;
+       int idx = dc_link->link_index;
        struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
 
        memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
@@ -3379,7 +3397,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
                                        (void *) aconnector);
 
                        if (adev->dm.hpd_rx_offload_wq)
-                               adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+                               adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
                                        aconnector;
                }
        }
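
A note on the index change above: the hpd_rx_offload_wq array is keyed
by DC link, so dc_link->link_index is presumably the matching key here;
the DRM connector index used before is a different numbering and need
not line up with the link index.
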
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 85b5784..64d5d9b 100644
@@ -433,6 +433,9 @@ void dc_link_dp_handle_link_loss(struct dc_link *link);
 bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
 bool dc_link_check_link_loss_status(struct dc_link *link,
                                       union hpd_irq_data *hpd_irq_dpcd_data);
+enum dc_status dp_read_hpd_rx_irq_data(
+       struct dc_link *link,
+       union hpd_irq_data *irq_data);
 struct dc_sink_init_data;
 
 struct dc_sink *dc_link_add_remote_sink(