OSDN Git Service

Merge tag 'amd-drm-fixes-5.7-2020-04-08' of git://people.freedesktop.org/~agd5f/linux...
author: Dave Airlie <airlied@redhat.com>
Thu, 9 Apr 2020 20:42:52 +0000 (06:42 +1000)
committer: Dave Airlie <airlied@redhat.com>
Thu, 9 Apr 2020 20:42:53 +0000 (06:42 +1000)
amd-drm-fixes-5.7-2020-04-08:

amdgpu:
- Various Renoir fixes
- Fix gfx clockgating sequence on gfx10
- RAS fixes
- Avoid MST property creation after registration
- Various cursor/viewport fixes
- Fix a confusing log message about optional firmwares

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200408222240.3942-1-alexander.deucher@amd.com
22 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/include/dal_asic_id.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
drivers/gpu/drm/amd/powerplay/renoir_ppt.h

index faa3e71..559dc24 100644 (file)
@@ -2340,8 +2340,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 {
        int i, r;
 
-       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
-       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
@@ -3356,6 +3354,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
                }
        }
 
+       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
        amdgpu_amdkfd_suspend(adev, !fbcon);
 
        amdgpu_ras_suspend(adev);
index f197f1b..abe94a5 100644 (file)
@@ -89,7 +89,8 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
                        adev->pm.ac_power = true;
                else
                        adev->pm.ac_power = false;
-               if (adev->powerplay.pp_funcs->enable_bapm)
+               if (adev->powerplay.pp_funcs &&
+                   adev->powerplay.pp_funcs->enable_bapm)
                        amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
                mutex_unlock(&adev->pm.mutex);
 
index be50867..deaa268 100644 (file)
@@ -818,7 +818,7 @@ static int psp_ras_initialize(struct psp_context *psp)
 
        if (!psp->adev->psp.ta_ras_ucode_size ||
            !psp->adev->psp.ta_ras_start_addr) {
-               dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+               dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
                return 0;
        }
 
@@ -902,7 +902,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
 
        if (!psp->adev->psp.ta_hdcp_ucode_size ||
            !psp->adev->psp.ta_hdcp_start_addr) {
-               dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
+               dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
                return 0;
        }
 
@@ -1048,7 +1048,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
 
        if (!psp->adev->psp.ta_dtm_ucode_size ||
            !psp->adev->psp.ta_dtm_start_addr) {
-               dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
+               dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
                return 0;
        }
 
index 3c32a94..ab379b4 100644 (file)
@@ -1424,12 +1424,22 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
 {
        struct amdgpu_ras *ras =
                container_of(work, struct amdgpu_ras, recovery_work);
+       struct amdgpu_device *remote_adev = NULL;
+       struct amdgpu_device *adev = ras->adev;
+       struct list_head device_list, *device_list_handle =  NULL;
+       struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
+
+       /* Build list of devices to query RAS related errors */
+       if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+               device_list_handle = &hive->device_list;
+       } else {
+               list_add_tail(&adev->gmc.xgmi.head, &device_list);
+               device_list_handle = &device_list;
+       }
 
-       /*
-        * Query and print non zero error counter per IP block for
-        * awareness before recovering GPU.
-        */
-       amdgpu_ras_log_on_err_counter(ras->adev);
+       list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) {
+               amdgpu_ras_log_on_err_counter(remote_adev);
+       }
 
        if (amdgpu_device_should_recover_gpu(ras->adev))
                amdgpu_device_gpu_recover(ras->adev, 0);
index f6e3f59..d78059f 100644 (file)
@@ -279,7 +279,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
 
 #define DEFAULT_SH_MEM_CONFIG \
        ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
-        (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
+        (SH_MEM_ALIGNMENT_MODE_DWORD << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
         (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
         (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
 
@@ -4104,6 +4104,12 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
 
        /* It is disabled by HW by default */
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+               /* 0 - Disable some blocks' MGCG */
+               WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+               WREG32_SOC15(GC, 0, mmCGTT_WD_CLK_CTRL, 0xff000000);
+               WREG32_SOC15(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xff000000);
+               WREG32_SOC15(GC, 0, mmCGTT_IA_CLK_CTRL, 0xff000000);
+
                /* 1 - RLC_CGTT_MGCG_OVERRIDE */
                def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
                data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
@@ -4143,19 +4149,20 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
                if (def != data)
                        WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
 
-               /* 2 - disable MGLS in RLC */
+               /* 2 - disable MGLS in CP */
+               data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+               if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
+                       data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+                       WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
+               }
+
+               /* 3 - disable MGLS in RLC */
                data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
                if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
                        data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
                        WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
                }
 
-               /* 3 - disable MGLS in CP */
-               data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
-               if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
-                       data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
-                       WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
-               }
        }
 }
 
@@ -4266,7 +4273,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
                /* ===  CGCG /CGLS for GFX 3D Only === */
                gfx_v10_0_update_3d_clock_gating(adev, enable);
                /* ===  MGCG + MGLS === */
-               gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
+               /* gfx_v10_0_update_medium_grain_clock_gating(adev, enable); */
        }
 
        if (adev->cg_flags &
index 608ffe3..e6b113e 100644 (file)
@@ -1217,6 +1217,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
                        adev->gfx.mec_fw_write_wait = true;
                break;
        default:
+               adev->gfx.me_fw_write_wait = true;
+               adev->gfx.mec_fw_write_wait = true;
                break;
        }
 }
index cceb46f..dce945e 100644 (file)
@@ -710,14 +710,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
 
                sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, SEC %d\n", i,
                                 vml2_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
 
                ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, DED %d\n", i,
                                 vml2_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
index 0d413fa..c0e3efc 100644 (file)
@@ -1539,8 +1539,11 @@ static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
        { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 },
 };
 
-static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
-       uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+static int mmhub_v9_4_get_ras_error_count(struct amdgpu_device *adev,
+                                         const struct soc15_reg_entry *reg,
+                                         uint32_t value,
+                                         uint32_t *sec_count,
+                                         uint32_t *ded_count)
 {
        uint32_t i;
        uint32_t sec_cnt, ded_cnt;
@@ -1553,7 +1556,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
                                mmhub_v9_4_ras_fields[i].sec_count_mask) >>
                                mmhub_v9_4_ras_fields[i].sec_count_shift;
                if (sec_cnt) {
-                       DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+                       dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n",
                                mmhub_v9_4_ras_fields[i].name,
                                sec_cnt);
                        *sec_count += sec_cnt;
@@ -1563,7 +1566,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
                                mmhub_v9_4_ras_fields[i].ded_count_mask) >>
                                mmhub_v9_4_ras_fields[i].ded_count_shift;
                if (ded_cnt) {
-                       DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+                       dev_info(adev->dev, "MMHUB SubBlock %s, DED %d\n",
                                mmhub_v9_4_ras_fields[i].name,
                                ded_cnt);
                        *ded_count += ded_cnt;
@@ -1588,7 +1591,7 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
                reg_value =
                        RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
                if (reg_value)
-                       mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
+                       mmhub_v9_4_get_ras_error_count(adev, &mmhub_v9_4_edc_cnt_regs[i],
                                reg_value, &sec_count, &ded_count);
        }
 
index bab587a..f7c5cdc 100644 (file)
@@ -4723,10 +4723,10 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
 static int
 amdgpu_dm_connector_late_register(struct drm_connector *connector)
 {
+#if defined(CONFIG_DEBUG_FS)
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
 
-#if defined(CONFIG_DEBUG_FS)
        connector_debugfs_init(amdgpu_dm_connector);
 #endif
 
@@ -5929,7 +5929,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
                                adev->mode_info.underscan_vborder_property,
                                0);
 
-       drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
+       if (!aconnector->mst_port)
+               drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
 
        /* This defaults to the max in the range, but we want 8bpc for non-edp. */
        aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
@@ -5948,8 +5949,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
                        &aconnector->base.base,
                        dm->ddev->mode_config.hdr_output_metadata_property, 0);
 
-               drm_connector_attach_vrr_capable_property(
-                       &aconnector->base);
+               if (!aconnector->mst_port)
+                       drm_connector_attach_vrr_capable_property(&aconnector->base);
+
 #ifdef CONFIG_DRM_AMD_DC_HDCP
                if (adev->dm.hdcp_workqueue)
                        drm_connector_attach_content_protection_property(&aconnector->base, true);
@@ -6272,12 +6274,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
            y <= -amdgpu_crtc->max_cursor_height)
                return 0;
 
-       if (crtc->primary->state) {
-               /* avivo cursor are offset into the total surface */
-               x += crtc->primary->state->src_x >> 16;
-               y += crtc->primary->state->src_y >> 16;
-       }
-
        if (x < 0) {
                xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
                x = 0;
@@ -6287,6 +6283,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
                y = 0;
        }
        position->enable = true;
+       position->translate_by_source = true;
        position->x = x;
        position->y = y;
        position->x_hotspot = xorigin;
index e8208df..fabbe78 100644 (file)
@@ -410,6 +410,14 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        drm_connector_attach_encoder(&aconnector->base,
                                     &aconnector->mst_encoder->base);
 
+       connector->max_bpc_property = master->base.max_bpc_property;
+       if (connector->max_bpc_property)
+               drm_connector_attach_max_bpc_property(connector, 8, 16);
+
+       connector->vrr_capable_property = master->base.vrr_capable_property;
+       if (connector->vrr_capable_property)
+               drm_connector_attach_vrr_capable_property(connector);
+
        drm_object_attach_property(
                &connector->base,
                dev->mode_config.path_property,
index ab267dd..24c5765 100644 (file)
@@ -643,7 +643,7 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
        /* Find lowest DPM, FCLK is filled in reverse order*/
 
        for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
-               if (clock_table->FClocks[i].Freq != 0) {
+               if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) {
                        j = i;
                        break;
                }
index f21bbb2..8489f1e 100644 (file)
@@ -283,6 +283,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
        int i = 0;
        bool ret = false;
 
+       stream->adjust = *adjust;
+
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
@@ -1859,8 +1861,9 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
                // Else we fallback to mem compare.
                } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
                        dc->optimized_required = true;
-               } else if (dc->wm_optimized_required)
-                       dc->optimized_required = true;
+               }
+
+               dc->optimized_required |= dc->wm_optimized_required;
        }
 
        return type;
@@ -2462,7 +2465,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
        enum surface_update_type update_type;
        struct dc_state *context;
        struct dc_context *dc_ctx = dc->ctx;
-       int i;
+       int i, j;
 
        stream_status = dc_stream_get_status(stream);
        context = dc->current_state;
@@ -2500,6 +2503,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
 
                copy_surface_update_to_plane(surface, &srf_updates[i]);
 
+               if (update_type >= UPDATE_TYPE_MED) {
+                       for (j = 0; j < dc->res_pool->pipe_count; j++) {
+                               struct pipe_ctx *pipe_ctx =
+                                       &context->res_ctx.pipe_ctx[j];
+
+                               if (pipe_ctx->plane_state != surface)
+                                       continue;
+
+                               resource_build_scaling_params(pipe_ctx);
+                       }
+               }
        }
 
        copy_stream_update_to_stream(dc, context, stream, stream_update);
index 25c50bc..a8dc308 100644 (file)
@@ -385,6 +385,8 @@ struct dc_cursor_position {
         */
        bool enable;
 
+       /* Translate cursor x/y by the source rectangle for each plane. */
+       bool translate_by_source;
 };
 
 struct dc_cursor_mi_param {
index 0976e37..c279982 100644 (file)
@@ -2685,6 +2685,23 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
                .mirror = pipe_ctx->plane_state->horizontal_mirror
        };
 
+       /**
+        * If the cursor's source viewport is clipped then we need to
+        * translate the cursor to appear in the correct position on
+        * the screen.
+        *
+        * This translation isn't affected by scaling so it needs to be
+        * done *after* we adjust the position for the scale factor.
+        *
+        * This is only done by opt-in for now since there are still
+        * some usecases like tiled display that might enable the
+        * cursor on both streams while expecting dc to clip it.
+        */
+       if (pos_cpy.translate_by_source) {
+               pos_cpy.x += pipe_ctx->plane_state->src_rect.x;
+               pos_cpy.y += pipe_ctx->plane_state->src_rect.y;
+       }
+
        if (pipe_ctx->plane_state->address.type
                        == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
                pos_cpy.enable = false;
index 0be0100..b035754 100644 (file)
@@ -3021,12 +3021,50 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
        int x_pos = pos_cpy.x;
        int y_pos = pos_cpy.y;
 
-       // translate cursor from stream space to plane space
+       /**
+        * DC cursor is stream space, HW cursor is plane space and drawn
+        * as part of the framebuffer.
+        *
+        * Cursor position can't be negative, but hotspot can be used to
+        * shift cursor out of the plane bounds. Hotspot must be smaller
+        * than the cursor size.
+        */
+
+       /**
+        * Translate cursor from stream space to plane space.
+        *
+        * If the cursor is scaled then we need to scale the position
+        * to be in the approximately correct place. We can't do anything
+        * about the actual size being incorrect, that's a limitation of
+        * the hardware.
+        */
        x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
                        pipe_ctx->plane_state->dst_rect.width;
        y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
                        pipe_ctx->plane_state->dst_rect.height;
 
+       /**
+        * If the cursor's source viewport is clipped then we need to
+        * translate the cursor to appear in the correct position on
+        * the screen.
+        *
+        * This translation isn't affected by scaling so it needs to be
+        * done *after* we adjust the position for the scale factor.
+        *
+        * This is only done by opt-in for now since there are still
+        * some usecases like tiled display that might enable the
+        * cursor on both streams while expecting dc to clip it.
+        */
+       if (pos_cpy.translate_by_source) {
+               x_pos += pipe_ctx->plane_state->src_rect.x;
+               y_pos += pipe_ctx->plane_state->src_rect.y;
+       }
+
+       /**
+        * If the position is negative then we need to add to the hotspot
+        * to shift the cursor outside the plane.
+        */
+
        if (x_pos < 0) {
                pos_cpy.x_hotspot -= x_pos;
                x_pos = 0;
index 8b71222..07265ca 100644 (file)
@@ -585,7 +585,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                .disable_pplib_clock_request = false,
                .disable_pplib_wm_range = false,
                .pplib_wm_report_mode = WM_REPORT_DEFAULT,
-               .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
                .force_single_disp_pipe_split = true,
                .disable_dcc = DCC_ENABLE,
                .voltage_align_fclk = true,
index 2333182..22f421e 100644 (file)
@@ -1373,6 +1373,7 @@ static void dcn20_update_dchubp_dpp(
        }
 
        if (pipe_ctx->update_flags.bits.viewport ||
+                       (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
                        (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
                        (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
 
index 8a87d0e..2359e88 100644 (file)
 #define RAVEN2_A0 0x81
 #define RAVEN1_F0 0xF0
 #define RAVEN_UNKNOWN 0xFF
+#define RENOIR_A0 0x91
 #ifndef ASICREV_IS_RAVEN
 #define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
 #endif
@@ -171,8 +172,6 @@ enum {
 #define ASICREV_IS_NAVI10_P(eChipRev)        (eChipRev < NV_NAVI12_P_A0)
 #define ASICREV_IS_NAVI12_P(eChipRev)        ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
 #define ASICREV_IS_NAVI14_M(eChipRev)        ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
-#define RENOIR_A0 0x91
-#define DEVICE_ID_RENOIR_1636 0x1636   // Renoir
 #define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0))
 
 /*
@@ -183,6 +182,9 @@ enum {
 #define DEVICE_ID_TEMASH_9839 0x9839
 #define DEVICE_ID_TEMASH_983D 0x983D
 
+/* RENOIR */
+#define DEVICE_ID_RENOIR_1636 0x1636
+
 /* Asic Family IDs for different asic family. */
 #define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */
 #define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */
index c195575..2a12614 100644 (file)
@@ -1452,7 +1452,8 @@ static int pp_get_asic_baco_state(void *handle, int *state)
        if (!hwmgr)
                return -EINVAL;
 
-       if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
+       if (!(hwmgr->not_vf && amdgpu_dpm) ||
+               !hwmgr->hwmgr_func->get_asic_baco_state)
                return 0;
 
        mutex_lock(&hwmgr->smu_lock);
index 5db8c56..1ef0923 100644 (file)
@@ -794,8 +794,21 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
        struct arcturus_dpm_table *dpm_table;
        struct arcturus_single_dpm_table *single_dpm_table;
        uint32_t soft_min_level, soft_max_level;
+       uint32_t smu_version;
        int ret = 0;
 
+       ret = smu_get_smc_version(smu, NULL, &smu_version);
+       if (ret) {
+               pr_err("Failed to get smu version!\n");
+               return ret;
+       }
+
+       if (smu_version >= 0x361200) {
+               pr_err("Forcing clock level is not supported with "
+                      "54.18 and onwards SMU firmwares\n");
+               return -EOPNOTSUPP;
+       }
+
        soft_min_level = mask ? (ffs(mask) - 1) : 0;
        soft_max_level = mask ? (fls(mask) - 1) : 0;
 
@@ -1512,6 +1525,38 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
        return 0;
 }
 
+static int arcturus_set_performance_level(struct smu_context *smu,
+                                         enum amd_dpm_forced_level level)
+{
+       uint32_t smu_version;
+       int ret;
+
+       ret = smu_get_smc_version(smu, NULL, &smu_version);
+       if (ret) {
+               pr_err("Failed to get smu version!\n");
+               return ret;
+       }
+
+       switch (level) {
+       case AMD_DPM_FORCED_LEVEL_HIGH:
+       case AMD_DPM_FORCED_LEVEL_LOW:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+               if (smu_version >= 0x361200) {
+                       pr_err("Forcing clock level is not supported with "
+                              "54.18 and onwards SMU firmwares\n");
+                       return -EOPNOTSUPP;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return smu_v11_0_set_performance_level(smu, level);
+}
+
 static void arcturus_dump_pptable(struct smu_context *smu)
 {
        struct smu_table_context *table_context = &smu->smu_table;
@@ -2285,7 +2330,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .get_profiling_clk_mask = arcturus_get_profiling_clk_mask,
        .get_power_profile_mode = arcturus_get_power_profile_mode,
        .set_power_profile_mode = arcturus_set_power_profile_mode,
-       .set_performance_level = smu_v11_0_set_performance_level,
+       .set_performance_level = arcturus_set_performance_level,
        /* debug (internal used) */
        .dump_pptable = arcturus_dump_pptable,
        .get_power_limit = arcturus_get_power_limit,
index 7bf52ec..ff73a73 100644 (file)
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
        uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
        DpmClocks_t *clk_table = smu->smu_table.clocks_table;
        SmuMetrics_t metrics;
+       bool cur_value_match_level = false;
 
        if (!clk_table || clk_type >= SMU_CLK_COUNT)
                return -EINVAL;
@@ -297,8 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
                GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
                size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
                                cur_value == value ? "*" : "");
+               if (cur_value == value)
+                       cur_value_match_level = true;
        }
 
+       if (!cur_value_match_level)
+               size += sprintf(buf + size, "   %uMhz *\n", cur_value);
+
        return size;
 }
 
@@ -887,6 +893,17 @@ static int renoir_read_sensor(struct smu_context *smu,
        return ret;
 }
 
+static bool renoir_is_dpm_running(struct smu_context *smu)
+{
+       /*
+        * Util now, the pmfw hasn't exported the interface of SMU
+        * feature mask to APU SKU so just force on all the feature
+        * at early initial stage.
+        */
+       return true;
+
+}
+
 static const struct pptable_funcs renoir_ppt_funcs = {
        .get_smu_msg_index = renoir_get_smu_msg_index,
        .get_smu_clk_index = renoir_get_smu_clk_index,
@@ -927,6 +944,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
        .mode2_reset = smu_v12_0_mode2_reset,
        .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
        .set_driver_table_location = smu_v12_0_set_driver_table_location,
+       .is_dpm_running = renoir_is_dpm_running,
 };
 
 void renoir_set_ppt_funcs(struct smu_context *smu)
index 2a390dd..89cd6da 100644 (file)
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
                        freq = table->SocClocks[dpm_level].Freq;        \
                        break;                                          \
                case SMU_MCLK:                                          \
-                       freq = table->MemClocks[dpm_level].Freq;        \
+                       freq = table->FClocks[dpm_level].Freq;  \
                        break;                                          \
                case SMU_DCEFCLK:                                       \
                        freq = table->DcfClocks[dpm_level].Freq;        \