/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
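/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: the &struct common_irq_params carrying the source device
 *
 * In VRR mode, core vblank handling is deferred to this handler: it runs
 * after the end of the front-porch, where vblank timestamping gives valid
 * results, and it also delivers pageflip completion events that were queued
 * from inside the front-porch. Pre-DCE12 ASICs additionally run below-the-range
 * (BTR) freesync processing here.
 */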
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: ignored
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP. At which:
 * * We are close enough to VUPDATE - the point of no return for hw
 * * We are in the fixed portion of variable front porch when vrr is enabled
 * * We are before VUPDATE, where double-buffered vrr registers are swapped
 *
 * It is therefore the correct place to signal vblank, send user flip events,
 * and pick up vrr.
 */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	drm_crtc_handle_vblank(&acrtc->base);

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
			adev->dm.freesync_module,
			acrtc_state->stream,
			&acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(
			adev->dm.dc,
			acrtc_state->stream,
			&acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
#endif
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
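/*
 * dm_dmub_hw_init() - Bring up the DMUB (display microcontroller) service:
 * copy the firmware instruction/data windows and the VBIOS into framebuffer
 * memory, program the hardware parameters, and wait for the firmware to
 * auto-load. Returns 0 on success, and also when DMUB is simply not
 * supported on this ASIC.
 */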
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr,
		       fw_inst_const, fw_inst_const_size);
	}

	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
	       fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}
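/*
 * load_dmcu_fw() - Request the DMCU (display microcontroller) firmware for
 * the ASICs that use it and register it with the PSP firmware loader.
 * Missing DMCU firmware is not fatal; most ASICs run without it.
 */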
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
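/*
 * dm_dmub_sw_init() - Software-side DMUB setup: fetch and validate the DMUB
 * firmware, create the DMUB service, size its memory regions, back them with
 * a VRAM allocation, and compute the per-region framebuffer info consumed
 * later by dm_dmub_hw_init().
 */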
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Record the firmware version before logging it below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}
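/*
 * detect_mst_link_for_all_connectors() - Walk the connector list and start
 * topology management on every DP MST branch link. A link whose MST manager
 * fails to start is demoted to a single (SST) connection.
 */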
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction. Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}
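/*
 * s3_handle_mst() - Suspend or resume MST topology managers around S3.
 * On resume, a manager that fails to come back has MST torn down, and a
 * hotplug event is sent so userspace re-probes the connector.
 */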
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
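/*
 * emulated_link_detect() - Fake a link detection for a forced connector.
 * Creates a sink that matches the connector's signal type and reads the
 * EDID locally, so a display can be emulated even though nothing was
 * physically detected on the link.
 */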
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i, r;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST*/
	s3_handle_mst(ddev, false);

	/* Do detection*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
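/*
 * update_connector_ext_caps() - Cache the sink's extended backlight
 * capabilities (OLED, aux backlight control) and derive approximate min/max
 * luminance values from the connector's max_cll/min_cll HDR metadata.
 */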
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	conn_base = &aconnector->base;
	adev = conn_base->dev->dev_private;
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From Euclid's division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];
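	/*
	 * Worked example (values chosen purely for illustration):
	 * max_cll = 70 gives q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
	 * matching 50*2**(70/32) ~= 227.8 from the CTA-861-G formula.
	 */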

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use em_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}
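/*
 * handle_hpd_irq() - HPD (hotplug detect) long-pulse handler. Re-runs link
 * detection for the connector, restores the connector's DRM state, and
 * notifies userspace with a hotplug event when the connector is not forced.
 */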
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
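/*
 * dm_handle_hpd_rx_irq() - Service a DP short pulse on an MST link: read the
 * ESI (or the legacy sink-count/lane-status) DPCD block, let the MST manager
 * process it, and ACK back to the sink, looping while new IRQs are raised
 * (bounded by max_process_count).
 */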
2097 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2099 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2101 bool new_irq_handled = false;
2103 int dpcd_bytes_to_read;
2105 const int max_process_count = 30;
2106 int process_count = 0;
2108 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2110 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2111 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2112 /* DPCD 0x200 - 0x201 for downstream IRQ */
2113 dpcd_addr = DP_SINK_COUNT;
2115 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2116 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2117 dpcd_addr = DP_SINK_COUNT_ESI;
2120 dret = drm_dp_dpcd_read(
2121 &aconnector->dm_dp_aux.aux,
2124 dpcd_bytes_to_read);
2126 while (dret == dpcd_bytes_to_read &&
2127 process_count < max_process_count) {
2133 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2134 /* handle HPD short pulse irq */
2135 if (aconnector->mst_mgr.mst_state)
2137 &aconnector->mst_mgr,
2141 if (new_irq_handled) {
2142 /* ACK at DPCD to notify the downstream device */
2143 const int ack_dpcd_bytes_to_write =
2144 dpcd_bytes_to_read - 1;
2146 for (retry = 0; retry < 3; retry++) {
2149 wret = drm_dp_dpcd_write(
2150 &aconnector->dm_dp_aux.aux,
2153 ack_dpcd_bytes_to_write);
2154 if (wret == ack_dpcd_bytes_to_write)
2158 /* check if there is a new irq to be handled */
2159 dret = drm_dp_dpcd_read(
2160 &aconnector->dm_dp_aux.aux,
2163 dpcd_bytes_to_read);
2165 new_irq_handled = false;
2171 if (process_count == max_process_count)
2172 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
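/*
 * Summary (illustrative note, not additional driver logic): the loop
 * above keeps re-reading the short-pulse IRQ vector (DPCD 0x200 - 0x201,
 * or the 0x2002 - 0x2005 ESI block on DPCD rev 1.2+), lets the MST
 * manager handle each event and ACKs it back to the sink, until no new
 * IRQ is reported or the 30-iteration safety bound is hit.
 */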
2175 static void handle_hpd_rx_irq(void *param)
2177 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2178 struct drm_connector *connector = &aconnector->base;
2179 struct drm_device *dev = connector->dev;
2180 struct dc_link *dc_link = aconnector->dc_link;
2181 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2182 enum dc_connection_type new_connection_type = dc_connection_none;
2183 #ifdef CONFIG_DRM_AMD_DC_HDCP
2184 union hpd_irq_data hpd_irq_data;
2185 struct amdgpu_device *adev = dev->dev_private;
2187 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2191 * TODO: Temporarily add a mutex to protect the hpd interrupt from a GPIO
2192 * conflict; once the i2c helper is implemented, this mutex should be
2195 if (dc_link->type != dc_connection_mst_branch)
2196 mutex_lock(&aconnector->hpd_lock);
2199 #ifdef CONFIG_DRM_AMD_DC_HDCP
2200 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2202 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2204 !is_mst_root_connector) {
2205 /* Downstream Port status changed. */
2206 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2207 DRM_ERROR("KMS: Failed to detect connector\n");
2209 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2210 emulated_link_detect(dc_link);
2212 if (aconnector->fake_enable)
2213 aconnector->fake_enable = false;
2215 amdgpu_dm_update_connector_after_detect(aconnector);
2218 drm_modeset_lock_all(dev);
2219 dm_restore_drm_connector_state(dev, connector);
2220 drm_modeset_unlock_all(dev);
2222 drm_kms_helper_hotplug_event(dev);
2223 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2225 if (aconnector->fake_enable)
2226 aconnector->fake_enable = false;
2228 amdgpu_dm_update_connector_after_detect(aconnector);
2231 drm_modeset_lock_all(dev);
2232 dm_restore_drm_connector_state(dev, connector);
2233 drm_modeset_unlock_all(dev);
2235 drm_kms_helper_hotplug_event(dev);
2238 #ifdef CONFIG_DRM_AMD_DC_HDCP
2239 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2240 if (adev->dm.hdcp_workqueue)
2241 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2244 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2245 (dc_link->type == dc_connection_mst_branch))
2246 dm_handle_hpd_rx_irq(aconnector);
2248 if (dc_link->type != dc_connection_mst_branch) {
2249 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2250 mutex_unlock(&aconnector->hpd_lock);
2254 static void register_hpd_handlers(struct amdgpu_device *adev)
2256 struct drm_device *dev = adev->ddev;
2257 struct drm_connector *connector;
2258 struct amdgpu_dm_connector *aconnector;
2259 const struct dc_link *dc_link;
2260 struct dc_interrupt_params int_params = {0};
2262 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2263 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2265 list_for_each_entry(connector,
2266 &dev->mode_config.connector_list, head) {
2268 aconnector = to_amdgpu_dm_connector(connector);
2269 dc_link = aconnector->dc_link;
2271 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2272 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2273 int_params.irq_source = dc_link->irq_source_hpd;
2275 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2277 (void *) aconnector);
2280 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2282 /* Also register for DP short pulse (hpd_rx). */
2283 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2284 int_params.irq_source = dc_link->irq_source_hpd_rx;
2286 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2288 (void *) aconnector);
2293 /* Register IRQ sources and initialize IRQ callbacks */
2294 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2296 struct dc *dc = adev->dm.dc;
2297 struct common_irq_params *c_irq_params;
2298 struct dc_interrupt_params int_params = {0};
2301 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2303 if (adev->asic_type >= CHIP_VEGA10)
2304 client_id = SOC15_IH_CLIENTID_DCE;
2306 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2307 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2310 * Actions of amdgpu_irq_add_id():
2311 * 1. Register a set() function with base driver.
2312 * Base driver will call set() function to enable/disable an
2313 * interrupt in DC hardware.
2314 * 2. Register amdgpu_dm_irq_handler().
2315 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2316 * coming from DC hardware.
2317 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2318 * for acknowledging and handling. */
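/*
 * Illustrative walk-through (a sketch, not additional driver code): for
 * the first VBLANK source below, the registration sequence effectively
 * amounts to
 *
 *   amdgpu_irq_add_id(adev, client_id,
 *                     VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0,
 *                     &adev->crtc_irq);
 *   int_params.irq_source = dc_interrupt_to_irq_source(dc,
 *                     VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0, 0);
 *   amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *                     dm_crtc_high_irq, c_irq_params);
 *
 * i.e. the base driver owns enable/disable and the top-half dispatch,
 * while DM attaches a per-source handler and context of its own.
 */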
2320 /* Use VBLANK interrupt */
2321 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2322 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2324 DRM_ERROR("Failed to add crtc irq id!\n");
2328 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2329 int_params.irq_source =
2330 dc_interrupt_to_irq_source(dc, i, 0);
2332 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2334 c_irq_params->adev = adev;
2335 c_irq_params->irq_src = int_params.irq_source;
2337 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2338 dm_crtc_high_irq, c_irq_params);
2341 /* Use VUPDATE interrupt */
2342 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2343 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2345 DRM_ERROR("Failed to add vupdate irq id!\n");
2349 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2350 int_params.irq_source =
2351 dc_interrupt_to_irq_source(dc, i, 0);
2353 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2355 c_irq_params->adev = adev;
2356 c_irq_params->irq_src = int_params.irq_source;
2358 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2359 dm_vupdate_high_irq, c_irq_params);
2362 /* Use GRPH_PFLIP interrupt */
2363 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2364 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2365 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2367 DRM_ERROR("Failed to add page flip irq id!\n");
2371 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2372 int_params.irq_source =
2373 dc_interrupt_to_irq_source(dc, i, 0);
2375 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2377 c_irq_params->adev = adev;
2378 c_irq_params->irq_src = int_params.irq_source;
2380 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2381 dm_pflip_high_irq, c_irq_params);
2386 r = amdgpu_irq_add_id(adev, client_id,
2387 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2389 DRM_ERROR("Failed to add hpd irq id!\n");
2393 register_hpd_handlers(adev);
2398 #if defined(CONFIG_DRM_AMD_DC_DCN)
2399 /* Register IRQ sources and initialize IRQ callbacks */
2400 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2402 struct dc *dc = adev->dm.dc;
2403 struct common_irq_params *c_irq_params;
2404 struct dc_interrupt_params int_params = {0};
2408 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2409 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2412 * Actions of amdgpu_irq_add_id():
2413 * 1. Register a set() function with base driver.
2414 * Base driver will call set() function to enable/disable an
2415 * interrupt in DC hardware.
2416 * 2. Register amdgpu_dm_irq_handler().
2417 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2418 * coming from DC hardware.
2419 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2420 * for acknowledging and handling.
2423 /* Use VSTARTUP interrupt */
2424 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2425 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2427 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2430 DRM_ERROR("Failed to add crtc irq id!\n");
2434 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2435 int_params.irq_source =
2436 dc_interrupt_to_irq_source(dc, i, 0);
2438 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2440 c_irq_params->adev = adev;
2441 c_irq_params->irq_src = int_params.irq_source;
2443 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2444 dm_dcn_crtc_high_irq, c_irq_params);
2447 /* Use GRPH_PFLIP interrupt */
2448 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2449 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2451 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2453 DRM_ERROR("Failed to add page flip irq id!\n");
2457 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2458 int_params.irq_source =
2459 dc_interrupt_to_irq_source(dc, i, 0);
2461 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2463 c_irq_params->adev = adev;
2464 c_irq_params->irq_src = int_params.irq_source;
2466 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2467 dm_pflip_high_irq, c_irq_params);
2472 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2475 DRM_ERROR("Failed to add hpd irq id!\n");
2479 register_hpd_handlers(adev);
2486 * Acquires the lock for the atomic state object and returns
2487 * the new atomic state.
2489 * This should only be called during atomic check.
2491 static int dm_atomic_get_state(struct drm_atomic_state *state,
2492 struct dm_atomic_state **dm_state)
2494 struct drm_device *dev = state->dev;
2495 struct amdgpu_device *adev = dev->dev_private;
2496 struct amdgpu_display_manager *dm = &adev->dm;
2497 struct drm_private_state *priv_state;
2502 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2503 if (IS_ERR(priv_state))
2504 return PTR_ERR(priv_state);
2506 *dm_state = to_dm_atomic_state(priv_state);
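/*
 * Usage sketch (illustrative only), as seen from atomic check:
 *
 *   struct dm_atomic_state *dm_state;
 *   int ret = dm_atomic_get_state(state, &dm_state);
 *   if (ret)
 *           return ret;
 *   // dm_state->context may now be modified under the acquired lock
 */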
2511 struct dm_atomic_state *
2512 dm_atomic_get_new_state(struct drm_atomic_state *state)
2514 struct drm_device *dev = state->dev;
2515 struct amdgpu_device *adev = dev->dev_private;
2516 struct amdgpu_display_manager *dm = &adev->dm;
2517 struct drm_private_obj *obj;
2518 struct drm_private_state *new_obj_state;
2521 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2522 if (obj->funcs == dm->atomic_obj.funcs)
2523 return to_dm_atomic_state(new_obj_state);
2529 struct dm_atomic_state *
2530 dm_atomic_get_old_state(struct drm_atomic_state *state)
2532 struct drm_device *dev = state->dev;
2533 struct amdgpu_device *adev = dev->dev_private;
2534 struct amdgpu_display_manager *dm = &adev->dm;
2535 struct drm_private_obj *obj;
2536 struct drm_private_state *old_obj_state;
2539 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2540 if (obj->funcs == dm->atomic_obj.funcs)
2541 return to_dm_atomic_state(old_obj_state);
2547 static struct drm_private_state *
2548 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2550 struct dm_atomic_state *old_state, *new_state;
2552 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2556 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2558 old_state = to_dm_atomic_state(obj->state);
2560 if (old_state && old_state->context)
2561 new_state->context = dc_copy_state(old_state->context);
2563 if (!new_state->context) {
2568 return &new_state->base;
2571 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2572 struct drm_private_state *state)
2574 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2576 if (dm_state && dm_state->context)
2577 dc_release_state(dm_state->context);
2582 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2583 .atomic_duplicate_state = dm_atomic_duplicate_state,
2584 .atomic_destroy_state = dm_atomic_destroy_state,
2587 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2589 struct dm_atomic_state *state;
2592 adev->mode_info.mode_config_initialized = true;
2594 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2595 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2597 adev->ddev->mode_config.max_width = 16384;
2598 adev->ddev->mode_config.max_height = 16384;
2600 adev->ddev->mode_config.preferred_depth = 24;
2601 adev->ddev->mode_config.prefer_shadow = 1;
2602 /* indicates support for immediate flip */
2603 adev->ddev->mode_config.async_page_flip = true;
2605 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2607 state = kzalloc(sizeof(*state), GFP_KERNEL);
2611 state->context = dc_create_state(adev->dm.dc);
2612 if (!state->context) {
2617 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2619 drm_atomic_private_obj_init(adev->ddev,
2620 &adev->dm.atomic_obj,
2622 &dm_atomic_state_funcs);
2624 r = amdgpu_display_modeset_create_props(adev);
2628 r = amdgpu_dm_audio_init(adev);
2635 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2636 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2637 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2639 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2640 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2642 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2644 #if defined(CONFIG_ACPI)
2645 struct amdgpu_dm_backlight_caps caps;
2647 if (dm->backlight_caps.caps_valid)
2650 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2651 if (caps.caps_valid) {
2652 dm->backlight_caps.caps_valid = true;
2653 if (caps.aux_support)
2655 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2656 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2658 dm->backlight_caps.min_input_signal =
2659 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2660 dm->backlight_caps.max_input_signal =
2661 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2664 if (dm->backlight_caps.aux_support)
2667 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2668 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2672 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2679 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2680 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2685 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2686 const uint32_t user_brightness)
2688 u32 min, max, conversion_pace;
2689 u32 brightness = user_brightness;
2694 if (!caps->aux_support) {
2695 max = caps->max_input_signal;
2696 min = caps->min_input_signal;
2698 * The brightness input is in the range 0-255.
2699 * It needs to be rescaled to be between the
2700 * requested min and max input signal.
2701 * It also needs to be scaled up by 0x101 to
2702 * match the DC interface, which has a range of
2705 conversion_pace = 0x101;
2710 / AMDGPU_MAX_BL_LEVEL
2711 + min * conversion_pace;
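/*
 * Worked example (illustrative, assuming the linear rescale above and
 * the default caps min = 12, max = 255): an input of 255 gives
 * 255 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 65535 = 0xffff, i.e.
 * full scale, while an input of 0 gives 12 * 0x101 = 3084.
 */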
2714 * We are doing a linear interpolation here, which is OK but
2715 * does not provide the optimal result. We probably want
2716 * something close to the Perceptual Quantizer (PQ) curve.
2718 max = caps->aux_max_input_signal;
2719 min = caps->aux_min_input_signal;
2721 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2722 + user_brightness * max;
2723 // Multiply the value by 1000 since we use millinits
2725 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
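/*
 * Worked example (illustrative, for a hypothetical panel reporting
 * aux min = 10 and max = 500 nits): an input of 255 yields
 * 255 * 500 * 1000 / 255 = 500000 millinits (500 nits), and an input
 * of 0 yields 255 * 10 * 1000 / 255 = 10000 millinits (10 nits).
 */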
2732 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2734 struct amdgpu_display_manager *dm = bl_get_data(bd);
2735 struct amdgpu_dm_backlight_caps caps;
2736 struct dc_link *link = NULL;
2740 amdgpu_dm_update_backlight_caps(dm);
2741 caps = dm->backlight_caps;
2743 link = (struct dc_link *)dm->backlight_link;
2745 brightness = convert_brightness(&caps, bd->props.brightness);
2746 // Change brightness based on AUX property
2747 if (caps.aux_support)
2748 return set_backlight_via_aux(link, brightness);
2750 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2755 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2757 struct amdgpu_display_manager *dm = bl_get_data(bd);
2758 int ret = dc_link_get_backlight_level(dm->backlight_link);
2760 if (ret == DC_ERROR_UNEXPECTED)
2761 return bd->props.brightness;
2765 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2766 .options = BL_CORE_SUSPENDRESUME,
2767 .get_brightness = amdgpu_dm_backlight_get_brightness,
2768 .update_status = amdgpu_dm_backlight_update_status,
2772 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2775 struct backlight_properties props = { 0 };
2777 amdgpu_dm_update_backlight_caps(dm);
2779 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2780 props.brightness = AMDGPU_MAX_BL_LEVEL;
2781 props.type = BACKLIGHT_RAW;
2783 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2784 dm->adev->ddev->primary->index);
2786 dm->backlight_dev = backlight_device_register(bl_name,
2787 dm->adev->ddev->dev,
2789 &amdgpu_dm_backlight_ops,
2792 if (IS_ERR(dm->backlight_dev))
2793 DRM_ERROR("DM: Backlight registration failed!\n");
2795 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2800 static int initialize_plane(struct amdgpu_display_manager *dm,
2801 struct amdgpu_mode_info *mode_info, int plane_id,
2802 enum drm_plane_type plane_type,
2803 const struct dc_plane_cap *plane_cap)
2805 struct drm_plane *plane;
2806 unsigned long possible_crtcs;
2809 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2811 DRM_ERROR("KMS: Failed to allocate plane\n");
2814 plane->type = plane_type;
2817 * HACK: IGT tests expect that the primary plane for a CRTC
2818 * can only have one possible CRTC. Only expose support for
2819 * any CRTC if they're not going to be used as a primary plane
2820 * for a CRTC - like overlay or underlay planes.
2822 possible_crtcs = 1 << plane_id;
2823 if (plane_id >= dm->dc->caps.max_streams)
2824 possible_crtcs = 0xff;
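/*
 * Example (illustrative): with max_streams == 4, plane 0 gets
 * possible_crtcs == 0x1 (primary plane for CRTC 0 only), plane 3 gets
 * 0x8, and any plane with id >= 4 (an overlay) gets 0xff, i.e. it may
 * be placed on any CRTC.
 */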
2826 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2829 DRM_ERROR("KMS: Failed to initialize plane\n");
2835 mode_info->planes[plane_id] = plane;
2841 static void register_backlight_device(struct amdgpu_display_manager *dm,
2842 struct dc_link *link)
2844 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2845 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2847 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2848 link->type != dc_connection_none) {
2850 * Even if registration fails, we should continue with
2851 * DM initialization, because not having backlight control
2852 * is better than a black screen.
2854 amdgpu_dm_register_backlight_device(dm);
2856 if (dm->backlight_dev)
2857 dm->backlight_link = link;
2864 * In this architecture, the association
2865 * connector -> encoder -> crtc
2866 * is not really required. The crtc and connector will hold the
2867 * display_index as an abstraction to use with the DAL component.
2869 * Returns 0 on success
2871 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2873 struct amdgpu_display_manager *dm = &adev->dm;
2875 struct amdgpu_dm_connector *aconnector = NULL;
2876 struct amdgpu_encoder *aencoder = NULL;
2877 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2879 int32_t primary_planes;
2880 enum dc_connection_type new_connection_type = dc_connection_none;
2881 const struct dc_plane_cap *plane;
2883 link_cnt = dm->dc->caps.max_links;
2884 if (amdgpu_dm_mode_config_init(dm->adev)) {
2885 DRM_ERROR("DM: Failed to initialize mode config\n");
2889 /* There is one primary plane per CRTC */
2890 primary_planes = dm->dc->caps.max_streams;
2891 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2894 * Initialize primary planes, implicit planes for legacy IOCTLS.
2895 * Order is reversed to match iteration order in atomic check.
2897 for (i = (primary_planes - 1); i >= 0; i--) {
2898 plane = &dm->dc->caps.planes[i];
2900 if (initialize_plane(dm, mode_info, i,
2901 DRM_PLANE_TYPE_PRIMARY, plane)) {
2902 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2908 * Initialize overlay planes, index starting after primary planes.
2909 * These planes have a higher DRM index than the primary planes since
2910 * they should be considered as having a higher z-order.
2911 * Order is reversed to match iteration order in atomic check.
2913 * Only support DCN for now, and only expose one so we don't encourage
2914 * userspace to use up all the pipes.
2916 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2917 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2919 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2922 if (!plane->blends_with_above || !plane->blends_with_below)
2925 if (!plane->pixel_format_support.argb8888)
2928 if (initialize_plane(dm, NULL, primary_planes + i,
2929 DRM_PLANE_TYPE_OVERLAY, plane)) {
2930 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2934 /* Only create one overlay plane. */
2938 for (i = 0; i < dm->dc->caps.max_streams; i++)
2939 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2940 DRM_ERROR("KMS: Failed to initialize crtc\n");
2944 dm->display_indexes_num = dm->dc->caps.max_streams;
2946 /* loops over all connectors on the board */
2947 for (i = 0; i < link_cnt; i++) {
2948 struct dc_link *link = NULL;
2950 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2952 "KMS: Cannot support more than %d display indexes\n",
2953 AMDGPU_DM_MAX_DISPLAY_INDEX);
2957 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2961 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2965 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2966 DRM_ERROR("KMS: Failed to initialize encoder\n");
2970 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2971 DRM_ERROR("KMS: Failed to initialize connector\n");
2975 link = dc_get_link_at_index(dm->dc, i);
2977 if (!dc_link_detect_sink(link, &new_connection_type))
2978 DRM_ERROR("KMS: Failed to detect connector\n");
2980 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2981 emulated_link_detect(link);
2982 amdgpu_dm_update_connector_after_detect(aconnector);
2984 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2985 amdgpu_dm_update_connector_after_detect(aconnector);
2986 register_backlight_device(dm, link);
2987 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2988 amdgpu_dm_set_psr_caps(link);
2994 /* Software is initialized. Now we can register interrupt handlers. */
2995 switch (adev->asic_type) {
3005 case CHIP_POLARIS11:
3006 case CHIP_POLARIS10:
3007 case CHIP_POLARIS12:
3012 if (dce110_register_irq_handlers(dm->adev)) {
3013 DRM_ERROR("DM: Failed to initialize IRQ\n");
3017 #if defined(CONFIG_DRM_AMD_DC_DCN)
3023 if (dcn10_register_irq_handlers(dm->adev)) {
3024 DRM_ERROR("DM: Failed to initialize IRQ\n");
3030 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3034 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3035 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3037 /* No userspace support. */
3038 dm->dc->debug.disable_tri_buf = true;
3048 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3050 drm_mode_config_cleanup(dm->ddev);
3051 drm_atomic_private_obj_fini(&dm->atomic_obj);
3055 /******************************************************************************
3056 * amdgpu_display_funcs functions
3057 *****************************************************************************/
3060 * dm_bandwidth_update - program display watermarks
3062 * @adev: amdgpu_device pointer
3064 * Calculate and program the display watermarks and line buffer allocation.
3066 static void dm_bandwidth_update(struct amdgpu_device *adev)
3068 /* TODO: implement later */
3071 static const struct amdgpu_display_funcs dm_display_funcs = {
3072 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3073 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3074 .backlight_set_level = NULL, /* never called for DC */
3075 .backlight_get_level = NULL, /* never called for DC */
3076 .hpd_sense = NULL,/* called unconditionally */
3077 .hpd_set_polarity = NULL, /* called unconditionally */
3078 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3079 .page_flip_get_scanoutpos =
3080 dm_crtc_get_scanoutpos,/* called unconditionally */
3081 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3082 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3085 #if defined(CONFIG_DEBUG_KERNEL_DC)
3087 static ssize_t s3_debug_store(struct device *device,
3088 struct device_attribute *attr,
3094 struct drm_device *drm_dev = dev_get_drvdata(device);
3095 struct amdgpu_device *adev = drm_dev->dev_private;
3097 ret = kstrtoint(buf, 0, &s3_state);
3102 drm_kms_helper_hotplug_event(adev->ddev);
3107 return ret == 0 ? count : 0;
3110 DEVICE_ATTR_WO(s3_debug);
3114 static int dm_early_init(void *handle)
3116 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3118 switch (adev->asic_type) {
3121 adev->mode_info.num_crtc = 6;
3122 adev->mode_info.num_hpd = 6;
3123 adev->mode_info.num_dig = 6;
3126 adev->mode_info.num_crtc = 4;
3127 adev->mode_info.num_hpd = 6;
3128 adev->mode_info.num_dig = 7;
3132 adev->mode_info.num_crtc = 2;
3133 adev->mode_info.num_hpd = 6;
3134 adev->mode_info.num_dig = 6;
3138 adev->mode_info.num_crtc = 6;
3139 adev->mode_info.num_hpd = 6;
3140 adev->mode_info.num_dig = 7;
3143 adev->mode_info.num_crtc = 3;
3144 adev->mode_info.num_hpd = 6;
3145 adev->mode_info.num_dig = 9;
3148 adev->mode_info.num_crtc = 2;
3149 adev->mode_info.num_hpd = 6;
3150 adev->mode_info.num_dig = 9;
3152 case CHIP_POLARIS11:
3153 case CHIP_POLARIS12:
3154 adev->mode_info.num_crtc = 5;
3155 adev->mode_info.num_hpd = 5;
3156 adev->mode_info.num_dig = 5;
3158 case CHIP_POLARIS10:
3160 adev->mode_info.num_crtc = 6;
3161 adev->mode_info.num_hpd = 6;
3162 adev->mode_info.num_dig = 6;
3167 adev->mode_info.num_crtc = 6;
3168 adev->mode_info.num_hpd = 6;
3169 adev->mode_info.num_dig = 6;
3171 #if defined(CONFIG_DRM_AMD_DC_DCN)
3173 adev->mode_info.num_crtc = 4;
3174 adev->mode_info.num_hpd = 4;
3175 adev->mode_info.num_dig = 4;
3180 adev->mode_info.num_crtc = 6;
3181 adev->mode_info.num_hpd = 6;
3182 adev->mode_info.num_dig = 6;
3185 adev->mode_info.num_crtc = 5;
3186 adev->mode_info.num_hpd = 5;
3187 adev->mode_info.num_dig = 5;
3190 adev->mode_info.num_crtc = 4;
3191 adev->mode_info.num_hpd = 4;
3192 adev->mode_info.num_dig = 4;
3195 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3199 amdgpu_dm_set_irq_funcs(adev);
3201 if (adev->mode_info.funcs == NULL)
3202 adev->mode_info.funcs = &dm_display_funcs;
3205 * Note: Do NOT change adev->audio_endpt_rreg and
3206 * adev->audio_endpt_wreg because they are initialised in
3207 * amdgpu_device_init()
3209 #if defined(CONFIG_DEBUG_KERNEL_DC)
3212 &dev_attr_s3_debug);
3218 static bool modeset_required(struct drm_crtc_state *crtc_state,
3219 struct dc_stream_state *new_stream,
3220 struct dc_stream_state *old_stream)
3222 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3225 if (!crtc_state->enable)
3228 return crtc_state->active;
3231 static bool modereset_required(struct drm_crtc_state *crtc_state)
3233 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3236 return !crtc_state->enable || !crtc_state->active;
3239 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3241 drm_encoder_cleanup(encoder);
3245 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3246 .destroy = amdgpu_dm_encoder_destroy,
3250 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3251 struct dc_scaling_info *scaling_info)
3253 int scale_w, scale_h;
3255 memset(scaling_info, 0, sizeof(*scaling_info));
3257 /* Source is fixed 16.16 but we ignore mantissa for now... */
3258 scaling_info->src_rect.x = state->src_x >> 16;
3259 scaling_info->src_rect.y = state->src_y >> 16;
3261 scaling_info->src_rect.width = state->src_w >> 16;
3262 if (scaling_info->src_rect.width == 0)
3265 scaling_info->src_rect.height = state->src_h >> 16;
3266 if (scaling_info->src_rect.height == 0)
3269 scaling_info->dst_rect.x = state->crtc_x;
3270 scaling_info->dst_rect.y = state->crtc_y;
3272 if (state->crtc_w == 0)
3275 scaling_info->dst_rect.width = state->crtc_w;
3277 if (state->crtc_h == 0)
3280 scaling_info->dst_rect.height = state->crtc_h;
3282 /* DRM doesn't specify clipping on destination output. */
3283 scaling_info->clip_rect = scaling_info->dst_rect;
3285 /* TODO: Validate scaling per-format with DC plane caps */
3286 scale_w = scaling_info->dst_rect.width * 1000 /
3287 scaling_info->src_rect.width;
3289 if (scale_w < 250 || scale_w > 16000)
3292 scale_h = scaling_info->dst_rect.height * 1000 /
3293 scaling_info->src_rect.height;
3295 if (scale_h < 250 || scale_h > 16000)
3299 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3300 * assume reasonable defaults based on the format.
3306 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3307 uint64_t *tiling_flags)
3309 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3310 int r = amdgpu_bo_reserve(rbo, false);
3313 /* Don't show error message when returning -ERESTARTSYS */
3314 if (r != -ERESTARTSYS)
3315 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3320 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3322 amdgpu_bo_unreserve(rbo);
3327 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3329 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3331 return offset ? (address + offset * 256) : 0;
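/*
 * Example (illustrative): a DCC_OFFSET_256B field of 0x40 places the
 * DCC metadata at address + 0x40 * 256 = address + 0x4000, while an
 * offset of 0 means the surface has no DCC metadata at all.
 */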
3335 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3336 const struct amdgpu_framebuffer *afb,
3337 const enum surface_pixel_format format,
3338 const enum dc_rotation_angle rotation,
3339 const struct plane_size *plane_size,
3340 const union dc_tiling_info *tiling_info,
3341 const uint64_t info,
3342 struct dc_plane_dcc_param *dcc,
3343 struct dc_plane_address *address)
3345 struct dc *dc = adev->dm.dc;
3346 struct dc_dcc_surface_param input;
3347 struct dc_surface_dcc_cap output;
3348 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3349 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3350 uint64_t dcc_address;
3352 memset(&input, 0, sizeof(input));
3353 memset(&output, 0, sizeof(output));
3358 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3361 if (!dc->cap_funcs.get_dcc_compression_cap)
3364 input.format = format;
3365 input.surface_size.width = plane_size->surface_size.width;
3366 input.surface_size.height = plane_size->surface_size.height;
3367 input.swizzle_mode = tiling_info->gfx9.swizzle;
3369 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3370 input.scan = SCAN_DIRECTION_HORIZONTAL;
3371 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3372 input.scan = SCAN_DIRECTION_VERTICAL;
3374 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3377 if (!output.capable)
3380 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3385 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3386 dcc->independent_64b_blks = i64b;
3388 dcc_address = get_dcc_address(afb->address, info);
3389 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3390 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3396 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3397 const struct amdgpu_framebuffer *afb,
3398 const enum surface_pixel_format format,
3399 const enum dc_rotation_angle rotation,
3400 const uint64_t tiling_flags,
3401 union dc_tiling_info *tiling_info,
3402 struct plane_size *plane_size,
3403 struct dc_plane_dcc_param *dcc,
3404 struct dc_plane_address *address)
3406 const struct drm_framebuffer *fb = &afb->base;
3409 memset(tiling_info, 0, sizeof(*tiling_info));
3410 memset(plane_size, 0, sizeof(*plane_size));
3411 memset(dcc, 0, sizeof(*dcc));
3412 memset(address, 0, sizeof(*address));
3414 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3415 plane_size->surface_size.x = 0;
3416 plane_size->surface_size.y = 0;
3417 plane_size->surface_size.width = fb->width;
3418 plane_size->surface_size.height = fb->height;
3419 plane_size->surface_pitch =
3420 fb->pitches[0] / fb->format->cpp[0];
3422 address->type = PLN_ADDR_TYPE_GRAPHICS;
3423 address->grph.addr.low_part = lower_32_bits(afb->address);
3424 address->grph.addr.high_part = upper_32_bits(afb->address);
3425 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3426 uint64_t chroma_addr = afb->address + fb->offsets[1];
3428 plane_size->surface_size.x = 0;
3429 plane_size->surface_size.y = 0;
3430 plane_size->surface_size.width = fb->width;
3431 plane_size->surface_size.height = fb->height;
3432 plane_size->surface_pitch =
3433 fb->pitches[0] / fb->format->cpp[0];
3435 plane_size->chroma_size.x = 0;
3436 plane_size->chroma_size.y = 0;
3437 /* TODO: set these based on surface format */
3438 plane_size->chroma_size.width = fb->width / 2;
3439 plane_size->chroma_size.height = fb->height / 2;
3441 plane_size->chroma_pitch =
3442 fb->pitches[1] / fb->format->cpp[1];
3444 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3445 address->video_progressive.luma_addr.low_part =
3446 lower_32_bits(afb->address);
3447 address->video_progressive.luma_addr.high_part =
3448 upper_32_bits(afb->address);
3449 address->video_progressive.chroma_addr.low_part =
3450 lower_32_bits(chroma_addr);
3451 address->video_progressive.chroma_addr.high_part =
3452 upper_32_bits(chroma_addr);
3455 /* Fill GFX8 params */
3456 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3457 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3459 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3460 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3461 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3462 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3463 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3465 /* XXX fix me for VI */
3466 tiling_info->gfx8.num_banks = num_banks;
3467 tiling_info->gfx8.array_mode =
3468 DC_ARRAY_2D_TILED_THIN1;
3469 tiling_info->gfx8.tile_split = tile_split;
3470 tiling_info->gfx8.bank_width = bankw;
3471 tiling_info->gfx8.bank_height = bankh;
3472 tiling_info->gfx8.tile_aspect = mtaspect;
3473 tiling_info->gfx8.tile_mode =
3474 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3475 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3476 == DC_ARRAY_1D_TILED_THIN1) {
3477 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3480 tiling_info->gfx8.pipe_config =
3481 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3483 if (adev->asic_type == CHIP_VEGA10 ||
3484 adev->asic_type == CHIP_VEGA12 ||
3485 adev->asic_type == CHIP_VEGA20 ||
3486 adev->asic_type == CHIP_NAVI10 ||
3487 adev->asic_type == CHIP_NAVI14 ||
3488 adev->asic_type == CHIP_NAVI12 ||
3489 adev->asic_type == CHIP_RENOIR ||
3490 adev->asic_type == CHIP_RAVEN) {
3491 /* Fill GFX9 params */
3492 tiling_info->gfx9.num_pipes =
3493 adev->gfx.config.gb_addr_config_fields.num_pipes;
3494 tiling_info->gfx9.num_banks =
3495 adev->gfx.config.gb_addr_config_fields.num_banks;
3496 tiling_info->gfx9.pipe_interleave =
3497 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3498 tiling_info->gfx9.num_shader_engines =
3499 adev->gfx.config.gb_addr_config_fields.num_se;
3500 tiling_info->gfx9.max_compressed_frags =
3501 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3502 tiling_info->gfx9.num_rb_per_se =
3503 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3504 tiling_info->gfx9.swizzle =
3505 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3506 tiling_info->gfx9.shaderEnable = 1;
3508 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3509 plane_size, tiling_info,
3510 tiling_flags, dcc, address);
3519 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3520 bool *per_pixel_alpha, bool *global_alpha,
3521 int *global_alpha_value)
3523 *per_pixel_alpha = false;
3524 *global_alpha = false;
3525 *global_alpha_value = 0xff;
3527 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3530 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3531 static const uint32_t alpha_formats[] = {
3532 DRM_FORMAT_ARGB8888,
3533 DRM_FORMAT_RGBA8888,
3534 DRM_FORMAT_ABGR8888,
3536 uint32_t format = plane_state->fb->format->format;
3539 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3540 if (format == alpha_formats[i]) {
3541 *per_pixel_alpha = true;
3547 if (plane_state->alpha < 0xffff) {
3548 *global_alpha = true;
3549 *global_alpha_value = plane_state->alpha >> 8;
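/*
 * DRM plane alpha is 16 bit (0xffff == fully opaque) while DC takes an
 * 8-bit value, hence the >> 8 above; e.g. (illustrative) an alpha of
 * 0x8000 becomes a global_alpha_value of 0x80.
 */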
3554 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3555 const enum surface_pixel_format format,
3556 enum dc_color_space *color_space)
3560 *color_space = COLOR_SPACE_SRGB;
3562 /* DRM color properties only affect non-RGB formats. */
3563 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3566 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3568 switch (plane_state->color_encoding) {
3569 case DRM_COLOR_YCBCR_BT601:
3571 *color_space = COLOR_SPACE_YCBCR601;
3573 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3576 case DRM_COLOR_YCBCR_BT709:
3578 *color_space = COLOR_SPACE_YCBCR709;
3580 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3583 case DRM_COLOR_YCBCR_BT2020:
3585 *color_space = COLOR_SPACE_2020_YCBCR;
3598 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3599 const struct drm_plane_state *plane_state,
3600 const uint64_t tiling_flags,
3601 struct dc_plane_info *plane_info,
3602 struct dc_plane_address *address)
3604 const struct drm_framebuffer *fb = plane_state->fb;
3605 const struct amdgpu_framebuffer *afb =
3606 to_amdgpu_framebuffer(plane_state->fb);
3607 struct drm_format_name_buf format_name;
3610 memset(plane_info, 0, sizeof(*plane_info));
3612 switch (fb->format->format) {
3614 plane_info->format =
3615 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3617 case DRM_FORMAT_RGB565:
3618 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3620 case DRM_FORMAT_XRGB8888:
3621 case DRM_FORMAT_ARGB8888:
3622 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3624 case DRM_FORMAT_XRGB2101010:
3625 case DRM_FORMAT_ARGB2101010:
3626 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3628 case DRM_FORMAT_XBGR2101010:
3629 case DRM_FORMAT_ABGR2101010:
3630 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3632 case DRM_FORMAT_XBGR8888:
3633 case DRM_FORMAT_ABGR8888:
3634 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3636 case DRM_FORMAT_NV21:
3637 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3639 case DRM_FORMAT_NV12:
3640 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3644 "Unsupported screen format %s\n",
3645 drm_get_format_name(fb->format->format, &format_name));
3649 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3650 case DRM_MODE_ROTATE_0:
3651 plane_info->rotation = ROTATION_ANGLE_0;
3653 case DRM_MODE_ROTATE_90:
3654 plane_info->rotation = ROTATION_ANGLE_90;
3656 case DRM_MODE_ROTATE_180:
3657 plane_info->rotation = ROTATION_ANGLE_180;
3659 case DRM_MODE_ROTATE_270:
3660 plane_info->rotation = ROTATION_ANGLE_270;
3663 plane_info->rotation = ROTATION_ANGLE_0;
3667 plane_info->visible = true;
3668 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3670 plane_info->layer_index = 0;
3672 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3673 &plane_info->color_space);
3677 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3678 plane_info->rotation, tiling_flags,
3679 &plane_info->tiling_info,
3680 &plane_info->plane_size,
3681 &plane_info->dcc, address);
3685 fill_blending_from_plane_state(
3686 plane_state, &plane_info->per_pixel_alpha,
3687 &plane_info->global_alpha, &plane_info->global_alpha_value);
3692 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3693 struct dc_plane_state *dc_plane_state,
3694 struct drm_plane_state *plane_state,
3695 struct drm_crtc_state *crtc_state)
3697 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3698 const struct amdgpu_framebuffer *amdgpu_fb =
3699 to_amdgpu_framebuffer(plane_state->fb);
3700 struct dc_scaling_info scaling_info;
3701 struct dc_plane_info plane_info;
3702 uint64_t tiling_flags;
3705 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3709 dc_plane_state->src_rect = scaling_info.src_rect;
3710 dc_plane_state->dst_rect = scaling_info.dst_rect;
3711 dc_plane_state->clip_rect = scaling_info.clip_rect;
3712 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3714 ret = get_fb_info(amdgpu_fb, &tiling_flags);
3718 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3720 &dc_plane_state->address);
3724 dc_plane_state->format = plane_info.format;
3725 dc_plane_state->color_space = plane_info.color_space;
3727 dc_plane_state->plane_size = plane_info.plane_size;
3728 dc_plane_state->rotation = plane_info.rotation;
3729 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3730 dc_plane_state->stereo_format = plane_info.stereo_format;
3731 dc_plane_state->tiling_info = plane_info.tiling_info;
3732 dc_plane_state->visible = plane_info.visible;
3733 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3734 dc_plane_state->global_alpha = plane_info.global_alpha;
3735 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3736 dc_plane_state->dcc = plane_info.dcc;
3737 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3740 * Always set input transfer function, since plane state is refreshed
3743 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3750 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3751 const struct dm_connector_state *dm_state,
3752 struct dc_stream_state *stream)
3754 enum amdgpu_rmx_type rmx_type;
3756 struct rect src = { 0 }; /* viewport in composition space */
3757 struct rect dst = { 0 }; /* stream addressable area */
3759 /* no mode. nothing to be done */
3763 /* Full screen scaling by default */
3764 src.width = mode->hdisplay;
3765 src.height = mode->vdisplay;
3766 dst.width = stream->timing.h_addressable;
3767 dst.height = stream->timing.v_addressable;
3770 rmx_type = dm_state->scaling;
3771 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3772 if (src.width * dst.height <
3773 src.height * dst.width) {
3774 /* height needs less upscaling/more downscaling */
3775 dst.width = src.width *
3776 dst.height / src.height;
3778 /* width needs less upscaling/more downscaling */
3779 dst.height = src.height *
3780 dst.width / src.width;
3782 } else if (rmx_type == RMX_CENTER) {
3786 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3787 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3789 if (dm_state->underscan_enable) {
3790 dst.x += dm_state->underscan_hborder / 2;
3791 dst.y += dm_state->underscan_vborder / 2;
3792 dst.width -= dm_state->underscan_hborder;
3793 dst.height -= dm_state->underscan_vborder;
3800 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
3801 dst.x, dst.y, dst.width, dst.height);
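/*
 * Worked example (illustrative): a 1920x1080 source on a 1680x1050
 * stream with RMX_ASPECT takes the "else" branch above since
 * 1920 * 1050 > 1080 * 1680, giving dst.height = 1080 * 1680 / 1920 =
 * 945; centering then places the image at y = (1050 - 945) / 2 = 52.
 */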
3805 static enum dc_color_depth
3806 convert_color_depth_from_display_info(const struct drm_connector *connector,
3807 const struct drm_connector_state *state,
3815 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3816 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3818 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3820 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3823 bpc = (uint8_t)connector->display_info.bpc;
3824 /* Assume 8 bpc by default if no bpc is specified. */
3825 bpc = bpc ? bpc : 8;
3829 state = connector->state;
3833 * Cap display bpc based on the user requested value.
2835 * The value for state->max_bpc may not be correctly updated
3836 * depending on when the connector gets added to the state
3837 * or if this was called outside of atomic check, so it
3838 * can't be used directly.
3840 bpc = min(bpc, state->max_requested_bpc);
3842 /* Round down to the nearest even number. */
3843 bpc = bpc - (bpc & 1);
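/*
 * Example (illustrative): a sink reporting 12 bpc with a user
 * max_requested_bpc of 9 gives min(12, 9) = 9, which the rounding
 * above reduces to 8; an even request such as 10 is kept as-is.
 */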
3849 * Temporary workaround: DRM doesn't parse color depth for
3850 * EDID revisions before 1.4.
3851 * TODO: Fix edid parsing
3853 return COLOR_DEPTH_888;
3855 return COLOR_DEPTH_666;
3857 return COLOR_DEPTH_888;
3859 return COLOR_DEPTH_101010;
3861 return COLOR_DEPTH_121212;
3863 return COLOR_DEPTH_141414;
3865 return COLOR_DEPTH_161616;
3867 return COLOR_DEPTH_UNDEFINED;
3871 static enum dc_aspect_ratio
3872 get_aspect_ratio(const struct drm_display_mode *mode_in)
3874 /* 1-1 mapping, since both enums follow the HDMI spec. */
3875 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3878 static enum dc_color_space
3879 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3881 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3883 switch (dc_crtc_timing->pixel_encoding) {
3884 case PIXEL_ENCODING_YCBCR422:
3885 case PIXEL_ENCODING_YCBCR444:
3886 case PIXEL_ENCODING_YCBCR420:
3889 * 27030 kHz is the separation point between HDTV and SDTV
3890 * according to the HDMI spec; we use YCbCr709 and YCbCr601
3893 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3894 if (dc_crtc_timing->flags.Y_ONLY)
3896 COLOR_SPACE_YCBCR709_LIMITED;
3898 color_space = COLOR_SPACE_YCBCR709;
3900 if (dc_crtc_timing->flags.Y_ONLY)
3902 COLOR_SPACE_YCBCR601_LIMITED;
3904 color_space = COLOR_SPACE_YCBCR601;
3909 case PIXEL_ENCODING_RGB:
3910 color_space = COLOR_SPACE_SRGB;
3921 static bool adjust_colour_depth_from_display_info(
3922 struct dc_crtc_timing *timing_out,
3923 const struct drm_display_info *info)
3925 enum dc_color_depth depth = timing_out->display_color_depth;
3928 normalized_clk = timing_out->pix_clk_100hz / 10;
3929 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3930 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3931 normalized_clk /= 2;
3932 /* Adjust the pixel clock per the HDMI spec based on colour depth */
3934 case COLOR_DEPTH_888:
3936 case COLOR_DEPTH_101010:
3937 normalized_clk = (normalized_clk * 30) / 24;
3939 case COLOR_DEPTH_121212:
3940 normalized_clk = (normalized_clk * 36) / 24;
3942 case COLOR_DEPTH_161616:
3943 normalized_clk = (normalized_clk * 48) / 24;
3946 /* The above depths are the only ones valid for HDMI. */
3949 if (normalized_clk <= info->max_tmds_clock) {
3950 timing_out->display_color_depth = depth;
3953 } while (--depth > COLOR_DEPTH_666);
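/*
 * Worked example (illustrative, assuming max_tmds_clock is in kHz):
 * 4k60 RGB has a 594000 kHz pixel clock. At 12 bpc the required TMDS
 * clock is 594000 * 36 / 24 = 891000 kHz and at 10 bpc it is
 * 742500 kHz; with a 600000 kHz max_tmds_clock both fail, so the loop
 * settles on 8 bpc (COLOR_DEPTH_888) at 594000 kHz.
 */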
3957 static void fill_stream_properties_from_drm_display_mode(
3958 struct dc_stream_state *stream,
3959 const struct drm_display_mode *mode_in,
3960 const struct drm_connector *connector,
3961 const struct drm_connector_state *connector_state,
3962 const struct dc_stream_state *old_stream)
3964 struct dc_crtc_timing *timing_out = &stream->timing;
3965 const struct drm_display_info *info = &connector->display_info;
3966 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3967 struct hdmi_vendor_infoframe hv_frame;
3968 struct hdmi_avi_infoframe avi_frame;
3970 memset(&hv_frame, 0, sizeof(hv_frame));
3971 memset(&avi_frame, 0, sizeof(avi_frame));
3973 timing_out->h_border_left = 0;
3974 timing_out->h_border_right = 0;
3975 timing_out->v_border_top = 0;
3976 timing_out->v_border_bottom = 0;
3977 /* TODO: un-hardcode */
3978 if (drm_mode_is_420_only(info, mode_in)
3979 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3980 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3981 else if (drm_mode_is_420_also(info, mode_in)
3982 && aconnector->force_yuv420_output)
3983 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3984 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3985 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3986 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3988 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3990 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3991 timing_out->display_color_depth = convert_color_depth_from_display_info(
3992 connector, connector_state,
3993 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
3994 timing_out->scan_type = SCANNING_TYPE_NODATA;
3995 timing_out->hdmi_vic = 0;
3998 timing_out->vic = old_stream->timing.vic;
3999 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4000 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4002 timing_out->vic = drm_match_cea_mode(mode_in);
4003 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4004 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4005 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4006 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4009 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4010 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4011 timing_out->vic = avi_frame.video_code;
4012 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4013 timing_out->hdmi_vic = hv_frame.vic;
4016 timing_out->h_addressable = mode_in->crtc_hdisplay;
4017 timing_out->h_total = mode_in->crtc_htotal;
4018 timing_out->h_sync_width =
4019 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4020 timing_out->h_front_porch =
4021 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4022 timing_out->v_total = mode_in->crtc_vtotal;
4023 timing_out->v_addressable = mode_in->crtc_vdisplay;
4024 timing_out->v_front_porch =
4025 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4026 timing_out->v_sync_width =
4027 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4028 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
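/* crtc_clock is in kHz, so multiplying by 10 yields units of 100 Hz;
 * e.g. (illustrative) a 148500 kHz mode stores 1485000 here.
 */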
4029 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4031 stream->output_color_space = get_output_color_space(timing_out);
4033 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4034 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4035 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4036 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4037 drm_mode_is_420_also(info, mode_in) &&
4038 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4039 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4040 adjust_colour_depth_from_display_info(timing_out, info);
4045 static void fill_audio_info(struct audio_info *audio_info,
4046 const struct drm_connector *drm_connector,
4047 const struct dc_sink *dc_sink)
4050 int cea_revision = 0;
4051 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4053 audio_info->manufacture_id = edid_caps->manufacturer_id;
4054 audio_info->product_id = edid_caps->product_id;
4056 cea_revision = drm_connector->display_info.cea_rev;
4058 strscpy(audio_info->display_name,
4059 edid_caps->display_name,
4060 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4062 if (cea_revision >= 3) {
4063 audio_info->mode_count = edid_caps->audio_mode_count;
4065 for (i = 0; i < audio_info->mode_count; ++i) {
4066 audio_info->modes[i].format_code =
4067 (enum audio_format_code)
4068 (edid_caps->audio_modes[i].format_code);
4069 audio_info->modes[i].channel_count =
4070 edid_caps->audio_modes[i].channel_count;
4071 audio_info->modes[i].sample_rates.all =
4072 edid_caps->audio_modes[i].sample_rate;
4073 audio_info->modes[i].sample_size =
4074 edid_caps->audio_modes[i].sample_size;
4078 audio_info->flags.all = edid_caps->speaker_flags;
4080 /* TODO: We only check for progressive mode; check for interlaced mode too */
4081 if (drm_connector->latency_present[0]) {
4082 audio_info->video_latency = drm_connector->video_latency[0];
4083 audio_info->audio_latency = drm_connector->audio_latency[0];
4086 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4091 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4092 struct drm_display_mode *dst_mode)
4094 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4095 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4096 dst_mode->crtc_clock = src_mode->crtc_clock;
4097 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4098 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4099 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4100 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4101 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4102 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4103 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4104 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4105 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4106 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4107 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4111 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4112 const struct drm_display_mode *native_mode,
4115 if (scale_enabled) {
4116 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4117 } else if (native_mode->clock == drm_mode->clock &&
4118 native_mode->htotal == drm_mode->htotal &&
4119 native_mode->vtotal == drm_mode->vtotal) {
4120 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4122 /* no scaling and no amdgpu-inserted mode, no need to patch */
4126 static struct dc_sink *
4127 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4129 struct dc_sink_init_data sink_init_data = { 0 };
4130 struct dc_sink *sink = NULL;
4131 sink_init_data.link = aconnector->dc_link;
4132 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4134 sink = dc_sink_create(&sink_init_data);
4136 DRM_ERROR("Failed to create sink!\n");
4139 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4144 static void set_multisync_trigger_params(
4145 struct dc_stream_state *stream)
4147 if (stream->triggered_crtc_reset.enabled) {
4148 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4149 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4153 static void set_master_stream(struct dc_stream_state *stream_set[],
4156 int j, highest_rfr = 0, master_stream = 0;
4158 for (j = 0; j < stream_count; j++) {
4159 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4160 int refresh_rate = 0;
4162 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4163 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
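/*
 * Illustrative: for 1080p60 (pix_clk_100hz = 1485000, h_total = 2200,
 * v_total = 1125) this evaluates to
 * 1485000 * 100 / (2200 * 1125) = 60.
 */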
4164 if (refresh_rate > highest_rfr) {
4165 highest_rfr = refresh_rate;
4170 for (j = 0; j < stream_count; j++) {
4172 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4176 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4180 if (context->stream_count < 2)
4182 for (i = 0; i < context->stream_count ; i++) {
4183 if (!context->streams[i])
4186 * TODO: add a function to read AMD VSDB bits and set the
4187 * crtc_sync_master.multi_sync_enabled flag.
4188 * For now it's set to false.
4190 set_multisync_trigger_params(context->streams[i]);
4192 set_master_stream(context->streams, context->stream_count);
4195 static struct dc_stream_state *
4196 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4197 const struct drm_display_mode *drm_mode,
4198 const struct dm_connector_state *dm_state,
4199 const struct dc_stream_state *old_stream)
4201 struct drm_display_mode *preferred_mode = NULL;
4202 struct drm_connector *drm_connector;
4203 const struct drm_connector_state *con_state =
4204 dm_state ? &dm_state->base : NULL;
4205 struct dc_stream_state *stream = NULL;
4206 struct drm_display_mode mode = *drm_mode;
4207 bool native_mode_found = false;
4208 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4210 int preferred_refresh = 0;
4211 #if defined(CONFIG_DRM_AMD_DC_DCN)
4212 struct dsc_dec_dpcd_caps dsc_caps;
4214 uint32_t link_bandwidth_kbps;
4216 struct dc_sink *sink = NULL;
4217 if (aconnector == NULL) {
4218 DRM_ERROR("aconnector is NULL!\n");
4222 drm_connector = &aconnector->base;
4224 if (!aconnector->dc_sink) {
4225 sink = create_fake_sink(aconnector);
4229 sink = aconnector->dc_sink;
4230 dc_sink_retain(sink);
4233 stream = dc_create_stream_for_sink(sink);
4235 if (stream == NULL) {
4236 DRM_ERROR("Failed to create stream for sink!\n");
4240 stream->dm_stream_context = aconnector;
4242 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4243 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4245 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4246 /* Search for preferred mode */
4247 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4248 native_mode_found = true;
4252 if (!native_mode_found)
4253 preferred_mode = list_first_entry_or_null(
4254 &aconnector->base.modes,
4255 struct drm_display_mode,
4258 mode_refresh = drm_mode_vrefresh(&mode);
4260 if (preferred_mode == NULL) {
4262 * This may not be an error: the use case is when we have no
4263 * usermode calls to reset and set mode upon hotplug. In this
4264 * case, we call set mode ourselves to restore the previous mode,
4265 * and the mode list may not be filled in yet.
4267 DRM_DEBUG_DRIVER("No preferred mode found\n");
4269 decide_crtc_timing_for_drm_display_mode(
4270 &mode, preferred_mode,
4271 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4272 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4276 drm_mode_set_crtcinfo(&mode, 0);
4279 * If scaling is enabled and the refresh rate didn't change,
4280 * we copy the VIC and polarities of the old timings.
4282 if (!scale || mode_refresh != preferred_refresh)
4283 fill_stream_properties_from_drm_display_mode(stream,
4284 &mode, &aconnector->base, con_state, NULL);
4286 fill_stream_properties_from_drm_display_mode(stream,
4287 &mode, &aconnector->base, con_state, old_stream);
4289 stream->timing.flags.DSC = 0;
4291 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4292 #if defined(CONFIG_DRM_AMD_DC_DCN)
4293 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4294 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4295 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4298 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4299 dc_link_get_link_cap(aconnector->dc_link));
4301 #if defined(CONFIG_DRM_AMD_DC_DCN)
4302 if (dsc_caps.is_dsc_supported)
4303 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4305 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4306 link_bandwidth_kbps,
4308 &stream->timing.dsc_cfg))
4309 stream->timing.flags.DSC = 1;
4313 update_stream_scaling_settings(&mode, dm_state, stream);
4316 &stream->audio_info,
4320 update_stream_signal(stream, sink);
4322 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4323 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4324 if (stream->link->psr_feature_enabled) {
4325 struct dc *core_dc = stream->link->ctx->dc;
4327 if (dc_is_dmcu_initialized(core_dc)) {
4328 struct dmcu *dmcu = core_dc->res_pool->dmcu;
4330 stream->psr_version = dmcu->dmcu_version.psr_version;
4333 // Decide whether the stream supports VSC SDP colorimetry
4334 // before building the VSC info packet.
4336 stream->use_vsc_sdp_for_colorimetry = false;
4337 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4338 stream->use_vsc_sdp_for_colorimetry =
4339 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4341 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4342 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4343 stream->use_vsc_sdp_for_colorimetry = true;
4346 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4350 dc_sink_release(sink);
4355 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4357 drm_crtc_cleanup(crtc);
4361 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4362 struct drm_crtc_state *state)
4364 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4366 /* TODO: Destroy dc_stream objects after the stream object is flattened */
4368 dc_stream_release(cur->stream);
4371 __drm_atomic_helper_crtc_destroy_state(state);
4377 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4379 struct dm_crtc_state *state;
4382 dm_crtc_destroy_state(crtc, crtc->state);
4384 state = kzalloc(sizeof(*state), GFP_KERNEL);
4385 if (WARN_ON(!state))
4388 crtc->state = &state->base;
4389 crtc->state->crtc = crtc;
4393 static struct drm_crtc_state *
4394 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4396 struct dm_crtc_state *state, *cur;
4398 cur = to_dm_crtc_state(crtc->state);
4400 if (WARN_ON(!crtc->state))
4403 state = kzalloc(sizeof(*state), GFP_KERNEL);
4407 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4410 state->stream = cur->stream;
4411 dc_stream_retain(state->stream);
4414 state->active_planes = cur->active_planes;
4415 state->interrupts_enabled = cur->interrupts_enabled;
4416 state->vrr_params = cur->vrr_params;
4417 state->vrr_infopacket = cur->vrr_infopacket;
4418 state->abm_level = cur->abm_level;
4419 state->vrr_supported = cur->vrr_supported;
4420 state->freesync_config = cur->freesync_config;
4421 state->crc_src = cur->crc_src;
4422 state->cm_has_degamma = cur->cm_has_degamma;
4423 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4425 /* TODO: Duplicate dc_stream after the stream object is flattened */
4427 return &state->base;
4430 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4432 enum dc_irq_source irq_source;
4433 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4434 struct amdgpu_device *adev = crtc->dev->dev_private;
4437 /* Do not set vupdate for DCN hardware */
4438 if (adev->family > AMDGPU_FAMILY_AI)
4441 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4443 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4445 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4446 acrtc->crtc_id, enable ? "en" : "dis", rc);
4450 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4452 enum dc_irq_source irq_source;
4453 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4454 struct amdgpu_device *adev = crtc->dev->dev_private;
4455 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4459 /* vblank irq on -> Only need vupdate irq in vrr mode */
4460 if (amdgpu_dm_vrr_active(acrtc_state))
4461 rc = dm_set_vupdate_irq(crtc, true);
4463 /* vblank irq off -> vupdate irq off */
4464 rc = dm_set_vupdate_irq(crtc, false);
4470 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4471 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4474 static int dm_enable_vblank(struct drm_crtc *crtc)
4476 return dm_set_vblank(crtc, true);
4479 static void dm_disable_vblank(struct drm_crtc *crtc)
4481 dm_set_vblank(crtc, false);
4484 /* Implements only the options currently available to the driver */
4485 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4486 .reset = dm_crtc_reset_state,
4487 .destroy = amdgpu_dm_crtc_destroy,
4488 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4489 .set_config = drm_atomic_helper_set_config,
4490 .page_flip = drm_atomic_helper_page_flip,
4491 .atomic_duplicate_state = dm_crtc_duplicate_state,
4492 .atomic_destroy_state = dm_crtc_destroy_state,
4493 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4494 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4495 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4496 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4497 .enable_vblank = dm_enable_vblank,
4498 .disable_vblank = dm_disable_vblank,
4499 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4502 static enum drm_connector_status
4503 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4506 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4510 * 1. This interface is NOT called in the context of the HPD irq.
4511 * 2. This interface *is called* in the context of a user-mode ioctl,
4512 * which makes it a bad place for *any* MST-related activity.
4515 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4516 !aconnector->fake_enable)
4517 connected = (aconnector->dc_sink != NULL);
4519 connected = (aconnector->base.force == DRM_FORCE_ON);
4521 return (connected ? connector_status_connected :
4522 connector_status_disconnected);
4525 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4526 struct drm_connector_state *connector_state,
4527 struct drm_property *property,
4530 struct drm_device *dev = connector->dev;
4531 struct amdgpu_device *adev = dev->dev_private;
4532 struct dm_connector_state *dm_old_state =
4533 to_dm_connector_state(connector->state);
4534 struct dm_connector_state *dm_new_state =
4535 to_dm_connector_state(connector_state);
4539 if (property == dev->mode_config.scaling_mode_property) {
4540 enum amdgpu_rmx_type rmx_type;
4543 case DRM_MODE_SCALE_CENTER:
4544 rmx_type = RMX_CENTER;
4546 case DRM_MODE_SCALE_ASPECT:
4547 rmx_type = RMX_ASPECT;
4549 case DRM_MODE_SCALE_FULLSCREEN:
4550 rmx_type = RMX_FULL;
4552 case DRM_MODE_SCALE_NONE:
4558 if (dm_old_state->scaling == rmx_type)
4561 dm_new_state->scaling = rmx_type;
4563 } else if (property == adev->mode_info.underscan_hborder_property) {
4564 dm_new_state->underscan_hborder = val;
4566 } else if (property == adev->mode_info.underscan_vborder_property) {
4567 dm_new_state->underscan_vborder = val;
4569 } else if (property == adev->mode_info.underscan_property) {
4570 dm_new_state->underscan_enable = val;
4572 } else if (property == adev->mode_info.abm_level_property) {
4573 dm_new_state->abm_level = val;
4580 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4581 const struct drm_connector_state *state,
4582 struct drm_property *property,
4585 struct drm_device *dev = connector->dev;
4586 struct amdgpu_device *adev = dev->dev_private;
4587 struct dm_connector_state *dm_state =
4588 to_dm_connector_state(state);
4591 if (property == dev->mode_config.scaling_mode_property) {
4592 switch (dm_state->scaling) {
4594 *val = DRM_MODE_SCALE_CENTER;
4597 *val = DRM_MODE_SCALE_ASPECT;
4600 *val = DRM_MODE_SCALE_FULLSCREEN;
4604 *val = DRM_MODE_SCALE_NONE;
4608 } else if (property == adev->mode_info.underscan_hborder_property) {
4609 *val = dm_state->underscan_hborder;
4611 } else if (property == adev->mode_info.underscan_vborder_property) {
4612 *val = dm_state->underscan_vborder;
4614 } else if (property == adev->mode_info.underscan_property) {
4615 *val = dm_state->underscan_enable;
4617 } else if (property == adev->mode_info.abm_level_property) {
4618 *val = dm_state->abm_level;
4625 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4627 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4629 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4632 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4634 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4635 const struct dc_link *link = aconnector->dc_link;
4636 struct amdgpu_device *adev = connector->dev->dev_private;
4637 struct amdgpu_display_manager *dm = &adev->dm;
4639 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4640 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4642 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4643 link->type != dc_connection_none &&
4644 dm->backlight_dev) {
4645 backlight_device_unregister(dm->backlight_dev);
4646 dm->backlight_dev = NULL;
4650 if (aconnector->dc_em_sink)
4651 dc_sink_release(aconnector->dc_em_sink);
4652 aconnector->dc_em_sink = NULL;
4653 if (aconnector->dc_sink)
4654 dc_sink_release(aconnector->dc_sink);
4655 aconnector->dc_sink = NULL;
4657 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4658 drm_connector_unregister(connector);
4659 drm_connector_cleanup(connector);
4660 if (aconnector->i2c) {
4661 i2c_del_adapter(&aconnector->i2c->base);
4662 kfree(aconnector->i2c);
4668 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4670 struct dm_connector_state *state =
4671 to_dm_connector_state(connector->state);
4673 if (connector->state)
4674 __drm_atomic_helper_connector_destroy_state(connector->state);
4678 state = kzalloc(sizeof(*state), GFP_KERNEL);
4681 state->scaling = RMX_OFF;
4682 state->underscan_enable = false;
4683 state->underscan_hborder = 0;
4684 state->underscan_vborder = 0;
4685 state->base.max_requested_bpc = 8;
4686 state->vcpi_slots = 0;
4688 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4689 state->abm_level = amdgpu_dm_abm_level;
4691 __drm_atomic_helper_connector_reset(connector, &state->base);
4695 struct drm_connector_state *
4696 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4698 struct dm_connector_state *state =
4699 to_dm_connector_state(connector->state);
4701 struct dm_connector_state *new_state =
4702 kmemdup(state, sizeof(*state), GFP_KERNEL);
4707 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4709 new_state->freesync_capable = state->freesync_capable;
4710 new_state->abm_level = state->abm_level;
4711 new_state->scaling = state->scaling;
4712 new_state->underscan_enable = state->underscan_enable;
4713 new_state->underscan_hborder = state->underscan_hborder;
4714 new_state->underscan_vborder = state->underscan_vborder;
4715 new_state->vcpi_slots = state->vcpi_slots;
4716 new_state->pbn = state->pbn;
4717 return &new_state->base;
4721 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4723 struct amdgpu_dm_connector *amdgpu_dm_connector =
4724 to_amdgpu_dm_connector(connector);
4726 #if defined(CONFIG_DEBUG_FS)
4727 connector_debugfs_init(amdgpu_dm_connector);
4733 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4734 .reset = amdgpu_dm_connector_funcs_reset,
4735 .detect = amdgpu_dm_connector_detect,
4736 .fill_modes = drm_helper_probe_single_connector_modes,
4737 .destroy = amdgpu_dm_connector_destroy,
4738 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4739 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4740 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4741 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4742 .late_register = amdgpu_dm_connector_late_register,
4743 .early_unregister = amdgpu_dm_connector_unregister
4746 static int get_modes(struct drm_connector *connector)
4748 return amdgpu_dm_connector_get_modes(connector);
4751 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4753 struct dc_sink_init_data init_params = {
4754 .link = aconnector->dc_link,
4755 .sink_signal = SIGNAL_TYPE_VIRTUAL
4759 if (!aconnector->base.edid_blob_ptr) {
4760 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4761 aconnector->base.name);
4763 aconnector->base.force = DRM_FORCE_OFF;
4764 aconnector->base.override_edid = false;
4768 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4770 aconnector->edid = edid;
4772 aconnector->dc_em_sink = dc_link_add_remote_sink(
4773 aconnector->dc_link,
4775 (edid->extensions + 1) * EDID_LENGTH,
4778 if (aconnector->base.force == DRM_FORCE_ON) {
4779 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4780 aconnector->dc_link->local_sink :
4781 aconnector->dc_em_sink;
4782 dc_sink_retain(aconnector->dc_sink);
4786 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4788 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4791 * In case of a headless boot with force on for a DP-managed connector,
4792 * those settings have to be != 0 to get an initial modeset.
4794 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4795 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4796 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4800 aconnector->base.override_edid = true;
4801 create_eml_sink(aconnector);
4804 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4805 struct drm_display_mode *mode)
4807 int result = MODE_ERROR;
4808 struct dc_sink *dc_sink;
4809 struct amdgpu_device *adev = connector->dev->dev_private;
4810 /* TODO: Unhardcode stream count */
4811 struct dc_stream_state *stream;
4812 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4813 enum dc_status dc_result = DC_OK;
4815 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4816 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4820 * Only run this the first time mode_valid is called to initialize
4823 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4824 !aconnector->dc_em_sink)
4825 handle_edid_mgmt(aconnector);
4827 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4829 if (dc_sink == NULL) {
4830 DRM_ERROR("dc_sink is NULL!\n");
4834 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4835 if (stream == NULL) {
4836 DRM_ERROR("Failed to create stream for sink!\n");
4840 dc_result = dc_validate_stream(adev->dm.dc, stream);
4842 if (dc_result == DC_OK)
4845 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4851 dc_stream_release(stream);
4854 /* TODO: error handling */
4858 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4859 struct dc_info_packet *out)
4861 struct hdmi_drm_infoframe frame;
4862 unsigned char buf[30]; /* 26 + 4 */
4866 memset(out, 0, sizeof(*out));
4868 if (!state->hdr_output_metadata)
4871 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4875 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4879 /* Static metadata is a fixed 26 bytes + 4 byte header. */
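/*
 * hdmi_drm_infoframe_pack_only() emits a standard HDMI infoframe
 * layout: buf[0] = type (0x87), buf[1] = version, buf[2] = payload
 * length (26), buf[3] = checksum, buf[4..29] = static metadata payload.
 * The switch below repacks that buffer into the header format each
 * connector type expects.
 */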
4883 /* Prepare the infopacket for DC. */
4884 switch (state->connector->connector_type) {
4885 case DRM_MODE_CONNECTOR_HDMIA:
4886 out->hb0 = 0x87; /* type */
4887 out->hb1 = 0x01; /* version */
4888 out->hb2 = 0x1A; /* length */
4889 out->sb[0] = buf[3]; /* checksum */
4893 case DRM_MODE_CONNECTOR_DisplayPort:
4894 case DRM_MODE_CONNECTOR_eDP:
4895 out->hb0 = 0x00; /* sdp id, zero */
4896 out->hb1 = 0x87; /* type */
4897 out->hb2 = 0x1D; /* payload len - 1 */
4898 out->hb3 = (0x13 << 2); /* sdp version */
4899 out->sb[0] = 0x01; /* version */
4900 out->sb[1] = 0x1A; /* length */
4908 memcpy(&out->sb[i], &buf[4], 26);
4911 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4912 sizeof(out->sb), false);
4918 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4919 const struct drm_connector_state *new_state)
4921 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4922 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4924 if (old_blob != new_blob) {
4925 if (old_blob && new_blob &&
4926 old_blob->length == new_blob->length)
4927 return memcmp(old_blob->data, new_blob->data,
4937 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4938 struct drm_atomic_state *state)
4940 struct drm_connector_state *new_con_state =
4941 drm_atomic_get_new_connector_state(state, conn);
4942 struct drm_connector_state *old_con_state =
4943 drm_atomic_get_old_connector_state(state, conn);
4944 struct drm_crtc *crtc = new_con_state->crtc;
4945 struct drm_crtc_state *new_crtc_state;
4951 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4952 struct dc_info_packet hdr_infopacket;
4954 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4958 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4959 if (IS_ERR(new_crtc_state))
4960 return PTR_ERR(new_crtc_state);
4963 * DC considers the stream backends changed if the
4964 * static metadata changes. Forcing the modeset also
4965 * gives a simple way for userspace to switch from
4966 * 8bpc to 10bpc when setting the metadata to enter
4969 * Changing the static metadata after it's been
4970 * set is permissible, however. So only force a
4971 * modeset if we're entering or exiting HDR.
4973 new_crtc_state->mode_changed =
4974 !old_con_state->hdr_output_metadata ||
4975 !new_con_state->hdr_output_metadata;
4981 static const struct drm_connector_helper_funcs
4982 amdgpu_dm_connector_helper_funcs = {
4984 * If hotplugging a second, bigger display in FB console mode, bigger
4985 * resolution modes will be filtered by drm_mode_validate_size(), and those
4986 * modes are missing after the user starts lightdm. So we need to renew the
4987 * modes list in the get_modes callback, not just return the modes count.
4989 .get_modes = get_modes,
4990 .mode_valid = amdgpu_dm_connector_mode_valid,
4991 .atomic_check = amdgpu_dm_connector_atomic_check,
4994 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4998 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5000 struct drm_device *dev = new_crtc_state->crtc->dev;
5001 struct drm_plane *plane;
5003 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5004 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5011 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5013 struct drm_atomic_state *state = new_crtc_state->state;
5014 struct drm_plane *plane;
5017 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5018 struct drm_plane_state *new_plane_state;
5020 /* Cursor planes are "fake". */
5021 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5024 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5026 if (!new_plane_state) {
5028 * The plane is enabled on the CRTC and hasn't changed
5029 * state. This means that it previously passed
5030 * validation and is therefore enabled.
5036 /* We need a framebuffer to be considered enabled. */
5037 num_active += (new_plane_state->fb != NULL);
5044 * Sets whether interrupts should be enabled on a specific CRTC.
5045 * We require that the stream be enabled and that there exist active
5046 * DC planes on the stream.
5049 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5050 struct drm_crtc_state *new_crtc_state)
5052 struct dm_crtc_state *dm_new_crtc_state =
5053 to_dm_crtc_state(new_crtc_state);
5055 dm_new_crtc_state->active_planes = 0;
5056 dm_new_crtc_state->interrupts_enabled = false;
5058 if (!dm_new_crtc_state->stream)
5061 dm_new_crtc_state->active_planes =
5062 count_crtc_active_planes(new_crtc_state);
5064 dm_new_crtc_state->interrupts_enabled =
5065 dm_new_crtc_state->active_planes > 0;
5068 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5069 struct drm_crtc_state *state)
5071 struct amdgpu_device *adev = crtc->dev->dev_private;
5072 struct dc *dc = adev->dm.dc;
5073 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5077 * Update interrupt state for the CRTC. This needs to happen whenever
5078 * the CRTC has changed or whenever any of its planes have changed.
5079 * Atomic check satisfies both of these requirements since the CRTC
5080 * is added to the state by DRM during drm_atomic_helper_check_planes.
5082 dm_update_crtc_interrupt_state(crtc, state);
5084 if (unlikely(!dm_crtc_state->stream &&
5085 modeset_required(state, NULL, dm_crtc_state->stream))) {
5090 /* In some use cases, like reset, no stream is attached */
5091 if (!dm_crtc_state->stream)
5095 * We want at least one hardware plane enabled to use
5096 * the stream with a cursor enabled.
5098 if (state->enable && state->active &&
5099 does_crtc_have_active_cursor(state) &&
5100 dm_crtc_state->active_planes == 0)
5103 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5109 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5110 const struct drm_display_mode *mode,
5111 struct drm_display_mode *adjusted_mode)
5116 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5117 .disable = dm_crtc_helper_disable,
5118 .atomic_check = dm_crtc_helper_atomic_check,
5119 .mode_fixup = dm_crtc_helper_mode_fixup,
5120 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5123 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5128 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5130 switch (display_color_depth) {
5131 case COLOR_DEPTH_666:
5132 return 6;
5133 case COLOR_DEPTH_888:
5134 return 8;
5135 case COLOR_DEPTH_101010:
5136 return 10;
5137 case COLOR_DEPTH_121212:
5138 return 12;
5139 case COLOR_DEPTH_141414:
5140 return 14;
5141 case COLOR_DEPTH_161616:
5142 return 16;
5150 struct drm_crtc_state *crtc_state,
5151 struct drm_connector_state *conn_state)
5153 struct drm_atomic_state *state = crtc_state->state;
5154 struct drm_connector *connector = conn_state->connector;
5155 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5156 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5157 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5158 struct drm_dp_mst_topology_mgr *mst_mgr;
5159 struct drm_dp_mst_port *mst_port;
5160 enum dc_color_depth color_depth;
5162 bool is_y420 = false;
5164 if (!aconnector->port || !aconnector->dc_sink)
5167 mst_port = aconnector->port;
5168 mst_mgr = &aconnector->mst_port->mst_mgr;
5170 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5173 if (!state->duplicated) {
5174 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5175 aconnector->force_yuv420_output;
5176 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5178 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5179 clock = adjusted_mode->clock;
5180 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
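/*
 * PBN expresses link bandwidth in units of 54/64 MBytes/sec with a
 * 1.006 overhead margin. Rough worked example: 1920x1080@60
 * (clock = 148500 kHz) at bpp = 24 is ~445.5 MB/s of pixel data, so
 * drm_dp_calc_pbn_mode() returns on the order of 530 PBN.
 */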
5182 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5185 dm_new_connector_state->pbn,
5187 if (dm_new_connector_state->vcpi_slots < 0) {
5188 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5189 return dm_new_connector_state->vcpi_slots;
5194 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5195 .disable = dm_encoder_helper_disable,
5196 .atomic_check = dm_encoder_helper_atomic_check
5199 #if defined(CONFIG_DRM_AMD_DC_DCN)
5200 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5201 struct dc_state *dc_state)
5203 struct dc_stream_state *stream = NULL;
5204 struct drm_connector *connector;
5205 struct drm_connector_state *new_con_state, *old_con_state;
5206 struct amdgpu_dm_connector *aconnector;
5207 struct dm_connector_state *dm_conn_state;
5208 int i, j, clock, bpp;
5209 int vcpi, pbn_div, pbn = 0;
5211 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5213 aconnector = to_amdgpu_dm_connector(connector);
5215 if (!aconnector->port)
5218 if (!new_con_state || !new_con_state->crtc)
5221 dm_conn_state = to_dm_connector_state(new_con_state);
5223 for (j = 0; j < dc_state->stream_count; j++) {
5224 stream = dc_state->streams[j];
5228 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5237 if (stream->timing.flags.DSC != 1) {
5238 drm_dp_mst_atomic_enable_dsc(state,
5246 pbn_div = dm_mst_get_pbn_divider(stream->link);
5247 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5248 clock = stream->timing.pix_clk_100hz / 10;
5249 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
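/*
 * Note: dsc_cfg.bits_per_pixel is a fixed-point value in units of 1/16
 * of a bit per pixel, which is why drm_dp_calc_pbn_mode() is called
 * with its DSC flag set here, unlike the uncompressed path in
 * dm_encoder_helper_atomic_check().
 */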
5250 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5257 dm_conn_state->pbn = pbn;
5258 dm_conn_state->vcpi_slots = vcpi;
5264 static void dm_drm_plane_reset(struct drm_plane *plane)
5266 struct dm_plane_state *amdgpu_state = NULL;
5269 plane->funcs->atomic_destroy_state(plane, plane->state);
5271 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5272 WARN_ON(amdgpu_state == NULL);
5275 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5278 static struct drm_plane_state *
5279 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5281 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5283 old_dm_plane_state = to_dm_plane_state(plane->state);
5284 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5285 if (!dm_plane_state)
5288 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5290 if (old_dm_plane_state->dc_state) {
5291 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5292 dc_plane_state_retain(dm_plane_state->dc_state);
5295 return &dm_plane_state->base;
5298 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5299 struct drm_plane_state *state)
5301 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5303 if (dm_plane_state->dc_state)
5304 dc_plane_state_release(dm_plane_state->dc_state);
5306 drm_atomic_helper_plane_destroy_state(plane, state);
5309 static const struct drm_plane_funcs dm_plane_funcs = {
5310 .update_plane = drm_atomic_helper_update_plane,
5311 .disable_plane = drm_atomic_helper_disable_plane,
5312 .destroy = drm_primary_helper_destroy,
5313 .reset = dm_drm_plane_reset,
5314 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5315 .atomic_destroy_state = dm_drm_plane_destroy_state,
5318 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5319 struct drm_plane_state *new_state)
5321 struct amdgpu_framebuffer *afb;
5322 struct drm_gem_object *obj;
5323 struct amdgpu_device *adev;
5324 struct amdgpu_bo *rbo;
5325 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5326 struct list_head list;
5327 struct ttm_validate_buffer tv;
5328 struct ww_acquire_ctx ticket;
5329 uint64_t tiling_flags;
5333 dm_plane_state_old = to_dm_plane_state(plane->state);
5334 dm_plane_state_new = to_dm_plane_state(new_state);
5336 if (!new_state->fb) {
5337 DRM_DEBUG_DRIVER("No FB bound\n");
5341 afb = to_amdgpu_framebuffer(new_state->fb);
5342 obj = new_state->fb->obj[0];
5343 rbo = gem_to_amdgpu_bo(obj);
5344 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5345 INIT_LIST_HEAD(&list);
5349 list_add(&tv.head, &list);
5351 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5353 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5357 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5358 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5360 domain = AMDGPU_GEM_DOMAIN_VRAM;
5362 r = amdgpu_bo_pin(rbo, domain);
5363 if (unlikely(r != 0)) {
5364 if (r != -ERESTARTSYS)
5365 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5366 ttm_eu_backoff_reservation(&ticket, &list);
5370 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5371 if (unlikely(r != 0)) {
5372 amdgpu_bo_unpin(rbo);
5373 ttm_eu_backoff_reservation(&ticket, &list);
5374 DRM_ERROR("%p bind failed\n", rbo);
5378 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5380 ttm_eu_backoff_reservation(&ticket, &list);
5382 afb->address = amdgpu_bo_gpu_offset(rbo);
5386 if (dm_plane_state_new->dc_state &&
5387 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5388 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5390 fill_plane_buffer_attributes(
5391 adev, afb, plane_state->format, plane_state->rotation,
5392 tiling_flags, &plane_state->tiling_info,
5393 &plane_state->plane_size, &plane_state->dcc,
5394 &plane_state->address);
5400 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5401 struct drm_plane_state *old_state)
5403 struct amdgpu_bo *rbo;
5409 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5410 r = amdgpu_bo_reserve(rbo, false);
5412 DRM_ERROR("failed to reserve rbo before unpin\n");
5416 amdgpu_bo_unpin(rbo);
5417 amdgpu_bo_unreserve(rbo);
5418 amdgpu_bo_unref(&rbo);
5421 static int dm_plane_atomic_check(struct drm_plane *plane,
5422 struct drm_plane_state *state)
5424 struct amdgpu_device *adev = plane->dev->dev_private;
5425 struct dc *dc = adev->dm.dc;
5426 struct dm_plane_state *dm_plane_state;
5427 struct dc_scaling_info scaling_info;
5430 dm_plane_state = to_dm_plane_state(state);
5432 if (!dm_plane_state->dc_state)
5435 ret = fill_dc_scaling_info(state, &scaling_info);
5439 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5445 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5446 struct drm_plane_state *new_plane_state)
5448 /* Only support async updates on cursor planes. */
5449 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5455 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5456 struct drm_plane_state *new_state)
5458 struct drm_plane_state *old_state =
5459 drm_atomic_get_old_plane_state(new_state->state, plane);
5461 swap(plane->state->fb, new_state->fb);
5463 plane->state->src_x = new_state->src_x;
5464 plane->state->src_y = new_state->src_y;
5465 plane->state->src_w = new_state->src_w;
5466 plane->state->src_h = new_state->src_h;
5467 plane->state->crtc_x = new_state->crtc_x;
5468 plane->state->crtc_y = new_state->crtc_y;
5469 plane->state->crtc_w = new_state->crtc_w;
5470 plane->state->crtc_h = new_state->crtc_h;
5472 handle_cursor_update(plane, old_state);
5475 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5476 .prepare_fb = dm_plane_helper_prepare_fb,
5477 .cleanup_fb = dm_plane_helper_cleanup_fb,
5478 .atomic_check = dm_plane_atomic_check,
5479 .atomic_async_check = dm_plane_atomic_async_check,
5480 .atomic_async_update = dm_plane_atomic_async_update
5484 * TODO: these are currently initialized to RGB formats only.
5485 * For future use cases we should either initialize them dynamically based on
5486 * plane capabilities, or initialize this array to all formats, so the
5487 * internal DRM check will succeed, and let DC implement the proper check.
5489 static const uint32_t rgb_formats[] = {
5490 DRM_FORMAT_XRGB8888,
5491 DRM_FORMAT_ARGB8888,
5492 DRM_FORMAT_RGBA8888,
5493 DRM_FORMAT_XRGB2101010,
5494 DRM_FORMAT_XBGR2101010,
5495 DRM_FORMAT_ARGB2101010,
5496 DRM_FORMAT_ABGR2101010,
5497 DRM_FORMAT_XBGR8888,
5498 DRM_FORMAT_ABGR8888,
5502 static const uint32_t overlay_formats[] = {
5503 DRM_FORMAT_XRGB8888,
5504 DRM_FORMAT_ARGB8888,
5505 DRM_FORMAT_RGBA8888,
5506 DRM_FORMAT_XBGR8888,
5507 DRM_FORMAT_ABGR8888,
5511 static const u32 cursor_formats[] = {
5515 static int get_plane_formats(const struct drm_plane *plane,
5516 const struct dc_plane_cap *plane_cap,
5517 uint32_t *formats, int max_formats)
5519 int i, num_formats = 0;
5522 * TODO: Query support for each group of formats directly from
5523 * DC plane caps. This will require adding more formats to the
5527 switch (plane->type) {
5528 case DRM_PLANE_TYPE_PRIMARY:
5529 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5530 if (num_formats >= max_formats)
5533 formats[num_formats++] = rgb_formats[i];
5536 if (plane_cap && plane_cap->pixel_format_support.nv12)
5537 formats[num_formats++] = DRM_FORMAT_NV12;
5540 case DRM_PLANE_TYPE_OVERLAY:
5541 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5542 if (num_formats >= max_formats)
5545 formats[num_formats++] = overlay_formats[i];
5549 case DRM_PLANE_TYPE_CURSOR:
5550 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5551 if (num_formats >= max_formats)
5554 formats[num_formats++] = cursor_formats[i];
5562 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5563 struct drm_plane *plane,
5564 unsigned long possible_crtcs,
5565 const struct dc_plane_cap *plane_cap)
5567 uint32_t formats[32];
5571 num_formats = get_plane_formats(plane, plane_cap, formats,
5572 ARRAY_SIZE(formats));
5574 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5575 &dm_plane_funcs, formats, num_formats,
5576 NULL, plane->type, NULL);
5580 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5581 plane_cap && plane_cap->per_pixel_alpha) {
5582 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5583 BIT(DRM_MODE_BLEND_PREMULTI);
5585 drm_plane_create_alpha_property(plane);
5586 drm_plane_create_blend_mode_property(plane, blend_caps);
5589 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5590 plane_cap && plane_cap->pixel_format_support.nv12) {
5591 /* This only affects YUV formats. */
5592 drm_plane_create_color_properties(
5594 BIT(DRM_COLOR_YCBCR_BT601) |
5595 BIT(DRM_COLOR_YCBCR_BT709),
5596 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5597 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5598 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5601 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5603 /* Create (reset) the plane state */
5604 if (plane->funcs->reset)
5605 plane->funcs->reset(plane);
5610 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5611 struct drm_plane *plane,
5612 uint32_t crtc_index)
5614 struct amdgpu_crtc *acrtc = NULL;
5615 struct drm_plane *cursor_plane;
5619 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5623 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5624 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5626 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5630 res = drm_crtc_init_with_planes(
5635 &amdgpu_dm_crtc_funcs, NULL);
5640 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5642 /* Create (reset) the plane state */
5643 if (acrtc->base.funcs->reset)
5644 acrtc->base.funcs->reset(&acrtc->base);
5646 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5647 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5649 acrtc->crtc_id = crtc_index;
5650 acrtc->base.enabled = false;
5651 acrtc->otg_inst = -1;
5653 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5654 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5655 true, MAX_COLOR_LUT_ENTRIES);
5656 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5662 kfree(cursor_plane);
5667 static int to_drm_connector_type(enum signal_type st)
5670 case SIGNAL_TYPE_HDMI_TYPE_A:
5671 return DRM_MODE_CONNECTOR_HDMIA;
5672 case SIGNAL_TYPE_EDP:
5673 return DRM_MODE_CONNECTOR_eDP;
5674 case SIGNAL_TYPE_LVDS:
5675 return DRM_MODE_CONNECTOR_LVDS;
5676 case SIGNAL_TYPE_RGB:
5677 return DRM_MODE_CONNECTOR_VGA;
5678 case SIGNAL_TYPE_DISPLAY_PORT:
5679 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5680 return DRM_MODE_CONNECTOR_DisplayPort;
5681 case SIGNAL_TYPE_DVI_DUAL_LINK:
5682 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5683 return DRM_MODE_CONNECTOR_DVID;
5684 case SIGNAL_TYPE_VIRTUAL:
5685 return DRM_MODE_CONNECTOR_VIRTUAL;
5688 return DRM_MODE_CONNECTOR_Unknown;
5692 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5694 struct drm_encoder *encoder;
5696 /* There is only one encoder per connector */
5697 drm_connector_for_each_possible_encoder(connector, encoder)
5703 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5705 struct drm_encoder *encoder;
5706 struct amdgpu_encoder *amdgpu_encoder;
5708 encoder = amdgpu_dm_connector_to_encoder(connector);
5710 if (encoder == NULL)
5713 amdgpu_encoder = to_amdgpu_encoder(encoder);
5715 amdgpu_encoder->native_mode.clock = 0;
5717 if (!list_empty(&connector->probed_modes)) {
5718 struct drm_display_mode *preferred_mode = NULL;
5720 list_for_each_entry(preferred_mode,
5721 &connector->probed_modes,
5723 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5724 amdgpu_encoder->native_mode = *preferred_mode;
5732 static struct drm_display_mode *
5733 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5735 int hdisplay, int vdisplay)
5737 struct drm_device *dev = encoder->dev;
5738 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5739 struct drm_display_mode *mode = NULL;
5740 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5742 mode = drm_mode_duplicate(dev, native_mode);
5747 mode->hdisplay = hdisplay;
5748 mode->vdisplay = vdisplay;
5749 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5750 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5756 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5757 struct drm_connector *connector)
5759 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5760 struct drm_display_mode *mode = NULL;
5761 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5762 struct amdgpu_dm_connector *amdgpu_dm_connector =
5763 to_amdgpu_dm_connector(connector);
5767 char name[DRM_DISPLAY_MODE_LEN];
5770 } common_modes[] = {
5771 { "640x480", 640, 480},
5772 { "800x600", 800, 600},
5773 { "1024x768", 1024, 768},
5774 { "1280x720", 1280, 720},
5775 { "1280x800", 1280, 800},
5776 {"1280x1024", 1280, 1024},
5777 { "1440x900", 1440, 900},
5778 {"1680x1050", 1680, 1050},
5779 {"1600x1200", 1600, 1200},
5780 {"1920x1080", 1920, 1080},
5781 {"1920x1200", 1920, 1200}
5784 n = ARRAY_SIZE(common_modes);
5786 for (i = 0; i < n; i++) {
5787 struct drm_display_mode *curmode = NULL;
5788 bool mode_existed = false;
5790 if (common_modes[i].w > native_mode->hdisplay ||
5791 common_modes[i].h > native_mode->vdisplay ||
5792 (common_modes[i].w == native_mode->hdisplay &&
5793 common_modes[i].h == native_mode->vdisplay))
5796 list_for_each_entry(curmode, &connector->probed_modes, head) {
5797 if (common_modes[i].w == curmode->hdisplay &&
5798 common_modes[i].h == curmode->vdisplay) {
5799 mode_existed = true;
5807 mode = amdgpu_dm_create_common_mode(encoder,
5808 common_modes[i].name, common_modes[i].w,
5810 drm_mode_probed_add(connector, mode);
5811 amdgpu_dm_connector->num_modes++;
5815 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5818 struct amdgpu_dm_connector *amdgpu_dm_connector =
5819 to_amdgpu_dm_connector(connector);
5822 /* empty probed_modes */
5823 INIT_LIST_HEAD(&connector->probed_modes);
5824 amdgpu_dm_connector->num_modes =
5825 drm_add_edid_modes(connector, edid);
5827 /* Sort the probed modes before calling
5828 * amdgpu_dm_get_native_mode(), since an EDID can have
5829 * more than one preferred mode. Modes later in the
5830 * probed mode list could be of a higher, preferred
5831 * resolution: for example, a 3840x2160 preferred
5832 * timing in the base EDID and a 4096x2160 preferred
5833 * resolution in a later DID extension block.
5835 drm_mode_sort(&connector->probed_modes);
5836 amdgpu_dm_get_native_mode(connector);
5838 amdgpu_dm_connector->num_modes = 0;
5842 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5844 struct amdgpu_dm_connector *amdgpu_dm_connector =
5845 to_amdgpu_dm_connector(connector);
5846 struct drm_encoder *encoder;
5847 struct edid *edid = amdgpu_dm_connector->edid;
5849 encoder = amdgpu_dm_connector_to_encoder(connector);
5851 if (!edid || !drm_edid_is_valid(edid)) {
5852 amdgpu_dm_connector->num_modes =
5853 drm_add_modes_noedid(connector, 640, 480);
5855 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5856 amdgpu_dm_connector_add_common_modes(encoder, connector);
5858 amdgpu_dm_fbc_init(connector);
5860 return amdgpu_dm_connector->num_modes;
5863 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5864 struct amdgpu_dm_connector *aconnector,
5866 struct dc_link *link,
5869 struct amdgpu_device *adev = dm->ddev->dev_private;
5872 * Some of the properties below require access to state, like bpc.
5873 * Allocate some default initial connector state with our reset helper.
5875 if (aconnector->base.funcs->reset)
5876 aconnector->base.funcs->reset(&aconnector->base);
5878 aconnector->connector_id = link_index;
5879 aconnector->dc_link = link;
5880 aconnector->base.interlace_allowed = false;
5881 aconnector->base.doublescan_allowed = false;
5882 aconnector->base.stereo_allowed = false;
5883 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5884 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5885 aconnector->audio_inst = -1;
5886 mutex_init(&aconnector->hpd_lock);
5889 * Configure HPD hot-plug support: connector->polled defaults to 0,
5890 * which means HPD hot plug is not supported.
5892 switch (connector_type) {
5893 case DRM_MODE_CONNECTOR_HDMIA:
5894 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5895 aconnector->base.ycbcr_420_allowed =
5896 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5898 case DRM_MODE_CONNECTOR_DisplayPort:
5899 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5900 aconnector->base.ycbcr_420_allowed =
5901 link->link_enc->features.dp_ycbcr420_supported ? true : false;
5903 case DRM_MODE_CONNECTOR_DVID:
5904 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5910 drm_object_attach_property(&aconnector->base.base,
5911 dm->ddev->mode_config.scaling_mode_property,
5912 DRM_MODE_SCALE_NONE);
5914 drm_object_attach_property(&aconnector->base.base,
5915 adev->mode_info.underscan_property,
5917 drm_object_attach_property(&aconnector->base.base,
5918 adev->mode_info.underscan_hborder_property,
5920 drm_object_attach_property(&aconnector->base.base,
5921 adev->mode_info.underscan_vborder_property,
5924 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5926 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5927 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5928 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5930 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5931 dc_is_dmcu_initialized(adev->dm.dc)) {
5932 drm_object_attach_property(&aconnector->base.base,
5933 adev->mode_info.abm_level_property, 0);
5936 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5937 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5938 connector_type == DRM_MODE_CONNECTOR_eDP) {
5939 drm_object_attach_property(
5940 &aconnector->base.base,
5941 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5943 drm_connector_attach_vrr_capable_property(
5945 #ifdef CONFIG_DRM_AMD_DC_HDCP
5946 if (adev->dm.hdcp_workqueue)
5947 drm_connector_attach_content_protection_property(&aconnector->base, true);
5952 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5953 struct i2c_msg *msgs, int num)
5955 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5956 struct ddc_service *ddc_service = i2c->ddc_service;
5957 struct i2c_command cmd;
5961 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5966 cmd.number_of_payloads = num;
5967 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5970 for (i = 0; i < num; i++) {
5971 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5972 cmd.payloads[i].address = msgs[i].addr;
5973 cmd.payloads[i].length = msgs[i].len;
5974 cmd.payloads[i].data = msgs[i].buf;
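/*
 * Example: a typical EDID read arrives here as two messages, a one-byte
 * offset write to address 0x50 followed by a 128-byte read from 0x50
 * with I2C_M_RD set. The loop above maps each message 1:1 onto a DC
 * payload, deriving the transfer direction from the I2C_M_RD flag.
 */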
5978 ddc_service->ctx->dc,
5979 ddc_service->ddc_pin->hw_info.ddc_channel,
5983 kfree(cmd.payloads);
5987 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
5989 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
5992 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
5993 .master_xfer = amdgpu_dm_i2c_xfer,
5994 .functionality = amdgpu_dm_i2c_func,
5997 static struct amdgpu_i2c_adapter *
5998 create_i2c(struct ddc_service *ddc_service,
6002 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6003 struct amdgpu_i2c_adapter *i2c;
6005 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6008 i2c->base.owner = THIS_MODULE;
6009 i2c->base.class = I2C_CLASS_DDC;
6010 i2c->base.dev.parent = &adev->pdev->dev;
6011 i2c->base.algo = &amdgpu_dm_i2c_algo;
6012 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6013 i2c_set_adapdata(&i2c->base, i2c);
6014 i2c->ddc_service = ddc_service;
6015 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6022 * Note: this function assumes that dc_link_detect() was called for the
6023 * dc_link which will be represented by this aconnector.
6025 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6026 struct amdgpu_dm_connector *aconnector,
6027 uint32_t link_index,
6028 struct amdgpu_encoder *aencoder)
6032 struct dc *dc = dm->dc;
6033 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6034 struct amdgpu_i2c_adapter *i2c;
6036 link->priv = aconnector;
6038 DRM_DEBUG_DRIVER("%s()\n", __func__);
6040 i2c = create_i2c(link->ddc, link->link_index, &res);
6042 DRM_ERROR("Failed to create i2c adapter data\n");
6046 aconnector->i2c = i2c;
6047 res = i2c_add_adapter(&i2c->base);
6050 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6054 connector_type = to_drm_connector_type(link->connector_signal);
6056 res = drm_connector_init_with_ddc(
6059 &amdgpu_dm_connector_funcs,
6064 DRM_ERROR("connector_init failed\n");
6065 aconnector->connector_id = -1;
6069 drm_connector_helper_add(
6071 &amdgpu_dm_connector_helper_funcs);
6073 amdgpu_dm_connector_init_helper(
6080 drm_connector_attach_encoder(
6081 &aconnector->base, &aencoder->base);
6083 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6084 || connector_type == DRM_MODE_CONNECTOR_eDP)
6085 amdgpu_dm_initialize_dp_connector(dm, aconnector);
6090 aconnector->i2c = NULL;
6095 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6097 switch (adev->mode_info.num_crtc) {
6114 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6115 struct amdgpu_encoder *aencoder,
6116 uint32_t link_index)
6118 struct amdgpu_device *adev = dev->dev_private;
6120 int res = drm_encoder_init(dev,
6122 &amdgpu_dm_encoder_funcs,
6123 DRM_MODE_ENCODER_TMDS,
6126 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6129 aencoder->encoder_id = link_index;
6131 aencoder->encoder_id = -1;
6133 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6138 static void manage_dm_interrupts(struct amdgpu_device *adev,
6139 struct amdgpu_crtc *acrtc,
6143 * This is not a correct translation, but it works as long as the
6144 * VBLANK constant is the same as PFLIP.
6147 amdgpu_display_crtc_idx_to_irq_type(
6152 drm_crtc_vblank_on(&acrtc->base);
6155 &adev->pageflip_irq,
6161 &adev->pageflip_irq,
6163 drm_crtc_vblank_off(&acrtc->base);
6168 is_scaling_state_different(const struct dm_connector_state *dm_state,
6169 const struct dm_connector_state *old_dm_state)
6171 if (dm_state->scaling != old_dm_state->scaling)
6173 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6174 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6176 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6177 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6179 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6180 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6185 #ifdef CONFIG_DRM_AMD_DC_HDCP
6186 static bool is_content_protection_different(struct drm_connector_state *state,
6187 const struct drm_connector_state *old_state,
6188 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6190 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6192 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6193 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6194 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6198 /* CP is being re-enabled, ignore this */
6199 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6200 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6201 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6205 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6206 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6207 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6208 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6210 /* Check if something is connected/enabled; otherwise we would start HDCP
6211 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
6213 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6214 aconnector->dc_sink != NULL)
6217 if (old_state->content_protection == state->content_protection)
6220 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6227 static void remove_stream(struct amdgpu_device *adev,
6228 struct amdgpu_crtc *acrtc,
6229 struct dc_stream_state *stream)
6231 /* this is the update mode case */
6233 acrtc->otg_inst = -1;
6234 acrtc->enabled = false;
6237 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6238 struct dc_cursor_position *position)
6240 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6242 int xorigin = 0, yorigin = 0;
6244 position->enable = false;
6248 if (!crtc || !plane->state->fb)
6251 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6252 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6253 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6255 plane->state->crtc_w,
6256 plane->state->crtc_h);
6260 x = plane->state->crtc_x;
6261 y = plane->state->crtc_y;
6263 if (x <= -amdgpu_crtc->max_cursor_width ||
6264 y <= -amdgpu_crtc->max_cursor_height)
6267 if (crtc->primary->state) {
6268 /* avivo cursors are offset into the total surface */
6269 x += crtc->primary->state->src_x >> 16;
6270 y += crtc->primary->state->src_y >> 16;
6274 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6278 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6281 position->enable = true;
6284 position->x_hotspot = xorigin;
6285 position->y_hotspot = yorigin;
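/*
 * Example: a 64x64 cursor with crtc_x = -10 yields xorigin = 10 and
 * x = 0, i.e. the hardware cursor is parked at the left edge with its
 * hotspot shifted 10 pixels, emulating a cursor partially off-screen.
 */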
6290 static void handle_cursor_update(struct drm_plane *plane,
6291 struct drm_plane_state *old_plane_state)
6293 struct amdgpu_device *adev = plane->dev->dev_private;
6294 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6295 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6296 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6297 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6298 uint64_t address = afb ? afb->address : 0;
6299 struct dc_cursor_position position;
6300 struct dc_cursor_attributes attributes;
6303 if (!plane->state->fb && !old_plane_state->fb)
6306 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6308 amdgpu_crtc->crtc_id,
6309 plane->state->crtc_w,
6310 plane->state->crtc_h);
6312 ret = get_cursor_position(plane, crtc, &position);
6316 if (!position.enable) {
6317 /* turn off cursor */
6318 if (crtc_state && crtc_state->stream) {
6319 mutex_lock(&adev->dm.dc_lock);
6320 dc_stream_set_cursor_position(crtc_state->stream,
6322 mutex_unlock(&adev->dm.dc_lock);
6327 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6328 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6330 memset(&attributes, 0, sizeof(attributes));
6331 attributes.address.high_part = upper_32_bits(address);
6332 attributes.address.low_part = lower_32_bits(address);
6333 attributes.width = plane->state->crtc_w;
6334 attributes.height = plane->state->crtc_h;
6335 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6336 attributes.rotation_angle = 0;
6337 attributes.attribute_flags.value = 0;
6339 attributes.pitch = attributes.width;
6341 if (crtc_state->stream) {
6342 mutex_lock(&adev->dm.dc_lock);
6343 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6345 DRM_ERROR("DC failed to set cursor attributes\n");
6347 if (!dc_stream_set_cursor_position(crtc_state->stream,
6349 DRM_ERROR("DC failed to set cursor position\n");
6350 mutex_unlock(&adev->dm.dc_lock);
6354 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6357 assert_spin_locked(&acrtc->base.dev->event_lock);
6358 WARN_ON(acrtc->event);
6360 acrtc->event = acrtc->base.state->event;
6362 /* Set the flip status */
6363 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6365 /* Mark this event as consumed */
6366 acrtc->base.state->event = NULL;
6368 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6372 static void update_freesync_state_on_stream(
6373 struct amdgpu_display_manager *dm,
6374 struct dm_crtc_state *new_crtc_state,
6375 struct dc_stream_state *new_stream,
6376 struct dc_plane_state *surface,
6377 u32 flip_timestamp_in_us)
6379 struct mod_vrr_params vrr_params;
6380 struct dc_info_packet vrr_infopacket = {0};
6381 struct amdgpu_device *adev = dm->adev;
6382 unsigned long flags;
6388 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6389 * For now it's sufficient to just guard against these conditions.
6392 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6395 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6396 vrr_params = new_crtc_state->vrr_params;
6399 mod_freesync_handle_preflip(
6400 dm->freesync_module,
6403 flip_timestamp_in_us,
6406 if (adev->family < AMDGPU_FAMILY_AI &&
6407 amdgpu_dm_vrr_active(new_crtc_state)) {
6408 mod_freesync_handle_v_update(dm->freesync_module,
6409 new_stream, &vrr_params);
6411 /* Need to call this before the frame ends. */
6412 dc_stream_adjust_vmin_vmax(dm->dc,
6413 new_crtc_state->stream,
6414 &vrr_params.adjust);
6418 mod_freesync_build_vrr_infopacket(
6419 dm->freesync_module,
6423 TRANSFER_FUNC_UNKNOWN,
6426 new_crtc_state->freesync_timing_changed |=
6427 (memcmp(&new_crtc_state->vrr_params.adjust,
6429 sizeof(vrr_params.adjust)) != 0);
6431 new_crtc_state->freesync_vrr_info_changed |=
6432 (memcmp(&new_crtc_state->vrr_infopacket,
6434 sizeof(vrr_infopacket)) != 0);
6436 new_crtc_state->vrr_params = vrr_params;
6437 new_crtc_state->vrr_infopacket = vrr_infopacket;
6439 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6440 new_stream->vrr_infopacket = vrr_infopacket;
6442 if (new_crtc_state->freesync_vrr_info_changed)
6443 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6444 new_crtc_state->base.crtc->base.id,
6445 (int)new_crtc_state->base.vrr_enabled,
6446 (int)vrr_params.state);
6448 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
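/*
 * The vrr_params/infopacket updates above are made under event_lock
 * because the vupdate/vblank interrupt paths also take this lock to
 * read and adjust VRR state; publishing them non-atomically could hand
 * the IRQ handlers a half-updated state.
 */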
6451 static void pre_update_freesync_state_on_stream(
6452 struct amdgpu_display_manager *dm,
6453 struct dm_crtc_state *new_crtc_state)
6455 struct dc_stream_state *new_stream = new_crtc_state->stream;
6456 struct mod_vrr_params vrr_params;
6457 struct mod_freesync_config config = new_crtc_state->freesync_config;
6458 struct amdgpu_device *adev = dm->adev;
6459 unsigned long flags;
6465 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6466 * For now it's sufficient to just guard against these conditions.
6468 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6471 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6472 vrr_params = new_crtc_state->vrr_params;
6474 if (new_crtc_state->vrr_supported &&
6475 config.min_refresh_in_uhz &&
6476 config.max_refresh_in_uhz) {
6477 config.state = new_crtc_state->base.vrr_enabled ?
6478 VRR_STATE_ACTIVE_VARIABLE :
6481 config.state = VRR_STATE_UNSUPPORTED;
6484 mod_freesync_build_vrr_params(dm->freesync_module,
6486 &config, &vrr_params);
6488 new_crtc_state->freesync_timing_changed |=
6489 (memcmp(&new_crtc_state->vrr_params.adjust,
6491 sizeof(vrr_params.adjust)) != 0);
6493 new_crtc_state->vrr_params = vrr_params;
6494 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6497 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6498 struct dm_crtc_state *new_state)
6500 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6501 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6503 if (!old_vrr_active && new_vrr_active) {
6504 /* Transition VRR inactive -> active:
6505 * While VRR is active, we must not disable vblank irq, as a
6506 * reenable after disable would compute bogus vblank/pflip
6507 * timestamps if the re-enable happened inside the display front-porch.
6509 * We also need the vupdate irq for the actual core vblank handling at the end of vblank.
6512 dm_set_vupdate_irq(new_state->base.crtc, true);
6513 drm_crtc_vblank_get(new_state->base.crtc);
6514 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6515 __func__, new_state->base.crtc->base.id);
6516 } else if (old_vrr_active && !new_vrr_active) {
6517 /* Transition VRR active -> inactive:
6518 * Allow vblank irq disable again for fixed refresh rate.
6520 dm_set_vupdate_irq(new_state->base.crtc, false);
6521 drm_crtc_vblank_put(new_state->base.crtc);
6522 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6523 __func__, new_state->base.crtc->base.id);
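/*
 * Note the pairing: every inactive->active transition takes a vblank
 * reference and enables the vupdate irq, and the matching
 * active->inactive transition drops both, so the refcounts stay
 * balanced across any sequence of VRR toggles.
 */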
6527 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6529 struct drm_plane *plane;
6530 struct drm_plane_state *old_plane_state, *new_plane_state;
6534 * TODO: Make this per-stream so we don't issue redundant updates for
6535 * commits with multiple streams.
6537 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6539 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6540 handle_cursor_update(plane, old_plane_state);
6543 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6544 struct dc_state *dc_state,
6545 struct drm_device *dev,
6546 struct amdgpu_display_manager *dm,
6547 struct drm_crtc *pcrtc,
6548 bool wait_for_vblank)
6551 uint64_t timestamp_ns;
6552 struct drm_plane *plane;
6553 struct drm_plane_state *old_plane_state, *new_plane_state;
6554 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6555 struct drm_crtc_state *new_pcrtc_state =
6556 drm_atomic_get_new_crtc_state(state, pcrtc);
6557 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6558 struct dm_crtc_state *dm_old_crtc_state =
6559 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6560 int planes_count = 0, vpos, hpos;
6562 unsigned long flags;
6563 struct amdgpu_bo *abo;
6564 uint64_t tiling_flags;
6565 uint32_t target_vblank, last_flip_vblank;
6566 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6567 bool pflip_present = false;
6569 struct dc_surface_update surface_updates[MAX_SURFACES];
6570 struct dc_plane_info plane_infos[MAX_SURFACES];
6571 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6572 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6573 struct dc_stream_update stream_update;
6576 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6579 dm_error("Failed to allocate update bundle\n");
6584 * Disable the cursor first if we're disabling all the planes.
6585 * It'll remain on the screen after the planes are re-enabled if we don't.
6588 if (acrtc_state->active_planes == 0)
6589 amdgpu_dm_commit_cursors(state);
6591 /* update planes when needed */
6592 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6593 struct drm_crtc *crtc = new_plane_state->crtc;
6594 struct drm_crtc_state *new_crtc_state;
6595 struct drm_framebuffer *fb = new_plane_state->fb;
6596 bool plane_needs_flip;
6597 struct dc_plane_state *dc_plane;
6598 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6600 /* Cursor plane is handled after stream updates */
6601 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6604 if (!fb || !crtc || pcrtc != crtc)
6607 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6608 if (!new_crtc_state->active)
6611 dc_plane = dm_new_plane_state->dc_state;
6613 bundle->surface_updates[planes_count].surface = dc_plane;
6614 if (new_pcrtc_state->color_mgmt_changed) {
6615 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6616 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6619 fill_dc_scaling_info(new_plane_state,
6620 &bundle->scaling_infos[planes_count]);
6622 bundle->surface_updates[planes_count].scaling_info =
6623 &bundle->scaling_infos[planes_count];
6625 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6627 pflip_present = pflip_present || plane_needs_flip;
6629 if (!plane_needs_flip) {
6634 abo = gem_to_amdgpu_bo(fb->obj[0]);
6637 * Wait for all fences on this FB. Do a limited wait to avoid
6638 * deadlock during GPU reset when this fence will not signal
6639 * but we hold reservation lock for the BO.
6641 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6643 msecs_to_jiffies(5000));
6644 if (unlikely(r <= 0))
6645 DRM_ERROR("Waiting for fences timed out!");
6648 * TODO: This might fail and hence is better not used; wait
6649 * explicitly on fences instead,
6650 * and in general this should only be called for
6651 * a blocking commit, as per the framework helpers.
6653 r = amdgpu_bo_reserve(abo, true);
6654 if (unlikely(r != 0))
6655 DRM_ERROR("failed to reserve buffer before flip\n");
6657 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6659 amdgpu_bo_unreserve(abo);
6661 fill_dc_plane_info_and_addr(
6662 dm->adev, new_plane_state, tiling_flags,
6663 &bundle->plane_infos[planes_count],
6664 &bundle->flip_addrs[planes_count].address);
6666 bundle->surface_updates[planes_count].plane_info =
6667 &bundle->plane_infos[planes_count];
6670 * Only allow immediate flips for fast updates that don't
6671 * change FB pitch, DCC state, rotation or mirroring.
6673 bundle->flip_addrs[planes_count].flip_immediate =
6674 crtc->state->async_flip &&
6675 acrtc_state->update_type == UPDATE_TYPE_FAST;
6677 timestamp_ns = ktime_get_ns();
6678 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6679 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6680 bundle->surface_updates[planes_count].surface = dc_plane;
6682 if (!bundle->surface_updates[planes_count].surface) {
6683 DRM_ERROR("No surface for CRTC: id=%d\n",
6684 acrtc_attach->crtc_id);
6688 if (plane == pcrtc->primary)
6689 update_freesync_state_on_stream(
6692 acrtc_state->stream,
6694 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6696 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6698 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6699 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6705 if (pflip_present) {
6707 /* Use old throttling in non-vrr fixed refresh rate mode
6708 * to keep flip scheduling based on target vblank counts
6709 * working in a backwards compatible way, e.g., for
6710 * clients using the GLX_OML_sync_control extension or
6711 * DRI3/Present extension with defined target_msc.
6713 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6716 /* For variable refresh rate mode only:
6717 * Get vblank of last completed flip to avoid > 1 vrr
6718 * flips per video frame by use of throttling, but allow
6719 * flip programming anywhere in the possibly large
6720 * variable vrr vblank interval for fine-grained flip
6721 * timing control and more opportunity to avoid stutter
6722 * on late submission of flips.
6724 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6725 last_flip_vblank = acrtc_attach->last_flip_vblank;
6726 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
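/*
 * wait_for_vblank is a bool, so the addition below targets either the
 * last completed flip's vblank (0) or the one after it (1),
 * implementing the throttling behaviour described above.
 */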
6729 target_vblank = last_flip_vblank + wait_for_vblank;
6732 * Wait until we're out of the vertical blank period before the one
6733 * targeted by the flip. The signed (int) cast below keeps the comparison correct across vblank counter wraparound.
6735 while ((acrtc_attach->enabled &&
6736 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6737 0, &vpos, &hpos, NULL,
6738 NULL, &pcrtc->hwmode)
6739 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6740 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6741 (int)(target_vblank -
6742 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6743 usleep_range(1000, 1100);
6746 if (acrtc_attach->base.state->event) {
6747 drm_crtc_vblank_get(pcrtc);
6749 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6751 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6752 prepare_flip_isr(acrtc_attach);
6754 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6757 if (acrtc_state->stream) {
6758 if (acrtc_state->freesync_vrr_info_changed)
6759 bundle->stream_update.vrr_infopacket =
6760 &acrtc_state->stream->vrr_infopacket;
6764 /* Update the planes if changed or disable if we don't have any. */
6765 if ((planes_count || acrtc_state->active_planes == 0) &&
6766 acrtc_state->stream) {
6767 bundle->stream_update.stream = acrtc_state->stream;
6768 if (new_pcrtc_state->mode_changed) {
6769 bundle->stream_update.src = acrtc_state->stream->src;
6770 bundle->stream_update.dst = acrtc_state->stream->dst;
6773 if (new_pcrtc_state->color_mgmt_changed) {
6775 * TODO: This isn't fully correct since we've actually
6776 * already modified the stream in place.
6778 bundle->stream_update.gamut_remap =
6779 &acrtc_state->stream->gamut_remap_matrix;
6780 bundle->stream_update.output_csc_transform =
6781 &acrtc_state->stream->csc_color_matrix;
6782 bundle->stream_update.out_transfer_func =
6783 acrtc_state->stream->out_transfer_func;
6786 acrtc_state->stream->abm_level = acrtc_state->abm_level;
6787 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6788 bundle->stream_update.abm_level = &acrtc_state->abm_level;
6791 * If FreeSync state on the stream has changed then we need to
6792 * re-adjust the min/max bounds now that DC doesn't handle this
6793 * as part of commit.
6795 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6796 amdgpu_dm_vrr_active(acrtc_state)) {
6797 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6798 dc_stream_adjust_vmin_vmax(
6799 dm->dc, acrtc_state->stream,
6800 &acrtc_state->vrr_params.adjust);
6801 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6803 mutex_lock(&dm->dc_lock);
6804 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6805 acrtc_state->stream->link->psr_allow_active)
6806 amdgpu_dm_psr_disable(acrtc_state->stream);
6808 dc_commit_updates_for_stream(dm->dc,
6809 bundle->surface_updates,
6811 acrtc_state->stream,
6812 &bundle->stream_update,
6815 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6816 acrtc_state->stream->psr_version &&
6817 !acrtc_state->stream->link->psr_feature_enabled)
6818 amdgpu_dm_link_setup_psr(acrtc_state->stream);
6819 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6820 acrtc_state->stream->link->psr_feature_enabled &&
6821 !acrtc_state->stream->link->psr_allow_active) {
6822 amdgpu_dm_psr_enable(acrtc_state->stream);
6825 mutex_unlock(&dm->dc_lock);
6829 * Update cursor state *after* programming all the planes.
6830 * This avoids redundant programming in the case where we're going
6831 * to be disabling a single plane - those pipes are being disabled.
6833 if (acrtc_state->active_planes)
6834 amdgpu_dm_commit_cursors(state);
6840 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6841 struct drm_atomic_state *state)
6843 struct amdgpu_device *adev = dev->dev_private;
6844 struct amdgpu_dm_connector *aconnector;
6845 struct drm_connector *connector;
6846 struct drm_connector_state *old_con_state, *new_con_state;
6847 struct drm_crtc_state *new_crtc_state;
6848 struct dm_crtc_state *new_dm_crtc_state;
6849 const struct dc_stream_status *status;
6852 /* Notify audio device removals. */
6853 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6854 if (old_con_state->crtc != new_con_state->crtc) {
6855 /* CRTC changes require notification. */
6859 if (!new_con_state->crtc)
6862 new_crtc_state = drm_atomic_get_new_crtc_state(
6863 state, new_con_state->crtc);
6865 if (!new_crtc_state)
6868 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6872 aconnector = to_amdgpu_dm_connector(connector);
6874 mutex_lock(&adev->dm.audio_lock);
6875 inst = aconnector->audio_inst;
6876 aconnector->audio_inst = -1;
6877 mutex_unlock(&adev->dm.audio_lock);
6879 amdgpu_dm_audio_eld_notify(adev, inst);
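/*
 * Note: the notification above passes the previous audio instance,
 * while aconnector->audio_inst has already been reset to -1 under
 * audio_lock; when the audio component re-queries the ELD for that
 * pin, no connector claims it anymore, which is presumably what
 * signals the removal.
 */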
6882 /* Notify audio device additions. */
6883 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6884 if (!new_con_state->crtc)
6887 new_crtc_state = drm_atomic_get_new_crtc_state(
6888 state, new_con_state->crtc);
6890 if (!new_crtc_state)
6893 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6896 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6897 if (!new_dm_crtc_state->stream)
6900 status = dc_stream_get_status(new_dm_crtc_state->stream);
6904 aconnector = to_amdgpu_dm_connector(connector);
6906 mutex_lock(&adev->dm.audio_lock);
6907 inst = status->audio_inst;
6908 aconnector->audio_inst = inst;
6909 mutex_unlock(&adev->dm.audio_lock);
6911 amdgpu_dm_audio_eld_notify(adev, inst);
6916 * Enable interrupts on CRTCs that are newly active, have undergone
6917 * a modeset, or have active planes again.
6919 * Done in two passes, based on the for_modeset flag:
6920 * Pass 1: For CRTCs going through modeset
6921 * Pass 2: For CRTCs going from 0 to n active planes
6923 * Interrupts can only be enabled after the planes are programmed,
6924 * so this requires a two-pass approach since we don't want to
6925 * just defer the interrupts until after commit planes every time.
6927 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6928 struct drm_atomic_state *state,
6931 struct amdgpu_device *adev = dev->dev_private;
6932 struct drm_crtc *crtc;
6933 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6935 #ifdef CONFIG_DEBUG_FS
6936 enum amdgpu_dm_pipe_crc_source source;
6939 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6940 new_crtc_state, i) {
6941 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6942 struct dm_crtc_state *dm_new_crtc_state =
6943 to_dm_crtc_state(new_crtc_state);
6944 struct dm_crtc_state *dm_old_crtc_state =
6945 to_dm_crtc_state(old_crtc_state);
6946 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6949 run_pass = (for_modeset && modeset) ||
6950 (!for_modeset && !modeset &&
6951 !dm_old_crtc_state->interrupts_enabled);
6956 if (!dm_new_crtc_state->interrupts_enabled)
6959 manage_dm_interrupts(adev, acrtc, true);
6961 #ifdef CONFIG_DEBUG_FS
6962 /* The stream has changed, so CRC capture needs to be re-enabled. */
6963 source = dm_new_crtc_state->crc_src;
6964 if (amdgpu_dm_is_valid_crc_source(source)) {
6965 amdgpu_dm_crtc_configure_crc_source(
6966 crtc, dm_new_crtc_state,
6967 dm_new_crtc_state->crc_src);
6974 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6975 * @crtc_state: the DRM CRTC state
6976 * @stream_state: the DC stream state.
6978 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
6979 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6981 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6982 struct dc_stream_state *stream_state)
6984 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
6987 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
6988 struct drm_atomic_state *state,
6991 struct drm_crtc *crtc;
6992 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6993 struct amdgpu_device *adev = dev->dev_private;
6997 * We evade vblank and pflip interrupts on CRTCs that are undergoing
6998 * a modeset, being disabled, or have no active planes.
7000 * It's done in atomic commit rather than commit tail for now since
7001 * some of these interrupt handlers access the current CRTC state and
7002 * potentially the stream pointer itself.
7004 * Since the atomic state is swapped within atomic commit and not within
7005 * commit tail, this would lead to the new state (that hasn't been committed yet)
7006 * being accessed from within the handlers.
7008 * TODO: Fix this so we can do this in commit tail and not have to block
7011 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7012 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7013 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7014 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7016 if (dm_old_crtc_state->interrupts_enabled &&
7017 (!dm_new_crtc_state->interrupts_enabled ||
7018 drm_atomic_crtc_needs_modeset(new_crtc_state)))
7019 manage_dm_interrupts(adev, acrtc, false);
7022 * Add a check here for SoCs that support a hardware cursor plane, to
7023 * unset legacy_cursor_update.
7026 return drm_atomic_helper_commit(dev, state, nonblock);
7028 /* TODO: Handle EINTR, re-enable IRQ */
7032 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7033 * @state: The atomic state to commit
7035 * This will tell DC to commit the constructed DC state from atomic_check,
7036 * programming the hardware. Any failure here implies a hardware failure, since
7037 * atomic check should have filtered out anything non-kosher.
7039 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7041 struct drm_device *dev = state->dev;
7042 struct amdgpu_device *adev = dev->dev_private;
7043 struct amdgpu_display_manager *dm = &adev->dm;
7044 struct dm_atomic_state *dm_state;
7045 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7047 struct drm_crtc *crtc;
7048 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7049 unsigned long flags;
7050 bool wait_for_vblank = true;
7051 struct drm_connector *connector;
7052 struct drm_connector_state *old_con_state, *new_con_state;
7053 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7054 int crtc_disable_count = 0;
7056 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7058 dm_state = dm_atomic_get_new_state(state);
7059 if (dm_state && dm_state->context) {
7060 dc_state = dm_state->context;
7062 /* No state changes, retain current state. */
7063 dc_state_temp = dc_create_state(dm->dc);
7064 ASSERT(dc_state_temp);
7065 dc_state = dc_state_temp;
7066 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7069 /* update changed items */
7070 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7071 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7073 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7074 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7077 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7078 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7079 "connectors_changed:%d\n",
7081 new_crtc_state->enable,
7082 new_crtc_state->active,
7083 new_crtc_state->planes_changed,
7084 new_crtc_state->mode_changed,
7085 new_crtc_state->active_changed,
7086 new_crtc_state->connectors_changed);
7088 /* Copy all transient state flags into dc state */
7089 if (dm_new_crtc_state->stream) {
7090 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7091 dm_new_crtc_state->stream);
7094 /* handles headless hotplug case, updating new_state and
7095 * aconnector as needed
7098 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7100 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7102 if (!dm_new_crtc_state->stream) {
7104 * this could happen because of issues with
7105 * userspace notification delivery.
7106 * In this case userspace tries to set a mode on
7107 * a display which is in fact disconnected.
7108 * dc_sink is NULL in this case on aconnector.
7109 * We expect a mode-reset to come soon.
7111 * This can also happen when an unplug is done
7112 * during the resume sequence.
7114 * In this case, we want to pretend we still
7115 * have a sink to keep the pipe running so that
7116 * hw state is consistent with the sw state
7118 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7119 __func__, acrtc->base.base.id);
7123 if (dm_old_crtc_state->stream)
7124 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7126 pm_runtime_get_noresume(dev->dev);
7128 acrtc->enabled = true;
7129 acrtc->hw_mode = new_crtc_state->mode;
7130 crtc->hwmode = new_crtc_state->mode;
7131 } else if (modereset_required(new_crtc_state)) {
7132 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7133 /* i.e. reset mode */
7134 if (dm_old_crtc_state->stream) {
7135 if (dm_old_crtc_state->stream->link->psr_allow_active)
7136 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7138 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7141 } /* for_each_crtc_in_state() */
7144 dm_enable_per_frame_crtc_master_sync(dc_state);
7145 mutex_lock(&dm->dc_lock);
7146 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7147 mutex_unlock(&dm->dc_lock);
7150 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7151 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7153 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7155 if (dm_new_crtc_state->stream != NULL) {
7156 const struct dc_stream_status *status =
7157 dc_stream_get_status(dm_new_crtc_state->stream);
7160 status = dc_stream_get_status_from_state(dc_state,
7161 dm_new_crtc_state->stream);
7164 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7166 acrtc->otg_inst = status->primary_otg_inst;
7169 #ifdef CONFIG_DRM_AMD_DC_HDCP
7170 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7171 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7172 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7173 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7175 new_crtc_state = NULL;
7178 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7180 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7182 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7183 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7184 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7185 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7189 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7190 hdcp_update_display(
7191 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7192 new_con_state->hdcp_content_type,
7193 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7198 /* Handle connector state changes */
7199 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7200 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7201 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7202 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7203 struct dc_surface_update dummy_updates[MAX_SURFACES];
7204 struct dc_stream_update stream_update;
7205 struct dc_info_packet hdr_packet;
7206 struct dc_stream_status *status = NULL;
7207 bool abm_changed, hdr_changed, scaling_changed;
7209 memset(&dummy_updates, 0, sizeof(dummy_updates));
7210 memset(&stream_update, 0, sizeof(stream_update));
7213 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7214 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7217 /* Skip any modesets/resets */
7218 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7221 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7222 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7224 scaling_changed = is_scaling_state_different(dm_new_con_state,
7227 abm_changed = dm_new_crtc_state->abm_level !=
7228 dm_old_crtc_state->abm_level;
7231 is_hdr_metadata_different(old_con_state, new_con_state);
7233 if (!scaling_changed && !abm_changed && !hdr_changed)
7236 stream_update.stream = dm_new_crtc_state->stream;
7237 if (scaling_changed) {
7238 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7239 dm_new_con_state, dm_new_crtc_state->stream);
7241 stream_update.src = dm_new_crtc_state->stream->src;
7242 stream_update.dst = dm_new_crtc_state->stream->dst;
7246 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7248 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7252 fill_hdr_info_packet(new_con_state, &hdr_packet);
7253 stream_update.hdr_static_metadata = &hdr_packet;
7256 status = dc_stream_get_status(dm_new_crtc_state->stream);
7258 WARN_ON(!status->plane_count);
7261 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7262 * Here we create an empty update on each plane.
7263 * To fix this, DC should permit updating only stream properties.
7265 for (j = 0; j < status->plane_count; j++)
7266 dummy_updates[j].surface = status->plane_states[0];
7269 mutex_lock(&dm->dc_lock);
7270 dc_commit_updates_for_stream(dm->dc,
7272 status->plane_count,
7273 dm_new_crtc_state->stream,
7276 mutex_unlock(&dm->dc_lock);
7279 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7280 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7281 new_crtc_state, i) {
7282 if (old_crtc_state->active && !new_crtc_state->active)
7283 crtc_disable_count++;
7285 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7286 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7288 /* Update freesync active state. */
7289 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7291 /* Handle vrr on->off / off->on transitions */
7292 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7296 /* Enable interrupts for CRTCs going through a modeset. */
7297 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7299 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7300 if (new_crtc_state->async_flip)
7301 wait_for_vblank = false;
7303 /* update planes when needed per crtc*/
7304 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7305 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7307 if (dm_new_crtc_state->stream)
7308 amdgpu_dm_commit_planes(state, dc_state, dev,
7309 dm, crtc, wait_for_vblank);
7312 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7313 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7315 /* Update audio instances for each connector. */
7316 amdgpu_dm_commit_audio(dev, state);
7319 * Send a vblank event for all CRTC events not handled in the flip path, and
7320 * mark each consumed event for drm_atomic_helper_commit_hw_done().
7322 spin_lock_irqsave(&adev->ddev->event_lock, flags);
7323 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7325 if (new_crtc_state->event)
7326 drm_send_event_locked(dev, &new_crtc_state->event->base);
7328 new_crtc_state->event = NULL;
7330 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7332 /* Signal HW programming completion */
7333 drm_atomic_helper_commit_hw_done(state);
7335 if (wait_for_vblank)
7336 drm_atomic_helper_wait_for_flip_done(dev, state);
7338 drm_atomic_helper_cleanup_planes(dev, state);
7341 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7342 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
7345 for (i = 0; i < crtc_disable_count; i++)
7346 pm_runtime_put_autosuspend(dev->dev);
7347 pm_runtime_mark_last_busy(dev->dev);
7350 dc_release_state(dc_state_temp);
7354 static int dm_force_atomic_commit(struct drm_connector *connector)
7357 struct drm_device *ddev = connector->dev;
7358 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7359 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7360 struct drm_plane *plane = disconnected_acrtc->base.primary;
7361 struct drm_connector_state *conn_state;
7362 struct drm_crtc_state *crtc_state;
7363 struct drm_plane_state *plane_state;
7368 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7370 /* Construct an atomic state to restore previous display setting */
7373 * Attach connectors to drm_atomic_state
7375 conn_state = drm_atomic_get_connector_state(state, connector);
7377 ret = PTR_ERR_OR_ZERO(conn_state);
7381 /* Attach crtc to drm_atomic_state*/
7382 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7384 ret = PTR_ERR_OR_ZERO(crtc_state);
7388 /* force a restore */
7389 crtc_state->mode_changed = true;
7391 /* Attach plane to drm_atomic_state */
7392 plane_state = drm_atomic_get_plane_state(state, plane);
7394 ret = PTR_ERR_OR_ZERO(plane_state);
7399 /* Call commit internally with the state we just constructed */
7400 ret = drm_atomic_commit(state);
7405 DRM_ERROR("Restoring old state failed with %i\n", ret);
7406 drm_atomic_state_put(state);
7412 * This function handles all cases when a set mode does not come upon hotplug.
7413 * This includes when a display is unplugged and then plugged back into the
7414 * same port, and when running without usermode desktop manager support.
7416 void dm_restore_drm_connector_state(struct drm_device *dev,
7417 struct drm_connector *connector)
7419 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7420 struct amdgpu_crtc *disconnected_acrtc;
7421 struct dm_crtc_state *acrtc_state;
7423 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7426 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7427 if (!disconnected_acrtc)
7430 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7431 if (!acrtc_state->stream)
7435 * If the previous sink is not released and is different from the current
7436 * one, we deduce we are in a state where we cannot rely on a usermode call
7437 * to turn on the display, so we do it here.
7439 if (acrtc_state->stream->sink != aconnector->dc_sink)
7440 dm_force_atomic_commit(&aconnector->base);
7444 * Grabs all modesetting locks to serialize against any blocking commits,
7445 * and waits for the completion of all non-blocking commits.
7447 static int do_aquire_global_lock(struct drm_device *dev,
7448 struct drm_atomic_state *state)
7450 struct drm_crtc *crtc;
7451 struct drm_crtc_commit *commit;
7455 * Adding all modeset locks to acquire_ctx will
7456 * ensure that when the framework releases it, the
7457 * extra locks we are taking here will get released too.
7459 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7463 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7464 spin_lock(&crtc->commit_lock);
7465 commit = list_first_entry_or_null(&crtc->commit_list,
7466 struct drm_crtc_commit, commit_entry);
7468 drm_crtc_commit_get(commit);
7469 spin_unlock(&crtc->commit_lock);
7475 * Make sure all pending HW programming has completed and page flips are done.
7478 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7481 ret = wait_for_completion_interruptible_timeout(
7482 &commit->flip_done, 10*HZ);
7485 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7486 "timed out\n", crtc->base.id, crtc->name);
7488 drm_crtc_commit_put(commit);
7491 return ret < 0 ? ret : 0;
7494 static void get_freesync_config_for_crtc(
7495 struct dm_crtc_state *new_crtc_state,
7496 struct dm_connector_state *new_con_state)
7498 struct mod_freesync_config config = {0};
7499 struct amdgpu_dm_connector *aconnector =
7500 to_amdgpu_dm_connector(new_con_state->base.connector);
7501 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7502 int vrefresh = drm_mode_vrefresh(mode);
7504 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7505 vrefresh >= aconnector->min_vfreq &&
7506 vrefresh <= aconnector->max_vfreq;
7508 if (new_crtc_state->vrr_supported) {
7509 new_crtc_state->stream->ignore_msa_timing_param = true;
7510 config.state = new_crtc_state->base.vrr_enabled ?
7511 VRR_STATE_ACTIVE_VARIABLE :
7513 config.min_refresh_in_uhz =
7514 aconnector->min_vfreq * 1000000;
7515 config.max_refresh_in_uhz =
7516 aconnector->max_vfreq * 1000000;
7517 config.vsif_supported = true;
7521 new_crtc_state->freesync_config = config;
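/*
 * Worked example with illustrative values: a FreeSync-capable panel
 * reporting a 48-144 Hz range yields config.min_refresh_in_uhz ==
 * 48,000,000 and config.max_refresh_in_uhz == 144,000,000, since the
 * freesync module takes refresh rates in micro-Hz (Hz * 1,000,000).
 */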
7524 static void reset_freesync_config_for_crtc(
7525 struct dm_crtc_state *new_crtc_state)
7527 new_crtc_state->vrr_supported = false;
7529 memset(&new_crtc_state->vrr_params, 0,
7530 sizeof(new_crtc_state->vrr_params));
7531 memset(&new_crtc_state->vrr_infopacket, 0,
7532 sizeof(new_crtc_state->vrr_infopacket));
7535 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7536 struct drm_atomic_state *state,
7537 struct drm_crtc *crtc,
7538 struct drm_crtc_state *old_crtc_state,
7539 struct drm_crtc_state *new_crtc_state,
7541 bool *lock_and_validation_needed)
7543 struct dm_atomic_state *dm_state = NULL;
7544 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7545 struct dc_stream_state *new_stream;
7549 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7550 * update changed items
7552 struct amdgpu_crtc *acrtc = NULL;
7553 struct amdgpu_dm_connector *aconnector = NULL;
7554 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7555 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7559 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7560 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7561 acrtc = to_amdgpu_crtc(crtc);
7562 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7564 /* TODO This hack should go away */
7565 if (aconnector && enable) {
7566 /* Make sure fake sink is created in plug-in scenario */
7567 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7569 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7572 if (IS_ERR(drm_new_conn_state)) {
7573 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7577 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7578 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7580 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7583 new_stream = create_stream_for_sink(aconnector,
7584 &new_crtc_state->mode,
7586 dm_old_crtc_state->stream);
7589 * we can have no stream on ACTION_SET if a display
7590 * was disconnected during S3; in this case it is not an
7591 * error, the OS will be updated after detection and
7592 * will do the right thing on the next atomic commit.
7596 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7597 __func__, acrtc->base.base.id);
7602 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7604 ret = fill_hdr_info_packet(drm_new_conn_state,
7605 &new_stream->hdr_static_metadata);
7610 * If we already removed the old stream from the context
7611 * (and set the new stream to NULL) then we can't reuse
7612 * the old stream even if the stream and scaling are unchanged.
7613 * We'll hit the BUG_ON and get a black screen.
7615 * TODO: Refactor this function to allow this check to work
7616 * in all conditions.
7618 if (dm_new_crtc_state->stream &&
7619 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7620 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7621 new_crtc_state->mode_changed = false;
7622 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7623 new_crtc_state->mode_changed);
7627 /* mode_changed flag may get updated above, need to check again */
7628 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7632 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7633 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7634 "connectors_changed:%d\n",
7636 new_crtc_state->enable,
7637 new_crtc_state->active,
7638 new_crtc_state->planes_changed,
7639 new_crtc_state->mode_changed,
7640 new_crtc_state->active_changed,
7641 new_crtc_state->connectors_changed);
7643 /* Remove stream for any changed/disabled CRTC */
7646 if (!dm_old_crtc_state->stream)
7649 ret = dm_atomic_get_state(state, &dm_state);
7653 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7656 /* i.e. reset mode */
7657 if (dc_remove_stream_from_ctx(
7660 dm_old_crtc_state->stream) != DC_OK) {
7665 dc_stream_release(dm_old_crtc_state->stream);
7666 dm_new_crtc_state->stream = NULL;
7668 reset_freesync_config_for_crtc(dm_new_crtc_state);
7670 *lock_and_validation_needed = true;
7672 } else { /* Add stream for any updated/enabled CRTC */
7674 * Quick fix to prevent a NULL pointer dereference on new_stream when
7675 * MST connectors added in chained mode are not found in the existing crtc_state.
7676 * TODO: need to dig out the root cause of this.
7678 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7681 if (modereset_required(new_crtc_state))
7684 if (modeset_required(new_crtc_state, new_stream,
7685 dm_old_crtc_state->stream)) {
7687 WARN_ON(dm_new_crtc_state->stream);
7689 ret = dm_atomic_get_state(state, &dm_state);
7693 dm_new_crtc_state->stream = new_stream;
7695 dc_stream_retain(new_stream);
7697 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7700 if (dc_add_stream_to_ctx(
7703 dm_new_crtc_state->stream) != DC_OK) {
7708 *lock_and_validation_needed = true;
7713 /* Release extra reference */
7715 dc_stream_release(new_stream);
7718 * We want to do dc stream updates that do not require a
7719 * full modeset below.
7721 if (!(enable && aconnector && new_crtc_state->enable &&
7722 new_crtc_state->active))
7725 * Given the above conditions, the dc state cannot be NULL because:
7726 * 1. the CRTC is in the process of being enabled (its stream has just
7727 * been added to the dc context, or is already on the context),
7728 * 2. it has a valid connector attached, and
7729 * 3. it is currently active and enabled,
7730 * => so the dc stream state currently exists.
7732 BUG_ON(dm_new_crtc_state->stream == NULL);
7734 /* Scaling or underscan settings */
7735 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7736 update_stream_scaling_settings(
7737 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7740 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7743 * Color management settings. We also update color properties
7744 * when a modeset is needed, to ensure it gets reprogrammed.
7746 if (dm_new_crtc_state->base.color_mgmt_changed ||
7747 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7748 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7753 /* Update Freesync settings. */
7754 get_freesync_config_for_crtc(dm_new_crtc_state,
7761 dc_stream_release(new_stream);
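/*
 * should_reset_plane() below errs on the side of a full plane
 * remove-and-recreate: any allow_modeset commit, plane crtc change,
 * CRTC color-management change, full modeset, or a sibling plane being
 * added/removed or changing framebuffer format forces a reset, and
 * only clearly benign updates fall through and return false.
 */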
7765 static bool should_reset_plane(struct drm_atomic_state *state,
7766 struct drm_plane *plane,
7767 struct drm_plane_state *old_plane_state,
7768 struct drm_plane_state *new_plane_state)
7770 struct drm_plane *other;
7771 struct drm_plane_state *old_other_state, *new_other_state;
7772 struct drm_crtc_state *new_crtc_state;
7776 * TODO: Remove this hack once the checks below are sufficient
7777 * to determine when we need to reset all the planes on the CRTC.
7780 if (state->allow_modeset)
7783 /* Exit early if we know that we're adding or removing the plane. */
7784 if (old_plane_state->crtc != new_plane_state->crtc)
7787 /* old crtc == new_crtc == NULL, plane not in context. */
7788 if (!new_plane_state->crtc)
7792 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7794 if (!new_crtc_state)
7797 /* CRTC Degamma changes currently require us to recreate planes. */
7798 if (new_crtc_state->color_mgmt_changed)
7801 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7805 * If there are any new primary or overlay planes being added or
7806 * removed then the z-order can potentially change. To ensure
7807 * correct z-order and pipe acquisition the current DC architecture
7808 * requires us to remove and recreate all existing planes.
7810 * TODO: Come up with a more elegant solution for this.
7812 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7813 if (other->type == DRM_PLANE_TYPE_CURSOR)
7816 if (old_other_state->crtc != new_plane_state->crtc &&
7817 new_other_state->crtc != new_plane_state->crtc)
7820 if (old_other_state->crtc != new_other_state->crtc)
7823 /* TODO: Remove this once we can handle fast format changes. */
7824 if (old_other_state->fb && new_other_state->fb &&
7825 old_other_state->fb->format != new_other_state->fb->format)
7832 static int dm_update_plane_state(struct dc *dc,
7833 struct drm_atomic_state *state,
7834 struct drm_plane *plane,
7835 struct drm_plane_state *old_plane_state,
7836 struct drm_plane_state *new_plane_state,
7838 bool *lock_and_validation_needed)
7841 struct dm_atomic_state *dm_state = NULL;
7842 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7843 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7844 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7845 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7850 new_plane_crtc = new_plane_state->crtc;
7851 old_plane_crtc = old_plane_state->crtc;
7852 dm_new_plane_state = to_dm_plane_state(new_plane_state);
7853 dm_old_plane_state = to_dm_plane_state(old_plane_state);
7855 /* TODO: Implement atomic check for cursor plane */
7856 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7859 needs_reset = should_reset_plane(state, plane, old_plane_state,
7862 /* Remove any changed/removed planes */
7867 if (!old_plane_crtc)
7870 old_crtc_state = drm_atomic_get_old_crtc_state(
7871 state, old_plane_crtc);
7872 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7874 if (!dm_old_crtc_state->stream)
7877 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7878 plane->base.id, old_plane_crtc->base.id);
7880 ret = dm_atomic_get_state(state, &dm_state);
7884 if (!dc_remove_plane_from_context(
7886 dm_old_crtc_state->stream,
7887 dm_old_plane_state->dc_state,
7888 dm_state->context)) {
7895 dc_plane_state_release(dm_old_plane_state->dc_state);
7896 dm_new_plane_state->dc_state = NULL;
7898 *lock_and_validation_needed = true;
7900 } else { /* Add new planes */
7901 struct dc_plane_state *dc_new_plane_state;
7903 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7906 if (!new_plane_crtc)
7909 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7910 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7912 if (!dm_new_crtc_state->stream)
7918 WARN_ON(dm_new_plane_state->dc_state);
7920 dc_new_plane_state = dc_create_plane_state(dc);
7921 if (!dc_new_plane_state)
7924 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7925 plane->base.id, new_plane_crtc->base.id);
7927 ret = fill_dc_plane_attributes(
7928 new_plane_crtc->dev->dev_private,
7933 dc_plane_state_release(dc_new_plane_state);
7937 ret = dm_atomic_get_state(state, &dm_state);
7939 dc_plane_state_release(dc_new_plane_state);
7944 * Any atomic check errors that occur after this will
7945 * not need a release. The plane state will be attached
7946 * to the stream, and therefore part of the atomic
7947 * state. It'll be released when the atomic state is cleaned up.
7950 if (!dc_add_plane_to_context(
7952 dm_new_crtc_state->stream,
7954 dm_state->context)) {
7956 dc_plane_state_release(dc_new_plane_state);
7960 dm_new_plane_state->dc_state = dc_new_plane_state;
7962 /* Tell DC to do a full surface update every time there
7963 * is a plane change. Inefficient, but works for now.
7965 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7967 *lock_and_validation_needed = true;
7975 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
7976 struct drm_atomic_state *state,
7977 enum surface_update_type *out_type)
7979 struct dc *dc = dm->dc;
7980 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
7981 int i, j, num_plane, ret = 0;
7982 struct drm_plane_state *old_plane_state, *new_plane_state;
7983 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
7984 struct drm_crtc *new_plane_crtc;
7985 struct drm_plane *plane;
7987 struct drm_crtc *crtc;
7988 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
7989 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
7990 struct dc_stream_status *status = NULL;
7991 enum surface_update_type update_type = UPDATE_TYPE_FAST;
7992 struct surface_info_bundle {
7993 struct dc_surface_update surface_updates[MAX_SURFACES];
7994 struct dc_plane_info plane_infos[MAX_SURFACES];
7995 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7996 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7997 struct dc_stream_update stream_update;
8000 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8003 DRM_ERROR("Failed to allocate update bundle\n");
8004 /* Set type to FULL to avoid crashing in DC */
8005 update_type = UPDATE_TYPE_FULL;
8009 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8011 memset(bundle, 0, sizeof(struct surface_info_bundle));
8013 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8014 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8017 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8018 update_type = UPDATE_TYPE_FULL;
8022 if (!new_dm_crtc_state->stream)
8025 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8026 const struct amdgpu_framebuffer *amdgpu_fb =
8027 to_amdgpu_framebuffer(new_plane_state->fb);
8028 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8029 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8030 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8031 uint64_t tiling_flags;
8033 new_plane_crtc = new_plane_state->crtc;
8034 new_dm_plane_state = to_dm_plane_state(new_plane_state);
8035 old_dm_plane_state = to_dm_plane_state(old_plane_state);
8037 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8040 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8041 update_type = UPDATE_TYPE_FULL;
8045 if (crtc != new_plane_crtc)
8048 bundle->surface_updates[num_plane].surface =
8049 new_dm_plane_state->dc_state;
8051 if (new_crtc_state->mode_changed) {
8052 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8053 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8056 if (new_crtc_state->color_mgmt_changed) {
8057 bundle->surface_updates[num_plane].gamma =
8058 new_dm_plane_state->dc_state->gamma_correction;
8059 bundle->surface_updates[num_plane].in_transfer_func =
8060 new_dm_plane_state->dc_state->in_transfer_func;
8061 bundle->stream_update.gamut_remap =
8062 &new_dm_crtc_state->stream->gamut_remap_matrix;
8063 bundle->stream_update.output_csc_transform =
8064 &new_dm_crtc_state->stream->csc_color_matrix;
8065 bundle->stream_update.out_transfer_func =
8066 new_dm_crtc_state->stream->out_transfer_func;
8069 ret = fill_dc_scaling_info(new_plane_state,
8074 bundle->surface_updates[num_plane].scaling_info = scaling_info;
8077 ret = get_fb_info(amdgpu_fb, &tiling_flags);
8081 ret = fill_dc_plane_info_and_addr(
8082 dm->adev, new_plane_state, tiling_flags,
8084 &flip_addr->address);
8088 bundle->surface_updates[num_plane].plane_info = plane_info;
8089 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8098 ret = dm_atomic_get_state(state, &dm_state);
8102 old_dm_state = dm_atomic_get_old_state(state);
8103 if (!old_dm_state) {
8108 status = dc_stream_get_status_from_state(old_dm_state->context,
8109 new_dm_crtc_state->stream);
8110 bundle->stream_update.stream = new_dm_crtc_state->stream;
8112 * TODO: DC modifies the surface during this call so we need
8113 * to lock here - find a way to do this without locking.
8115 mutex_lock(&dm->dc_lock);
8116 update_type = dc_check_update_surfaces_for_stream(
8117 dc, bundle->surface_updates, num_plane,
8118 &bundle->stream_update, status);
8119 mutex_unlock(&dm->dc_lock);
8121 if (update_type > UPDATE_TYPE_MED) {
8122 update_type = UPDATE_TYPE_FULL;
8130 *out_type = update_type;
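/*
 * Update types are ordered FAST < MED < FULL. The clamp above folds
 * anything stronger than MED into FULL, so callers of this function
 * only ever see FAST, MED or FULL.
 */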
8134 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8136 struct drm_connector *connector;
8137 struct drm_connector_state *conn_state;
8138 struct amdgpu_dm_connector *aconnector = NULL;
8140 for_each_new_connector_in_state(state, connector, conn_state, i) {
8141 if (conn_state->crtc != crtc)
8144 aconnector = to_amdgpu_dm_connector(connector);
8145 if (!aconnector->port || !aconnector->mst_port)
8154 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8158 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8159 * @dev: The DRM device
8160 * @state: The atomic state to commit
8162 * Validate that the given atomic state is programmable by DC into hardware.
8163 * This involves constructing a &struct dc_state reflecting the new hardware
8164 * state we wish to commit, then querying DC to see if it is programmable. It's
8165 * important not to modify the existing DC state. Otherwise, atomic_check
8166 * may unexpectedly commit hardware changes.
8168 * When validating the DC state, it's important that the right locks are
8169 * acquired. For the full-update case, which removes/adds/updates streams on one
8170 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
8171 * that any such full-update commit will wait for the completion of any outstanding
8172 * flips using DRM's synchronization events. See
8173 * dm_determine_update_type_for_commit().
8175 * Note that DM adds the affected connectors for all CRTCs in state, even when that
8176 * might not seem necessary. This is because DC stream creation requires the
8177 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8178 * be possible but non-trivial - a possible TODO item.
8180 * Return: 0 on success, or a negative error code if validation failed.
8182 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8183 struct drm_atomic_state *state)
8185 struct amdgpu_device *adev = dev->dev_private;
8186 struct dm_atomic_state *dm_state = NULL;
8187 struct dc *dc = adev->dm.dc;
8188 struct drm_connector *connector;
8189 struct drm_connector_state *old_con_state, *new_con_state;
8190 struct drm_crtc *crtc;
8191 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8192 struct drm_plane *plane;
8193 struct drm_plane_state *old_plane_state, *new_plane_state;
8194 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8195 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8200 * This bool will be set to true for any modeset/reset
8201 * or plane update which implies a non-fast surface update.
8203 bool lock_and_validation_needed = false;
8205 ret = drm_atomic_helper_check_modeset(dev, state);
8209 if (adev->asic_type >= CHIP_NAVI10) {
8210 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8211 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8212 ret = add_affected_mst_dsc_crtcs(state, crtc);
8219 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8220 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8221 !new_crtc_state->color_mgmt_changed &&
8222 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8225 if (!new_crtc_state->enable)
8228 ret = drm_atomic_add_affected_connectors(state, crtc);
8232 ret = drm_atomic_add_affected_planes(state, crtc);
8238 * Add all primary and overlay planes on the CRTC to the state
8239 * whenever a plane is enabled to maintain correct z-ordering
8240 * and to enable fast surface updates.
8242 drm_for_each_crtc(crtc, dev) {
8243 bool modified = false;
8245 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8246 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8249 if (new_plane_state->crtc == crtc ||
8250 old_plane_state->crtc == crtc) {
8259 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8260 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8264 drm_atomic_get_plane_state(state, plane);
8266 if (IS_ERR(new_plane_state)) {
8267 ret = PTR_ERR(new_plane_state);
8273 /* Remove existing planes if they are modified */
8274 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8275 ret = dm_update_plane_state(dc, state, plane,
8279 &lock_and_validation_needed);
8284 /* Disable all crtcs which require disable */
8285 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8286 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8290 &lock_and_validation_needed);
8295 /* Enable all crtcs which require enable */
8296 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8297 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8301 &lock_and_validation_needed);
8306 /* Add new/modified planes */
8307 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8308 ret = dm_update_plane_state(dc, state, plane,
8312 &lock_and_validation_needed);
8317 /* Run this here since we want to validate the streams we created */
8318 ret = drm_atomic_helper_check_planes(dev, state);
8322 if (state->legacy_cursor_update) {
8324 * This is a fast cursor update coming from the plane update
8325 * helper, check if it can be done asynchronously for better
8328 state->async_update =
8329 !drm_atomic_helper_async_check(dev, state);
8332 * Skip the remaining global validation if this is an async
8333 * update. Cursor updates can be done without affecting
8334 * state or bandwidth calcs and this avoids the performance
8335 * penalty of locking the private state object and
8336 * allocating a new dc_state.
8338 if (state->async_update)
8342 /* Check scaling and underscan changes */
8343 /* TODO: Removed scaling changes validation due to inability to commit
8344 * a new stream into context w/o causing a full reset. Need to
8345 * decide how to handle this.
8347 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8348 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8349 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8350 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8352 /* Skip any modesets/resets */
8353 if (!acrtc || drm_atomic_crtc_needs_modeset(
8354 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8357 /* Skip anything that is not a scaling or underscan change */
8358 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8361 overall_update_type = UPDATE_TYPE_FULL;
8362 lock_and_validation_needed = true;
8365 ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8369 if (overall_update_type < update_type)
8370 overall_update_type = update_type;
8373 * lock_and_validation_needed was an old way to determine if we need to set
8374 * the global lock. Leaving it in to check if we broke any corner cases:
8375 * lock_and_validation_needed == true means UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8376 * lock_and_validation_needed == false means UPDATE_TYPE_FAST
8378 if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8379 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8381 if (overall_update_type > UPDATE_TYPE_FAST) {
8382 ret = dm_atomic_get_state(state, &dm_state);
8386 ret = do_aquire_global_lock(dev, state);
8390 #if defined(CONFIG_DRM_AMD_DC_DCN)
8391 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8394 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8400 * Perform validation of MST topology in the state:
8401 * We need to perform MST atomic check before calling
8402 * dc_validate_global_state(), or there is a chance
8403 * of getting stuck in an infinite loop and hanging eventually.
8405 ret = drm_dp_mst_atomic_check(state);
8409 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8415 * The commit is a fast update. Fast updates shouldn't change
8416 * the DC context or affect global validation, and they can have their
8417 * commit work done in parallel with other commits not touching
8418 * the same resource. If we have a new DC context as part of
8419 * the DM atomic state from validation, we need to free it and
8420 * retain the existing one instead.
8422 struct dm_atomic_state *new_dm_state, *old_dm_state;
8424 new_dm_state = dm_atomic_get_new_state(state);
8425 old_dm_state = dm_atomic_get_old_state(state);
8427 if (new_dm_state && old_dm_state) {
8428 if (new_dm_state->context)
8429 dc_release_state(new_dm_state->context);
8431 new_dm_state->context = old_dm_state->context;
8433 if (old_dm_state->context)
8434 dc_retain_state(old_dm_state->context);

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
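
/*
 * is_dp_capable_without_timing_msa() - check MSA timing tolerance
 * @dc: DC instance
 * @amdgpu_dm_connector: connector to check
 *
 * Reads DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and tests the
 * DP_MSA_TIMING_PAR_IGNORED bit, i.e. whether the sink can render the
 * stream from the actual link timing rather than the MSA parameters.
 * Sinks that ignore MSA timing are candidates for FreeSync.
 */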
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data)))
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;

	return capable;
}
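
/*
 * amdgpu_dm_update_freesync_caps() - update FreeSync/VRR state of a connector
 * @connector: connector to update
 * @edid: EDID to parse, or NULL when the display is disconnected
 *
 * Parses the EDID detailed monitor range descriptor for the supported
 * vertical refresh range and marks the connector as VRR capable when
 * that range spans more than 10 Hz.
 */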
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * The EDID is non-NULL here, so restrict FreeSync to DP and eDP
	 * sinks only.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
	    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc,
					amdgpu_dm_connector);
	}

	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;

			/*
			 * Check if the monitor has continuous frequency mode.
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;

			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		/*
		 * Treat the sink as FreeSync capable only when the reported
		 * refresh range is wider than 10 Hz.
		 */
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
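
/*
 * amdgpu_dm_set_psr_caps() - read PSR capability of the eDP sink
 * @link: link state
 *
 * Reads DP_PSR_SUPPORT from the sink's DPCD and caches the result in
 * link->psr_feature_enabled.
 */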
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->psr_feature_enabled = dpcd_data[0] ? true : false;
		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	struct dc *dc = NULL;
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;
	dc = link->ctx->dc;

	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Number of static frames before generating an interrupt to enter
	 * PSR; initialized to a fail-safe of 2 static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);
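
	/*
	 * For example, a standard 1080p60 CEA-861 timing has
	 * pix_clk_100hz = 1485000, v_total = 1125 and h_total = 2200,
	 * giving 148500000 / 1125 / 2200 = 60 Hz.
	 */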

	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
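
	/*
	 * E.g. at 60 Hz, frame_time_microsec = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 frames (~33 ms >= 30 ms).
	 */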

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}