drm: Fix HDCP failures when SRM fw is missing
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57
58 #include "ivsrcid/ivsrcid_vislands30.h"
59
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97
98 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100
101 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
109
110 /**
111  * DOC: overview
112  *
113  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
114  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
115  * requests into DC requests, and DC responses into DRM responses.
116  *
117  * The root control structure is &struct amdgpu_display_manager.
118  */
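
/*
 * A rough, illustrative sketch of how an atomic modeset request flows
 * through dm (the hooks named here are forward-declared just below):
 *
 *   userspace atomic commit
 *     -> amdgpu_dm_atomic_check()        validate the new state against DC
 *     -> amdgpu_dm_atomic_commit()       hand off to the DRM atomic helpers
 *     -> amdgpu_dm_atomic_commit_tail()  program DC streams and planes
 */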
119
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123
124 /*
125  * initializes drm_device display related structures, based on the information
126  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
127  * drm_encoder, drm_mode_config
128  *
129  * Returns 0 on success
130  */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136                                 struct drm_plane *plane,
137                                 unsigned long possible_crtcs,
138                                 const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140                                struct drm_plane *plane,
141                                uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
144                                     uint32_t link_index,
145                                     struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147                                   struct amdgpu_encoder *aencoder,
148                                   uint32_t link_index);
149
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
151
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153                                    struct drm_atomic_state *state,
154                                    bool nonblock);
155
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
157
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159                                   struct drm_atomic_state *state);
160
161 static void handle_cursor_update(struct drm_plane *plane,
162                                  struct drm_plane_state *old_plane_state);
163
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
168
169
170 /*
171  * dm_vblank_get_counter
172  *
173  * @brief
174  * Get counter for number of vertical blanks
175  *
176  * @param
177  * struct amdgpu_device *adev - [in] desired amdgpu device
178  * int crtc - [in] which CRTC to get the counter from
179  *
180  * @return
181  * Counter for vertical blanks
182  */
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
184 {
185         if (crtc >= adev->mode_info.num_crtc)
186                 return 0;
187         else {
188                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
190                                 acrtc->base.state);
191
192
193                 if (acrtc_state->stream == NULL) {
194                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
195                                   crtc);
196                         return 0;
197                 }
198
199                 return dc_stream_get_vblank_counter(acrtc_state->stream);
200         }
201 }
202
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204                                   u32 *vbl, u32 *position)
205 {
206         uint32_t v_blank_start, v_blank_end, h_position, v_position;
207
208         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
209                 return -EINVAL;
210         else {
211                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
213                                                 acrtc->base.state);
214
215                 if (acrtc_state->stream == NULL) {
216                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
217                                   crtc);
218                         return 0;
219                 }
220
221                 /*
222                  * TODO rework base driver to use values directly.
223                  * for now parse it back into reg-format
224                  */
225                 dc_stream_get_scanoutpos(acrtc_state->stream,
226                                          &v_blank_start,
227                                          &v_blank_end,
228                                          &h_position,
229                                          &v_position);
230
231                 *position = v_position | (h_position << 16);
232                 *vbl = v_blank_start | (v_blank_end << 16);
233         }
234
235         return 0;
236 }
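
/*
 * A minimal sketch of how a caller could unpack the reg-format values
 * produced above (illustrative only, not an existing helper):
 *
 *   u32 vbl, pos;
 *
 *   if (!dm_crtc_get_scanoutpos(adev, crtc, &vbl, &pos)) {
 *           u32 v_position    = pos & 0xffff;
 *           u32 h_position    = pos >> 16;
 *           u32 v_blank_start = vbl & 0xffff;
 *           u32 v_blank_end   = vbl >> 16;
 *   }
 */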
237
238 static bool dm_is_idle(void *handle)
239 {
240         /* XXX todo */
241         return true;
242 }
243
244 static int dm_wait_for_idle(void *handle)
245 {
246         /* XXX todo */
247         return 0;
248 }
249
250 static bool dm_check_soft_reset(void *handle)
251 {
252         return false;
253 }
254
255 static int dm_soft_reset(void *handle)
256 {
257         /* XXX todo */
258         return 0;
259 }
260
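/**
 * get_crtc_by_otg_inst() - Look up the CRTC for an OTG instance
 * @adev: amdgpu device pointer
 * @otg_inst: OTG (output timing generator) instance from the IRQ source
 *
 * Walks the mode_config crtc list for a matching otg_inst. An instance of
 * -1 is unexpected: it warns and falls back to the first CRTC.
 */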
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
263                      int otg_inst)
264 {
265         struct drm_device *dev = adev->ddev;
266         struct drm_crtc *crtc;
267         struct amdgpu_crtc *amdgpu_crtc;
268
269         if (otg_inst == -1) {
270                 WARN_ON(1);
271                 return adev->mode_info.crtcs[0];
272         }
273
274         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275                 amdgpu_crtc = to_amdgpu_crtc(crtc);
276
277                 if (amdgpu_crtc->otg_inst == otg_inst)
278                         return amdgpu_crtc;
279         }
280
281         return NULL;
282 }
283
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
285 {
286         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
288 }
289
290 /**
291  * dm_pflip_high_irq() - Handle pageflip interrupt
292  * @interrupt_params: interrupt parameters
293  *
294  * Handles the pageflip interrupt by notifying all interested parties
295  * that the pageflip has been completed.
296  */
297 static void dm_pflip_high_irq(void *interrupt_params)
298 {
299         struct amdgpu_crtc *amdgpu_crtc;
300         struct common_irq_params *irq_params = interrupt_params;
301         struct amdgpu_device *adev = irq_params->adev;
302         unsigned long flags;
303         struct drm_pending_vblank_event *e;
304         struct dm_crtc_state *acrtc_state;
305         uint32_t vpos, hpos, v_blank_start, v_blank_end;
306         bool vrr_active;
307
308         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
309
310         /* IRQ could occur when in initial stage */
311         /* TODO work and BO cleanup */
312         if (amdgpu_crtc == NULL) {
313                 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
314                 return;
315         }
316
317         spin_lock_irqsave(&adev->ddev->event_lock, flags);
318
319         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
320                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
321                                                  amdgpu_crtc->pflip_status,
322                                                  AMDGPU_FLIP_SUBMITTED,
323                                                  amdgpu_crtc->crtc_id,
324                                                  amdgpu_crtc);
325                 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
326                 return;
327         }
328
329         /* page flip completed. */
330         e = amdgpu_crtc->event;
331         amdgpu_crtc->event = NULL;
332
333         if (!e)
334                 WARN_ON(1);
335
336         acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
337         vrr_active = amdgpu_dm_vrr_active(acrtc_state);
338
339         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
340         if (!vrr_active ||
341             !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
342                                       &v_blank_end, &hpos, &vpos) ||
343             (vpos < v_blank_start)) {
344                 /* Update to correct count and vblank timestamp if racing with
345                  * vblank irq. This also updates to the correct vblank timestamp
346                  * even in VRR mode, as scanout is past the front-porch atm.
347                  */
348                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
349
350                 /* Wake up userspace by sending the pageflip event with proper
351                  * count and timestamp of vblank of flip completion.
352                  */
353                 if (e) {
354                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
355
356                         /* Event sent, so done with vblank for this flip */
357                         drm_crtc_vblank_put(&amdgpu_crtc->base);
358                 }
359         } else if (e) {
360                 /* VRR active and inside front-porch: vblank count and
361                  * timestamp for pageflip event will only be up to date after
362                  * drm_crtc_handle_vblank() has been executed from late vblank
363                  * irq handler after start of back-porch (vline 0). We queue the
364                  * pageflip event for send-out by drm_crtc_handle_vblank() with
365                  * updated timestamp and count, once it runs after us.
366                  *
367                  * We need to open-code this instead of using the helper
368                  * drm_crtc_arm_vblank_event(), as that helper would
369                  * call drm_crtc_accurate_vblank_count(), which we must
370                  * not call in VRR mode while we are in front-porch!
371                  */
372
373                 /* sequence will be replaced by real count during send-out. */
374                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
375                 e->pipe = amdgpu_crtc->crtc_id;
376
377                 list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
378                 e = NULL;
379         }
380
381         /* Keep track of the vblank of this flip for flip throttling. We use
382          * the cooked hw counter, as it is incremented at the start of this
383          * vblank of pageflip completion, so last_flip_vblank is the forbidden
384          * count for queueing new pageflips if vsync + VRR is enabled.
385          */
386         amdgpu_crtc->last_flip_vblank =
387                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
388
389         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
390         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
391
392         DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393                          amdgpu_crtc->crtc_id, amdgpu_crtc,
394                          vrr_active, (int) !e);
395 }
396
397 static void dm_vupdate_high_irq(void *interrupt_params)
398 {
399         struct common_irq_params *irq_params = interrupt_params;
400         struct amdgpu_device *adev = irq_params->adev;
401         struct amdgpu_crtc *acrtc;
402         struct dm_crtc_state *acrtc_state;
403         unsigned long flags;
404
405         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
406
407         if (acrtc) {
408                 acrtc_state = to_dm_crtc_state(acrtc->base.state);
409
410                 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
411                               acrtc->crtc_id,
412                               amdgpu_dm_vrr_active(acrtc_state));
413
414                 /* Core vblank handling is done here after the end of front-porch
415                  * in vrr mode, as vblank timestamping only gives valid results
416                  * once we are past the front-porch. This also delivers any
417                  * page-flip completion events that were queued to us if a
418                  * pageflip happened inside front-porch.
419                  */
420                 if (amdgpu_dm_vrr_active(acrtc_state)) {
421                         drm_crtc_handle_vblank(&acrtc->base);
422
423                         /* BTR processing for pre-DCE12 ASICs */
424                         if (acrtc_state->stream &&
425                             adev->family < AMDGPU_FAMILY_AI) {
426                                 spin_lock_irqsave(&adev->ddev->event_lock, flags);
427                                 mod_freesync_handle_v_update(
428                                     adev->dm.freesync_module,
429                                     acrtc_state->stream,
430                                     &acrtc_state->vrr_params);
431
432                                 dc_stream_adjust_vmin_vmax(
433                                     adev->dm.dc,
434                                     acrtc_state->stream,
435                                     &acrtc_state->vrr_params.adjust);
436                                 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
437                         }
438                 }
439         }
440 }
441
442 /**
443  * dm_crtc_high_irq() - Handles CRTC interrupt
444  * @interrupt_params: interrupt parameters
445  *
446  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
447  * event handler.
448  */
449 static void dm_crtc_high_irq(void *interrupt_params)
450 {
451         struct common_irq_params *irq_params = interrupt_params;
452         struct amdgpu_device *adev = irq_params->adev;
453         struct amdgpu_crtc *acrtc;
454         struct dm_crtc_state *acrtc_state;
455         unsigned long flags;
456
457         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
458
459         if (acrtc) {
460                 acrtc_state = to_dm_crtc_state(acrtc->base.state);
461
462                 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
463                               acrtc->crtc_id,
464                               amdgpu_dm_vrr_active(acrtc_state));
465
466                 /* Core vblank handling at the start of front-porch is only
467                  * possible in non-vrr mode, as only there does vblank
468                  * timestamping give valid results while in front-porch.
469                  * Otherwise defer it to dm_vupdate_high_irq after front-porch.
470                  */
471                 if (!amdgpu_dm_vrr_active(acrtc_state))
472                         drm_crtc_handle_vblank(&acrtc->base);
473
474                 /* The following must happen at the start of vblank, for crc
475                  * computation and below-the-range btr support in vrr mode.
476                  */
477                 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
478
479                 if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
480                     acrtc_state->vrr_params.supported &&
481                     acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
482                         spin_lock_irqsave(&adev->ddev->event_lock, flags);
483                         mod_freesync_handle_v_update(
484                                 adev->dm.freesync_module,
485                                 acrtc_state->stream,
486                                 &acrtc_state->vrr_params);
487
488                         dc_stream_adjust_vmin_vmax(
489                                 adev->dm.dc,
490                                 acrtc_state->stream,
491                                 &acrtc_state->vrr_params.adjust);
492                         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
493                 }
494         }
495 }
496
497 #if defined(CONFIG_DRM_AMD_DC_DCN)
498 /**
499  * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
500  * @interrupt_params: interrupt parameters
501  *
502  * Notify DRM's vblank event handler at VSTARTUP
503  *
504  * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
505  * * We are close enough to VUPDATE - the point of no return for hw
506  * * We are in the fixed portion of variable front porch when vrr is enabled
507  * * We are before VUPDATE, where double-buffered vrr registers are swapped
508  *
509  * It is therefore the correct place to signal vblank, send user flip events,
510  * and update VRR.
511  */
512 static void dm_dcn_crtc_high_irq(void *interrupt_params)
513 {
514         struct common_irq_params *irq_params = interrupt_params;
515         struct amdgpu_device *adev = irq_params->adev;
516         struct amdgpu_crtc *acrtc;
517         struct dm_crtc_state *acrtc_state;
518         unsigned long flags;
519
520         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
521
522         if (!acrtc)
523                 return;
524
525         acrtc_state = to_dm_crtc_state(acrtc->base.state);
526
527         DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
528                          amdgpu_dm_vrr_active(acrtc_state),
529                          acrtc_state->active_planes);
530
531         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
532         drm_crtc_handle_vblank(&acrtc->base);
533
534         spin_lock_irqsave(&adev->ddev->event_lock, flags);
535
536         if (acrtc_state->vrr_params.supported &&
537             acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
538                 mod_freesync_handle_v_update(
539                 adev->dm.freesync_module,
540                 acrtc_state->stream,
541                 &acrtc_state->vrr_params);
542
543                 dc_stream_adjust_vmin_vmax(
544                         adev->dm.dc,
545                         acrtc_state->stream,
546                         &acrtc_state->vrr_params.adjust);
547         }
548
549         /*
550          * If there aren't any active_planes then the DCN HUBP may be clock-gated.
551          * In that case, pageflip completion interrupts won't fire and pageflip
552          * completion events won't get delivered. Prevent this by sending
553          * pending pageflip events from here if a flip is still pending.
554          *
555          * If any planes are enabled, use dm_pflip_high_irq() instead, to
556          * avoid race conditions between flip programming and completion,
557          * which could cause too early flip completion events.
558          */
559         if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
560             acrtc_state->active_planes == 0) {
561                 if (acrtc->event) {
562                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
563                         acrtc->event = NULL;
564                         drm_crtc_vblank_put(&acrtc->base);
565                 }
566                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
567         }
568
569         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
570 }
571 #endif
572
573 static int dm_set_clockgating_state(void *handle,
574                   enum amd_clockgating_state state)
575 {
576         return 0;
577 }
578
579 static int dm_set_powergating_state(void *handle,
580                   enum amd_powergating_state state)
581 {
582         return 0;
583 }
584
585 /* Prototypes of private functions */
586 static int dm_early_init(void *handle);
587
588 /* Allocate memory for FBC compressed data */
589 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
590 {
591         struct drm_device *dev = connector->dev;
592         struct amdgpu_device *adev = dev->dev_private;
593         struct dm_comressor_info *compressor = &adev->dm.compressor;
594         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
595         struct drm_display_mode *mode;
596         unsigned long max_size = 0;
597
598         if (adev->dm.dc->fbc_compressor == NULL)
599                 return;
600
601         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
602                 return;
603
604         if (compressor->bo_ptr)
605                 return;
606
607
608         list_for_each_entry(mode, &connector->modes, head) {
609                 if (max_size < mode->htotal * mode->vtotal)
610                         max_size = mode->htotal * mode->vtotal;
611         }
612
613         if (max_size) {
614                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
615                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
616                             &compressor->gpu_addr, &compressor->cpu_addr);
617
618                 if (r)
619                         DRM_ERROR("DM: Failed to initialize FBC\n");
620                 else {
621                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
622                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
623                 }
624
625         }
626
627 }
628
629 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
630                                           int pipe, bool *enabled,
631                                           unsigned char *buf, int max_bytes)
632 {
633         struct drm_device *dev = dev_get_drvdata(kdev);
634         struct amdgpu_device *adev = dev->dev_private;
635         struct drm_connector *connector;
636         struct drm_connector_list_iter conn_iter;
637         struct amdgpu_dm_connector *aconnector;
638         int ret = 0;
639
640         *enabled = false;
641
642         mutex_lock(&adev->dm.audio_lock);
643
644         drm_connector_list_iter_begin(dev, &conn_iter);
645         drm_for_each_connector_iter(connector, &conn_iter) {
646                 aconnector = to_amdgpu_dm_connector(connector);
647                 if (aconnector->audio_inst != port)
648                         continue;
649
650                 *enabled = true;
651                 ret = drm_eld_size(connector->eld);
652                 memcpy(buf, connector->eld, min(max_bytes, ret));
653
654                 break;
655         }
656         drm_connector_list_iter_end(&conn_iter);
657
658         mutex_unlock(&adev->dm.audio_lock);
659
660         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
661
662         return ret;
663 }
664
665 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
666         .get_eld = amdgpu_dm_audio_component_get_eld,
667 };
668
669 static int amdgpu_dm_audio_component_bind(struct device *kdev,
670                                        struct device *hda_kdev, void *data)
671 {
672         struct drm_device *dev = dev_get_drvdata(kdev);
673         struct amdgpu_device *adev = dev->dev_private;
674         struct drm_audio_component *acomp = data;
675
676         acomp->ops = &amdgpu_dm_audio_component_ops;
677         acomp->dev = kdev;
678         adev->dm.audio_component = acomp;
679
680         return 0;
681 }
682
683 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
684                                           struct device *hda_kdev, void *data)
685 {
686         struct drm_device *dev = dev_get_drvdata(kdev);
687         struct amdgpu_device *adev = dev->dev_private;
688         struct drm_audio_component *acomp = data;
689
690         acomp->ops = NULL;
691         acomp->dev = NULL;
692         adev->dm.audio_component = NULL;
693 }
694
695 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
696         .bind   = amdgpu_dm_audio_component_bind,
697         .unbind = amdgpu_dm_audio_component_unbind,
698 };
699
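/**
 * amdgpu_dm_audio_init() - Register DM as a DRM audio component
 * @adev: amdgpu device pointer
 *
 * Marks all audio pins as disconnected and registers the component ops so
 * that the HDA driver can fetch ELDs through
 * amdgpu_dm_audio_component_get_eld(). Gated on the amdgpu_audio module
 * option.
 */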
700 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
701 {
702         int i, ret;
703
704         if (!amdgpu_audio)
705                 return 0;
706
707         adev->mode_info.audio.enabled = true;
708
709         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
710
711         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
712                 adev->mode_info.audio.pin[i].channels = -1;
713                 adev->mode_info.audio.pin[i].rate = -1;
714                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
715                 adev->mode_info.audio.pin[i].status_bits = 0;
716                 adev->mode_info.audio.pin[i].category_code = 0;
717                 adev->mode_info.audio.pin[i].connected = false;
718                 adev->mode_info.audio.pin[i].id =
719                         adev->dm.dc->res_pool->audios[i]->inst;
720                 adev->mode_info.audio.pin[i].offset = 0;
721         }
722
723         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
724         if (ret < 0)
725                 return ret;
726
727         adev->dm.audio_registered = true;
728
729         return 0;
730 }
731
732 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
733 {
734         if (!amdgpu_audio)
735                 return;
736
737         if (!adev->mode_info.audio.enabled)
738                 return;
739
740         if (adev->dm.audio_registered) {
741                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
742                 adev->dm.audio_registered = false;
743         }
744
745         /* TODO: Disable audio? */
746
747         adev->mode_info.audio.enabled = false;
748 }
749
750 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
751 {
752         struct drm_audio_component *acomp = adev->dm.audio_component;
753
754         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
755                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
756
757                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
758                                                  pin, -1);
759         }
760 }
761
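/**
 * dm_dmub_hw_init() - Initialize DMUB hardware
 * @adev: amdgpu device pointer
 *
 * Copies the DMUB firmware sections and the VBIOS into their framebuffer
 * windows, clears the mailbox, tracebuffer and fw-state windows, then hands
 * everything to dmub_srv_hw_init() and waits for the firmware to auto-load.
 * Returns 0 when DMUB is simply unsupported on the ASIC, negative errno on
 * real failures.
 */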
762 static int dm_dmub_hw_init(struct amdgpu_device *adev)
763 {
764         const struct dmcub_firmware_header_v1_0 *hdr;
765         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
766         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
767         const struct firmware *dmub_fw = adev->dm.dmub_fw;
768         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
769         struct abm *abm = adev->dm.dc->res_pool->abm;
770         struct dmub_srv_hw_params hw_params;
771         enum dmub_status status;
772         const unsigned char *fw_inst_const, *fw_bss_data;
773         uint32_t i, fw_inst_const_size, fw_bss_data_size;
774         bool has_hw_support;
775
776         if (!dmub_srv)
777                 /* DMUB isn't supported on the ASIC. */
778                 return 0;
779
780         if (!fb_info) {
781                 DRM_ERROR("No framebuffer info for DMUB service.\n");
782                 return -EINVAL;
783         }
784
785         if (!dmub_fw) {
786                 /* Firmware required for DMUB support. */
787                 DRM_ERROR("No firmware provided for DMUB.\n");
788                 return -EINVAL;
789         }
790
791         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
792         if (status != DMUB_STATUS_OK) {
793                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
794                 return -EINVAL;
795         }
796
797         if (!has_hw_support) {
798                 DRM_INFO("DMUB unsupported on ASIC\n");
799                 return 0;
800         }
801
802         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
803
804         fw_inst_const = dmub_fw->data +
805                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806                         PSP_HEADER_BYTES;
807
808         fw_bss_data = dmub_fw->data +
809                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
810                       le32_to_cpu(hdr->inst_const_bytes);
811
812         /* Copy firmware and bios info into FB memory. */
813         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
814                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
815
816         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
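
        /*
         * Layout of the DMUB firmware blob as implied by the offsets and
         * sizes above (not to scale):
         *
         *   ucode_array_offset_bytes
         *   +----------------------+
         *   | PSP header           | PSP_HEADER_BYTES
         *   | inst_const payload   | inst_const_bytes - header - footer
         *   | PSP footer           | PSP_FOOTER_BYTES
         *   | bss/data             | bss_data_bytes
         *   +----------------------+
         */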
817
818         /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
819          * amdgpu_ucode_init_single_fw will load the dmub firmware's
820          * fw_inst_const part into CW0; otherwise, the firmware backdoor
821          * load is done here in dm_dmub_hw_init.
822          */
823         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
824                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
825                                 fw_inst_const_size);
826         }
827
828         memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
829                fw_bss_data_size);
830
831         /* Copy firmware bios info into FB memory. */
832         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
833                adev->bios_size);
834
835         /* Reset regions that need to be reset. */
836         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
837         fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
838
839         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
840                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
841
842         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
843                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
844
845         /* Initialize hardware. */
846         memset(&hw_params, 0, sizeof(hw_params));
847         hw_params.fb_base = adev->gmc.fb_start;
848         hw_params.fb_offset = adev->gmc.aper_base;
849
850         /* backdoor load firmware and trigger dmub running */
851         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
852                 hw_params.load_inst_const = true;
853
854         if (dmcu)
855                 hw_params.psp_version = dmcu->psp_version;
856
857         for (i = 0; i < fb_info->num_fb; ++i)
858                 hw_params.fb[i] = &fb_info->fb[i];
859
860         status = dmub_srv_hw_init(dmub_srv, &hw_params);
861         if (status != DMUB_STATUS_OK) {
862                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
863                 return -EINVAL;
864         }
865
866         /* Wait for firmware load to finish. */
867         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
868         if (status != DMUB_STATUS_OK)
869                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
870
871         /* Init DMCU and ABM if available. */
872         if (dmcu && abm) {
873                 dmcu->funcs->dmcu_init(dmcu);
874                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
875         }
876
877         adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
878         if (!adev->dm.dc->ctx->dmub_srv) {
879                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
880                 return -ENOMEM;
881         }
882
883         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
884                  adev->dm.dmcub_fw_version);
885
886         return 0;
887 }
888
889 static int amdgpu_dm_init(struct amdgpu_device *adev)
890 {
891         struct dc_init_data init_data;
892 #ifdef CONFIG_DRM_AMD_DC_HDCP
893         struct dc_callback_init init_params;
894 #endif
895         int r;
896
897         adev->dm.ddev = adev->ddev;
898         adev->dm.adev = adev;
899
900         /* Zero all the fields */
901         memset(&init_data, 0, sizeof(init_data));
902 #ifdef CONFIG_DRM_AMD_DC_HDCP
903         memset(&init_params, 0, sizeof(init_params));
904 #endif
905
906         mutex_init(&adev->dm.dc_lock);
907         mutex_init(&adev->dm.audio_lock);
908
909         if (amdgpu_dm_irq_init(adev)) {
910                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
911                 goto error;
912         }
913
914         init_data.asic_id.chip_family = adev->family;
915
916         init_data.asic_id.pci_revision_id = adev->pdev->revision;
917         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
918
919         init_data.asic_id.vram_width = adev->gmc.vram_width;
920         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
921         init_data.asic_id.atombios_base_address =
922                 adev->mode_info.atom_context->bios;
923
924         init_data.driver = adev;
925
926         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
927
928         if (!adev->dm.cgs_device) {
929                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
930                 goto error;
931         }
932
933         init_data.cgs_device = adev->dm.cgs_device;
934
935         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
936
937         switch (adev->asic_type) {
938         case CHIP_CARRIZO:
939         case CHIP_STONEY:
940         case CHIP_RAVEN:
941         case CHIP_RENOIR:
942                 init_data.flags.gpu_vm_support = true;
943                 break;
944         default:
945                 break;
946         }
947
948         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
949                 init_data.flags.fbc_support = true;
950
951         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
952                 init_data.flags.multi_mon_pp_mclk_switch = true;
953
954         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
955                 init_data.flags.disable_fractional_pwm = true;
956
957         init_data.flags.power_down_display_on_boot = true;
958
959         init_data.soc_bounding_box = adev->dm.soc_bounding_box;
960
961         /* Display Core create. */
962         adev->dm.dc = dc_create(&init_data);
963
964         if (adev->dm.dc) {
965                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
966         } else {
967                 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
968                 goto error;
969         }
970
971         r = dm_dmub_hw_init(adev);
972         if (r) {
973                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
974                 goto error;
975         }
976
977         dc_hardware_init(adev->dm.dc);
978
979         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
980         if (!adev->dm.freesync_module) {
981                 DRM_ERROR(
982                 "amdgpu: failed to initialize freesync_module.\n");
983         } else
984                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
985                                 adev->dm.freesync_module);
986
987         amdgpu_dm_init_color_mod();
988
989 #ifdef CONFIG_DRM_AMD_DC_HDCP
990         if (adev->asic_type >= CHIP_RAVEN) {
991                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
992
993                 if (!adev->dm.hdcp_workqueue)
994                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
995                 else
996                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
997
998                 dc_init_callbacks(adev->dm.dc, &init_params);
999         }
1000 #endif
1001         if (amdgpu_dm_initialize_drm_device(adev)) {
1002                 DRM_ERROR(
1003                 "amdgpu: failed to initialize sw for display support.\n");
1004                 goto error;
1005         }
1006
1007         /* Update the actual used number of crtc */
1008         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1009
1010         /* TODO: Add_display_info? */
1011
1012         /* TODO use dynamic cursor width */
1013         adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1014         adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1015
1016         if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
1017                 DRM_ERROR(
1018                 "amdgpu: failed to initialize vblank support.\n");
1019                 goto error;
1020         }
1021
1022         DRM_DEBUG_DRIVER("KMS initialized.\n");
1023
1024         return 0;
1025 error:
1026         amdgpu_dm_fini(adev);
1027
1028         return -EINVAL;
1029 }
1030
1031 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1032 {
1033         amdgpu_dm_audio_fini(adev);
1034
1035         amdgpu_dm_destroy_drm_device(&adev->dm);
1036
1037 #ifdef CONFIG_DRM_AMD_DC_HDCP
1038         if (adev->dm.hdcp_workqueue) {
1039                 hdcp_destroy(adev->dm.hdcp_workqueue);
1040                 adev->dm.hdcp_workqueue = NULL;
1041         }
1042
1043         if (adev->dm.dc)
1044                 dc_deinit_callbacks(adev->dm.dc);
1045 #endif
1046         if (adev->dm.dc->ctx->dmub_srv) {
1047                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1048                 adev->dm.dc->ctx->dmub_srv = NULL;
1049         }
1050
1051         if (adev->dm.dmub_bo)
1052                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1053                                       &adev->dm.dmub_bo_gpu_addr,
1054                                       &adev->dm.dmub_bo_cpu_addr);
1055
1056         /* DC Destroy TODO: Replace destroy DAL */
1057         if (adev->dm.dc)
1058                 dc_destroy(&adev->dm.dc);
1059         /*
1060          * TODO: pageflip, vblank interrupt
1061          *
1062          * amdgpu_dm_irq_fini(adev);
1063          */
1064
1065         if (adev->dm.cgs_device) {
1066                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1067                 adev->dm.cgs_device = NULL;
1068         }
1069         if (adev->dm.freesync_module) {
1070                 mod_freesync_destroy(adev->dm.freesync_module);
1071                 adev->dm.freesync_module = NULL;
1072         }
1073
1074         mutex_destroy(&adev->dm.audio_lock);
1075         mutex_destroy(&adev->dm.dc_lock);
1076
1077         return;
1078 }
1079
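/**
 * load_dmcu_fw() - Request DMCU firmware and register it for PSP loading
 * @adev: amdgpu device pointer
 *
 * Only Raven-family parts (Picasso/Raven2) and Navi12 carry DMCU firmware;
 * all other ASICs return 0 immediately. A missing firmware file is also not
 * an error, as DMCU firmware is optional.
 */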
1080 static int load_dmcu_fw(struct amdgpu_device *adev)
1081 {
1082         const char *fw_name_dmcu = NULL;
1083         int r;
1084         const struct dmcu_firmware_header_v1_0 *hdr;
1085
1086         switch (adev->asic_type) {
1087         case CHIP_BONAIRE:
1088         case CHIP_HAWAII:
1089         case CHIP_KAVERI:
1090         case CHIP_KABINI:
1091         case CHIP_MULLINS:
1092         case CHIP_TONGA:
1093         case CHIP_FIJI:
1094         case CHIP_CARRIZO:
1095         case CHIP_STONEY:
1096         case CHIP_POLARIS11:
1097         case CHIP_POLARIS10:
1098         case CHIP_POLARIS12:
1099         case CHIP_VEGAM:
1100         case CHIP_VEGA10:
1101         case CHIP_VEGA12:
1102         case CHIP_VEGA20:
1103         case CHIP_NAVI10:
1104         case CHIP_NAVI14:
1105         case CHIP_RENOIR:
1106                 return 0;
1107         case CHIP_NAVI12:
1108                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1109                 break;
1110         case CHIP_RAVEN:
1111                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1112                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1113                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1114                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1115                 else
1116                         return 0;
1117                 break;
1118         default:
1119                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1120                 return -EINVAL;
1121         }
1122
1123         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1124                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1125                 return 0;
1126         }
1127
1128         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1129         if (r == -ENOENT) {
1130                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1131                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1132                 adev->dm.fw_dmcu = NULL;
1133                 return 0;
1134         }
1135         if (r) {
1136                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1137                         fw_name_dmcu);
1138                 return r;
1139         }
1140
1141         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1142         if (r) {
1143                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1144                         fw_name_dmcu);
1145                 release_firmware(adev->dm.fw_dmcu);
1146                 adev->dm.fw_dmcu = NULL;
1147                 return r;
1148         }
1149
1150         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1151         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1152         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1153         adev->firmware.fw_size +=
1154                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1155
1156         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1157         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1158         adev->firmware.fw_size +=
1159                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1160
1161         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1162
1163         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1164
1165         return 0;
1166 }
1167
1168 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1169 {
1170         struct amdgpu_device *adev = ctx;
1171
1172         return dm_read_reg(adev->dm.dc->ctx, address);
1173 }
1174
1175 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1176                                      uint32_t value)
1177 {
1178         struct amdgpu_device *adev = ctx;
1179
1180         return dm_write_reg(adev->dm.dc->ctx, address, value);
1181 }
1182
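/**
 * dm_dmub_sw_init() - Create the DMUB service and its memory regions
 * @adev: amdgpu device pointer
 *
 * Requests and validates the DMUB firmware, creates the dmub_srv instance,
 * computes the region layout from the firmware header and backs it with a
 * VRAM buffer (see the TODO below about moving this into GART). ASICs
 * without DMUB support return 0 without doing anything.
 */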
1183 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1184 {
1185         struct dmub_srv_create_params create_params;
1186         struct dmub_srv_region_params region_params;
1187         struct dmub_srv_region_info region_info;
1188         struct dmub_srv_fb_params fb_params;
1189         struct dmub_srv_fb_info *fb_info;
1190         struct dmub_srv *dmub_srv;
1191         const struct dmcub_firmware_header_v1_0 *hdr;
1192         const char *fw_name_dmub;
1193         enum dmub_asic dmub_asic;
1194         enum dmub_status status;
1195         int r;
1196
1197         switch (adev->asic_type) {
1198         case CHIP_RENOIR:
1199                 dmub_asic = DMUB_ASIC_DCN21;
1200                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1201                 break;
1202
1203         default:
1204                 /* ASIC doesn't support DMUB. */
1205                 return 0;
1206         }
1207
1208         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1209         if (r) {
1210                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1211                 return 0;
1212         }
1213
1214         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1215         if (r) {
1216                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1217                 return 0;
1218         }
1219
1220         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1221
1222         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1223                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1224                         AMDGPU_UCODE_ID_DMCUB;
1225                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1226                         adev->dm.dmub_fw;
1227                 adev->firmware.fw_size +=
1228                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1229
1230                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1231                          adev->dm.dmcub_fw_version);
1232         }
1233
1234         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1235
1236         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1237         dmub_srv = adev->dm.dmub_srv;
1238
1239         if (!dmub_srv) {
1240                 DRM_ERROR("Failed to allocate DMUB service!\n");
1241                 return -ENOMEM;
1242         }
1243
1244         memset(&create_params, 0, sizeof(create_params));
1245         create_params.user_ctx = adev;
1246         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1247         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1248         create_params.asic = dmub_asic;
1249
1250         /* Create the DMUB service. */
1251         status = dmub_srv_create(dmub_srv, &create_params);
1252         if (status != DMUB_STATUS_OK) {
1253                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1254                 return -EINVAL;
1255         }
1256
1257         /* Calculate the size of all the regions for the DMUB service. */
1258         memset(&region_params, 0, sizeof(region_params));
1259
1260         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1261                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1262         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1263         region_params.vbios_size = adev->bios_size;
1264         region_params.fw_bss_data =
1265                 adev->dm.dmub_fw->data +
1266                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1267                 le32_to_cpu(hdr->inst_const_bytes);
1268
1269         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1270                                            &region_info);
1271
1272         if (status != DMUB_STATUS_OK) {
1273                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1274                 return -EINVAL;
1275         }
1276
1277         /*
1278          * Allocate a framebuffer based on the total size of all the regions.
1279          * TODO: Move this into GART.
1280          */
1281         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1282                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1283                                     &adev->dm.dmub_bo_gpu_addr,
1284                                     &adev->dm.dmub_bo_cpu_addr);
1285         if (r)
1286                 return r;
1287
1288         /* Rebase the regions on the framebuffer address. */
1289         memset(&fb_params, 0, sizeof(fb_params));
1290         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1291         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1292         fb_params.region_info = &region_info;
1293
1294         adev->dm.dmub_fb_info =
1295                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1296         fb_info = adev->dm.dmub_fb_info;
1297
1298         if (!fb_info) {
1299                 DRM_ERROR(
1300                         "Failed to allocate framebuffer info for DMUB service!\n");
1301                 return -ENOMEM;
1302         }
1303
1304         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1305         if (status != DMUB_STATUS_OK) {
1306                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1307                 return -EINVAL;
1308         }
1309
1310         return 0;
1311 }
1312
1313 static int dm_sw_init(void *handle)
1314 {
1315         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1316         int r;
1317
1318         r = dm_dmub_sw_init(adev);
1319         if (r)
1320                 return r;
1321
1322         return load_dmcu_fw(adev);
1323 }
1324
1325 static int dm_sw_fini(void *handle)
1326 {
1327         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1328
1329         kfree(adev->dm.dmub_fb_info);
1330         adev->dm.dmub_fb_info = NULL;
1331
1332         if (adev->dm.dmub_srv) {
1333                 dmub_srv_destroy(adev->dm.dmub_srv);
1334                 adev->dm.dmub_srv = NULL;
1335         }
1336
1337         if (adev->dm.dmub_fw) {
1338                 release_firmware(adev->dm.dmub_fw);
1339                 adev->dm.dmub_fw = NULL;
1340         }
1341
1342         if (adev->dm.fw_dmcu) {
1343                 release_firmware(adev->dm.fw_dmcu);
1344                 adev->dm.fw_dmcu = NULL;
1345         }
1346
1347         return 0;
1348 }
1349
1350 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1351 {
1352         struct amdgpu_dm_connector *aconnector;
1353         struct drm_connector *connector;
1354         struct drm_connector_list_iter iter;
1355         int ret = 0;
1356
1357         drm_connector_list_iter_begin(dev, &iter);
1358         drm_for_each_connector_iter(connector, &iter) {
1359                 aconnector = to_amdgpu_dm_connector(connector);
1360                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1361                     aconnector->mst_mgr.aux) {
1362                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1363                                          aconnector,
1364                                          aconnector->base.base.id);
1365
1366                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1367                         if (ret < 0) {
1368                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1369                                 aconnector->dc_link->type =
1370                                         dc_connection_single;
1371                                 break;
1372                         }
1373                 }
1374         }
1375         drm_connector_list_iter_end(&iter);
1376
1377         return ret;
1378 }
1379
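/**
 * dm_late_init() - Late DM initialization
 * @handle: amdgpu device pointer (as void *)
 *
 * Programs the DMCU iram with ABM backlight-ramping parameters and a linear
 * backlight LUT (only on ASICs up to Raven for now, per the TODO below),
 * then kicks off MST link detection on all connectors.
 */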
1380 static int dm_late_init(void *handle)
1381 {
1382         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1383
1384         struct dmcu_iram_parameters params;
1385         unsigned int linear_lut[16];
1386         int i;
1387         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1388         bool ret = false;
1389
1390         for (i = 0; i < 16; i++)
1391                 linear_lut[i] = 0xFFFF * i / 15;
1392
1393         params.set = 0;
1394         params.backlight_ramping_start = 0xCCCC;
1395         params.backlight_ramping_reduction = 0xCCCCCCCC;
1396         params.backlight_lut_array_size = 16;
1397         params.backlight_lut_array = linear_lut;
1398
1399         /* Min backlight level after ABM reduction; don't allow below 1%.
1400          * 0xFFFF * 0.01 = 0x28F
1401          */
1402         params.min_abm_backlight = 0x28F;
1403
1404         /* todo will enable for navi10 */
1405         if (adev->asic_type <= CHIP_RAVEN) {
1406                 ret = dmcu_load_iram(dmcu, params);
1407
1408                 if (!ret)
1409                         return -EINVAL;
1410         }
1411
1412         return detect_mst_link_for_all_connectors(adev->ddev);
1413 }
1414
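/**
 * s3_handle_mst() - Suspend or resume MST topology managers across S3
 * @dev: drm device
 * @suspend: true when entering suspend, false when resuming
 *
 * On resume, any MST manager that fails to come back up is torn down and a
 * hotplug event is generated so userspace can re-probe the topology.
 */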
1415 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1416 {
1417         struct amdgpu_dm_connector *aconnector;
1418         struct drm_connector *connector;
1419         struct drm_connector_list_iter iter;
1420         struct drm_dp_mst_topology_mgr *mgr;
1421         int ret;
1422         bool need_hotplug = false;
1423
1424         drm_connector_list_iter_begin(dev, &iter);
1425         drm_for_each_connector_iter(connector, &iter) {
1426                 aconnector = to_amdgpu_dm_connector(connector);
1427                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1428                     aconnector->mst_port)
1429                         continue;
1430
1431                 mgr = &aconnector->mst_mgr;
1432
1433                 if (suspend) {
1434                         drm_dp_mst_topology_mgr_suspend(mgr);
1435                 } else {
1436                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1437                         if (ret < 0) {
1438                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1439                                 need_hotplug = true;
1440                         }
1441                 }
1442         }
1443         drm_connector_list_iter_end(&iter);
1444
1445         if (need_hotplug)
1446                 drm_kms_helper_hotplug_event(dev);
1447 }
1448
1449 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1450 {
1451         struct smu_context *smu = &adev->smu;
1452         int ret = 0;
1453
1454         if (!is_support_sw_smu(adev))
1455                 return 0;
1456
1457         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1458          * depends on the Windows driver dc implementation.
1459          * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1460          * should be passed to smu during boot up and resume from s3.
1461          * boot up: dc calculates dcn watermark clock settings within dc_create,
1462          * dcn20_resource_construct
1463          * then calls pplib functions below to pass the settings to smu:
1464          * smu_set_watermarks_for_clock_ranges
1465          * smu_set_watermarks_table
1466          * navi10_set_watermarks_table
1467          * smu_write_watermarks_table
1468          *
1469          * For Renoir, clock settings of dcn watermarks are also fixed values.
1470          * dc has implemented a different flow for the Windows driver:
1471          * dc_hardware_init / dc_set_power_state
1472          * dcn10_init_hw
1473          * notify_wm_ranges
1474          * set_wm_ranges
1475          * -- Linux
1476          * smu_set_watermarks_for_clock_ranges
1477          * renoir_set_watermarks_table
1478          * smu_write_watermarks_table
1479          *
1480          * For Linux,
1481          * dc_hardware_init -> amdgpu_dm_init
1482          * dc_set_power_state --> dm_resume
1483          *
1484          * Therefore, this function applies to navi10/12/14 but not Renoir.
1486          */
1487         switch (adev->asic_type) {
1488         case CHIP_NAVI10:
1489         case CHIP_NAVI14:
1490         case CHIP_NAVI12:
1491                 break;
1492         default:
1493                 return 0;
1494         }
1495
1496         mutex_lock(&smu->mutex);
1497
1498         /* pass data to smu controller */
1499         if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1500                         !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1501                 ret = smu_write_watermarks_table(smu);
1502
1503                 if (ret) {
1504                         mutex_unlock(&smu->mutex);
1505                         DRM_ERROR("Failed to update WMTABLE!\n");
1506                         return ret;
1507                 }
1508                 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1509         }
1510
1511         mutex_unlock(&smu->mutex);
1512
1513         return 0;
1514 }
1515
1516 /**
1517  * dm_hw_init() - Initialize DC device
1518  * @handle: The base driver device containing the amdgpu_dm device.
1519  *
1520  * Initialize the &struct amdgpu_display_manager device. This involves calling
1521  * the initializers of each DM component, then populating the struct with them.
1522  *
1523  * Although the function implies hardware initialization, both hardware and
1524  * software are initialized here. Splitting them out to their relevant init
1525  * hooks is a future TODO item.
1526  *
1527  * Some notable things that are initialized here:
1528  *
1529  * - Display Core, both software and hardware
1530  * - DC modules that we need (freesync and color management)
1531  * - DRM software states
1532  * - Interrupt sources and handlers
1533  * - Vblank support
1534  * - Debug FS entries, if enabled
1535  */
1536 static int dm_hw_init(void *handle)
1537 {
1538         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1539         /* Create DAL display manager */
1540         amdgpu_dm_init(adev);
1541         amdgpu_dm_hpd_init(adev);
1542
1543         return 0;
1544 }
1545
1546 /**
1547  * dm_hw_fini() - Teardown DC device
1548  * @handle: The base driver device containing the amdgpu_dm device.
1549  *
1550  * Teardown components within &struct amdgpu_display_manager that require
1551  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1552  * were loaded. Also flush IRQ workqueues and disable them.
1553  */
1554 static int dm_hw_fini(void *handle)
1555 {
1556         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1557
1558         amdgpu_dm_hpd_fini(adev);
1559
1560         amdgpu_dm_irq_fini(adev);
1561         amdgpu_dm_fini(adev);
1562         return 0;
1563 }
1564
1565 static int dm_suspend(void *handle)
1566 {
1567         struct amdgpu_device *adev = handle;
1568         struct amdgpu_display_manager *dm = &adev->dm;
1569         int ret = 0;
1570
1571         WARN_ON(adev->dm.cached_state);
1572         adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1573
1574         s3_handle_mst(adev->ddev, true);
1575
1576         amdgpu_dm_irq_suspend(adev);
1577
1579         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1580
1581         return ret;
1582 }
1583
1584 static struct amdgpu_dm_connector *
1585 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1586                                              struct drm_crtc *crtc)
1587 {
1588         uint32_t i;
1589         struct drm_connector_state *new_con_state;
1590         struct drm_connector *connector;
1591         struct drm_crtc *crtc_from_state;
1592
1593         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1594                 crtc_from_state = new_con_state->crtc;
1595
1596                 if (crtc_from_state == crtc)
1597                         return to_amdgpu_dm_connector(connector);
1598         }
1599
1600         return NULL;
1601 }
1602
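/*
 * Fake a link detection for a forced connector with no physically detected
 * sink: derive the sink capabilities from the connector signal type, create
 * an emulated sink and try to read a local EDID into it.
 */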
1603 static void emulated_link_detect(struct dc_link *link)
1604 {
1605         struct dc_sink_init_data sink_init_data = { 0 };
1606         struct display_sink_capability sink_caps = { 0 };
1607         enum dc_edid_status edid_status;
1608         struct dc_context *dc_ctx = link->ctx;
1609         struct dc_sink *sink = NULL;
1610         struct dc_sink *prev_sink = NULL;
1611
1612         link->type = dc_connection_none;
1613         prev_sink = link->local_sink;
1614
1615         if (prev_sink != NULL)
1616                 dc_sink_retain(prev_sink);
1617
1618         switch (link->connector_signal) {
1619         case SIGNAL_TYPE_HDMI_TYPE_A: {
1620                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1621                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1622                 break;
1623         }
1624
1625         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1626                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1627                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1628                 break;
1629         }
1630
1631         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1632                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1633                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1634                 break;
1635         }
1636
1637         case SIGNAL_TYPE_LVDS: {
1638                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1639                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1640                 break;
1641         }
1642
1643         case SIGNAL_TYPE_EDP: {
1644                 sink_caps.transaction_type =
1645                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1646                 sink_caps.signal = SIGNAL_TYPE_EDP;
1647                 break;
1648         }
1649
1650         case SIGNAL_TYPE_DISPLAY_PORT: {
1651                 sink_caps.transaction_type =
1652                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1653                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1654                 break;
1655         }
1656
1657         default:
1658                 DC_ERROR("Invalid connector type! signal:%d\n",
1659                         link->connector_signal);
1660                 return;
1661         }
1662
1663         sink_init_data.link = link;
1664         sink_init_data.sink_signal = sink_caps.signal;
1665
1666         sink = dc_sink_create(&sink_init_data);
1667         if (!sink) {
1668                 DC_ERROR("Failed to create sink!\n");
1669                 return;
1670         }
1671
1672         /* dc_sink_create returns a new reference */
1673         link->local_sink = sink;
1674
1675         edid_status = dm_helpers_read_local_edid(
1676                         link->ctx,
1677                         link,
1678                         sink);
1679
1680         if (edid_status != EDID_OK)
1681                 DC_ERROR("Failed to read EDID\n");
1683 }
1684
1685 static int dm_resume(void *handle)
1686 {
1687         struct amdgpu_device *adev = handle;
1688         struct drm_device *ddev = adev->ddev;
1689         struct amdgpu_display_manager *dm = &adev->dm;
1690         struct amdgpu_dm_connector *aconnector;
1691         struct drm_connector *connector;
1692         struct drm_connector_list_iter iter;
1693         struct drm_crtc *crtc;
1694         struct drm_crtc_state *new_crtc_state;
1695         struct dm_crtc_state *dm_new_crtc_state;
1696         struct drm_plane *plane;
1697         struct drm_plane_state *new_plane_state;
1698         struct dm_plane_state *dm_new_plane_state;
1699         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1700         enum dc_connection_type new_connection_type = dc_connection_none;
1701         int i, r;
1702
1703         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1704         dc_release_state(dm_state->context);
1705         dm_state->context = dc_create_state(dm->dc);
1706         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1707         dc_resource_state_construct(dm->dc, dm_state->context);
1708
1709         /* Before powering on DC we need to re-initialize DMUB. */
1710         r = dm_dmub_hw_init(adev);
1711         if (r)
1712                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1713
1714         /* power on hardware */
1715         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1716
1717         /* program HPD filter */
1718         dc_resume(dm->dc);
1719
1720         /*
1721          * early enable HPD Rx IRQ, should be done before set mode as short
1722          * pulse interrupts are used for MST
1723          */
1724         amdgpu_dm_irq_resume_early(adev);
1725
1726         /* On resume we need to rewrite the MSTM control bits to enable MST */
1727         s3_handle_mst(ddev, false);
1728
1729         /* Do detection */
1730         drm_connector_list_iter_begin(ddev, &iter);
1731         drm_for_each_connector_iter(connector, &iter) {
1732                 aconnector = to_amdgpu_dm_connector(connector);
1733
1734                 /*
1735                  * This is the case when traversing through already created
1736                  * MST connectors; they should be skipped.
1737                  */
1738                 if (aconnector->mst_port)
1739                         continue;
1740
1741                 mutex_lock(&aconnector->hpd_lock);
1742                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1743                         DRM_ERROR("KMS: Failed to detect connector\n");
1744
1745                 if (aconnector->base.force && new_connection_type == dc_connection_none)
1746                         emulated_link_detect(aconnector->dc_link);
1747                 else
1748                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1749
1750                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1751                         aconnector->fake_enable = false;
1752
1753                 if (aconnector->dc_sink)
1754                         dc_sink_release(aconnector->dc_sink);
1755                 aconnector->dc_sink = NULL;
1756                 amdgpu_dm_update_connector_after_detect(aconnector);
1757                 mutex_unlock(&aconnector->hpd_lock);
1758         }
1759         drm_connector_list_iter_end(&iter);
1760
1761         /* Force mode set in atomic commit */
1762         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1763                 new_crtc_state->active_changed = true;
1764
1765         /*
1766          * atomic_check is expected to create the dc states. We need to release
1767          * them here, since they were duplicated as part of the suspend
1768          * procedure.
1769          */
1770         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1771                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1772                 if (dm_new_crtc_state->stream) {
1773                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1774                         dc_stream_release(dm_new_crtc_state->stream);
1775                         dm_new_crtc_state->stream = NULL;
1776                 }
1777         }
1778
1779         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1780                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1781                 if (dm_new_plane_state->dc_state) {
1782                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1783                         dc_plane_state_release(dm_new_plane_state->dc_state);
1784                         dm_new_plane_state->dc_state = NULL;
1785                 }
1786         }
1787
1788         drm_atomic_helper_resume(ddev, dm->cached_state);
1789
1790         dm->cached_state = NULL;
1791
1792         amdgpu_dm_irq_resume_late(adev);
1793
1794         amdgpu_dm_smu_write_watermarks_table(adev);
1795
1796         return 0;
1797 }
1798
1799 /**
1800  * DOC: DM Lifecycle
1801  *
1802  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1803  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1804  * the base driver's device list to be initialized and torn down accordingly.
1805  *
1806  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1807  */
1808
1809 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1810         .name = "dm",
1811         .early_init = dm_early_init,
1812         .late_init = dm_late_init,
1813         .sw_init = dm_sw_init,
1814         .sw_fini = dm_sw_fini,
1815         .hw_init = dm_hw_init,
1816         .hw_fini = dm_hw_fini,
1817         .suspend = dm_suspend,
1818         .resume = dm_resume,
1819         .is_idle = dm_is_idle,
1820         .wait_for_idle = dm_wait_for_idle,
1821         .check_soft_reset = dm_check_soft_reset,
1822         .soft_reset = dm_soft_reset,
1823         .set_clockgating_state = dm_set_clockgating_state,
1824         .set_powergating_state = dm_set_powergating_state,
1825 };
1826
1827 const struct amdgpu_ip_block_version dm_ip_block =
1828 {
1829         .type = AMD_IP_BLOCK_TYPE_DCE,
1830         .major = 1,
1831         .minor = 0,
1832         .rev = 0,
1833         .funcs = &amdgpu_dm_funcs,
1834 };
1835
1837 /**
1838  * DOC: atomic
1839  *
1840  * *WIP*
1841  */
1842
1843 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1844         .fb_create = amdgpu_display_user_framebuffer_create,
1845         .output_poll_changed = drm_fb_helper_output_poll_changed,
1846         .atomic_check = amdgpu_dm_atomic_check,
1847         .atomic_commit = amdgpu_dm_atomic_commit,
1848 };
1849
1850 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1851         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1852 };
1853
1854 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1855 {
1856         u32 max_cll, min_cll, max, min, q, r;
1857         struct amdgpu_dm_backlight_caps *caps;
1858         struct amdgpu_display_manager *dm;
1859         struct drm_connector *conn_base;
1860         struct amdgpu_device *adev;
1861         static const u8 pre_computed_values[] = {
1862                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1863                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1864
1865         if (!aconnector || !aconnector->dc_link)
1866                 return;
1867
1868         conn_base = &aconnector->base;
1869         adev = conn_base->dev->dev_private;
1870         dm = &adev->dm;
1871         caps = &dm->backlight_caps;
1872         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1873         caps->aux_support = false;
1874         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1875         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1876
1877         if (caps->ext_caps->bits.oled == 1 ||
1878             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1879             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1880                 caps->aux_support = true;
1881
1882         /* From the specification (CTA-861-G), for calculating the maximum
1883          * luminance we need to use:
1884          *      Luminance = 50*2**(CV/32)
1885          * where CV is a one-byte value.
1886          * Calculating this expression would normally need floating-point
1887          * precision; to avoid that complexity, we take advantage of the fact
1888          * that CV is divided by a constant. From Euclid's division algorithm,
1889          * we know that CV can be written as CV = 32*q + r. Substituting this
1890          * into the Luminance expression gives 50*(2**q)*(2**(r/32)), so we
1891          * only need to pre-compute the values of 50*2**(r/32) for r in 0..31.
1892          * The values were generated with the following Ruby line:
1893          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1894          * The results can be verified against pre_computed_values.
1896          */
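        /*
         * Worked example (editorial illustration): for max_cll = 65,
         * q = 65 >> 5 = 2 and r = 65 % 32 = 1, so
         * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
         * which matches round(50 * 2**(65/32)) = 204.
         */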
1897         q = max_cll >> 5;
1898         r = max_cll % 32;
1899         max = (1 << q) * pre_computed_values[r];
1900
1901         // min luminance: maxLum * (CV/255)^2 / 100, done in one integer
1902         // step so the one-byte CV is not rounded down to zero first
1903         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
1904
1905         caps->aux_max_input_signal = max;
1906         caps->aux_min_input_signal = min;
1907 }
1908
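/*
 * Update the drm connector after a detection pass: rebind the dc_sink and
 * refresh the EDID property and freesync caps to match the detected sink.
 */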
1909 void amdgpu_dm_update_connector_after_detect(
1910                 struct amdgpu_dm_connector *aconnector)
1911 {
1912         struct drm_connector *connector = &aconnector->base;
1913         struct drm_device *dev = connector->dev;
1914         struct dc_sink *sink;
1915
1916         /* MST handled by drm_mst framework */
1917         if (aconnector->mst_mgr.mst_state)
1918                 return;
1919
1921         sink = aconnector->dc_link->local_sink;
1922         if (sink)
1923                 dc_sink_retain(sink);
1924
1925         /*
1926          * An EDID-managed connector gets its first update only in the mode_valid
1927          * hook; the connector sink is then set to either the fake or the physical
1928          * sink, depending on link status. Skip if this was already done during boot.
1929          */
1930         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1931                         && aconnector->dc_em_sink) {
1932
1933                 /*
1934                  * For S3 resume with a headless display, use the emulated sink
1935                  * (dc_em_sink) to fake the stream; connector->sink is NULL on resume.
1936                  */
1937                 mutex_lock(&dev->mode_config.mutex);
1938
1939                 if (sink) {
1940                         if (aconnector->dc_sink) {
1941                                 amdgpu_dm_update_freesync_caps(connector, NULL);
1942                                 /*
1943                                  * The retain and release below bump up the sink's
1944                                  * refcount, because the link no longer points to it
1945                                  * after disconnect; otherwise the next crtc-to-connector
1946                                  * reshuffle by the UMD would trigger an unwanted dc_sink release.
1947                                  */
1948                                 dc_sink_release(aconnector->dc_sink);
1949                         }
1950                         aconnector->dc_sink = sink;
1951                         dc_sink_retain(aconnector->dc_sink);
1952                         amdgpu_dm_update_freesync_caps(connector,
1953                                         aconnector->edid);
1954                 } else {
1955                         amdgpu_dm_update_freesync_caps(connector, NULL);
1956                         if (!aconnector->dc_sink) {
1957                                 aconnector->dc_sink = aconnector->dc_em_sink;
1958                                 dc_sink_retain(aconnector->dc_sink);
1959                         }
1960                 }
1961
1962                 mutex_unlock(&dev->mode_config.mutex);
1963
1964                 if (sink)
1965                         dc_sink_release(sink);
1966                 return;
1967         }
1968
1969         /*
1970          * TODO: temporary guard until a proper fix is found.
1971          * If this sink is an MST sink, we should not do anything.
1972          */
1973         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1974                 dc_sink_release(sink);
1975                 return;
1976         }
1977
1978         if (aconnector->dc_sink == sink) {
1979                 /*
1980                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
1981                  * Do nothing!!
1982                  */
1983                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1984                                 aconnector->connector_id);
1985                 if (sink)
1986                         dc_sink_release(sink);
1987                 return;
1988         }
1989
1990         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1991                 aconnector->connector_id, aconnector->dc_sink, sink);
1992
1993         mutex_lock(&dev->mode_config.mutex);
1994
1995         /*
1996          * 1. Update status of the drm connector
1997          * 2. Send an event and let userspace tell us what to do
1998          */
1999         if (sink) {
2000                 /*
2001                  * TODO: check if we still need the S3 mode update workaround.
2002                  * If yes, put it here.
2003                  */
2004                 if (aconnector->dc_sink)
2005                         amdgpu_dm_update_freesync_caps(connector, NULL);
2006
2007                 aconnector->dc_sink = sink;
2008                 dc_sink_retain(aconnector->dc_sink);
2009                 if (sink->dc_edid.length == 0) {
2010                         aconnector->edid = NULL;
2011                         drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2012                 } else {
2013                         aconnector->edid =
2014                                 (struct edid *) sink->dc_edid.raw_edid;
2015
2017                         drm_connector_update_edid_property(connector,
2018                                         aconnector->edid);
2019                         drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2020                                             aconnector->edid);
2021                 }
2022                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2023                 update_connector_ext_caps(aconnector);
2024         } else {
2025                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2026                 amdgpu_dm_update_freesync_caps(connector, NULL);
2027                 drm_connector_update_edid_property(connector, NULL);
2028                 aconnector->num_modes = 0;
2029                 dc_sink_release(aconnector->dc_sink);
2030                 aconnector->dc_sink = NULL;
2031                 aconnector->edid = NULL;
2032 #ifdef CONFIG_DRM_AMD_DC_HDCP
2033                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2034                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2035                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2036 #endif
2037         }
2038
2039         mutex_unlock(&dev->mode_config.mutex);
2040
2041         if (sink)
2042                 dc_sink_release(sink);
2043 }
2044
2045 static void handle_hpd_irq(void *param)
2046 {
2047         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2048         struct drm_connector *connector = &aconnector->base;
2049         struct drm_device *dev = connector->dev;
2050         enum dc_connection_type new_connection_type = dc_connection_none;
2051 #ifdef CONFIG_DRM_AMD_DC_HDCP
2052         struct amdgpu_device *adev = dev->dev_private;
2053 #endif
2054
2055         /*
2056          * In case of failure or MST, there is no need to update the connector
2057          * status or notify the OS; in the MST case, MST does this in its own context.
2058          */
2059         mutex_lock(&aconnector->hpd_lock);
2060
2061 #ifdef CONFIG_DRM_AMD_DC_HDCP
2062         if (adev->dm.hdcp_workqueue)
2063                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2064 #endif
2065         if (aconnector->fake_enable)
2066                 aconnector->fake_enable = false;
2067
2068         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2069                 DRM_ERROR("KMS: Failed to detect connector\n");
2070
2071         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2072                 emulated_link_detect(aconnector->dc_link);
2073
2075                 drm_modeset_lock_all(dev);
2076                 dm_restore_drm_connector_state(dev, connector);
2077                 drm_modeset_unlock_all(dev);
2078
2079                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2080                         drm_kms_helper_hotplug_event(dev);
2081
2082         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2083                 amdgpu_dm_update_connector_after_detect(aconnector);
2084
2086                 drm_modeset_lock_all(dev);
2087                 dm_restore_drm_connector_state(dev, connector);
2088                 drm_modeset_unlock_all(dev);
2089
2090                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2091                         drm_kms_helper_hotplug_event(dev);
2092         }
2093         mutex_unlock(&aconnector->hpd_lock);
2095 }
2096
2097 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2098 {
2099         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2100         uint8_t dret;
2101         bool new_irq_handled = false;
2102         int dpcd_addr;
2103         int dpcd_bytes_to_read;
2104
2105         const int max_process_count = 30;
2106         int process_count = 0;
2107
2108         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2109
2110         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2111                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2112                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2113                 dpcd_addr = DP_SINK_COUNT;
2114         } else {
2115                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2116                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2117                 dpcd_addr = DP_SINK_COUNT_ESI;
2118         }
2119
2120         dret = drm_dp_dpcd_read(
2121                 &aconnector->dm_dp_aux.aux,
2122                 dpcd_addr,
2123                 esi,
2124                 dpcd_bytes_to_read);
2125
2126         while (dret == dpcd_bytes_to_read &&
2127                 process_count < max_process_count) {
2128                 uint8_t retry;
2129                 dret = 0;
2130
2131                 process_count++;
2132
2133                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2134                 /* handle HPD short pulse irq */
2135                 if (aconnector->mst_mgr.mst_state)
2136                         drm_dp_mst_hpd_irq(
2137                                 &aconnector->mst_mgr,
2138                                 esi,
2139                                 &new_irq_handled);
2140
2141                 if (new_irq_handled) {
2142                         /* ACK at DPCD to notify downstream */
2143                         const int ack_dpcd_bytes_to_write =
2144                                 dpcd_bytes_to_read - 1;
2145
2146                         for (retry = 0; retry < 3; retry++) {
2147                                 uint8_t wret;
2148
2149                                 wret = drm_dp_dpcd_write(
2150                                         &aconnector->dm_dp_aux.aux,
2151                                         dpcd_addr + 1,
2152                                         &esi[1],
2153                                         ack_dpcd_bytes_to_write);
2154                                 if (wret == ack_dpcd_bytes_to_write)
2155                                         break;
2156                         }
2157
2158                         /* check if there is new irq to be handled */
2159                         dret = drm_dp_dpcd_read(
2160                                 &aconnector->dm_dp_aux.aux,
2161                                 dpcd_addr,
2162                                 esi,
2163                                 dpcd_bytes_to_read);
2164
2165                         new_irq_handled = false;
2166                 } else {
2167                         break;
2168                 }
2169         }
2170
2171         if (process_count == max_process_count)
2172                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2173 }
2174
2175 static void handle_hpd_rx_irq(void *param)
2176 {
2177         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2178         struct drm_connector *connector = &aconnector->base;
2179         struct drm_device *dev = connector->dev;
2180         struct dc_link *dc_link = aconnector->dc_link;
2181         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2182         enum dc_connection_type new_connection_type = dc_connection_none;
2183 #ifdef CONFIG_DRM_AMD_DC_HDCP
2184         union hpd_irq_data hpd_irq_data;
2185         struct amdgpu_device *adev = dev->dev_private;
2186
2187         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2188 #endif
2189
2190         /*
2191          * TODO: Temporarily add a mutex so the hpd interrupt cannot hit a gpio
2192          * conflict; once the i2c helper is implemented, this mutex should be
2193          * retired.
2194          */
2195         if (dc_link->type != dc_connection_mst_branch)
2196                 mutex_lock(&aconnector->hpd_lock);
2197
2199 #ifdef CONFIG_DRM_AMD_DC_HDCP
2200         if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2201 #else
2202         if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2203 #endif
2204                         !is_mst_root_connector) {
2205                 /* Downstream Port status changed. */
2206                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2207                         DRM_ERROR("KMS: Failed to detect connector\n");
2208
2209                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2210                         emulated_link_detect(dc_link);
2211
2212                         if (aconnector->fake_enable)
2213                                 aconnector->fake_enable = false;
2214
2215                         amdgpu_dm_update_connector_after_detect(aconnector);
2216
2218                         drm_modeset_lock_all(dev);
2219                         dm_restore_drm_connector_state(dev, connector);
2220                         drm_modeset_unlock_all(dev);
2221
2222                         drm_kms_helper_hotplug_event(dev);
2223                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2224
2225                         if (aconnector->fake_enable)
2226                                 aconnector->fake_enable = false;
2227
2228                         amdgpu_dm_update_connector_after_detect(aconnector);
2229
2231                         drm_modeset_lock_all(dev);
2232                         dm_restore_drm_connector_state(dev, connector);
2233                         drm_modeset_unlock_all(dev);
2234
2235                         drm_kms_helper_hotplug_event(dev);
2236                 }
2237         }
2238 #ifdef CONFIG_DRM_AMD_DC_HDCP
2239         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2240                 if (adev->dm.hdcp_workqueue)
2241                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2242         }
2243 #endif
2244         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2245             (dc_link->type == dc_connection_mst_branch))
2246                 dm_handle_hpd_rx_irq(aconnector);
2247
2248         if (dc_link->type != dc_connection_mst_branch) {
2249                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2250                 mutex_unlock(&aconnector->hpd_lock);
2251         }
2252 }
2253
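/*
 * Walk the connector list and hook handle_hpd_irq() / handle_hpd_rx_irq()
 * up to each link's HPD and HPD RX (DP short pulse) interrupt sources.
 */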
2254 static void register_hpd_handlers(struct amdgpu_device *adev)
2255 {
2256         struct drm_device *dev = adev->ddev;
2257         struct drm_connector *connector;
2258         struct amdgpu_dm_connector *aconnector;
2259         const struct dc_link *dc_link;
2260         struct dc_interrupt_params int_params = {0};
2261
2262         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2263         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2264
2265         list_for_each_entry(connector,
2266                         &dev->mode_config.connector_list, head) {
2267
2268                 aconnector = to_amdgpu_dm_connector(connector);
2269                 dc_link = aconnector->dc_link;
2270
2271                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2272                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2273                         int_params.irq_source = dc_link->irq_source_hpd;
2274
2275                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2276                                         handle_hpd_irq,
2277                                         (void *) aconnector);
2278                 }
2279
2280                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2281
2282                         /* Also register for DP short pulse (hpd_rx). */
2283                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2284                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2285
2286                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2287                                         handle_hpd_rx_irq,
2288                                         (void *) aconnector);
2289                 }
2290         }
2291 }
2292
2293 /* Register IRQ sources and initialize IRQ callbacks */
2294 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2295 {
2296         struct dc *dc = adev->dm.dc;
2297         struct common_irq_params *c_irq_params;
2298         struct dc_interrupt_params int_params = {0};
2299         int r;
2300         int i;
2301         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2302
2303         if (adev->asic_type >= CHIP_VEGA10)
2304                 client_id = SOC15_IH_CLIENTID_DCE;
2305
2306         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2307         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2308
2309         /*
2310          * Actions of amdgpu_irq_add_id():
2311          * 1. Register a set() function with base driver.
2312          *    Base driver will call set() function to enable/disable an
2313          *    interrupt in DC hardware.
2314          * 2. Register amdgpu_dm_irq_handler().
2315          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2316          *    coming from DC hardware.
2317          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2318          *    for acknowledging and handling.
2319          */
2320         /* Use VBLANK interrupt */
2321         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2322                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2323                 if (r) {
2324                         DRM_ERROR("Failed to add crtc irq id!\n");
2325                         return r;
2326                 }
2327
2328                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2329                 int_params.irq_source =
2330                         dc_interrupt_to_irq_source(dc, i, 0);
2331
2332                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2333
2334                 c_irq_params->adev = adev;
2335                 c_irq_params->irq_src = int_params.irq_source;
2336
2337                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2338                                 dm_crtc_high_irq, c_irq_params);
2339         }
2340
2341         /* Use VUPDATE interrupt */
2342         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2343                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2344                 if (r) {
2345                         DRM_ERROR("Failed to add vupdate irq id!\n");
2346                         return r;
2347                 }
2348
2349                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2350                 int_params.irq_source =
2351                         dc_interrupt_to_irq_source(dc, i, 0);
2352
2353                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2354
2355                 c_irq_params->adev = adev;
2356                 c_irq_params->irq_src = int_params.irq_source;
2357
2358                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2359                                 dm_vupdate_high_irq, c_irq_params);
2360         }
2361
2362         /* Use GRPH_PFLIP interrupt */
2363         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2364                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2365                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2366                 if (r) {
2367                         DRM_ERROR("Failed to add page flip irq id!\n");
2368                         return r;
2369                 }
2370
2371                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2372                 int_params.irq_source =
2373                         dc_interrupt_to_irq_source(dc, i, 0);
2374
2375                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2376
2377                 c_irq_params->adev = adev;
2378                 c_irq_params->irq_src = int_params.irq_source;
2379
2380                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2381                                 dm_pflip_high_irq, c_irq_params);
2383         }
2384
2385         /* HPD */
2386         r = amdgpu_irq_add_id(adev, client_id,
2387                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2388         if (r) {
2389                 DRM_ERROR("Failed to add hpd irq id!\n");
2390                 return r;
2391         }
2392
2393         register_hpd_handlers(adev);
2394
2395         return 0;
2396 }
2397
2398 #if defined(CONFIG_DRM_AMD_DC_DCN)
2399 /* Register IRQ sources and initialize IRQ callbacks */
2400 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2401 {
2402         struct dc *dc = adev->dm.dc;
2403         struct common_irq_params *c_irq_params;
2404         struct dc_interrupt_params int_params = {0};
2405         int r;
2406         int i;
2407
2408         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2409         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2410
2411         /*
2412          * Actions of amdgpu_irq_add_id():
2413          * 1. Register a set() function with base driver.
2414          *    Base driver will call set() function to enable/disable an
2415          *    interrupt in DC hardware.
2416          * 2. Register amdgpu_dm_irq_handler().
2417          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2418          *    coming from DC hardware.
2419          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2420          *    for acknowledging and handling.
2421          */
2422
2423         /* Use VSTARTUP interrupt */
2424         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2425                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2426                         i++) {
2427                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2428
2429                 if (r) {
2430                         DRM_ERROR("Failed to add crtc irq id!\n");
2431                         return r;
2432                 }
2433
2434                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2435                 int_params.irq_source =
2436                         dc_interrupt_to_irq_source(dc, i, 0);
2437
2438                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2439
2440                 c_irq_params->adev = adev;
2441                 c_irq_params->irq_src = int_params.irq_source;
2442
2443                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2444                                 dm_dcn_crtc_high_irq, c_irq_params);
2445         }
2446
2447         /* Use GRPH_PFLIP interrupt */
2448         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2449                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2450                         i++) {
2451                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2452                 if (r) {
2453                         DRM_ERROR("Failed to add page flip irq id!\n");
2454                         return r;
2455                 }
2456
2457                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2458                 int_params.irq_source =
2459                         dc_interrupt_to_irq_source(dc, i, 0);
2460
2461                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2462
2463                 c_irq_params->adev = adev;
2464                 c_irq_params->irq_src = int_params.irq_source;
2465
2466                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2467                                 dm_pflip_high_irq, c_irq_params);
2469         }
2470
2471         /* HPD */
2472         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2473                         &adev->hpd_irq);
2474         if (r) {
2475                 DRM_ERROR("Failed to add hpd irq id!\n");
2476                 return r;
2477         }
2478
2479         register_hpd_handlers(adev);
2480
2481         return 0;
2482 }
2483 #endif
2484
2485 /*
2486  * Acquires the lock for the atomic state object and returns
2487  * the new atomic state.
2488  *
2489  * This should only be called during atomic check.
2490  */
2491 static int dm_atomic_get_state(struct drm_atomic_state *state,
2492                                struct dm_atomic_state **dm_state)
2493 {
2494         struct drm_device *dev = state->dev;
2495         struct amdgpu_device *adev = dev->dev_private;
2496         struct amdgpu_display_manager *dm = &adev->dm;
2497         struct drm_private_state *priv_state;
2498
2499         if (*dm_state)
2500                 return 0;
2501
2502         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2503         if (IS_ERR(priv_state))
2504                 return PTR_ERR(priv_state);
2505
2506         *dm_state = to_dm_atomic_state(priv_state);
2507
2508         return 0;
2509 }
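
/*
 * Typical usage from atomic check (editorial sketch; "state" is the
 * in-progress atomic state of the commit being checked):
 *
 *     struct dm_atomic_state *dm_state = NULL;
 *     int ret = dm_atomic_get_state(state, &dm_state);
 *
 *     if (ret)
 *             return ret;
 *     // dm_state->context can now be inspected or modified
 */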
2510
2511 struct dm_atomic_state *
2512 dm_atomic_get_new_state(struct drm_atomic_state *state)
2513 {
2514         struct drm_device *dev = state->dev;
2515         struct amdgpu_device *adev = dev->dev_private;
2516         struct amdgpu_display_manager *dm = &adev->dm;
2517         struct drm_private_obj *obj;
2518         struct drm_private_state *new_obj_state;
2519         int i;
2520
2521         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2522                 if (obj->funcs == dm->atomic_obj.funcs)
2523                         return to_dm_atomic_state(new_obj_state);
2524         }
2525
2526         return NULL;
2527 }
2528
2529 struct dm_atomic_state *
2530 dm_atomic_get_old_state(struct drm_atomic_state *state)
2531 {
2532         struct drm_device *dev = state->dev;
2533         struct amdgpu_device *adev = dev->dev_private;
2534         struct amdgpu_display_manager *dm = &adev->dm;
2535         struct drm_private_obj *obj;
2536         struct drm_private_state *old_obj_state;
2537         int i;
2538
2539         for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2540                 if (obj->funcs == dm->atomic_obj.funcs)
2541                         return to_dm_atomic_state(old_obj_state);
2542         }
2543
2544         return NULL;
2545 }
2546
2547 static struct drm_private_state *
2548 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2549 {
2550         struct dm_atomic_state *old_state, *new_state;
2551
2552         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2553         if (!new_state)
2554                 return NULL;
2555
2556         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2557
2558         old_state = to_dm_atomic_state(obj->state);
2559
2560         if (old_state && old_state->context)
2561                 new_state->context = dc_copy_state(old_state->context);
2562
2563         if (!new_state->context) {
2564                 kfree(new_state);
2565                 return NULL;
2566         }
2567
2568         return &new_state->base;
2569 }
2570
2571 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2572                                     struct drm_private_state *state)
2573 {
2574         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2575
2576         if (dm_state && dm_state->context)
2577                 dc_release_state(dm_state->context);
2578
2579         kfree(dm_state);
2580 }
2581
2582 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2583         .atomic_duplicate_state = dm_atomic_duplicate_state,
2584         .atomic_destroy_state = dm_atomic_destroy_state,
2585 };
2586
2587 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2588 {
2589         struct dm_atomic_state *state;
2590         int r;
2591
2592         adev->mode_info.mode_config_initialized = true;
2593
2594         adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2595         adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2596
2597         adev->ddev->mode_config.max_width = 16384;
2598         adev->ddev->mode_config.max_height = 16384;
2599
2600         adev->ddev->mode_config.preferred_depth = 24;
2601         adev->ddev->mode_config.prefer_shadow = 1;
2602         /* indicates support for immediate flip */
2603         adev->ddev->mode_config.async_page_flip = true;
2604
2605         adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2606
2607         state = kzalloc(sizeof(*state), GFP_KERNEL);
2608         if (!state)
2609                 return -ENOMEM;
2610
2611         state->context = dc_create_state(adev->dm.dc);
2612         if (!state->context) {
2613                 kfree(state);
2614                 return -ENOMEM;
2615         }
2616
2617         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2618
2619         drm_atomic_private_obj_init(adev->ddev,
2620                                     &adev->dm.atomic_obj,
2621                                     &state->base,
2622                                     &dm_atomic_state_funcs);
2623
2624         r = amdgpu_display_modeset_create_props(adev);
2625         if (r)
2626                 return r;
2627
2628         r = amdgpu_dm_audio_init(adev);
2629         if (r)
2630                 return r;
2631
2632         return 0;
2633 }
2634
2635 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2636 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2637 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2638
2639 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2640         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2641
2642 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2643 {
2644 #if defined(CONFIG_ACPI)
2645         struct amdgpu_dm_backlight_caps caps;
2646
2647         if (dm->backlight_caps.caps_valid)
2648                 return;
2649
2650         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2651         if (caps.caps_valid) {
2652                 dm->backlight_caps.caps_valid = true;
2653                 if (caps.aux_support)
2654                         return;
2655                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2656                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2657         } else {
2658                 dm->backlight_caps.min_input_signal =
2659                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2660                 dm->backlight_caps.max_input_signal =
2661                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2662         }
2663 #else
2664         if (dm->backlight_caps.aux_support)
2665                 return;
2666
2667         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2668         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2669 #endif
2670 }
2671
2672 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2673 {
2674         bool rc;
2675
2676         if (!link)
2677                 return 1;
2678
2679         rc = dc_link_set_backlight_level_nits(link, true, brightness,
2680                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2681
2682         return rc ? 0 : 1;
2683 }
2684
2685 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2686                               const uint32_t user_brightness)
2687 {
2688         u32 min, max, conversion_pace;
2689         u32 brightness = user_brightness;
2690
2691         if (!caps)
2692                 goto out;
2693
2694         if (!caps->aux_support) {
2695                 max = caps->max_input_signal;
2696                 min = caps->min_input_signal;
2697                 /*
2698                  * The brightness input is in the range 0-255. It needs to be
2699                  * rescaled to lie between the requested min and max input
2700                  * signal, and scaled up by 0x101 to match the DC interface,
2701                  * which has a range of 0 to 0xffff.
2702                  */
2705                 conversion_pace = 0x101;
2706                 brightness =
2707                         user_brightness
2708                         * conversion_pace
2709                         * (max - min)
2710                         / AMDGPU_MAX_BL_LEVEL
2711                         + min * conversion_pace;
2712         } else {
2713                 /* TODO
2714                  * We are doing a linear interpolation here, which is OK but
2715                  * does not provide the optimal result. We probably want
2716                  * something close to the Perceptual Quantizer (PQ) curve.
2717                  */
2718                 max = caps->aux_max_input_signal;
2719                 min = caps->aux_min_input_signal;
2720
2721                 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2722                                + user_brightness * max;
2723                 // Multiply the value by 1000 since we use millinits
2724                 brightness *= 1000;
2725                 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2726         }
2727
2728 out:
2729         return brightness;
2730 }
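
/*
 * Worked examples for convert_brightness() (editorial; the PWM values are the
 * AMDGPU_DM_DEFAULT_* caps, the AUX values are assumed):
 * - PWM path, min_input_signal = 12, max_input_signal = 255:
 *   user_brightness = 255 -> 255*0x101*(255-12)/255 + 12*0x101 = 0xFFFF;
 *   user_brightness = 0   -> 12*0x101 = 0x0C0C.
 * - AUX path, aux_min_input_signal = 10, aux_max_input_signal = 500 nits:
 *   user_brightness = 255 -> 500 * 1000 = 500000 millinits.
 */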
2731
2732 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2733 {
2734         struct amdgpu_display_manager *dm = bl_get_data(bd);
2735         struct amdgpu_dm_backlight_caps caps;
2736         struct dc_link *link = NULL;
2737         u32 brightness;
2738         bool rc;
2739
2740         amdgpu_dm_update_backlight_caps(dm);
2741         caps = dm->backlight_caps;
2742
2743         link = (struct dc_link *)dm->backlight_link;
2744
2745         brightness = convert_brightness(&caps, bd->props.brightness);
2746         // Change brightness based on AUX property
2747         if (caps.aux_support)
2748                 return set_backlight_via_aux(link, brightness);
2749
2750         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2751
2752         return rc ? 0 : 1;
2753 }
2754
2755 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2756 {
2757         struct amdgpu_display_manager *dm = bl_get_data(bd);
2758         int ret = dc_link_get_backlight_level(dm->backlight_link);
2759
2760         if (ret == DC_ERROR_UNEXPECTED)
2761                 return bd->props.brightness;
2762         return ret;
2763 }
2764
2765 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2766         .options = BL_CORE_SUSPENDRESUME,
2767         .get_brightness = amdgpu_dm_backlight_get_brightness,
2768         .update_status  = amdgpu_dm_backlight_update_status,
2769 };
2770
2771 static void
2772 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2773 {
2774         char bl_name[16];
2775         struct backlight_properties props = { 0 };
2776
2777         amdgpu_dm_update_backlight_caps(dm);
2778
2779         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2780         props.brightness = AMDGPU_MAX_BL_LEVEL;
2781         props.type = BACKLIGHT_RAW;
2782
2783         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2784                         dm->adev->ddev->primary->index);
2785
2786         dm->backlight_dev = backlight_device_register(bl_name,
2787                         dm->adev->ddev->dev,
2788                         dm,
2789                         &amdgpu_dm_backlight_ops,
2790                         &props);
2791
2792         if (IS_ERR(dm->backlight_dev))
2793                 DRM_ERROR("DM: Backlight registration failed!\n");
2794         else
2795                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2796 }
2797
2798 #endif
2799
2800 static int initialize_plane(struct amdgpu_display_manager *dm,
2801                             struct amdgpu_mode_info *mode_info, int plane_id,
2802                             enum drm_plane_type plane_type,
2803                             const struct dc_plane_cap *plane_cap)
2804 {
2805         struct drm_plane *plane;
2806         unsigned long possible_crtcs;
2807         int ret = 0;
2808
2809         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2810         if (!plane) {
2811                 DRM_ERROR("KMS: Failed to allocate plane\n");
2812                 return -ENOMEM;
2813         }
2814         plane->type = plane_type;
2815
2816         /*
2817          * HACK: IGT tests expect that the primary plane for a CRTC
2818          * can only have one possible CRTC. Only expose support for
2819          * any CRTC if they're not going to be used as a primary plane
2820          * for a CRTC - like overlay or underlay planes.
2821          */
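        /*
         * For example (editorial note): plane 0 may only go on CRTC 0
         * (possible_crtcs = 0x1), while a plane_id at or beyond
         * dc->caps.max_streams is never a primary and may go on any CRTC
         * (possible_crtcs = 0xff).
         */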
2822         possible_crtcs = 1 << plane_id;
2823         if (plane_id >= dm->dc->caps.max_streams)
2824                 possible_crtcs = 0xff;
2825
2826         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2827
2828         if (ret) {
2829                 DRM_ERROR("KMS: Failed to initialize plane\n");
2830                 kfree(plane);
2831                 return ret;
2832         }
2833
2834         if (mode_info)
2835                 mode_info->planes[plane_id] = plane;
2836
2837         return ret;
2838 }
2839
2840
2841 static void register_backlight_device(struct amdgpu_display_manager *dm,
2842                                       struct dc_link *link)
2843 {
2844 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2845         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2846
2847         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2848             link->type != dc_connection_none) {
2849                 /*
2850                  * Even if registration fails, we should continue with
2851                  * DM initialization because having no backlight control
2852                  * is better than a black screen.
2853                  */
2854                 amdgpu_dm_register_backlight_device(dm);
2855
2856                 if (dm->backlight_dev)
2857                         dm->backlight_link = link;
2858         }
2859 #endif
2860 }
2861
2862
2863 /*
2864  * In this architecture, the association
2865  * connector -> encoder -> crtc
2866  * is not really required. The crtc and connector will hold the
2867  * display_index as an abstraction to use with the DAL component.
2868  *
2869  * Returns 0 on success
2870  */
2871 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2872 {
2873         struct amdgpu_display_manager *dm = &adev->dm;
2874         int32_t i;
2875         struct amdgpu_dm_connector *aconnector = NULL;
2876         struct amdgpu_encoder *aencoder = NULL;
2877         struct amdgpu_mode_info *mode_info = &adev->mode_info;
2878         uint32_t link_cnt;
2879         int32_t primary_planes;
2880         enum dc_connection_type new_connection_type = dc_connection_none;
2881         const struct dc_plane_cap *plane;
2882
2883         link_cnt = dm->dc->caps.max_links;
2884         if (amdgpu_dm_mode_config_init(dm->adev)) {
2885                 DRM_ERROR("DM: Failed to initialize mode config\n");
2886                 return -EINVAL;
2887         }
2888
2889         /* There is one primary plane per CRTC */
2890         primary_planes = dm->dc->caps.max_streams;
2891         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2892
2893         /*
2894          * Initialize primary planes, implicit planes for legacy IOCTLs.
2895          * Order is reversed to match iteration order in atomic check.
2896          */
2897         for (i = (primary_planes - 1); i >= 0; i--) {
2898                 plane = &dm->dc->caps.planes[i];
2899
2900                 if (initialize_plane(dm, mode_info, i,
2901                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
2902                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
2903                         goto fail;
2904                 }
2905         }
2906
2907         /*
2908          * Initialize overlay planes, index starting after primary planes.
2909          * These planes have a higher DRM index than the primary planes since
2910          * they should be considered as having a higher z-order.
2911          * Order is reversed to match iteration order in atomic check.
2912          *
2913          * Only support DCN for now, and only expose one so we don't encourage
2914          * userspace to use up all the pipes.
2915          */
2916         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2917                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2918
2919                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2920                         continue;
2921
2922                 if (!plane->blends_with_above || !plane->blends_with_below)
2923                         continue;
2924
2925                 if (!plane->pixel_format_support.argb8888)
2926                         continue;
2927
2928                 if (initialize_plane(dm, NULL, primary_planes + i,
2929                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
2930                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2931                         goto fail;
2932                 }
2933
2934                 /* Only create one overlay plane. */
2935                 break;
2936         }
2937
2938         for (i = 0; i < dm->dc->caps.max_streams; i++)
2939                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2940                         DRM_ERROR("KMS: Failed to initialize crtc\n");
2941                         goto fail;
2942                 }
2943
2944         dm->display_indexes_num = dm->dc->caps.max_streams;
2945
2946         /* loops over all connectors on the board */
2947         for (i = 0; i < link_cnt; i++) {
2948                 struct dc_link *link = NULL;
2949
2950                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2951                         DRM_ERROR(
2952                                 "KMS: Cannot support more than %d display indexes\n",
2953                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
2954                         continue;
2955                 }
2956
2957                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2958                 if (!aconnector)
2959                         goto fail;
2960
2961                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2962                 if (!aencoder)
2963                         goto fail;
2964
2965                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2966                         DRM_ERROR("KMS: Failed to initialize encoder\n");
2967                         goto fail;
2968                 }
2969
2970                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2971                         DRM_ERROR("KMS: Failed to initialize connector\n");
2972                         goto fail;
2973                 }
2974
2975                 link = dc_get_link_at_index(dm->dc, i);
2976
2977                 if (!dc_link_detect_sink(link, &new_connection_type))
2978                         DRM_ERROR("KMS: Failed to detect connector\n");
2979
2980                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2981                         emulated_link_detect(link);
2982                         amdgpu_dm_update_connector_after_detect(aconnector);
2983
2984                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2985                         amdgpu_dm_update_connector_after_detect(aconnector);
2986                         register_backlight_device(dm, link);
2987                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2988                                 amdgpu_dm_set_psr_caps(link);
2989                 }
2990
2991
2992         }
2993
2994         /* Software is initialized. Now we can register interrupt handlers. */
2995         switch (adev->asic_type) {
2996         case CHIP_BONAIRE:
2997         case CHIP_HAWAII:
2998         case CHIP_KAVERI:
2999         case CHIP_KABINI:
3000         case CHIP_MULLINS:
3001         case CHIP_TONGA:
3002         case CHIP_FIJI:
3003         case CHIP_CARRIZO:
3004         case CHIP_STONEY:
3005         case CHIP_POLARIS11:
3006         case CHIP_POLARIS10:
3007         case CHIP_POLARIS12:
3008         case CHIP_VEGAM:
3009         case CHIP_VEGA10:
3010         case CHIP_VEGA12:
3011         case CHIP_VEGA20:
3012                 if (dce110_register_irq_handlers(dm->adev)) {
3013                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3014                         goto fail;
3015                 }
3016                 break;
3017 #if defined(CONFIG_DRM_AMD_DC_DCN)
3018         case CHIP_RAVEN:
3019         case CHIP_NAVI12:
3020         case CHIP_NAVI10:
3021         case CHIP_NAVI14:
3022         case CHIP_RENOIR:
3023                 if (dcn10_register_irq_handlers(dm->adev)) {
3024                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3025                         goto fail;
3026                 }
3027                 break;
3028 #endif
3029         default:
3030                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3031                 goto fail;
3032         }
3033
3034         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3035                 dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
3036
3037         /* No userspace support. */
3038         dm->dc->debug.disable_tri_buf = true;
3039
3040         return 0;
3041 fail:
3042         kfree(aencoder);
3043         kfree(aconnector);
3044
3045         return -EINVAL;
3046 }
3047
3048 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3049 {
3050         drm_mode_config_cleanup(dm->ddev);
3051         drm_atomic_private_obj_fini(&dm->atomic_obj);
3053 }
3054
3055 /******************************************************************************
3056  * amdgpu_display_funcs functions
3057  *****************************************************************************/
3058
3059 /*
3060  * dm_bandwidth_update - program display watermarks
3061  *
3062  * @adev: amdgpu_device pointer
3063  *
3064  * Calculate and program the display watermarks and line buffer allocation.
3065  */
3066 static void dm_bandwidth_update(struct amdgpu_device *adev)
3067 {
3068         /* TODO: implement later */
3069 }
3070
3071 static const struct amdgpu_display_funcs dm_display_funcs = {
3072         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3073         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3074         .backlight_set_level = NULL, /* never called for DC */
3075         .backlight_get_level = NULL, /* never called for DC */
3076         .hpd_sense = NULL,/* called unconditionally */
3077         .hpd_set_polarity = NULL, /* called unconditionally */
3078         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3079         .page_flip_get_scanoutpos =
3080                 dm_crtc_get_scanoutpos,/* called unconditionally */
3081         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3082         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3083 };
3084
3085 #if defined(CONFIG_DEBUG_KERNEL_DC)
3086
3087 static ssize_t s3_debug_store(struct device *device,
3088                               struct device_attribute *attr,
3089                               const char *buf,
3090                               size_t count)
3091 {
3092         int ret;
3093         int s3_state;
3094         struct drm_device *drm_dev = dev_get_drvdata(device);
3095         struct amdgpu_device *adev = drm_dev->dev_private;
3096
3097         ret = kstrtoint(buf, 0, &s3_state);
3098
3099         if (ret == 0) {
3100                 if (s3_state) {
3101                         dm_resume(adev);
3102                         drm_kms_helper_hotplug_event(adev->ddev);
3103                 } else
3104                         dm_suspend(adev);
3105         }
3106
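        /* A sysfs store must return the number of bytes consumed on success. */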
3107         return ret == 0 ? count : 0;
3108 }
3109
3110 DEVICE_ATTR_WO(s3_debug);
3111
3112 #endif
3113
3114 static int dm_early_init(void *handle)
3115 {
3116         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3117
3118         switch (adev->asic_type) {
3119         case CHIP_BONAIRE:
3120         case CHIP_HAWAII:
3121                 adev->mode_info.num_crtc = 6;
3122                 adev->mode_info.num_hpd = 6;
3123                 adev->mode_info.num_dig = 6;
3124                 break;
3125         case CHIP_KAVERI:
3126                 adev->mode_info.num_crtc = 4;
3127                 adev->mode_info.num_hpd = 6;
3128                 adev->mode_info.num_dig = 7;
3129                 break;
3130         case CHIP_KABINI:
3131         case CHIP_MULLINS:
3132                 adev->mode_info.num_crtc = 2;
3133                 adev->mode_info.num_hpd = 6;
3134                 adev->mode_info.num_dig = 6;
3135                 break;
3136         case CHIP_FIJI:
3137         case CHIP_TONGA:
3138                 adev->mode_info.num_crtc = 6;
3139                 adev->mode_info.num_hpd = 6;
3140                 adev->mode_info.num_dig = 7;
3141                 break;
3142         case CHIP_CARRIZO:
3143                 adev->mode_info.num_crtc = 3;
3144                 adev->mode_info.num_hpd = 6;
3145                 adev->mode_info.num_dig = 9;
3146                 break;
3147         case CHIP_STONEY:
3148                 adev->mode_info.num_crtc = 2;
3149                 adev->mode_info.num_hpd = 6;
3150                 adev->mode_info.num_dig = 9;
3151                 break;
3152         case CHIP_POLARIS11:
3153         case CHIP_POLARIS12:
3154                 adev->mode_info.num_crtc = 5;
3155                 adev->mode_info.num_hpd = 5;
3156                 adev->mode_info.num_dig = 5;
3157                 break;
3158         case CHIP_POLARIS10:
3159         case CHIP_VEGAM:
3160                 adev->mode_info.num_crtc = 6;
3161                 adev->mode_info.num_hpd = 6;
3162                 adev->mode_info.num_dig = 6;
3163                 break;
3164         case CHIP_VEGA10:
3165         case CHIP_VEGA12:
3166         case CHIP_VEGA20:
3167                 adev->mode_info.num_crtc = 6;
3168                 adev->mode_info.num_hpd = 6;
3169                 adev->mode_info.num_dig = 6;
3170                 break;
3171 #if defined(CONFIG_DRM_AMD_DC_DCN)
3172         case CHIP_RAVEN:
3173                 adev->mode_info.num_crtc = 4;
3174                 adev->mode_info.num_hpd = 4;
3175                 adev->mode_info.num_dig = 4;
3176                 break;
3177 #endif
3178         case CHIP_NAVI10:
3179         case CHIP_NAVI12:
3180                 adev->mode_info.num_crtc = 6;
3181                 adev->mode_info.num_hpd = 6;
3182                 adev->mode_info.num_dig = 6;
3183                 break;
3184         case CHIP_NAVI14:
3185                 adev->mode_info.num_crtc = 5;
3186                 adev->mode_info.num_hpd = 5;
3187                 adev->mode_info.num_dig = 5;
3188                 break;
3189         case CHIP_RENOIR:
3190                 adev->mode_info.num_crtc = 4;
3191                 adev->mode_info.num_hpd = 4;
3192                 adev->mode_info.num_dig = 4;
3193                 break;
3194         default:
3195                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3196                 return -EINVAL;
3197         }
3198
3199         amdgpu_dm_set_irq_funcs(adev);
3200
3201         if (adev->mode_info.funcs == NULL)
3202                 adev->mode_info.funcs = &dm_display_funcs;
3203
3204         /*
3205          * Note: Do NOT change adev->audio_endpt_rreg and
3206          * adev->audio_endpt_wreg because they are initialised in
3207          * amdgpu_device_init()
3208          */
3209 #if defined(CONFIG_DEBUG_KERNEL_DC)
3210         device_create_file(
3211                 adev->ddev->dev,
3212                 &dev_attr_s3_debug);
3213 #endif
3214
3215         return 0;
3216 }
3217
3218 static bool modeset_required(struct drm_crtc_state *crtc_state,
3219                              struct dc_stream_state *new_stream,
3220                              struct dc_stream_state *old_stream)
3221 {
3222         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3223                 return false;
3224
3225         if (!crtc_state->enable)
3226                 return false;
3227
3228         return crtc_state->active;
3229 }
3230
3231 static bool modereset_required(struct drm_crtc_state *crtc_state)
3232 {
3233         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3234                 return false;
3235
3236         return !crtc_state->enable || !crtc_state->active;
3237 }
3238
3239 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3240 {
3241         drm_encoder_cleanup(encoder);
3242         kfree(encoder);
3243 }
3244
3245 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3246         .destroy = amdgpu_dm_encoder_destroy,
3247 };
3248
3249
3250 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3251                                 struct dc_scaling_info *scaling_info)
3252 {
3253         int scale_w, scale_h;
3254
3255         memset(scaling_info, 0, sizeof(*scaling_info));
3256
3257         /* Source is 16.16 fixed point; ignore the fractional part for now... */
3258         scaling_info->src_rect.x = state->src_x >> 16;
3259         scaling_info->src_rect.y = state->src_y >> 16;
3260
3261         scaling_info->src_rect.width = state->src_w >> 16;
3262         if (scaling_info->src_rect.width == 0)
3263                 return -EINVAL;
3264
3265         scaling_info->src_rect.height = state->src_h >> 16;
3266         if (scaling_info->src_rect.height == 0)
3267                 return -EINVAL;
3268
3269         scaling_info->dst_rect.x = state->crtc_x;
3270         scaling_info->dst_rect.y = state->crtc_y;
3271
3272         if (state->crtc_w == 0)
3273                 return -EINVAL;
3274
3275         scaling_info->dst_rect.width = state->crtc_w;
3276
3277         if (state->crtc_h == 0)
3278                 return -EINVAL;
3279
3280         scaling_info->dst_rect.height = state->crtc_h;
3281
3282         /* DRM doesn't specify clipping on destination output. */
3283         scaling_info->clip_rect = scaling_info->dst_rect;
3284
3285         /* TODO: Validate scaling per-format with DC plane caps */
3286         scale_w = scaling_info->dst_rect.width * 1000 /
3287                   scaling_info->src_rect.width;
3288
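        /* Ratios are in units of 1/1000: DC supports 0.25x (250) to 16x (16000). */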
3289         if (scale_w < 250 || scale_w > 16000)
3290                 return -EINVAL;
3291
3292         scale_h = scaling_info->dst_rect.height * 1000 /
3293                   scaling_info->src_rect.height;
3294
3295         if (scale_h < 250 || scale_h > 16000)
3296                 return -EINVAL;
3297
3298         /*
3299          * The "scaling_quality" can be ignored for now; quality = 0 makes DC
3300          * assume reasonable defaults based on the format.
3301          */
3302
3303         return 0;
3304 }
3305
3306 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3307                        uint64_t *tiling_flags)
3308 {
3309         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3310         int r = amdgpu_bo_reserve(rbo, false);
3311
3312         if (unlikely(r)) {
3313                 /* Don't show error message when returning -ERESTARTSYS */
3314                 if (r != -ERESTARTSYS)
3315                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
3316                 return r;
3317         }
3318
3319         if (tiling_flags)
3320                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3321
3322         amdgpu_bo_unreserve(rbo);
3323
3324         return r;
3325 }
3326
3327 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3328 {
3329         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3330
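        /*
         * The tiling flags store the DCC metadata offset in 256-byte units;
         * an offset of 0 means the surface carries no DCC metadata.
         */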
3331         return offset ? (address + offset * 256) : 0;
3332 }
3333
3334 static int
3335 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3336                           const struct amdgpu_framebuffer *afb,
3337                           const enum surface_pixel_format format,
3338                           const enum dc_rotation_angle rotation,
3339                           const struct plane_size *plane_size,
3340                           const union dc_tiling_info *tiling_info,
3341                           const uint64_t info,
3342                           struct dc_plane_dcc_param *dcc,
3343                           struct dc_plane_address *address)
3344 {
3345         struct dc *dc = adev->dm.dc;
3346         struct dc_dcc_surface_param input;
3347         struct dc_surface_dcc_cap output;
3348         uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3349         uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3350         uint64_t dcc_address;
3351
3352         memset(&input, 0, sizeof(input));
3353         memset(&output, 0, sizeof(output));
3354
3355         if (!offset)
3356                 return 0;
3357
3358         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3359                 return 0;
3360
3361         if (!dc->cap_funcs.get_dcc_compression_cap)
3362                 return -EINVAL;
3363
3364         input.format = format;
3365         input.surface_size.width = plane_size->surface_size.width;
3366         input.surface_size.height = plane_size->surface_size.height;
3367         input.swizzle_mode = tiling_info->gfx9.swizzle;
3368
3369         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3370                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3371         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3372                 input.scan = SCAN_DIRECTION_VERTICAL;
3373
3374         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3375                 return -EINVAL;
3376
3377         if (!output.capable)
3378                 return -EINVAL;
3379
3380         if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3381                 return -EINVAL;
3382
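        /* The DCC_PITCH_MAX field is stored as (pitch - 1), hence the + 1. */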
3383         dcc->enable = 1;
3384         dcc->meta_pitch =
3385                 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3386         dcc->independent_64b_blks = i64b;
3387
3388         dcc_address = get_dcc_address(afb->address, info);
3389         address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3390         address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3391
3392         return 0;
3393 }
3394
3395 static int
3396 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3397                              const struct amdgpu_framebuffer *afb,
3398                              const enum surface_pixel_format format,
3399                              const enum dc_rotation_angle rotation,
3400                              const uint64_t tiling_flags,
3401                              union dc_tiling_info *tiling_info,
3402                              struct plane_size *plane_size,
3403                              struct dc_plane_dcc_param *dcc,
3404                              struct dc_plane_address *address)
3405 {
3406         const struct drm_framebuffer *fb = &afb->base;
3407         int ret;
3408
3409         memset(tiling_info, 0, sizeof(*tiling_info));
3410         memset(plane_size, 0, sizeof(*plane_size));
3411         memset(dcc, 0, sizeof(*dcc));
3412         memset(address, 0, sizeof(*address));
3413
3414         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3415                 plane_size->surface_size.x = 0;
3416                 plane_size->surface_size.y = 0;
3417                 plane_size->surface_size.width = fb->width;
3418                 plane_size->surface_size.height = fb->height;
3419                 plane_size->surface_pitch =
3420                         fb->pitches[0] / fb->format->cpp[0];
3421
3422                 address->type = PLN_ADDR_TYPE_GRAPHICS;
3423                 address->grph.addr.low_part = lower_32_bits(afb->address);
3424                 address->grph.addr.high_part = upper_32_bits(afb->address);
3425         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
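                /*
                 * Semi-planar video formats (e.g. NV12/NV21/P010): the luma
                 * plane starts at afb->address and the interleaved chroma
                 * plane at fb->offsets[1].
                 */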
3426                 uint64_t chroma_addr = afb->address + fb->offsets[1];
3427
3428                 plane_size->surface_size.x = 0;
3429                 plane_size->surface_size.y = 0;
3430                 plane_size->surface_size.width = fb->width;
3431                 plane_size->surface_size.height = fb->height;
3432                 plane_size->surface_pitch =
3433                         fb->pitches[0] / fb->format->cpp[0];
3434
3435                 plane_size->chroma_size.x = 0;
3436                 plane_size->chroma_size.y = 0;
3437                 /* TODO: set these based on surface format */
3438                 plane_size->chroma_size.width = fb->width / 2;
3439                 plane_size->chroma_size.height = fb->height / 2;
3440
3441                 plane_size->chroma_pitch =
3442                         fb->pitches[1] / fb->format->cpp[1];
3443
3444                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3445                 address->video_progressive.luma_addr.low_part =
3446                         lower_32_bits(afb->address);
3447                 address->video_progressive.luma_addr.high_part =
3448                         upper_32_bits(afb->address);
3449                 address->video_progressive.chroma_addr.low_part =
3450                         lower_32_bits(chroma_addr);
3451                 address->video_progressive.chroma_addr.high_part =
3452                         upper_32_bits(chroma_addr);
3453         }
3454
3455         /* Fill GFX8 params */
3456         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3457                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3458
3459                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3460                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3461                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3462                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3463                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3464
3465                 /* XXX fix me for VI */
3466                 tiling_info->gfx8.num_banks = num_banks;
3467                 tiling_info->gfx8.array_mode =
3468                                 DC_ARRAY_2D_TILED_THIN1;
3469                 tiling_info->gfx8.tile_split = tile_split;
3470                 tiling_info->gfx8.bank_width = bankw;
3471                 tiling_info->gfx8.bank_height = bankh;
3472                 tiling_info->gfx8.tile_aspect = mtaspect;
3473                 tiling_info->gfx8.tile_mode =
3474                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3475         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3476                         == DC_ARRAY_1D_TILED_THIN1) {
3477                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3478         }
3479
3480         tiling_info->gfx8.pipe_config =
3481                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3482
3483         if (adev->asic_type == CHIP_VEGA10 ||
3484             adev->asic_type == CHIP_VEGA12 ||
3485             adev->asic_type == CHIP_VEGA20 ||
3486             adev->asic_type == CHIP_NAVI10 ||
3487             adev->asic_type == CHIP_NAVI14 ||
3488             adev->asic_type == CHIP_NAVI12 ||
3489             adev->asic_type == CHIP_RENOIR ||
3490             adev->asic_type == CHIP_RAVEN) {
3491                 /* Fill GFX9 params */
3492                 tiling_info->gfx9.num_pipes =
3493                         adev->gfx.config.gb_addr_config_fields.num_pipes;
3494                 tiling_info->gfx9.num_banks =
3495                         adev->gfx.config.gb_addr_config_fields.num_banks;
3496                 tiling_info->gfx9.pipe_interleave =
3497                         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3498                 tiling_info->gfx9.num_shader_engines =
3499                         adev->gfx.config.gb_addr_config_fields.num_se;
3500                 tiling_info->gfx9.max_compressed_frags =
3501                         adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3502                 tiling_info->gfx9.num_rb_per_se =
3503                         adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3504                 tiling_info->gfx9.swizzle =
3505                         AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3506                 tiling_info->gfx9.shaderEnable = 1;
3507
3508                 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3509                                                 plane_size, tiling_info,
3510                                                 tiling_flags, dcc, address);
3511                 if (ret)
3512                         return ret;
3513         }
3514
3515         return 0;
3516 }
3517
3518 static void
3519 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3520                                bool *per_pixel_alpha, bool *global_alpha,
3521                                int *global_alpha_value)
3522 {
3523         *per_pixel_alpha = false;
3524         *global_alpha = false;
3525         *global_alpha_value = 0xff;
3526
3527         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3528                 return;
3529
3530         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3531                 static const uint32_t alpha_formats[] = {
3532                         DRM_FORMAT_ARGB8888,
3533                         DRM_FORMAT_RGBA8888,
3534                         DRM_FORMAT_ABGR8888,
3535                 };
3536                 uint32_t format = plane_state->fb->format->format;
3537                 unsigned int i;
3538
3539                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3540                         if (format == alpha_formats[i]) {
3541                                 *per_pixel_alpha = true;
3542                                 break;
3543                         }
3544                 }
3545         }
3546
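        /* DRM stores plane alpha as 16-bit; DC takes an 8-bit global alpha. */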
3547         if (plane_state->alpha < 0xffff) {
3548                 *global_alpha = true;
3549                 *global_alpha_value = plane_state->alpha >> 8;
3550         }
3551 }
3552
3553 static int
3554 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3555                             const enum surface_pixel_format format,
3556                             enum dc_color_space *color_space)
3557 {
3558         bool full_range;
3559
3560         *color_space = COLOR_SPACE_SRGB;
3561
3562         /* DRM color properties only affect non-RGB formats. */
3563         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3564                 return 0;
3565
3566         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3567
3568         switch (plane_state->color_encoding) {
3569         case DRM_COLOR_YCBCR_BT601:
3570                 if (full_range)
3571                         *color_space = COLOR_SPACE_YCBCR601;
3572                 else
3573                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3574                 break;
3575
3576         case DRM_COLOR_YCBCR_BT709:
3577                 if (full_range)
3578                         *color_space = COLOR_SPACE_YCBCR709;
3579                 else
3580                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3581                 break;
3582
3583         case DRM_COLOR_YCBCR_BT2020:
3584                 if (full_range)
3585                         *color_space = COLOR_SPACE_2020_YCBCR;
3586                 else
3587                         return -EINVAL;
3588                 break;
3589
3590         default:
3591                 return -EINVAL;
3592         }
3593
3594         return 0;
3595 }
3596
3597 static int
3598 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3599                             const struct drm_plane_state *plane_state,
3600                             const uint64_t tiling_flags,
3601                             struct dc_plane_info *plane_info,
3602                             struct dc_plane_address *address)
3603 {
3604         const struct drm_framebuffer *fb = plane_state->fb;
3605         const struct amdgpu_framebuffer *afb =
3606                 to_amdgpu_framebuffer(plane_state->fb);
3607         struct drm_format_name_buf format_name;
3608         int ret;
3609
3610         memset(plane_info, 0, sizeof(*plane_info));
3611
3612         switch (fb->format->format) {
3613         case DRM_FORMAT_C8:
3614                 plane_info->format =
3615                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3616                 break;
3617         case DRM_FORMAT_RGB565:
3618                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3619                 break;
3620         case DRM_FORMAT_XRGB8888:
3621         case DRM_FORMAT_ARGB8888:
3622                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3623                 break;
3624         case DRM_FORMAT_XRGB2101010:
3625         case DRM_FORMAT_ARGB2101010:
3626                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3627                 break;
3628         case DRM_FORMAT_XBGR2101010:
3629         case DRM_FORMAT_ABGR2101010:
3630                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3631                 break;
3632         case DRM_FORMAT_XBGR8888:
3633         case DRM_FORMAT_ABGR8888:
3634                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3635                 break;
3636         case DRM_FORMAT_NV21:
3637                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3638                 break;
3639         case DRM_FORMAT_NV12:
3640                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3641                 break;
3642         case DRM_FORMAT_P010:
3643                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3644                 break;
3645         default:
3646                 DRM_ERROR(
3647                         "Unsupported screen format %s\n",
3648                         drm_get_format_name(fb->format->format, &format_name));
3649                 return -EINVAL;
3650         }
3651
3652         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3653         case DRM_MODE_ROTATE_0:
3654                 plane_info->rotation = ROTATION_ANGLE_0;
3655                 break;
3656         case DRM_MODE_ROTATE_90:
3657                 plane_info->rotation = ROTATION_ANGLE_90;
3658                 break;
3659         case DRM_MODE_ROTATE_180:
3660                 plane_info->rotation = ROTATION_ANGLE_180;
3661                 break;
3662         case DRM_MODE_ROTATE_270:
3663                 plane_info->rotation = ROTATION_ANGLE_270;
3664                 break;
3665         default:
3666                 plane_info->rotation = ROTATION_ANGLE_0;
3667                 break;
3668         }
3669
3670         plane_info->visible = true;
3671         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3672
3673         plane_info->layer_index = 0;
3674
3675         ret = fill_plane_color_attributes(plane_state, plane_info->format,
3676                                           &plane_info->color_space);
3677         if (ret)
3678                 return ret;
3679
3680         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3681                                            plane_info->rotation, tiling_flags,
3682                                            &plane_info->tiling_info,
3683                                            &plane_info->plane_size,
3684                                            &plane_info->dcc, address);
3685         if (ret)
3686                 return ret;
3687
3688         fill_blending_from_plane_state(
3689                 plane_state, &plane_info->per_pixel_alpha,
3690                 &plane_info->global_alpha, &plane_info->global_alpha_value);
3691
3692         return 0;
3693 }
3694
3695 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3696                                     struct dc_plane_state *dc_plane_state,
3697                                     struct drm_plane_state *plane_state,
3698                                     struct drm_crtc_state *crtc_state)
3699 {
3700         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3701         const struct amdgpu_framebuffer *amdgpu_fb =
3702                 to_amdgpu_framebuffer(plane_state->fb);
3703         struct dc_scaling_info scaling_info;
3704         struct dc_plane_info plane_info;
3705         uint64_t tiling_flags;
3706         int ret;
3707
3708         ret = fill_dc_scaling_info(plane_state, &scaling_info);
3709         if (ret)
3710                 return ret;
3711
3712         dc_plane_state->src_rect = scaling_info.src_rect;
3713         dc_plane_state->dst_rect = scaling_info.dst_rect;
3714         dc_plane_state->clip_rect = scaling_info.clip_rect;
3715         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3716
3717         ret = get_fb_info(amdgpu_fb, &tiling_flags);
3718         if (ret)
3719                 return ret;
3720
3721         ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3722                                           &plane_info,
3723                                           &dc_plane_state->address);
3724         if (ret)
3725                 return ret;
3726
3727         dc_plane_state->format = plane_info.format;
3728         dc_plane_state->color_space = plane_info.color_space;
3730         dc_plane_state->plane_size = plane_info.plane_size;
3731         dc_plane_state->rotation = plane_info.rotation;
3732         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3733         dc_plane_state->stereo_format = plane_info.stereo_format;
3734         dc_plane_state->tiling_info = plane_info.tiling_info;
3735         dc_plane_state->visible = plane_info.visible;
3736         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3737         dc_plane_state->global_alpha = plane_info.global_alpha;
3738         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3739         dc_plane_state->dcc = plane_info.dcc;
3740         dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
3741
3742         /*
3743          * Always set input transfer function, since plane state is refreshed
3744          * every time.
3745          */
3746         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3747         if (ret)
3748                 return ret;
3749
3750         return 0;
3751 }
3752
3753 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3754                                            const struct dm_connector_state *dm_state,
3755                                            struct dc_stream_state *stream)
3756 {
3757         enum amdgpu_rmx_type rmx_type;
3758
3759         struct rect src = { 0 }; /* viewport in composition space */
3760         struct rect dst = { 0 }; /* stream addressable area */
3761
3762         /* no mode. nothing to be done */
3763         if (!mode)
3764                 return;
3765
3766         /* Full screen scaling by default */
3767         src.width = mode->hdisplay;
3768         src.height = mode->vdisplay;
3769         dst.width = stream->timing.h_addressable;
3770         dst.height = stream->timing.v_addressable;
3771
3772         if (dm_state) {
3773                 rmx_type = dm_state->scaling;
3774                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3775                         if (src.width * dst.height <
3776                                         src.height * dst.width) {
3777                                 /* height needs less upscaling/more downscaling */
3778                                 dst.width = src.width *
3779                                                 dst.height / src.height;
3780                         } else {
3781                                 /* width needs less upscaling/more downscaling */
3782                                 dst.height = src.height *
3783                                                 dst.width / src.width;
3784                         }
3785                 } else if (rmx_type == RMX_CENTER) {
3786                         dst = src;
3787                 }
3788
3789                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3790                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3791
3792                 if (dm_state->underscan_enable) {
3793                         dst.x += dm_state->underscan_hborder / 2;
3794                         dst.y += dm_state->underscan_vborder / 2;
3795                         dst.width -= dm_state->underscan_hborder;
3796                         dst.height -= dm_state->underscan_vborder;
3797                 }
3798         }
3799
3800         stream->src = src;
3801         stream->dst = dst;
3802
3803         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
3804                         dst.x, dst.y, dst.width, dst.height);
3805
3806 }
3807
3808 static enum dc_color_depth
3809 convert_color_depth_from_display_info(const struct drm_connector *connector,
3810                                       const struct drm_connector_state *state,
3811                                       bool is_y420)
3812 {
3813         uint8_t bpc;
3814
3815         if (is_y420) {
3816                 bpc = 8;
3817
3818                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3819                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3820                         bpc = 16;
3821                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3822                         bpc = 12;
3823                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3824                         bpc = 10;
3825         } else {
3826                 bpc = (uint8_t)connector->display_info.bpc;
3827                 /* Assume 8 bpc by default if no bpc is specified. */
3828                 bpc = bpc ? bpc : 8;
3829         }
3830
3831         if (!state)
3832                 state = connector->state;
3833
3834         if (state) {
3835                 /*
3836                  * Cap display bpc based on the user requested value.
3837                  *
3838                  * The value for state->max_bpc may not be correctly updated
3839                  * depending on when the connector gets added to the state
3840                  * or if this was called outside of atomic check, so it
3841                  * can't be used directly.
3842                  */
3843                 bpc = min(bpc, state->max_requested_bpc);
3844
3845                 /* Round down to the nearest even number. */
3846                 bpc = bpc - (bpc & 1);
3847         }
3848
3849         switch (bpc) {
3850         case 0:
3851                 /*
3852                  * Temporary workaround: DRM doesn't parse color depth for
3853                  * EDID revisions before 1.4.
3854                  * TODO: Fix edid parsing
3855                  */
3856                 return COLOR_DEPTH_888;
3857         case 6:
3858                 return COLOR_DEPTH_666;
3859         case 8:
3860                 return COLOR_DEPTH_888;
3861         case 10:
3862                 return COLOR_DEPTH_101010;
3863         case 12:
3864                 return COLOR_DEPTH_121212;
3865         case 14:
3866                 return COLOR_DEPTH_141414;
3867         case 16:
3868                 return COLOR_DEPTH_161616;
3869         default:
3870                 return COLOR_DEPTH_UNDEFINED;
3871         }
3872 }
3873
3874 static enum dc_aspect_ratio
3875 get_aspect_ratio(const struct drm_display_mode *mode_in)
3876 {
3877         /* 1-1 mapping, since both enums follow the HDMI spec. */
3878         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3879 }
3880
3881 static enum dc_color_space
3882 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3883 {
3884         enum dc_color_space color_space = COLOR_SPACE_SRGB;
3885
3886         switch (dc_crtc_timing->pixel_encoding) {
3887         case PIXEL_ENCODING_YCBCR422:
3888         case PIXEL_ENCODING_YCBCR444:
3889         case PIXEL_ENCODING_YCBCR420:
3890         {
3891                 /*
3892                  * 27.03 MHz (270300 in 100 Hz units) is the separation point
3893                  * between HDTV and SDTV according to the HDMI spec; we use
3894                  * YCbCr709 and YCbCr601 respectively.
3895                  */
3896                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3897                         if (dc_crtc_timing->flags.Y_ONLY)
3898                                 color_space =
3899                                         COLOR_SPACE_YCBCR709_LIMITED;
3900                         else
3901                                 color_space = COLOR_SPACE_YCBCR709;
3902                 } else {
3903                         if (dc_crtc_timing->flags.Y_ONLY)
3904                                 color_space =
3905                                         COLOR_SPACE_YCBCR601_LIMITED;
3906                         else
3907                                 color_space = COLOR_SPACE_YCBCR601;
3908                 }
3909
3910         }
3911         break;
3912         case PIXEL_ENCODING_RGB:
3913                 color_space = COLOR_SPACE_SRGB;
3914                 break;
3915
3916         default:
3917                 WARN_ON(1);
3918                 break;
3919         }
3920
3921         return color_space;
3922 }
3923
3924 static bool adjust_colour_depth_from_display_info(
3925         struct dc_crtc_timing *timing_out,
3926         const struct drm_display_info *info)
3927 {
3928         enum dc_color_depth depth = timing_out->display_color_depth;
3929         int normalized_clk;

3930         do {
3931                 normalized_clk = timing_out->pix_clk_100hz / 10;
3932                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3933                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3934                         normalized_clk /= 2;
3935                 /* Adjust the pixel clock per the HDMI spec based on colour depth. */
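                /* e.g. 10 bpc carries 30 bits per pixel vs. 24 bits at 8 bpc,
                 * so the required TMDS clock scales by 30/24. */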
3936                 switch (depth) {
3937                 case COLOR_DEPTH_888:
3938                         break;
3939                 case COLOR_DEPTH_101010:
3940                         normalized_clk = (normalized_clk * 30) / 24;
3941                         break;
3942                 case COLOR_DEPTH_121212:
3943                         normalized_clk = (normalized_clk * 36) / 24;
3944                         break;
3945                 case COLOR_DEPTH_161616:
3946                         normalized_clk = (normalized_clk * 48) / 24;
3947                         break;
3948                 default:
3949                         /* The above depths are the only ones valid for HDMI. */
3950                         return false;
3951                 }
3952                 if (normalized_clk <= info->max_tmds_clock) {
3953                         timing_out->display_color_depth = depth;
3954                         return true;
3955                 }
3956         } while (--depth > COLOR_DEPTH_666);
3957         return false;
3958 }
3959
3960 static void fill_stream_properties_from_drm_display_mode(
3961         struct dc_stream_state *stream,
3962         const struct drm_display_mode *mode_in,
3963         const struct drm_connector *connector,
3964         const struct drm_connector_state *connector_state,
3965         const struct dc_stream_state *old_stream)
3966 {
3967         struct dc_crtc_timing *timing_out = &stream->timing;
3968         const struct drm_display_info *info = &connector->display_info;
3969         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3970         struct hdmi_vendor_infoframe hv_frame;
3971         struct hdmi_avi_infoframe avi_frame;
3972
3973         memset(&hv_frame, 0, sizeof(hv_frame));
3974         memset(&avi_frame, 0, sizeof(avi_frame));
3975
3976         timing_out->h_border_left = 0;
3977         timing_out->h_border_right = 0;
3978         timing_out->v_border_top = 0;
3979         timing_out->v_border_bottom = 0;
3980         /* TODO: un-hardcode */
3981         if (drm_mode_is_420_only(info, mode_in)
3982                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3983                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3984         else if (drm_mode_is_420_also(info, mode_in)
3985                         && aconnector->force_yuv420_output)
3986                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3987         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3988                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3989                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3990         else
3991                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3992
3993         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3994         timing_out->display_color_depth = convert_color_depth_from_display_info(
3995                 connector, connector_state,
3996                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
3997         timing_out->scan_type = SCANNING_TYPE_NODATA;
3998         timing_out->hdmi_vic = 0;
3999
4000         if (old_stream) {
4001                 timing_out->vic = old_stream->timing.vic;
4002                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4003                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4004         } else {
4005                 timing_out->vic = drm_match_cea_mode(mode_in);
4006                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4007                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4008                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4009                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4010         }
4011
4012         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4013                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4014                 timing_out->vic = avi_frame.video_code;
4015                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4016                 timing_out->hdmi_vic = hv_frame.vic;
4017         }
4018
4019         timing_out->h_addressable = mode_in->crtc_hdisplay;
4020         timing_out->h_total = mode_in->crtc_htotal;
4021         timing_out->h_sync_width =
4022                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4023         timing_out->h_front_porch =
4024                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4025         timing_out->v_total = mode_in->crtc_vtotal;
4026         timing_out->v_addressable = mode_in->crtc_vdisplay;
4027         timing_out->v_front_porch =
4028                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4029         timing_out->v_sync_width =
4030                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
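        /* crtc_clock is in kHz; DC expects units of 100 Hz, hence the * 10. */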
4031         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4032         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4033
4034         stream->output_color_space = get_output_color_space(timing_out);
4035
4036         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4037         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4038         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4039                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4040                     drm_mode_is_420_also(info, mode_in) &&
4041                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4042                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4043                         adjust_colour_depth_from_display_info(timing_out, info);
4044                 }
4045         }
4046 }
4047
4048 static void fill_audio_info(struct audio_info *audio_info,
4049                             const struct drm_connector *drm_connector,
4050                             const struct dc_sink *dc_sink)
4051 {
4052         int i = 0;
4053         int cea_revision = 0;
4054         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4055
4056         audio_info->manufacture_id = edid_caps->manufacturer_id;
4057         audio_info->product_id = edid_caps->product_id;
4058
4059         cea_revision = drm_connector->display_info.cea_rev;
4060
4061         strscpy(audio_info->display_name,
4062                 edid_caps->display_name,
4063                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4064
4065         if (cea_revision >= 3) {
4066                 audio_info->mode_count = edid_caps->audio_mode_count;
4067
4068                 for (i = 0; i < audio_info->mode_count; ++i) {
4069                         audio_info->modes[i].format_code =
4070                                         (enum audio_format_code)
4071                                         (edid_caps->audio_modes[i].format_code);
4072                         audio_info->modes[i].channel_count =
4073                                         edid_caps->audio_modes[i].channel_count;
4074                         audio_info->modes[i].sample_rates.all =
4075                                         edid_caps->audio_modes[i].sample_rate;
4076                         audio_info->modes[i].sample_size =
4077                                         edid_caps->audio_modes[i].sample_size;
4078                 }
4079         }
4080
4081         audio_info->flags.all = edid_caps->speaker_flags;
4082
4083         /* TODO: We only check progressive mode; check interlaced mode too. */
4084         if (drm_connector->latency_present[0]) {
4085                 audio_info->video_latency = drm_connector->video_latency[0];
4086                 audio_info->audio_latency = drm_connector->audio_latency[0];
4087         }
4088
4089         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4090
4091 }
4092
4093 static void
4094 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4095                                       struct drm_display_mode *dst_mode)
4096 {
4097         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4098         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4099         dst_mode->crtc_clock = src_mode->crtc_clock;
4100         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4101         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4102         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4103         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4104         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4105         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4106         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4107         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4108         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4109         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4110         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4111 }
4112
4113 static void
4114 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4115                                         const struct drm_display_mode *native_mode,
4116                                         bool scale_enabled)
4117 {
4118         if (scale_enabled) {
4119                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4120         } else if (native_mode->clock == drm_mode->clock &&
4121                         native_mode->htotal == drm_mode->htotal &&
4122                         native_mode->vtotal == drm_mode->vtotal) {
4123                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4124         } else {
4125                 /* Neither scaling nor an amdgpu-inserted mode: nothing to patch. */
4126         }
4127 }
4128
4129 static struct dc_sink *
4130 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4131 {
4132         struct dc_sink_init_data sink_init_data = { 0 };
4133         struct dc_sink *sink = NULL;

4134         sink_init_data.link = aconnector->dc_link;
4135         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4136
4137         sink = dc_sink_create(&sink_init_data);
4138         if (!sink) {
4139                 DRM_ERROR("Failed to create sink!\n");
4140                 return NULL;
4141         }
4142         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4143
4144         return sink;
4145 }
4146
4147 static void set_multisync_trigger_params(
4148                 struct dc_stream_state *stream)
4149 {
4150         if (stream->triggered_crtc_reset.enabled) {
4151                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4152                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4153         }
4154 }
4155
4156 static void set_master_stream(struct dc_stream_state *stream_set[],
4157                               int stream_count)
4158 {
4159         int j, highest_rfr = 0, master_stream = 0;
4160
4161         for (j = 0;  j < stream_count; j++) {
4162                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4163                         int refresh_rate = 0;
4164
4165                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4166                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
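                        /*
                         * Worked example (sketch): 1080p60 CEA timing has
                         * pix_clk_100hz = 1485000 (148.5 MHz), h_total = 2200
                         * and v_total = 1125, so (1485000 * 100) /
                         * (2200 * 1125) = 60 Hz.
                         */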
4167                         if (refresh_rate > highest_rfr) {
4168                                 highest_rfr = refresh_rate;
4169                                 master_stream = j;
4170                         }
4171                 }
4172         }
4173         for (j = 0;  j < stream_count; j++) {
4174                 if (stream_set[j])
4175                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4176         }
4177 }
4178
4179 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4180 {
4181         int i = 0;
4182
4183         if (context->stream_count < 2)
4184                 return;
4185         for (i = 0; i < context->stream_count ; i++) {
4186                 if (!context->streams[i])
4187                         continue;
4188                 /*
4189                  * TODO: Add a function to read the AMD VSDB bits and set
4190                  * the crtc_sync_master.multi_sync_enabled flag.
4191                  * For now it is set to false.
4192                  */
4193                 set_multisync_trigger_params(context->streams[i]);
4194         }
4195         set_master_stream(context->streams, context->stream_count);
4196 }
4197
4198 static struct dc_stream_state *
4199 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4200                        const struct drm_display_mode *drm_mode,
4201                        const struct dm_connector_state *dm_state,
4202                        const struct dc_stream_state *old_stream)
4203 {
4204         struct drm_display_mode *preferred_mode = NULL;
4205         struct drm_connector *drm_connector;
4206         const struct drm_connector_state *con_state =
4207                 dm_state ? &dm_state->base : NULL;
4208         struct dc_stream_state *stream = NULL;
4209         struct drm_display_mode mode = *drm_mode;
4210         bool native_mode_found = false;
4211         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4212         int mode_refresh;
4213         int preferred_refresh = 0;
4214 #if defined(CONFIG_DRM_AMD_DC_DCN)
4215         struct dsc_dec_dpcd_caps dsc_caps;
4216 #endif
4217         uint32_t link_bandwidth_kbps;
4218
4219         struct dc_sink *sink = NULL;
4220         if (aconnector == NULL) {
4221                 DRM_ERROR("aconnector is NULL!\n");
4222                 return stream;
4223         }
4224
4225         drm_connector = &aconnector->base;
4226
4227         if (!aconnector->dc_sink) {
4228                 sink = create_fake_sink(aconnector);
4229                 if (!sink)
4230                         return stream;
4231         } else {
4232                 sink = aconnector->dc_sink;
4233                 dc_sink_retain(sink);
4234         }
4235
4236         stream = dc_create_stream_for_sink(sink);
4237
4238         if (stream == NULL) {
4239                 DRM_ERROR("Failed to create stream for sink!\n");
4240                 goto finish;
4241         }
4242
4243         stream->dm_stream_context = aconnector;
4244
4245         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4246                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4247
4248         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4249                 /* Search for preferred mode */
4250                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4251                         native_mode_found = true;
4252                         break;
4253                 }
4254         }
4255         if (!native_mode_found)
4256                 preferred_mode = list_first_entry_or_null(
4257                                 &aconnector->base.modes,
4258                                 struct drm_display_mode,
4259                                 head);
4260
4261         mode_refresh = drm_mode_vrefresh(&mode);
4262
4263         if (preferred_mode == NULL) {
4264                 /*
4265                  * This may not be an error: one use case is when we have no
4266                  * usermode calls to reset and set mode upon hotplug. In this
4267                  * case, we call set mode ourselves to restore the previous mode,
4268                  * and the mode list may not be filled in yet at that point.
4269                  */
4270                 DRM_DEBUG_DRIVER("No preferred mode found\n");
4271         } else {
4272                 decide_crtc_timing_for_drm_display_mode(
4273                                 &mode, preferred_mode,
4274                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4275                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4276         }
4277
4278         if (!dm_state)
4279                 drm_mode_set_crtcinfo(&mode, 0);
4280
4281         /*
4282          * If scaling is enabled and the refresh rate didn't change,
4283          * we copy the VIC and polarities of the old timings.
4284          */
4285         if (!scale || mode_refresh != preferred_refresh)
4286                 fill_stream_properties_from_drm_display_mode(stream,
4287                         &mode, &aconnector->base, con_state, NULL);
4288         else
4289                 fill_stream_properties_from_drm_display_mode(stream,
4290                         &mode, &aconnector->base, con_state, old_stream);
4291
4292         stream->timing.flags.DSC = 0;
4293
4294         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4295 #if defined(CONFIG_DRM_AMD_DC_DCN)
4296                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4297                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4298                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4299                                       &dsc_caps);
4300 #endif
4301                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4302                                                              dc_link_get_link_cap(aconnector->dc_link));
4303
4304 #if defined(CONFIG_DRM_AMD_DC_DCN)
4305                 if (dsc_caps.is_dsc_supported)
4306                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4307                                                   &dsc_caps,
4308                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4309                                                   link_bandwidth_kbps,
4310                                                   &stream->timing,
4311                                                   &stream->timing.dsc_cfg))
4312                                 stream->timing.flags.DSC = 1;
4313 #endif
4314         }
4315
4316         update_stream_scaling_settings(&mode, dm_state, stream);
4317
4318         fill_audio_info(
4319                 &stream->audio_info,
4320                 drm_connector,
4321                 sink);
4322
4323         update_stream_signal(stream, sink);
4324
4325         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4326                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4327         if (stream->link->psr_feature_enabled) {
4328                 struct dc *core_dc = stream->link->ctx->dc;
4329
4330                 if (dc_is_dmcu_initialized(core_dc)) {
4331                         struct dmcu *dmcu = core_dc->res_pool->dmcu;
4332
4333                         stream->psr_version = dmcu->dmcu_version.psr_version;
4334
4335                         /*
4336                          * Decide whether the stream supports VSC SDP
4337                          * colorimetry before building the VSC info packet.
4338                          */
4339                         stream->use_vsc_sdp_for_colorimetry = false;
4340                         if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4341                                 stream->use_vsc_sdp_for_colorimetry =
4342                                         aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4343                         } else {
4344                                 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4345                                         stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4346                                         stream->use_vsc_sdp_for_colorimetry = true;
4347                                 }
4348                         }
4349                         mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4350                 }
4351         }
4352 finish:
4353         dc_sink_release(sink);
4354
4355         return stream;
4356 }
4357
4358 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4359 {
4360         drm_crtc_cleanup(crtc);
4361         kfree(crtc);
4362 }
4363
4364 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4365                                   struct drm_crtc_state *state)
4366 {
4367         struct dm_crtc_state *cur = to_dm_crtc_state(state);
4368
4369         /* TODO: Destroy dc_stream objects once the stream object is flattened */
4370         if (cur->stream)
4371                 dc_stream_release(cur->stream);
4372
4373
4374         __drm_atomic_helper_crtc_destroy_state(state);
4375
4376
4377         kfree(state);
4378 }
4379
4380 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4381 {
4382         struct dm_crtc_state *state;
4383
4384         if (crtc->state)
4385                 dm_crtc_destroy_state(crtc, crtc->state);
4386
4387         state = kzalloc(sizeof(*state), GFP_KERNEL);
4388         if (WARN_ON(!state))
4389                 return;
4390
4391         crtc->state = &state->base;
4392         crtc->state->crtc = crtc;
4393
4394 }
4395
4396 static struct drm_crtc_state *
4397 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4398 {
4399         struct dm_crtc_state *state, *cur;
4400
4401         if (WARN_ON(!crtc->state))
4402                 return NULL;
4403
4404         cur = to_dm_crtc_state(crtc->state);
4405
4406         state = kzalloc(sizeof(*state), GFP_KERNEL);
4407         if (!state)
4408                 return NULL;
4409
4410         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4411
4412         if (cur->stream) {
4413                 state->stream = cur->stream;
4414                 dc_stream_retain(state->stream);
4415         }
4416
4417         state->active_planes = cur->active_planes;
4418         state->interrupts_enabled = cur->interrupts_enabled;
4419         state->vrr_params = cur->vrr_params;
4420         state->vrr_infopacket = cur->vrr_infopacket;
4421         state->abm_level = cur->abm_level;
4422         state->vrr_supported = cur->vrr_supported;
4423         state->freesync_config = cur->freesync_config;
4424         state->crc_src = cur->crc_src;
4425         state->cm_has_degamma = cur->cm_has_degamma;
4426         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4427
4428         /* TODO: Duplicate dc_stream once the stream object is flattened */
4429
4430         return &state->base;
4431 }
4432
4433 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4434 {
4435         enum dc_irq_source irq_source;
4436         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4437         struct amdgpu_device *adev = crtc->dev->dev_private;
4438         int rc;
4439
4440         /* Do not set vupdate for DCN hardware */
4441         if (adev->family > AMDGPU_FAMILY_AI)
4442                 return 0;
4443
4444         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4445
4446         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4447
4448         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4449                          acrtc->crtc_id, enable ? "en" : "dis", rc);
4450         return rc;
4451 }
4452
4453 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4454 {
4455         enum dc_irq_source irq_source;
4456         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4457         struct amdgpu_device *adev = crtc->dev->dev_private;
4458         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4459         int rc = 0;
4460
4461         if (enable) {
4462                 /* vblank irq on -> Only need vupdate irq in vrr mode */
4463                 if (amdgpu_dm_vrr_active(acrtc_state))
4464                         rc = dm_set_vupdate_irq(crtc, true);
4465         } else {
4466                 /* vblank irq off -> vupdate irq off */
4467                 rc = dm_set_vupdate_irq(crtc, false);
4468         }
4469
4470         if (rc)
4471                 return rc;
4472
4473         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4474         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4475 }
4476
4477 static int dm_enable_vblank(struct drm_crtc *crtc)
4478 {
4479         return dm_set_vblank(crtc, true);
4480 }
4481
4482 static void dm_disable_vblank(struct drm_crtc *crtc)
4483 {
4484         dm_set_vblank(crtc, false);
4485 }
4486
4487 /* Only the options currently available to the driver are implemented */
4488 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4489         .reset = dm_crtc_reset_state,
4490         .destroy = amdgpu_dm_crtc_destroy,
4491         .gamma_set = drm_atomic_helper_legacy_gamma_set,
4492         .set_config = drm_atomic_helper_set_config,
4493         .page_flip = drm_atomic_helper_page_flip,
4494         .atomic_duplicate_state = dm_crtc_duplicate_state,
4495         .atomic_destroy_state = dm_crtc_destroy_state,
4496         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4497         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4498         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4499         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4500         .enable_vblank = dm_enable_vblank,
4501         .disable_vblank = dm_disable_vblank,
4502         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4503 };
4504
4505 static enum drm_connector_status
4506 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4507 {
4508         bool connected;
4509         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4510
4511         /*
4512          * Notes:
4513          * 1. This interface is NOT called in context of HPD irq.
4514          * 2. This interface *is called* in context of user-mode ioctl, which
4515          * makes it a bad place for *any* MST-related activity.
4516          */
4517
4518         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4519             !aconnector->fake_enable)
4520                 connected = (aconnector->dc_sink != NULL);
4521         else
4522                 connected = (aconnector->base.force == DRM_FORCE_ON);
4523
4524         return (connected ? connector_status_connected :
4525                         connector_status_disconnected);
4526 }
4527
4528 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4529                                             struct drm_connector_state *connector_state,
4530                                             struct drm_property *property,
4531                                             uint64_t val)
4532 {
4533         struct drm_device *dev = connector->dev;
4534         struct amdgpu_device *adev = dev->dev_private;
4535         struct dm_connector_state *dm_old_state =
4536                 to_dm_connector_state(connector->state);
4537         struct dm_connector_state *dm_new_state =
4538                 to_dm_connector_state(connector_state);
4539
4540         int ret = -EINVAL;
4541
4542         if (property == dev->mode_config.scaling_mode_property) {
4543                 enum amdgpu_rmx_type rmx_type;
4544
4545                 switch (val) {
4546                 case DRM_MODE_SCALE_CENTER:
4547                         rmx_type = RMX_CENTER;
4548                         break;
4549                 case DRM_MODE_SCALE_ASPECT:
4550                         rmx_type = RMX_ASPECT;
4551                         break;
4552                 case DRM_MODE_SCALE_FULLSCREEN:
4553                         rmx_type = RMX_FULL;
4554                         break;
4555                 case DRM_MODE_SCALE_NONE:
4556                 default:
4557                         rmx_type = RMX_OFF;
4558                         break;
4559                 }
4560
4561                 if (dm_old_state->scaling == rmx_type)
4562                         return 0;
4563
4564                 dm_new_state->scaling = rmx_type;
4565                 ret = 0;
4566         } else if (property == adev->mode_info.underscan_hborder_property) {
4567                 dm_new_state->underscan_hborder = val;
4568                 ret = 0;
4569         } else if (property == adev->mode_info.underscan_vborder_property) {
4570                 dm_new_state->underscan_vborder = val;
4571                 ret = 0;
4572         } else if (property == adev->mode_info.underscan_property) {
4573                 dm_new_state->underscan_enable = val;
4574                 ret = 0;
4575         } else if (property == adev->mode_info.abm_level_property) {
4576                 dm_new_state->abm_level = val;
4577                 ret = 0;
4578         }
4579
4580         return ret;
4581 }
4582
4583 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4584                                             const struct drm_connector_state *state,
4585                                             struct drm_property *property,
4586                                             uint64_t *val)
4587 {
4588         struct drm_device *dev = connector->dev;
4589         struct amdgpu_device *adev = dev->dev_private;
4590         struct dm_connector_state *dm_state =
4591                 to_dm_connector_state(state);
4592         int ret = -EINVAL;
4593
4594         if (property == dev->mode_config.scaling_mode_property) {
4595                 switch (dm_state->scaling) {
4596                 case RMX_CENTER:
4597                         *val = DRM_MODE_SCALE_CENTER;
4598                         break;
4599                 case RMX_ASPECT:
4600                         *val = DRM_MODE_SCALE_ASPECT;
4601                         break;
4602                 case RMX_FULL:
4603                         *val = DRM_MODE_SCALE_FULLSCREEN;
4604                         break;
4605                 case RMX_OFF:
4606                 default:
4607                         *val = DRM_MODE_SCALE_NONE;
4608                         break;
4609                 }
4610                 ret = 0;
4611         } else if (property == adev->mode_info.underscan_hborder_property) {
4612                 *val = dm_state->underscan_hborder;
4613                 ret = 0;
4614         } else if (property == adev->mode_info.underscan_vborder_property) {
4615                 *val = dm_state->underscan_vborder;
4616                 ret = 0;
4617         } else if (property == adev->mode_info.underscan_property) {
4618                 *val = dm_state->underscan_enable;
4619                 ret = 0;
4620         } else if (property == adev->mode_info.abm_level_property) {
4621                 *val = dm_state->abm_level;
4622                 ret = 0;
4623         }
4624
4625         return ret;
4626 }
4627
4628 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4629 {
4630         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4631
4632         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4633 }
4634
4635 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4636 {
4637         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4638         const struct dc_link *link = aconnector->dc_link;
4639         struct amdgpu_device *adev = connector->dev->dev_private;
4640         struct amdgpu_display_manager *dm = &adev->dm;
4641
4642 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4643         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4644
4645         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4646             link->type != dc_connection_none &&
4647             dm->backlight_dev) {
4648                 backlight_device_unregister(dm->backlight_dev);
4649                 dm->backlight_dev = NULL;
4650         }
4651 #endif
4652
4653         if (aconnector->dc_em_sink)
4654                 dc_sink_release(aconnector->dc_em_sink);
4655         aconnector->dc_em_sink = NULL;
4656         if (aconnector->dc_sink)
4657                 dc_sink_release(aconnector->dc_sink);
4658         aconnector->dc_sink = NULL;
4659
4660         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4661         drm_connector_unregister(connector);
4662         drm_connector_cleanup(connector);
4663         if (aconnector->i2c) {
4664                 i2c_del_adapter(&aconnector->i2c->base);
4665                 kfree(aconnector->i2c);
4666         }
4667
4668         kfree(connector);
4669 }
4670
4671 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4672 {
4673         struct dm_connector_state *state =
4674                 to_dm_connector_state(connector->state);
4675
4676         if (connector->state)
4677                 __drm_atomic_helper_connector_destroy_state(connector->state);
4678
4679         kfree(state);
4680
4681         state = kzalloc(sizeof(*state), GFP_KERNEL);
4682
4683         if (state) {
4684                 state->scaling = RMX_OFF;
4685                 state->underscan_enable = false;
4686                 state->underscan_hborder = 0;
4687                 state->underscan_vborder = 0;
4688                 state->base.max_requested_bpc = 8;
4689                 state->vcpi_slots = 0;
4690                 state->pbn = 0;
4691                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4692                         state->abm_level = amdgpu_dm_abm_level;
4693
4694                 __drm_atomic_helper_connector_reset(connector, &state->base);
4695         }
4696 }
4697
4698 struct drm_connector_state *
4699 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4700 {
4701         struct dm_connector_state *state =
4702                 to_dm_connector_state(connector->state);
4703
4704         struct dm_connector_state *new_state =
4705                         kmemdup(state, sizeof(*state), GFP_KERNEL);
4706
4707         if (!new_state)
4708                 return NULL;
4709
4710         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4711
4712         new_state->freesync_capable = state->freesync_capable;
4713         new_state->abm_level = state->abm_level;
4714         new_state->scaling = state->scaling;
4715         new_state->underscan_enable = state->underscan_enable;
4716         new_state->underscan_hborder = state->underscan_hborder;
4717         new_state->underscan_vborder = state->underscan_vborder;
4718         new_state->vcpi_slots = state->vcpi_slots;
4719         new_state->pbn = state->pbn;
4720         return &new_state->base;
4721 }
4722
4723 static int
4724 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4725 {
4726 #if defined(CONFIG_DEBUG_FS)
4727         struct amdgpu_dm_connector *amdgpu_dm_connector =
4728                 to_amdgpu_dm_connector(connector);
4729
4730         connector_debugfs_init(amdgpu_dm_connector);
4731 #endif
4732
4733         return 0;
4734 }
4735
4736 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4737         .reset = amdgpu_dm_connector_funcs_reset,
4738         .detect = amdgpu_dm_connector_detect,
4739         .fill_modes = drm_helper_probe_single_connector_modes,
4740         .destroy = amdgpu_dm_connector_destroy,
4741         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4742         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4743         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4744         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4745         .late_register = amdgpu_dm_connector_late_register,
4746         .early_unregister = amdgpu_dm_connector_unregister
4747 };
4748
4749 static int get_modes(struct drm_connector *connector)
4750 {
4751         return amdgpu_dm_connector_get_modes(connector);
4752 }
4753
4754 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4755 {
4756         struct dc_sink_init_data init_params = {
4757                         .link = aconnector->dc_link,
4758                         .sink_signal = SIGNAL_TYPE_VIRTUAL
4759         };
4760         struct edid *edid;
4761
4762         if (!aconnector->base.edid_blob_ptr) {
4763                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4764                                 aconnector->base.name);
4765
4766                 aconnector->base.force = DRM_FORCE_OFF;
4767                 aconnector->base.override_edid = false;
4768                 return;
4769         }
4770
4771         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4772
4773         aconnector->edid = edid;
4774
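        /*
         * An EDID is a 128-byte base block plus one 128-byte block per
         * extension, hence the (extensions + 1) * EDID_LENGTH size below.
         */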
4775         aconnector->dc_em_sink = dc_link_add_remote_sink(
4776                 aconnector->dc_link,
4777                 (uint8_t *)edid,
4778                 (edid->extensions + 1) * EDID_LENGTH,
4779                 &init_params);
4780
4781         if (aconnector->base.force == DRM_FORCE_ON) {
4782                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4783                 aconnector->dc_link->local_sink :
4784                 aconnector->dc_em_sink;
4785                 dc_sink_retain(aconnector->dc_sink);
4786         }
4787 }
4788
4789 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4790 {
4791         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4792
4793         /*
4794          * In case of a headless boot with force on for a DP managed connector,
4795          * those settings have to be != 0 to get an initial modeset.
4796          */
4797         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4798                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4799                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4800         }
4801
4802
4803         aconnector->base.override_edid = true;
4804         create_eml_sink(aconnector);
4805 }
4806
4807 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4808                                    struct drm_display_mode *mode)
4809 {
4810         int result = MODE_ERROR;
4811         struct dc_sink *dc_sink;
4812         struct amdgpu_device *adev = connector->dev->dev_private;
4813         /* TODO: Unhardcode stream count */
4814         struct dc_stream_state *stream;
4815         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4816         enum dc_status dc_result = DC_OK;
4817
4818         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4819                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4820                 return result;
4821
4822         /*
4823          * Only run this the first time mode_valid is called, to initialize
4824          * EDID management.
4825          */
4826         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4827                 !aconnector->dc_em_sink)
4828                 handle_edid_mgmt(aconnector);
4829
4830         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4831
4832         if (dc_sink == NULL) {
4833                 DRM_ERROR("dc_sink is NULL!\n");
4834                 goto fail;
4835         }
4836
4837         stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4838         if (stream == NULL) {
4839                 DRM_ERROR("Failed to create stream for sink!\n");
4840                 goto fail;
4841         }
4842
4843         dc_result = dc_validate_stream(adev->dm.dc, stream);
4844
4845         if (dc_result == DC_OK)
4846                 result = MODE_OK;
4847         else
4848                 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4849                               mode->hdisplay,
4850                               mode->vdisplay,
4851                               mode->clock,
4852                               dc_result);
4853
4854         dc_stream_release(stream);
4855
4856 fail:
4857         /* TODO: error handling */
4858         return result;
4859 }
4860
4861 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4862                                 struct dc_info_packet *out)
4863 {
4864         struct hdmi_drm_infoframe frame;
4865         unsigned char buf[30]; /* 26 + 4 */
4866         ssize_t len;
4867         int ret, i;
4868
4869         memset(out, 0, sizeof(*out));
4870
4871         if (!state->hdr_output_metadata)
4872                 return 0;
4873
4874         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4875         if (ret)
4876                 return ret;
4877
4878         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4879         if (len < 0)
4880                 return (int)len;
4881
4882         /* Static metadata is a fixed 26 bytes + 4 byte header. */
4883         if (len != 30)
4884                 return -EINVAL;
4885
4886         /* Prepare the infopacket for DC. */
4887         switch (state->connector->connector_type) {
4888         case DRM_MODE_CONNECTOR_HDMIA:
4889                 out->hb0 = 0x87; /* type */
4890                 out->hb1 = 0x01; /* version */
4891                 out->hb2 = 0x1A; /* length */
4892                 out->sb[0] = buf[3]; /* checksum */
4893                 i = 1;
4894                 break;
4895
4896         case DRM_MODE_CONNECTOR_DisplayPort:
4897         case DRM_MODE_CONNECTOR_eDP:
4898                 out->hb0 = 0x00; /* sdp id, zero */
4899                 out->hb1 = 0x87; /* type */
4900                 out->hb2 = 0x1D; /* payload len - 1 */
4901                 out->hb3 = (0x13 << 2); /* sdp version */
4902                 out->sb[0] = 0x01; /* version */
4903                 out->sb[1] = 0x1A; /* length */
4904                 i = 2;
4905                 break;
4906
4907         default:
4908                 return -EINVAL;
4909         }
4910
4911         memcpy(&out->sb[i], &buf[4], 26);
4912         out->valid = true;
4913
4914         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4915                        sizeof(out->sb), false);
4916
4917         return 0;
4918 }
4919
4920 static bool
4921 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4922                           const struct drm_connector_state *new_state)
4923 {
4924         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4925         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4926
4927         if (old_blob != new_blob) {
4928                 if (old_blob && new_blob &&
4929                     old_blob->length == new_blob->length)
4930                         return memcmp(old_blob->data, new_blob->data,
4931                                       old_blob->length);
4932
4933                 return true;
4934         }
4935
4936         return false;
4937 }
4938
4939 static int
4940 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4941                                  struct drm_atomic_state *state)
4942 {
4943         struct drm_connector_state *new_con_state =
4944                 drm_atomic_get_new_connector_state(state, conn);
4945         struct drm_connector_state *old_con_state =
4946                 drm_atomic_get_old_connector_state(state, conn);
4947         struct drm_crtc *crtc = new_con_state->crtc;
4948         struct drm_crtc_state *new_crtc_state;
4949         int ret;
4950
4951         if (!crtc)
4952                 return 0;
4953
4954         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4955                 struct dc_info_packet hdr_infopacket;
4956
4957                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4958                 if (ret)
4959                         return ret;
4960
4961                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4962                 if (IS_ERR(new_crtc_state))
4963                         return PTR_ERR(new_crtc_state);
4964
4965                 /*
4966                  * DC considers the stream backends changed if the
4967                  * static metadata changes. Forcing the modeset also
4968                  * gives a simple way for userspace to switch from
4969                  * 8bpc to 10bpc when setting the metadata to enter
4970                  * or exit HDR.
4971                  *
4972                  * Changing the static metadata after it's been
4973                  * set is permissible, however. So only force a
4974                  * modeset if we're entering or exiting HDR.
4975                  */
4976                 new_crtc_state->mode_changed =
4977                         !old_con_state->hdr_output_metadata ||
4978                         !new_con_state->hdr_output_metadata;
4979         }
4980
4981         return 0;
4982 }
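
/*
 * Usage sketch (hypothetical userspace side, assuming libdrm): entering HDR
 * means attaching an HDR_OUTPUT_METADATA blob to the connector, e.g.
 *
 *   struct hdr_output_metadata meta = { ... };
 *   drmModeCreatePropertyBlob(fd, &meta, sizeof(meta), &blob_id);
 *   drmModeAtomicAddProperty(req, connector_id, hdr_prop_id, blob_id);
 *
 * which trips the !old/!new metadata check above and forces a full modeset.
 */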
4983
4984 static const struct drm_connector_helper_funcs
4985 amdgpu_dm_connector_helper_funcs = {
4986         /*
4987          * If hotplugging a second, bigger display in FB console mode, bigger
4988          * modes will be filtered out by drm_mode_validate_size() and go
4989          * missing after the user starts lightdm. So renew the modes list in
4990          * the get_modes callback, not just return the mode count.
4991          */
4992         .get_modes = get_modes,
4993         .mode_valid = amdgpu_dm_connector_mode_valid,
4994         .atomic_check = amdgpu_dm_connector_atomic_check,
4995 };
4996
4997 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4998 {
4999 }
5000
5001 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5002 {
5003         struct drm_device *dev = new_crtc_state->crtc->dev;
5004         struct drm_plane *plane;
5005
5006         drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5007                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5008                         return true;
5009         }
5010
5011         return false;
5012 }
5013
5014 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5015 {
5016         struct drm_atomic_state *state = new_crtc_state->state;
5017         struct drm_plane *plane;
5018         int num_active = 0;
5019
5020         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5021                 struct drm_plane_state *new_plane_state;
5022
5023                 /* Cursor planes are "fake". */
5024                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5025                         continue;
5026
5027                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5028
5029                 if (!new_plane_state) {
5030                         /*
5031                          * The plane is enabled on the CRTC and hasn't changed
5032                          * state. This means that it previously passed
5033                          * validation and is therefore enabled.
5034                          */
5035                         num_active += 1;
5036                         continue;
5037                 }
5038
5039                 /* We need a framebuffer to be considered enabled. */
5040                 num_active += (new_plane_state->fb != NULL);
5041         }
5042
5043         return num_active;
5044 }
5045
5046 /*
5047  * Sets whether interrupts should be enabled on a specific CRTC.
5048  * We require that the stream be enabled and that there exist active
5049  * DC planes on the stream.
5050  */
5051 static void
5052 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5053                                struct drm_crtc_state *new_crtc_state)
5054 {
5055         struct dm_crtc_state *dm_new_crtc_state =
5056                 to_dm_crtc_state(new_crtc_state);
5057
5058         dm_new_crtc_state->active_planes = 0;
5059         dm_new_crtc_state->interrupts_enabled = false;
5060
5061         if (!dm_new_crtc_state->stream)
5062                 return;
5063
5064         dm_new_crtc_state->active_planes =
5065                 count_crtc_active_planes(new_crtc_state);
5066
5067         dm_new_crtc_state->interrupts_enabled =
5068                 dm_new_crtc_state->active_planes > 0;
5069 }
5070
5071 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5072                                        struct drm_crtc_state *state)
5073 {
5074         struct amdgpu_device *adev = crtc->dev->dev_private;
5075         struct dc *dc = adev->dm.dc;
5076         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5077         int ret = -EINVAL;
5078
5079         /*
5080          * Update interrupt state for the CRTC. This needs to happen whenever
5081          * the CRTC has changed or whenever any of its planes have changed.
5082          * Atomic check satisfies both of these requirements since the CRTC
5083          * is added to the state by DRM during drm_atomic_helper_check_planes.
5084          */
5085         dm_update_crtc_interrupt_state(crtc, state);
5086
5087         if (unlikely(!dm_crtc_state->stream &&
5088                      modeset_required(state, NULL, dm_crtc_state->stream))) {
5089                 WARN_ON(1);
5090                 return ret;
5091         }
5092
5093         /* In some use cases, like reset, no stream is attached */
5094         if (!dm_crtc_state->stream)
5095                 return 0;
5096
5097         /*
5098          * We want at least one hardware plane enabled to use
5099          * the stream with a cursor enabled.
5100          */
5101         if (state->enable && state->active &&
5102             does_crtc_have_active_cursor(state) &&
5103             dm_crtc_state->active_planes == 0)
5104                 return -EINVAL;
5105
5106         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5107                 return 0;
5108
5109         return ret;
5110 }
5111
5112 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5113                                       const struct drm_display_mode *mode,
5114                                       struct drm_display_mode *adjusted_mode)
5115 {
5116         return true;
5117 }
5118
5119 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5120         .disable = dm_crtc_helper_disable,
5121         .atomic_check = dm_crtc_helper_atomic_check,
5122         .mode_fixup = dm_crtc_helper_mode_fixup,
5123         .get_scanout_position = amdgpu_crtc_get_scanout_position,
5124 };
5125
5126 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5127 {
5128
5129 }
5130
5131 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5132 {
5133         switch (display_color_depth) {
5134         case COLOR_DEPTH_666:
5135                 return 6;
5136         case COLOR_DEPTH_888:
5137                 return 8;
5138         case COLOR_DEPTH_101010:
5139                 return 10;
5140         case COLOR_DEPTH_121212:
5141                 return 12;
5142         case COLOR_DEPTH_141414:
5143                 return 14;
5144         case COLOR_DEPTH_161616:
5145                 return 16;
5146         default:
5147                 break;
5148         }
5149         return 0;
5150 }
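
/*
 * Usage note: dm_encoder_helper_atomic_check() below multiplies the returned
 * bpc by 3 (one component each for R, G and B) to obtain the uncompressed
 * bpp that feeds the PBN calculation.
 */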
5151
5152 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5153                                           struct drm_crtc_state *crtc_state,
5154                                           struct drm_connector_state *conn_state)
5155 {
5156         struct drm_atomic_state *state = crtc_state->state;
5157         struct drm_connector *connector = conn_state->connector;
5158         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5159         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5160         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5161         struct drm_dp_mst_topology_mgr *mst_mgr;
5162         struct drm_dp_mst_port *mst_port;
5163         enum dc_color_depth color_depth;
5164         int clock, bpp = 0;
5165         bool is_y420 = false;
5166
5167         if (!aconnector->port || !aconnector->dc_sink)
5168                 return 0;
5169
5170         mst_port = aconnector->port;
5171         mst_mgr = &aconnector->mst_port->mst_mgr;
5172
5173         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5174                 return 0;
5175
5176         if (!state->duplicated) {
5177                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5178                                 aconnector->force_yuv420_output;
5179                 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5180                                                                     is_y420);
5181                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5182                 clock = adjusted_mode->clock;
5183                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5184         }
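        /*
         * Worked example (sketch): 1080p60 has a 148500 kHz pixel clock; at
         * 24 bpp that is 148500 * 24 / 8 = 445500 kB/s. One PBN unit is
         * 54/64 MB/s, so with the spec's 0.6% margin drm_dp_calc_pbn_mode()
         * yields roughly 532 PBN, converted into VCPI slots below.
         */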
5185         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5186                                                                            mst_mgr,
5187                                                                            mst_port,
5188                                                                            dm_new_connector_state->pbn,
5189                                                                            0);
5190         if (dm_new_connector_state->vcpi_slots < 0) {
5191                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5192                 return dm_new_connector_state->vcpi_slots;
5193         }
5194         return 0;
5195 }
5196
5197 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5198         .disable = dm_encoder_helper_disable,
5199         .atomic_check = dm_encoder_helper_atomic_check
5200 };
5201
5202 #if defined(CONFIG_DRM_AMD_DC_DCN)
5203 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5204                                             struct dc_state *dc_state)
5205 {
5206         struct dc_stream_state *stream = NULL;
5207         struct drm_connector *connector;
5208         struct drm_connector_state *new_con_state, *old_con_state;
5209         struct amdgpu_dm_connector *aconnector;
5210         struct dm_connector_state *dm_conn_state;
5211         int i, j, clock, bpp;
5212         int vcpi, pbn_div, pbn = 0;
5213
5214         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5215
5216                 aconnector = to_amdgpu_dm_connector(connector);
5217
5218                 if (!aconnector->port)
5219                         continue;
5220
5221                 if (!new_con_state || !new_con_state->crtc)
5222                         continue;
5223
5224                 dm_conn_state = to_dm_connector_state(new_con_state);
5225
5226                 for (j = 0; j < dc_state->stream_count; j++) {
5227                         stream = dc_state->streams[j];
5228                         if (!stream)
5229                                 continue;
5230
5231                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5232                                 break;
5233
5234                         stream = NULL;
5235                 }
5236
5237                 if (!stream)
5238                         continue;
5239
5240                 if (stream->timing.flags.DSC != 1) {
5241                         drm_dp_mst_atomic_enable_dsc(state,
5242                                                      aconnector->port,
5243                                                      dm_conn_state->pbn,
5244                                                      0,
5245                                                      false);
5246                         continue;
5247                 }
5248
5249                 pbn_div = dm_mst_get_pbn_divider(stream->link);
5250                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5251                 clock = stream->timing.pix_clk_100hz / 10;
5252                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
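                /*
                 * Note (assumption about DC internals): dsc_cfg.bits_per_pixel
                 * is stored in 1/16th-of-a-bit units, which is why the DSC
                 * variant of drm_dp_calc_pbn_mode() (third argument true) is
                 * used here rather than the plain bpp path above.
                 */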
5253                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5254                                                     aconnector->port,
5255                                                     pbn, pbn_div,
5256                                                     true);
5257                 if (vcpi < 0)
5258                         return vcpi;
5259
5260                 dm_conn_state->pbn = pbn;
5261                 dm_conn_state->vcpi_slots = vcpi;
5262         }
5263         return 0;
5264 }
5265 #endif
5266
5267 static void dm_drm_plane_reset(struct drm_plane *plane)
5268 {
5269         struct dm_plane_state *amdgpu_state = NULL;
5270
5271         if (plane->state)
5272                 plane->funcs->atomic_destroy_state(plane, plane->state);
5273
5274         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5275         WARN_ON(amdgpu_state == NULL);
5276
5277         if (amdgpu_state)
5278                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5279 }
5280
5281 static struct drm_plane_state *
5282 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5283 {
5284         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5285
5286         old_dm_plane_state = to_dm_plane_state(plane->state);
5287         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5288         if (!dm_plane_state)
5289                 return NULL;
5290
5291         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5292
5293         if (old_dm_plane_state->dc_state) {
5294                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5295                 dc_plane_state_retain(dm_plane_state->dc_state);
5296         }
5297
5298         return &dm_plane_state->base;
5299 }
5300
5301 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5302                                 struct drm_plane_state *state)
5303 {
5304         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5305
5306         if (dm_plane_state->dc_state)
5307                 dc_plane_state_release(dm_plane_state->dc_state);
5308
5309         drm_atomic_helper_plane_destroy_state(plane, state);
5310 }
5311
5312 static const struct drm_plane_funcs dm_plane_funcs = {
5313         .update_plane   = drm_atomic_helper_update_plane,
5314         .disable_plane  = drm_atomic_helper_disable_plane,
5315         .destroy        = drm_primary_helper_destroy,
5316         .reset = dm_drm_plane_reset,
5317         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5318         .atomic_destroy_state = dm_drm_plane_destroy_state,
5319 };
5320
5321 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5322                                       struct drm_plane_state *new_state)
5323 {
5324         struct amdgpu_framebuffer *afb;
5325         struct drm_gem_object *obj;
5326         struct amdgpu_device *adev;
5327         struct amdgpu_bo *rbo;
5328         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5329         struct list_head list;
5330         struct ttm_validate_buffer tv;
5331         struct ww_acquire_ctx ticket;
5332         uint64_t tiling_flags;
5333         uint32_t domain;
5334         int r;
5335
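        /*
         * Flow summary: reserve the BO through TTM, pin it in a
         * display-capable domain, bind it into GART so the buffer has a GPU
         * address for scanout, then fetch the tiling flags while the
         * reservation is still held.
         */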
5336         dm_plane_state_old = to_dm_plane_state(plane->state);
5337         dm_plane_state_new = to_dm_plane_state(new_state);
5338
5339         if (!new_state->fb) {
5340                 DRM_DEBUG_DRIVER("No FB bound\n");
5341                 return 0;
5342         }
5343
5344         afb = to_amdgpu_framebuffer(new_state->fb);
5345         obj = new_state->fb->obj[0];
5346         rbo = gem_to_amdgpu_bo(obj);
5347         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5348         INIT_LIST_HEAD(&list);
5349
5350         tv.bo = &rbo->tbo;
5351         tv.num_shared = 1;
5352         list_add(&tv.head, &list);
5353
5354         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5355         if (r) {
5356                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5357                 return r;
5358         }
5359
5360         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5361                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5362         else
5363                 domain = AMDGPU_GEM_DOMAIN_VRAM;
5364
5365         r = amdgpu_bo_pin(rbo, domain);
5366         if (unlikely(r != 0)) {
5367                 if (r != -ERESTARTSYS)
5368                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5369                 ttm_eu_backoff_reservation(&ticket, &list);
5370                 return r;
5371         }
5372
5373         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5374         if (unlikely(r != 0)) {
5375                 amdgpu_bo_unpin(rbo);
5376                 ttm_eu_backoff_reservation(&ticket, &list);
5377                 DRM_ERROR("%p bind failed\n", rbo);
5378                 return r;
5379         }
5380
5381         amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5382
5383         ttm_eu_backoff_reservation(&ticket, &list);
5384
5385         afb->address = amdgpu_bo_gpu_offset(rbo);
5386
5387         amdgpu_bo_ref(rbo);
5388
5389         if (dm_plane_state_new->dc_state &&
5390                         dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5391                 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5392
5393                 fill_plane_buffer_attributes(
5394                         adev, afb, plane_state->format, plane_state->rotation,
5395                         tiling_flags, &plane_state->tiling_info,
5396                         &plane_state->plane_size, &plane_state->dcc,
5397                         &plane_state->address);
5398         }
5399
5400         return 0;
5401 }
5402
5403 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5404                                        struct drm_plane_state *old_state)
5405 {
5406         struct amdgpu_bo *rbo;
5407         int r;
5408
5409         if (!old_state->fb)
5410                 return;
5411
5412         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5413         r = amdgpu_bo_reserve(rbo, false);
5414         if (unlikely(r)) {
5415                 DRM_ERROR("failed to reserve rbo before unpin\n");
5416                 return;
5417         }
5418
5419         amdgpu_bo_unpin(rbo);
5420         amdgpu_bo_unreserve(rbo);
5421         amdgpu_bo_unref(&rbo);
5422 }
5423
5424 static int dm_plane_atomic_check(struct drm_plane *plane,
5425                                  struct drm_plane_state *state)
5426 {
5427         struct amdgpu_device *adev = plane->dev->dev_private;
5428         struct dc *dc = adev->dm.dc;
5429         struct dm_plane_state *dm_plane_state;
5430         struct dc_scaling_info scaling_info;
5431         int ret;
5432
5433         dm_plane_state = to_dm_plane_state(state);
5434
5435         if (!dm_plane_state->dc_state)
5436                 return 0;
5437
5438         ret = fill_dc_scaling_info(state, &scaling_info);
5439         if (ret)
5440                 return ret;
5441
5442         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5443                 return 0;
5444
5445         return -EINVAL;
5446 }
5447
5448 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5449                                        struct drm_plane_state *new_plane_state)
5450 {
5451         /* Only support async updates on cursor planes. */
5452         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5453                 return -EINVAL;
5454
5455         return 0;
5456 }
5457
5458 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5459                                          struct drm_plane_state *new_state)
5460 {
5461         struct drm_plane_state *old_state =
5462                 drm_atomic_get_old_plane_state(new_state->state, plane);
5463
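        /*
         * Async updates bypass the normal atomic state swap, so carry the
         * new framebuffer and coordinates over to the committed state by
         * hand before reprogramming the cursor.
         */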
5464         swap(plane->state->fb, new_state->fb);
5465
5466         plane->state->src_x = new_state->src_x;
5467         plane->state->src_y = new_state->src_y;
5468         plane->state->src_w = new_state->src_w;
5469         plane->state->src_h = new_state->src_h;
5470         plane->state->crtc_x = new_state->crtc_x;
5471         plane->state->crtc_y = new_state->crtc_y;
5472         plane->state->crtc_w = new_state->crtc_w;
5473         plane->state->crtc_h = new_state->crtc_h;
5474
5475         handle_cursor_update(plane, old_state);
5476 }
5477
5478 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5479         .prepare_fb = dm_plane_helper_prepare_fb,
5480         .cleanup_fb = dm_plane_helper_cleanup_fb,
5481         .atomic_check = dm_plane_atomic_check,
5482         .atomic_async_check = dm_plane_atomic_async_check,
5483         .atomic_async_update = dm_plane_atomic_async_update
5484 };
5485
5486 /*
5487  * TODO: these are currently initialized to rgb formats only.
5488  * For future use cases we should either initialize them dynamically based on
5489  * plane capabilities, or initialize this array to all formats, so internal drm
5490  * check will succeed, and let DC implement proper check
5491  */
5492 static const uint32_t rgb_formats[] = {
5493         DRM_FORMAT_XRGB8888,
5494         DRM_FORMAT_ARGB8888,
5495         DRM_FORMAT_RGBA8888,
5496         DRM_FORMAT_XRGB2101010,
5497         DRM_FORMAT_XBGR2101010,
5498         DRM_FORMAT_ARGB2101010,
5499         DRM_FORMAT_ABGR2101010,
5500         DRM_FORMAT_XBGR8888,
5501         DRM_FORMAT_ABGR8888,
5502         DRM_FORMAT_RGB565,
5503 };
5504
5505 static const uint32_t overlay_formats[] = {
5506         DRM_FORMAT_XRGB8888,
5507         DRM_FORMAT_ARGB8888,
5508         DRM_FORMAT_RGBA8888,
5509         DRM_FORMAT_XBGR8888,
5510         DRM_FORMAT_ABGR8888,
5511         DRM_FORMAT_RGB565
5512 };
5513
5514 static const u32 cursor_formats[] = {
5515         DRM_FORMAT_ARGB8888
5516 };
5517
5518 static int get_plane_formats(const struct drm_plane *plane,
5519                              const struct dc_plane_cap *plane_cap,
5520                              uint32_t *formats, int max_formats)
5521 {
5522         int i, num_formats = 0;
5523
5524         /*
5525          * TODO: Query support for each group of formats directly from
5526          * DC plane caps. This will require adding more formats to the
5527          * caps list.
5528          */
5529
5530         switch (plane->type) {
5531         case DRM_PLANE_TYPE_PRIMARY:
5532                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5533                         if (num_formats >= max_formats)
5534                                 break;
5535
5536                         formats[num_formats++] = rgb_formats[i];
5537                 }
5538
5539                 if (plane_cap && plane_cap->pixel_format_support.nv12)
5540                         formats[num_formats++] = DRM_FORMAT_NV12;
5541                 if (plane_cap && plane_cap->pixel_format_support.p010)
5542                         formats[num_formats++] = DRM_FORMAT_P010;
5543                 break;
5544
5545         case DRM_PLANE_TYPE_OVERLAY:
5546                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5547                         if (num_formats >= max_formats)
5548                                 break;
5549
5550                         formats[num_formats++] = overlay_formats[i];
5551                 }
5552                 break;
5553
5554         case DRM_PLANE_TYPE_CURSOR:
5555                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5556                         if (num_formats >= max_formats)
5557                                 break;
5558
5559                         formats[num_formats++] = cursor_formats[i];
5560                 }
5561                 break;
5562         }
5563
5564         return num_formats;
5565 }
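
/*
 * A caps-driven variant of the TODO above might look like the sketch
 * below (illustrative only: it assumes a per-format capability bit,
 * pixel_format_support.argb8888, is reported for the RGB entries).
 */
#if 0
static int get_plane_formats_from_caps(const struct dc_plane_cap *plane_cap,
                                       uint32_t *formats, int max_formats)
{
        int num_formats = 0;

        /* Gate the base RGB format on its capability bit. */
        if (plane_cap->pixel_format_support.argb8888 &&
            num_formats < max_formats)
                formats[num_formats++] = DRM_FORMAT_ARGB8888;

        /* Video formats already have individual capability bits today. */
        if (plane_cap->pixel_format_support.nv12 && num_formats < max_formats)
                formats[num_formats++] = DRM_FORMAT_NV12;
        if (plane_cap->pixel_format_support.p010 && num_formats < max_formats)
                formats[num_formats++] = DRM_FORMAT_P010;

        return num_formats;
}
#endif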
5566
5567 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5568                                 struct drm_plane *plane,
5569                                 unsigned long possible_crtcs,
5570                                 const struct dc_plane_cap *plane_cap)
5571 {
5572         uint32_t formats[32];
5573         int num_formats;
5574         int res = -EPERM;
5575
5576         num_formats = get_plane_formats(plane, plane_cap, formats,
5577                                         ARRAY_SIZE(formats));
5578
5579         res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5580                                        &dm_plane_funcs, formats, num_formats,
5581                                        NULL, plane->type, NULL);
5582         if (res)
5583                 return res;
5584
5585         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5586             plane_cap && plane_cap->per_pixel_alpha) {
5587                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5588                                           BIT(DRM_MODE_BLEND_PREMULTI);
5589
5590                 drm_plane_create_alpha_property(plane);
5591                 drm_plane_create_blend_mode_property(plane, blend_caps);
5592         }
5593
5594         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5595             plane_cap &&
5596             (plane_cap->pixel_format_support.nv12 ||
5597              plane_cap->pixel_format_support.p010)) {
5598                 /* This only affects YUV formats. */
5599                 drm_plane_create_color_properties(
5600                         plane,
5601                         BIT(DRM_COLOR_YCBCR_BT601) |
5602                         BIT(DRM_COLOR_YCBCR_BT709) |
5603                         BIT(DRM_COLOR_YCBCR_BT2020),
5604                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5605                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5606                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5607         }
5608
5609         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5610
5611         /* Create (reset) the plane state */
5612         if (plane->funcs->reset)
5613                 plane->funcs->reset(plane);
5614
5615         return 0;
5616 }
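
/*
 * Hypothetical usage (a sketch; the real call sites live elsewhere in
 * this file, during mode-config initialization): allocate a plane, set
 * its type, and hand it to amdgpu_dm_plane_init() together with the DC
 * caps reported for it. crtc_index and plane_cap are assumed in scope.
 */
#if 0
        struct drm_plane *plane;

        plane = kzalloc(sizeof(*plane), GFP_KERNEL);
        if (!plane)
                return -ENOMEM;

        plane->type = DRM_PLANE_TYPE_PRIMARY;
        if (amdgpu_dm_plane_init(dm, plane, 1 << crtc_index, plane_cap)) {
                kfree(plane);
                return -EINVAL;
        }
#endif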
5617
5618 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5619                                struct drm_plane *plane,
5620                                uint32_t crtc_index)
5621 {
5622         struct amdgpu_crtc *acrtc = NULL;
5623         struct drm_plane *cursor_plane;
5624
5625         int res = -ENOMEM;
5626
5627         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5628         if (!cursor_plane)
5629                 goto fail;
5630
5631         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5632         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5633         if (res)
                goto fail;
5634         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5635         if (!acrtc)
5636                 goto fail;
5637
5638         res = drm_crtc_init_with_planes(
5639                         dm->ddev,
5640                         &acrtc->base,
5641                         plane,
5642                         cursor_plane,
5643                         &amdgpu_dm_crtc_funcs, NULL);
5644
5645         if (res)
5646                 goto fail;
5647
5648         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5649
5650         /* Create (reset) the CRTC state */
5651         if (acrtc->base.funcs->reset)
5652                 acrtc->base.funcs->reset(&acrtc->base);
5653
5654         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5655         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5656
5657         acrtc->crtc_id = crtc_index;
5658         acrtc->base.enabled = false;
5659         acrtc->otg_inst = -1;
5660
5661         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5662         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5663                                    true, MAX_COLOR_LUT_ENTRIES);
5664         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5665
5666         return 0;
5667
5668 fail:
5669         kfree(acrtc);
5670         kfree(cursor_plane);
5671         return res;
5672 }
5673
5675 static int to_drm_connector_type(enum signal_type st)
5676 {
5677         switch (st) {
5678         case SIGNAL_TYPE_HDMI_TYPE_A:
5679                 return DRM_MODE_CONNECTOR_HDMIA;
5680         case SIGNAL_TYPE_EDP:
5681                 return DRM_MODE_CONNECTOR_eDP;
5682         case SIGNAL_TYPE_LVDS:
5683                 return DRM_MODE_CONNECTOR_LVDS;
5684         case SIGNAL_TYPE_RGB:
5685                 return DRM_MODE_CONNECTOR_VGA;
5686         case SIGNAL_TYPE_DISPLAY_PORT:
5687         case SIGNAL_TYPE_DISPLAY_PORT_MST:
5688                 return DRM_MODE_CONNECTOR_DisplayPort;
5689         case SIGNAL_TYPE_DVI_DUAL_LINK:
5690         case SIGNAL_TYPE_DVI_SINGLE_LINK:
5691                 return DRM_MODE_CONNECTOR_DVID;
5692         case SIGNAL_TYPE_VIRTUAL:
5693                 return DRM_MODE_CONNECTOR_VIRTUAL;
5694
5695         default:
5696                 return DRM_MODE_CONNECTOR_Unknown;
5697         }
5698 }
5699
5700 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5701 {
5702         struct drm_encoder *encoder;
5703
5704         /* There is only one encoder per connector */
5705         drm_connector_for_each_possible_encoder(connector, encoder)
5706                 return encoder;
5707
5708         return NULL;
5709 }
5710
5711 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5712 {
5713         struct drm_encoder *encoder;
5714         struct amdgpu_encoder *amdgpu_encoder;
5715
5716         encoder = amdgpu_dm_connector_to_encoder(connector);
5717
5718         if (encoder == NULL)
5719                 return;
5720
5721         amdgpu_encoder = to_amdgpu_encoder(encoder);
5722
5723         amdgpu_encoder->native_mode.clock = 0;
5724
5725         if (!list_empty(&connector->probed_modes)) {
5726                 struct drm_display_mode *preferred_mode = NULL;
5727
5728                 list_for_each_entry(preferred_mode,
5729                                     &connector->probed_modes,
5730                                     head) {
5731                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5732                                 amdgpu_encoder->native_mode = *preferred_mode;
5733
5734                         break;
5735                 }
5736
5737         }
5738 }
5739
5740 static struct drm_display_mode *
5741 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5742                              char *name,
5743                              int hdisplay, int vdisplay)
5744 {
5745         struct drm_device *dev = encoder->dev;
5746         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5747         struct drm_display_mode *mode = NULL;
5748         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5749
5750         mode = drm_mode_duplicate(dev, native_mode);
5751
5752         if (mode == NULL)
5753                 return NULL;
5754
5755         mode->hdisplay = hdisplay;
5756         mode->vdisplay = vdisplay;
5757         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5758         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5759
5760         return mode;
5761
5762 }
5763
5764 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5765                                                  struct drm_connector *connector)
5766 {
5767         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5768         struct drm_display_mode *mode = NULL;
5769         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5770         struct amdgpu_dm_connector *amdgpu_dm_connector =
5771                                 to_amdgpu_dm_connector(connector);
5772         int i;
5773         int n;
5774         struct mode_size {
5775                 char name[DRM_DISPLAY_MODE_LEN];
5776                 int w;
5777                 int h;
5778         } common_modes[] = {
5779                 {  "640x480",  640,  480},
5780                 {  "800x600",  800,  600},
5781                 { "1024x768", 1024,  768},
5782                 { "1280x720", 1280,  720},
5783                 { "1280x800", 1280,  800},
5784                 {"1280x1024", 1280, 1024},
5785                 { "1440x900", 1440,  900},
5786                 {"1680x1050", 1680, 1050},
5787                 {"1600x1200", 1600, 1200},
5788                 {"1920x1080", 1920, 1080},
5789                 {"1920x1200", 1920, 1200}
5790         };
5791
5792         n = ARRAY_SIZE(common_modes);
5793
5794         for (i = 0; i < n; i++) {
5795                 struct drm_display_mode *curmode = NULL;
5796                 bool mode_existed = false;
5797
5798                 if (common_modes[i].w > native_mode->hdisplay ||
5799                     common_modes[i].h > native_mode->vdisplay ||
5800                    (common_modes[i].w == native_mode->hdisplay &&
5801                     common_modes[i].h == native_mode->vdisplay))
5802                         continue;
5803
5804                 list_for_each_entry(curmode, &connector->probed_modes, head) {
5805                         if (common_modes[i].w == curmode->hdisplay &&
5806                             common_modes[i].h == curmode->vdisplay) {
5807                                 mode_existed = true;
5808                                 break;
5809                         }
5810                 }
5811
5812                 if (mode_existed)
5813                         continue;
5814
5815                 mode = amdgpu_dm_create_common_mode(encoder,
5816                                 common_modes[i].name, common_modes[i].w,
5817                                 common_modes[i].h);
                if (!mode)
                        continue;
5818                 drm_mode_probed_add(connector, mode);
5819                 amdgpu_dm_connector->num_modes++;
5820         }
5821 }
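
/*
 * Worked example (hypothetical panel): with a 1920x1080 native mode and
 * only 1920x1080 in probed_modes, the loop above adds the first eight
 * entries (640x480 through 1680x1050), skips 1600x1200 and 1920x1200
 * (taller than the native mode), and skips 1920x1080 itself (equal to
 * the native mode).
 */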
5822
5823 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5824                                               struct edid *edid)
5825 {
5826         struct amdgpu_dm_connector *amdgpu_dm_connector =
5827                         to_amdgpu_dm_connector(connector);
5828
5829         if (edid) {
5830                 /* empty probed_modes */
5831                 INIT_LIST_HEAD(&connector->probed_modes);
5832                 amdgpu_dm_connector->num_modes =
5833                                 drm_add_edid_modes(connector, edid);
5834
5835                 /* Sort the probed modes before calling
5836                  * amdgpu_dm_get_native_mode(), since an EDID can have
5837                  * more than one preferred mode. Modes later in the
5838                  * probed mode list can carry a higher preferred
5839                  * resolution: for example, 3840x2160 in the base EDID
5840                  * preferred timing and 4096x2160 in a later DID
5841                  * extension block.
5842                  */
5843                 drm_mode_sort(&connector->probed_modes);
5844                 amdgpu_dm_get_native_mode(connector);
5845         } else {
5846                 amdgpu_dm_connector->num_modes = 0;
5847         }
5848 }
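
/*
 * Example of the ordering above (hypothetical EDID): with a preferred
 * 3840x2160 in the base block and a preferred 4096x2160 in a DID
 * extension block, drm_mode_sort() places 4096x2160 first, so the
 * single-entry check in amdgpu_dm_get_native_mode() picks the larger
 * preferred timing as the native mode.
 */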
5849
5850 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5851 {
5852         struct amdgpu_dm_connector *amdgpu_dm_connector =
5853                         to_amdgpu_dm_connector(connector);
5854         struct drm_encoder *encoder;
5855         struct edid *edid = amdgpu_dm_connector->edid;
5856
5857         encoder = amdgpu_dm_connector_to_encoder(connector);
5858
5859         if (!edid || !drm_edid_is_valid(edid)) {
5860                 amdgpu_dm_connector->num_modes =
5861                                 drm_add_modes_noedid(connector, 640, 480);
5862         } else {
5863                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5864                 amdgpu_dm_connector_add_common_modes(encoder, connector);
5865         }
5866         amdgpu_dm_fbc_init(connector);
5867
5868         return amdgpu_dm_connector->num_modes;
5869 }
5870
5871 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5872                                      struct amdgpu_dm_connector *aconnector,
5873                                      int connector_type,
5874                                      struct dc_link *link,
5875                                      int link_index)
5876 {
5877         struct amdgpu_device *adev = dm->ddev->dev_private;
5878
5879         /*
5880          * Some of the properties below require access to state, like bpc.
5881          * Allocate some default initial connector state with our reset helper.
5882          */
5883         if (aconnector->base.funcs->reset)
5884                 aconnector->base.funcs->reset(&aconnector->base);
5885
5886         aconnector->connector_id = link_index;
5887         aconnector->dc_link = link;
5888         aconnector->base.interlace_allowed = false;
5889         aconnector->base.doublescan_allowed = false;
5890         aconnector->base.stereo_allowed = false;
5891         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5892         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5893         aconnector->audio_inst = -1;
5894         mutex_init(&aconnector->hpd_lock);
5895
5896         /*
5897          * Configure HPD hot plug support. The connector->polled default
5898          * value is 0, which means HPD hot plug is not supported.
5899          */
5900         switch (connector_type) {
5901         case DRM_MODE_CONNECTOR_HDMIA:
5902                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5903                 aconnector->base.ycbcr_420_allowed =
5904                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5905                 break;
5906         case DRM_MODE_CONNECTOR_DisplayPort:
5907                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5908                 aconnector->base.ycbcr_420_allowed =
5909                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
5910                 break;
5911         case DRM_MODE_CONNECTOR_DVID:
5912                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5913                 break;
5914         default:
5915                 break;
5916         }
5917
5918         drm_object_attach_property(&aconnector->base.base,
5919                                 dm->ddev->mode_config.scaling_mode_property,
5920                                 DRM_MODE_SCALE_NONE);
5921
5922         drm_object_attach_property(&aconnector->base.base,
5923                                 adev->mode_info.underscan_property,
5924                                 UNDERSCAN_OFF);
5925         drm_object_attach_property(&aconnector->base.base,
5926                                 adev->mode_info.underscan_hborder_property,
5927                                 0);
5928         drm_object_attach_property(&aconnector->base.base,
5929                                 adev->mode_info.underscan_vborder_property,
5930                                 0);
5931
5932         if (!aconnector->mst_port)
5933                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5934
5935         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5936         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5937         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5938
5939         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5940             dc_is_dmcu_initialized(adev->dm.dc)) {
5941                 drm_object_attach_property(&aconnector->base.base,
5942                                 adev->mode_info.abm_level_property, 0);
5943         }
5944
5945         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5946             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5947             connector_type == DRM_MODE_CONNECTOR_eDP) {
5948                 drm_object_attach_property(
5949                         &aconnector->base.base,
5950                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
5951
5952                 if (!aconnector->mst_port)
5953                         drm_connector_attach_vrr_capable_property(&aconnector->base);
5954
5955 #ifdef CONFIG_DRM_AMD_DC_HDCP
5956                 if (adev->dm.hdcp_workqueue)
5957                         drm_connector_attach_content_protection_property(&aconnector->base, true);
5958 #endif
5959         }
5960 }
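
/*
 * Sketch (hypothetical snippet): the properties attached above surface
 * in the driver's connector state, where later consumers such as
 * is_scaling_state_different() read them back:
 */
#if 0
        struct dm_connector_state *dm_state =
                to_dm_connector_state(aconnector->base.state);

        if (dm_state->underscan_enable)
                apply_underscan(dm_state->underscan_hborder,  /* hypothetical */
                                dm_state->underscan_vborder); /* helper */
#endif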
5961
5962 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5963                               struct i2c_msg *msgs, int num)
5964 {
5965         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5966         struct ddc_service *ddc_service = i2c->ddc_service;
5967         struct i2c_command cmd;
5968         int i;
5969         int result = -EIO;
5970
5971         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5972
5973         if (!cmd.payloads)
5974                 return result;
5975
5976         cmd.number_of_payloads = num;
5977         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5978         cmd.speed = 100;
5979
5980         for (i = 0; i < num; i++) {
5981                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5982                 cmd.payloads[i].address = msgs[i].addr;
5983                 cmd.payloads[i].length = msgs[i].len;
5984                 cmd.payloads[i].data = msgs[i].buf;
5985         }
5986
5987         if (dc_submit_i2c(
5988                         ddc_service->ctx->dc,
5989                         ddc_service->ddc_pin->hw_info.ddc_channel,
5990                         &cmd))
5991                 result = num;
5992
5993         kfree(cmd.payloads);
5994         return result;
5995 }
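
/*
 * Example transaction (a sketch): a typical EDID read reaches this
 * adapter from the i2c core as two messages, which the loop above
 * converts 1:1 into DC payloads before dc_submit_i2c() runs them on
 * the link's DDC channel.
 */
#if 0
        u8 offset = 0;
        u8 edid[128];
        struct i2c_msg msgs[] = {
                { .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
                { .addr = 0x50, .flags = I2C_M_RD,
                  .len = sizeof(edid), .buf = edid },
        };

        /* The i2c core ends up in amdgpu_dm_i2c_xfer(adap, msgs, 2). */
        i2c_transfer(&i2c->base, msgs, ARRAY_SIZE(msgs));
#endif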
5996
5997 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
5998 {
5999         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6000 }
6001
6002 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6003         .master_xfer = amdgpu_dm_i2c_xfer,
6004         .functionality = amdgpu_dm_i2c_func,
6005 };
6006
6007 static struct amdgpu_i2c_adapter *
6008 create_i2c(struct ddc_service *ddc_service,
6009            int link_index,
6010            int *res)
6011 {
6012         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6013         struct amdgpu_i2c_adapter *i2c;
6014
6015         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6016         if (!i2c)
6017                 return NULL;
6018         i2c->base.owner = THIS_MODULE;
6019         i2c->base.class = I2C_CLASS_DDC;
6020         i2c->base.dev.parent = &adev->pdev->dev;
6021         i2c->base.algo = &amdgpu_dm_i2c_algo;
6022         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6023         i2c_set_adapdata(&i2c->base, i2c);
6024         i2c->ddc_service = ddc_service;
6025         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6026
6027         return i2c;
6028 }
6029
6031 /*
6032  * Note: this function assumes that dc_link_detect() was called for the
6033  * dc_link which will be represented by this aconnector.
6034  */
6035 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6036                                     struct amdgpu_dm_connector *aconnector,
6037                                     uint32_t link_index,
6038                                     struct amdgpu_encoder *aencoder)
6039 {
6040         int res = 0;
6041         int connector_type;
6042         struct dc *dc = dm->dc;
6043         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6044         struct amdgpu_i2c_adapter *i2c;
6045
6046         link->priv = aconnector;
6047
6048         DRM_DEBUG_DRIVER("%s()\n", __func__);
6049
6050         i2c = create_i2c(link->ddc, link->link_index, &res);
6051         if (!i2c) {
6052                 DRM_ERROR("Failed to create i2c adapter data\n");
6053                 return -ENOMEM;
6054         }
6055
6056         aconnector->i2c = i2c;
6057         res = i2c_add_adapter(&i2c->base);
6058
6059         if (res) {
6060                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6061                 goto out_free;
6062         }
6063
6064         connector_type = to_drm_connector_type(link->connector_signal);
6065
6066         res = drm_connector_init_with_ddc(
6067                         dm->ddev,
6068                         &aconnector->base,
6069                         &amdgpu_dm_connector_funcs,
6070                         connector_type,
6071                         &i2c->base);
6072
6073         if (res) {
6074                 DRM_ERROR("connector_init failed\n");
6075                 aconnector->connector_id = -1;
6076                 goto out_free;
6077         }
6078
6079         drm_connector_helper_add(
6080                         &aconnector->base,
6081                         &amdgpu_dm_connector_helper_funcs);
6082
6083         amdgpu_dm_connector_init_helper(
6084                 dm,
6085                 aconnector,
6086                 connector_type,
6087                 link,
6088                 link_index);
6089
6090         drm_connector_attach_encoder(
6091                 &aconnector->base, &aencoder->base);
6092
6093         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6094                 || connector_type == DRM_MODE_CONNECTOR_eDP)
6095                 amdgpu_dm_initialize_dp_connector(dm, aconnector);
6096
6097 out_free:
6098         if (res) {
6099                 kfree(i2c);
6100                 aconnector->i2c = NULL;
6101         }
6102         return res;
6103 }
6104
6105 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6106 {
6107         switch (adev->mode_info.num_crtc) {
6108         case 1:
6109                 return 0x1;
6110         case 2:
6111                 return 0x3;
6112         case 3:
6113                 return 0x7;
6114         case 4:
6115                 return 0xf;
6116         case 5:
6117                 return 0x1f;
6118         case 6:
6119         default:
6120                 return 0x3f;
6121         }
6122 }
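
/*
 * The switch above is just a mask with one bit per CRTC; for the 1-6
 * CRTCs seen in practice, an equivalent closed form would be (a sketch):
 */
#if 0
        return (1 << min_t(int, adev->mode_info.num_crtc, 6)) - 1;
#endif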
6123
6124 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6125                                   struct amdgpu_encoder *aencoder,
6126                                   uint32_t link_index)
6127 {
6128         struct amdgpu_device *adev = dev->dev_private;
6129
6130         int res = drm_encoder_init(dev,
6131                                    &aencoder->base,
6132                                    &amdgpu_dm_encoder_funcs,
6133                                    DRM_MODE_ENCODER_TMDS,
6134                                    NULL);
6135
6136         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6137
6138         if (!res)
6139                 aencoder->encoder_id = link_index;
6140         else
6141                 aencoder->encoder_id = -1;
6142
6143         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6144
6145         return res;
6146 }
6147
6148 static void manage_dm_interrupts(struct amdgpu_device *adev,
6149                                  struct amdgpu_crtc *acrtc,
6150                                  bool enable)
6151 {
6152         /*
6153          * This is not a correct translation, but it works as long as the
6154          * VBLANK constant is the same as PFLIP.
6155          */
6156         int irq_type =
6157                 amdgpu_display_crtc_idx_to_irq_type(
6158                         adev,
6159                         acrtc->crtc_id);
6160
6161         if (enable) {
6162                 drm_crtc_vblank_on(&acrtc->base);
6163                 amdgpu_irq_get(
6164                         adev,
6165                         &adev->pageflip_irq,
6166                         irq_type);
6167         } else {
6168
6169                 amdgpu_irq_put(
6170                         adev,
6171                         &adev->pageflip_irq,
6172                         irq_type);
6173                 drm_crtc_vblank_off(&acrtc->base);
6174         }
6175 }
6176
6177 static bool
6178 is_scaling_state_different(const struct dm_connector_state *dm_state,
6179                            const struct dm_connector_state *old_dm_state)
6180 {
6181         if (dm_state->scaling != old_dm_state->scaling)
6182                 return true;
6183         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6184                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6185                         return true;
6186         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6187                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6188                         return true;
6189         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6190                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6191                 return true;
6192         return false;
6193 }
6194
6195 #ifdef CONFIG_DRM_AMD_DC_HDCP
6196 static bool is_content_protection_different(struct drm_connector_state *state,
6197                                             const struct drm_connector_state *old_state,
6198                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6199 {
6200         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6201
6202         if (old_state->hdcp_content_type != state->hdcp_content_type &&
6203             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6204                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6205                 return true;
6206         }
6207
6208         /* CP is being re-enabled, ignore this */
6209         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6210             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6211                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6212                 return false;
6213         }
6214
6215         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6216         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6217             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6218                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6219
6220         /* Check if something is connected/enabled; otherwise we would start
6221          * hdcp with nothing connected/enabled (hot-plug, headless S3, dpms).
6222          */
6223         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6224             aconnector->dc_sink != NULL)
6225                 return true;
6226
6227         if (old_state->content_protection == state->content_protection)
6228                 return false;
6229
6230         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6231                 return true;
6232
6233         return false;
6234 }
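
/*
 * Summary of the checks above: the function reports a change (so the
 * caller enables or disables HDCP) when the content type changes, when
 * DESIRED is paired with a powered-on, connected sink, or when the
 * state becomes UNDESIRED. An ENABLED -> DESIRED transition is treated
 * as a re-enable and ignored, and an S3-restored ENABLED is downgraded
 * to DESIRED so the enable path runs again.
 */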
6235
6236 #endif
6237 static void remove_stream(struct amdgpu_device *adev,
6238                           struct amdgpu_crtc *acrtc,
6239                           struct dc_stream_state *stream)
6240 {
6241         /* this is the update mode case */
6242
6243         acrtc->otg_inst = -1;
6244         acrtc->enabled = false;
6245 }
6246
6247 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6248                                struct dc_cursor_position *position)
6249 {
6250         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6251         int x, y;
6252         int xorigin = 0, yorigin = 0;
6253
6254         position->enable = false;
6255         position->x = 0;
6256         position->y = 0;
6257
6258         if (!crtc || !plane->state->fb)
6259                 return 0;
6260
6261         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6262             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6263                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6264                           __func__,
6265                           plane->state->crtc_w,
6266                           plane->state->crtc_h);
6267                 return -EINVAL;
6268         }
6269
6270         x = plane->state->crtc_x;
6271         y = plane->state->crtc_y;
6272
6273         if (x <= -amdgpu_crtc->max_cursor_width ||
6274             y <= -amdgpu_crtc->max_cursor_height)
6275                 return 0;
6276
6277         if (x < 0) {
6278                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6279                 x = 0;
6280         }
6281         if (y < 0) {
6282                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6283                 y = 0;
6284         }
6285         position->enable = true;
6286         position->translate_by_source = true;
6287         position->x = x;
6288         position->y = y;
6289         position->x_hotspot = xorigin;
6290         position->y_hotspot = yorigin;
6291
6292         return 0;
6293 }
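
/*
 * Worked example (hypothetical values): with max_cursor_width = 128 and
 * plane->state->crtc_x = -10, crtc_y = 20, the clamping above yields
 * x = 0 with xorigin (x_hotspot) = 10, and y = 20 with yorigin = 0, so
 * DC starts scanout ten pixels into the cursor surface and the visible
 * portion of the cursor stays where userspace put it.
 */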
6294
6295 static void handle_cursor_update(struct drm_plane *plane,
6296                                  struct drm_plane_state *old_plane_state)
6297 {
6298         struct amdgpu_device *adev = plane->dev->dev_private;
6299         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6300         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6301         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6302         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6303         uint64_t address = afb ? afb->address : 0;
6304         struct dc_cursor_position position;
6305         struct dc_cursor_attributes attributes;
6306         int ret;
6307
6308         if (!plane->state->fb && !old_plane_state->fb)
6309                 return;
6310
6311         DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6312                          __func__,
6313                          amdgpu_crtc->crtc_id,
6314                          plane->state->crtc_w,
6315                          plane->state->crtc_h);
6316
6317         ret = get_cursor_position(plane, crtc, &position);
6318         if (ret)
6319                 return;
6320
6321         if (!position.enable) {
6322                 /* turn off cursor */
6323                 if (crtc_state && crtc_state->stream) {
6324                         mutex_lock(&adev->dm.dc_lock);
6325                         dc_stream_set_cursor_position(crtc_state->stream,
6326                                                       &position);
6327                         mutex_unlock(&adev->dm.dc_lock);
6328                 }
6329                 return;
6330         }
6331
6332         amdgpu_crtc->cursor_width = plane->state->crtc_w;
6333         amdgpu_crtc->cursor_height = plane->state->crtc_h;
6334
6335         memset(&attributes, 0, sizeof(attributes));
6336         attributes.address.high_part = upper_32_bits(address);
6337         attributes.address.low_part  = lower_32_bits(address);
6338         attributes.width             = plane->state->crtc_w;
6339         attributes.height            = plane->state->crtc_h;
6340         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6341         attributes.rotation_angle    = 0;
6342         attributes.attribute_flags.value = 0;
6343
6344         attributes.pitch = attributes.width;
6345
6346         if (crtc_state->stream) {
6347                 mutex_lock(&adev->dm.dc_lock);
6348                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6349                                                          &attributes))
6350                         DRM_ERROR("DC failed to set cursor attributes\n");
6351
6352                 if (!dc_stream_set_cursor_position(crtc_state->stream,
6353                                                    &position))
6354                         DRM_ERROR("DC failed to set cursor position\n");
6355                 mutex_unlock(&adev->dm.dc_lock);
6356         }
6357 }
6358
6359 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6360 {
6361
6362         assert_spin_locked(&acrtc->base.dev->event_lock);
6363         WARN_ON(acrtc->event);
6364
6365         acrtc->event = acrtc->base.state->event;
6366
6367         /* Set the flip status */
6368         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6369
6370         /* Mark this event as consumed */
6371         acrtc->base.state->event = NULL;
6372
6373         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6374                                                  acrtc->crtc_id);
6375 }
6376
6377 static void update_freesync_state_on_stream(
6378         struct amdgpu_display_manager *dm,
6379         struct dm_crtc_state *new_crtc_state,
6380         struct dc_stream_state *new_stream,
6381         struct dc_plane_state *surface,
6382         u32 flip_timestamp_in_us)
6383 {
6384         struct mod_vrr_params vrr_params;
6385         struct dc_info_packet vrr_infopacket = {0};
6386         struct amdgpu_device *adev = dm->adev;
6387         unsigned long flags;
6388
6389         if (!new_stream)
6390                 return;
6391
6392         /*
6393          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6394          * For now it's sufficient to just guard against these conditions.
6395          */
6396
6397         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6398                 return;
6399
6400         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6401         vrr_params = new_crtc_state->vrr_params;
6402
6403         if (surface) {
6404                 mod_freesync_handle_preflip(
6405                         dm->freesync_module,
6406                         surface,
6407                         new_stream,
6408                         flip_timestamp_in_us,
6409                         &vrr_params);
6410
6411                 if (adev->family < AMDGPU_FAMILY_AI &&
6412                     amdgpu_dm_vrr_active(new_crtc_state)) {
6413                         mod_freesync_handle_v_update(dm->freesync_module,
6414                                                      new_stream, &vrr_params);
6415
6416                         /* Need to call this before the frame ends. */
6417                         dc_stream_adjust_vmin_vmax(dm->dc,
6418                                                    new_crtc_state->stream,
6419                                                    &vrr_params.adjust);
6420                 }
6421         }
6422
6423         mod_freesync_build_vrr_infopacket(
6424                 dm->freesync_module,
6425                 new_stream,
6426                 &vrr_params,
6427                 PACKET_TYPE_VRR,
6428                 TRANSFER_FUNC_UNKNOWN,
6429                 &vrr_infopacket);
6430
6431         new_crtc_state->freesync_timing_changed |=
6432                 (memcmp(&new_crtc_state->vrr_params.adjust,
6433                         &vrr_params.adjust,
6434                         sizeof(vrr_params.adjust)) != 0);
6435
6436         new_crtc_state->freesync_vrr_info_changed |=
6437                 (memcmp(&new_crtc_state->vrr_infopacket,
6438                         &vrr_infopacket,
6439                         sizeof(vrr_infopacket)) != 0);
6440
6441         new_crtc_state->vrr_params = vrr_params;
6442         new_crtc_state->vrr_infopacket = vrr_infopacket;
6443
6444         new_stream->adjust = new_crtc_state->vrr_params.adjust;
6445         new_stream->vrr_infopacket = vrr_infopacket;
6446
6447         if (new_crtc_state->freesync_vrr_info_changed)
6448                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6449                               new_crtc_state->base.crtc->base.id,
6450                               (int)new_crtc_state->base.vrr_enabled,
6451                               (int)vrr_params.state);
6452
6453         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6454 }
6455
6456 static void pre_update_freesync_state_on_stream(
6457         struct amdgpu_display_manager *dm,
6458         struct dm_crtc_state *new_crtc_state)
6459 {
6460         struct dc_stream_state *new_stream = new_crtc_state->stream;
6461         struct mod_vrr_params vrr_params;
6462         struct mod_freesync_config config = new_crtc_state->freesync_config;
6463         struct amdgpu_device *adev = dm->adev;
6464         unsigned long flags;
6465
6466         if (!new_stream)
6467                 return;
6468
6469         /*
6470          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6471          * For now it's sufficient to just guard against these conditions.
6472          */
6473         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6474                 return;
6475
6476         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6477         vrr_params = new_crtc_state->vrr_params;
6478
6479         if (new_crtc_state->vrr_supported &&
6480             config.min_refresh_in_uhz &&
6481             config.max_refresh_in_uhz) {
6482                 config.state = new_crtc_state->base.vrr_enabled ?
6483                         VRR_STATE_ACTIVE_VARIABLE :
6484                         VRR_STATE_INACTIVE;
6485         } else {
6486                 config.state = VRR_STATE_UNSUPPORTED;
6487         }
6488
6489         mod_freesync_build_vrr_params(dm->freesync_module,
6490                                       new_stream,
6491                                       &config, &vrr_params);
6492
6493         new_crtc_state->freesync_timing_changed |=
6494                 (memcmp(&new_crtc_state->vrr_params.adjust,
6495                         &vrr_params.adjust,
6496                         sizeof(vrr_params.adjust)) != 0);
6497
6498         new_crtc_state->vrr_params = vrr_params;
6499         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6500 }
6501
6502 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6503                                             struct dm_crtc_state *new_state)
6504 {
6505         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6506         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6507
6508         if (!old_vrr_active && new_vrr_active) {
6509                 /* Transition VRR inactive -> active:
6510                  * While VRR is active, we must not disable the vblank irq, as
6511                  * a re-enable after disable would compute bogus vblank/pflip
6512                  * timestamps if the re-enable lands inside the front porch.
6513                  *
6514                  * We also need the vupdate irq for the actual core vblank
6515                  * handling at end of vblank.
6516                  */
6517                 dm_set_vupdate_irq(new_state->base.crtc, true);
6518                 drm_crtc_vblank_get(new_state->base.crtc);
6519                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6520                                  __func__, new_state->base.crtc->base.id);
6521         } else if (old_vrr_active && !new_vrr_active) {
6522                 /* Transition VRR active -> inactive:
6523                  * Allow vblank irq disable again for fixed refresh rate.
6524                  */
6525                 dm_set_vupdate_irq(new_state->base.crtc, false);
6526                 drm_crtc_vblank_put(new_state->base.crtc);
6527                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6528                                  __func__, new_state->base.crtc->base.id);
6529         }
6530 }
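
/*
 * Note that the two branches above are symmetric: every off -> on
 * transition takes exactly one vblank reference and enables the vupdate
 * irq, and the matching on -> off transition drops both again, so
 * repeated toggling stays balanced.
 */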
6531
6532 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6533 {
6534         struct drm_plane *plane;
6535         struct drm_plane_state *old_plane_state, *new_plane_state;
6536         int i;
6537
6538         /*
6539          * TODO: Make this per-stream so we don't issue redundant updates for
6540          * commits with multiple streams.
6541          */
6542         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6543                                        new_plane_state, i)
6544                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6545                         handle_cursor_update(plane, old_plane_state);
6546 }
6547
6548 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6549                                     struct dc_state *dc_state,
6550                                     struct drm_device *dev,
6551                                     struct amdgpu_display_manager *dm,
6552                                     struct drm_crtc *pcrtc,
6553                                     bool wait_for_vblank)
6554 {
6555         uint32_t i;
6556         uint64_t timestamp_ns;
6557         struct drm_plane *plane;
6558         struct drm_plane_state *old_plane_state, *new_plane_state;
6559         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6560         struct drm_crtc_state *new_pcrtc_state =
6561                         drm_atomic_get_new_crtc_state(state, pcrtc);
6562         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6563         struct dm_crtc_state *dm_old_crtc_state =
6564                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6565         int planes_count = 0, vpos, hpos;
6566         long r;
6567         unsigned long flags;
6568         struct amdgpu_bo *abo;
6569         uint64_t tiling_flags;
6570         uint32_t target_vblank, last_flip_vblank;
6571         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6572         bool pflip_present = false;
6573         struct {
6574                 struct dc_surface_update surface_updates[MAX_SURFACES];
6575                 struct dc_plane_info plane_infos[MAX_SURFACES];
6576                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6577                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6578                 struct dc_stream_update stream_update;
6579         } *bundle;
6580
6581         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6582
6583         if (!bundle) {
6584                 dm_error("Failed to allocate update bundle\n");
6585                 goto cleanup;
6586         }
6587
6588         /*
6589          * Disable the cursor first if we're disabling all the planes.
6590          * It'll remain on the screen after the planes are re-enabled
6591          * if we don't.
6592          */
6593         if (acrtc_state->active_planes == 0)
6594                 amdgpu_dm_commit_cursors(state);
6595
6596         /* update planes when needed */
6597         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6598                 struct drm_crtc *crtc = new_plane_state->crtc;
6599                 struct drm_crtc_state *new_crtc_state;
6600                 struct drm_framebuffer *fb = new_plane_state->fb;
6601                 bool plane_needs_flip;
6602                 struct dc_plane_state *dc_plane;
6603                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6604
6605                 /* Cursor plane is handled after stream updates */
6606                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6607                         continue;
6608
6609                 if (!fb || !crtc || pcrtc != crtc)
6610                         continue;
6611
6612                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6613                 if (!new_crtc_state->active)
6614                         continue;
6615
6616                 dc_plane = dm_new_plane_state->dc_state;
6617
6618                 bundle->surface_updates[planes_count].surface = dc_plane;
6619                 if (new_pcrtc_state->color_mgmt_changed) {
6620                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6621                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6622                 }
6623
6624                 fill_dc_scaling_info(new_plane_state,
6625                                      &bundle->scaling_infos[planes_count]);
6626
6627                 bundle->surface_updates[planes_count].scaling_info =
6628                         &bundle->scaling_infos[planes_count];
6629
6630                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6631
6632                 pflip_present = pflip_present || plane_needs_flip;
6633
6634                 if (!plane_needs_flip) {
6635                         planes_count += 1;
6636                         continue;
6637                 }
6638
6639                 abo = gem_to_amdgpu_bo(fb->obj[0]);
6640
6641                 /*
6642                  * Wait for all fences on this FB. Use a limited wait to avoid
6643                  * deadlock during GPU reset, when the fence will never signal
6644                  * while we hold the reservation lock for the BO.
6645                  */
6646                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6647                                                         false,
6648                                                         msecs_to_jiffies(5000));
6649                 if (unlikely(r <= 0))
6650                         DRM_ERROR("Waiting for fences timed out!");
6651
6652                 /*
6653                  * TODO: This might fail and hence is better not used; wait
6654                  * explicitly on the fences instead.
6655                  * In general this should only be called for a blocking
6656                  * commit, as per the framework helpers.
6657                  */
6658                 r = amdgpu_bo_reserve(abo, true);
6659                 if (unlikely(r != 0))
6660                         DRM_ERROR("failed to reserve buffer before flip\n");
6661
6662                 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6663
6664                 amdgpu_bo_unreserve(abo);
6665
6666                 fill_dc_plane_info_and_addr(
6667                         dm->adev, new_plane_state, tiling_flags,
6668                         &bundle->plane_infos[planes_count],
6669                         &bundle->flip_addrs[planes_count].address);
6670
6671                 bundle->surface_updates[planes_count].plane_info =
6672                         &bundle->plane_infos[planes_count];
6673
6674                 /*
6675                  * Only allow immediate flips for fast updates that don't
6676                  * change FB pitch, DCC state, rotation or mirroring.
6677                  */
6678                 bundle->flip_addrs[planes_count].flip_immediate =
6679                         crtc->state->async_flip &&
6680                         acrtc_state->update_type == UPDATE_TYPE_FAST;
6681
6682                 timestamp_ns = ktime_get_ns();
6683                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6684                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6685                 bundle->surface_updates[planes_count].surface = dc_plane;
6686
6687                 if (!bundle->surface_updates[planes_count].surface) {
6688                         DRM_ERROR("No surface for CRTC: id=%d\n",
6689                                         acrtc_attach->crtc_id);
6690                         continue;
6691                 }
6692
6693                 if (plane == pcrtc->primary)
6694                         update_freesync_state_on_stream(
6695                                 dm,
6696                                 acrtc_state,
6697                                 acrtc_state->stream,
6698                                 dc_plane,
6699                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6700
6701                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6702                                  __func__,
6703                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6704                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6705
6706                 planes_count += 1;
6707
6708         }
6709
6710         if (pflip_present) {
6711                 if (!vrr_active) {
6712                         /* Use old throttling in non-vrr fixed refresh rate mode
6713                          * to keep flip scheduling based on target vblank counts
6714                          * working in a backwards compatible way, e.g., for
6715                          * clients using the GLX_OML_sync_control extension or
6716                          * DRI3/Present extension with defined target_msc.
6717                          */
6718                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6719                 } else {
6721                         /* For variable refresh rate mode only:
6722                          * Get vblank of last completed flip to avoid > 1 vrr
6723                          * flips per video frame by use of throttling, but allow
6724                          * flip programming anywhere in the possibly large
6725                          * variable vrr vblank interval for fine-grained flip
6726                          * timing control and more opportunity to avoid stutter
6727                          * on late submission of flips.
6728                          */
6729                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6730                         last_flip_vblank = acrtc_attach->last_flip_vblank;
6731                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6732                 }
6733
6734                 target_vblank = last_flip_vblank + wait_for_vblank;
6735
6736                 /*
6737                  * Wait until we're out of the vertical blank period before the one
6738                  * targeted by the flip
6739                  */
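                /*
                 * Worked example (hypothetical counts): with
                 * wait_for_vblank set and last_flip_vblank = 100,
                 * target_vblank = 101, and the loop below sleeps in
                 * ~1 ms steps while the CRTC reports a valid in-vblank
                 * scanout position and its counter is still below 101.
                 */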
6740                 while ((acrtc_attach->enabled &&
6741                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6742                                                             0, &vpos, &hpos, NULL,
6743                                                             NULL, &pcrtc->hwmode)
6744                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6745                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6746                         (int)(target_vblank -
6747                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6748                         usleep_range(1000, 1100);
6749                 }
6750
6751                 if (acrtc_attach->base.state->event) {
6752                         drm_crtc_vblank_get(pcrtc);
6753
6754                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6755
6756                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6757                         prepare_flip_isr(acrtc_attach);
6758
6759                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6760                 }
6761
6762                 if (acrtc_state->stream) {
6763                         if (acrtc_state->freesync_vrr_info_changed)
6764                                 bundle->stream_update.vrr_infopacket =
6765                                         &acrtc_state->stream->vrr_infopacket;
6766                 }
6767         }
6768
6769         /* Update the planes if changed or disable if we don't have any. */
6770         if ((planes_count || acrtc_state->active_planes == 0) &&
6771                 acrtc_state->stream) {
6772                 bundle->stream_update.stream = acrtc_state->stream;
6773                 if (new_pcrtc_state->mode_changed) {
6774                         bundle->stream_update.src = acrtc_state->stream->src;
6775                         bundle->stream_update.dst = acrtc_state->stream->dst;
6776                 }
6777
6778                 if (new_pcrtc_state->color_mgmt_changed) {
6779                         /*
6780                          * TODO: This isn't fully correct since we've actually
6781                          * already modified the stream in place.
6782                          */
6783                         bundle->stream_update.gamut_remap =
6784                                 &acrtc_state->stream->gamut_remap_matrix;
6785                         bundle->stream_update.output_csc_transform =
6786                                 &acrtc_state->stream->csc_color_matrix;
6787                         bundle->stream_update.out_transfer_func =
6788                                 acrtc_state->stream->out_transfer_func;
6789                 }
6790
6791                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
6792                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6793                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
6794
6795                 /*
6796                  * If FreeSync state on the stream has changed then we need to
6797                  * re-adjust the min/max bounds now that DC doesn't handle this
6798                  * as part of commit.
6799                  */
6800                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6801                     amdgpu_dm_vrr_active(acrtc_state)) {
6802                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6803                         dc_stream_adjust_vmin_vmax(
6804                                 dm->dc, acrtc_state->stream,
6805                                 &acrtc_state->vrr_params.adjust);
6806                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6807                 }
6808                 mutex_lock(&dm->dc_lock);
6809                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6810                                 acrtc_state->stream->link->psr_allow_active)
6811                         amdgpu_dm_psr_disable(acrtc_state->stream);
6812
6813                 dc_commit_updates_for_stream(dm->dc,
6814                                                      bundle->surface_updates,
6815                                                      planes_count,
6816                                                      acrtc_state->stream,
6817                                                      &bundle->stream_update,
6818                                                      dc_state);
6819
6820                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6821                                                 acrtc_state->stream->psr_version &&
6822                                                 !acrtc_state->stream->link->psr_feature_enabled)
6823                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
6824                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6825                                                 acrtc_state->stream->link->psr_feature_enabled &&
6826                                                 !acrtc_state->stream->link->psr_allow_active) {
6827                         amdgpu_dm_psr_enable(acrtc_state->stream);
6828                 }
6829
6830                 mutex_unlock(&dm->dc_lock);
6831         }
6832
6833         /*
6834          * Update cursor state *after* programming all the planes.
6835          * This avoids redundantly programming the cursor on pipes whose
6836          * planes are about to be disabled anyway.
6837          */
6838         if (acrtc_state->active_planes)
6839                 amdgpu_dm_commit_cursors(state);
6840
6841 cleanup:
6842         kfree(bundle);
6843 }
6844
6845 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6846                                    struct drm_atomic_state *state)
6847 {
6848         struct amdgpu_device *adev = dev->dev_private;
6849         struct amdgpu_dm_connector *aconnector;
6850         struct drm_connector *connector;
6851         struct drm_connector_state *old_con_state, *new_con_state;
6852         struct drm_crtc_state *new_crtc_state;
6853         struct dm_crtc_state *new_dm_crtc_state;
6854         const struct dc_stream_status *status;
6855         int i, inst;
6856
6857         /* Notify audio device removals. */
6858         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6859                 if (old_con_state->crtc != new_con_state->crtc) {
6860                         /* CRTC changes require notification. */
6861                         goto notify;
6862                 }
6863
6864                 if (!new_con_state->crtc)
6865                         continue;
6866
6867                 new_crtc_state = drm_atomic_get_new_crtc_state(
6868                         state, new_con_state->crtc);
6869
6870                 if (!new_crtc_state)
6871                         continue;
6872
6873                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6874                         continue;
6875
6876         notify:
6877                 aconnector = to_amdgpu_dm_connector(connector);
6878
6879                 mutex_lock(&adev->dm.audio_lock);
6880                 inst = aconnector->audio_inst;
6881                 aconnector->audio_inst = -1;
6882                 mutex_unlock(&adev->dm.audio_lock);
6883
6884                 amdgpu_dm_audio_eld_notify(adev, inst);
6885         }
6886
6887         /* Notify audio device additions. */
6888         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6889                 if (!new_con_state->crtc)
6890                         continue;
6891
6892                 new_crtc_state = drm_atomic_get_new_crtc_state(
6893                         state, new_con_state->crtc);
6894
6895                 if (!new_crtc_state)
6896                         continue;
6897
6898                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6899                         continue;
6900
6901                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6902                 if (!new_dm_crtc_state->stream)
6903                         continue;
6904
6905                 status = dc_stream_get_status(new_dm_crtc_state->stream);
6906                 if (!status)
6907                         continue;
6908
6909                 aconnector = to_amdgpu_dm_connector(connector);
6910
6911                 mutex_lock(&adev->dm.audio_lock);
6912                 inst = status->audio_inst;
6913                 aconnector->audio_inst = inst;
6914                 mutex_unlock(&adev->dm.audio_lock);
6915
6916                 amdgpu_dm_audio_eld_notify(adev, inst);
6917         }
6918 }
6919
6920 /*
6921  * Enable interrupts on CRTCs that are newly active, have undergone
6922  * a modeset, or have active planes again.
6923  *
6924  * Done in two passes, based on the for_modeset flag:
6925  * Pass 1: For CRTCs going through modeset
6926  * Pass 2: For CRTCs going from 0 to n active planes
6927  *
6928  * Interrupts can only be enabled after the planes are programmed,
6929  * so this requires a two-pass approach, since we don't want to
6930  * just defer the interrupts until after the planes are committed every time.
6931  */
6932 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6933                                              struct drm_atomic_state *state,
6934                                              bool for_modeset)
6935 {
6936         struct amdgpu_device *adev = dev->dev_private;
6937         struct drm_crtc *crtc;
6938         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6939         int i;
6940 #ifdef CONFIG_DEBUG_FS
6941         enum amdgpu_dm_pipe_crc_source source;
6942 #endif
6943
6944         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6945                                       new_crtc_state, i) {
6946                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6947                 struct dm_crtc_state *dm_new_crtc_state =
6948                         to_dm_crtc_state(new_crtc_state);
6949                 struct dm_crtc_state *dm_old_crtc_state =
6950                         to_dm_crtc_state(old_crtc_state);
6951                 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6952                 bool run_pass;
6953
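                /*
                 * Pass selection: pass 1 (for_modeset) handles CRTCs that are
                 * undergoing a modeset; pass 2 handles CRTCs whose interrupts
                 * were off and that are going from 0 to n active planes.
                 */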
6954                 run_pass = (for_modeset && modeset) ||
6955                            (!for_modeset && !modeset &&
6956                             !dm_old_crtc_state->interrupts_enabled);
6957
6958                 if (!run_pass)
6959                         continue;
6960
6961                 if (!dm_new_crtc_state->interrupts_enabled)
6962                         continue;
6963
6964                 manage_dm_interrupts(adev, acrtc, true);
6965
6966 #ifdef CONFIG_DEBUG_FS
6967                 /* The stream has changed so CRC capture needs to be re-enabled. */
6968                 source = dm_new_crtc_state->crc_src;
6969                 if (amdgpu_dm_is_valid_crc_source(source)) {
6970                         amdgpu_dm_crtc_configure_crc_source(
6971                                 crtc, dm_new_crtc_state,
6972                                 dm_new_crtc_state->crc_src);
6973                 }
6974 #endif
6975         }
6976 }
6977
6978 /*
6979  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6980  * @crtc_state: the DRM CRTC state
6981  * @stream_state: the DC stream state.
6982  *
6983  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6984  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6985  */
6986 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6987                                                 struct dc_stream_state *stream_state)
6988 {
6989         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
6990 }
6991
6992 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
6993                                    struct drm_atomic_state *state,
6994                                    bool nonblock)
6995 {
6996         struct drm_crtc *crtc;
6997         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6998         struct amdgpu_device *adev = dev->dev_private;
6999         int i;
7000
7001         /*
7002          * We evade vblank and pflip interrupts on CRTCs that are undergoing
7003          * a modeset, being disabled, or have no active planes.
7004          *
7005          * It's done in atomic commit rather than commit tail for now since
7006          * some of these interrupt handlers access the current CRTC state and
7007          * potentially the stream pointer itself.
7008          *
7009          * Since the atomic state is swapped within atomic commit and not within
7010          * commit tail, this would lead to the new state (which hasn't been
7011          * committed yet) being accessed from within the handlers.
7012          *
7013          * TODO: Fix this so we can do this in commit tail and not have to block
7014          * in atomic check.
7015          */
7016         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7017                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7018                 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7019                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7020
7021                 if (dm_old_crtc_state->interrupts_enabled &&
7022                     (!dm_new_crtc_state->interrupts_enabled ||
7023                      drm_atomic_crtc_needs_modeset(new_crtc_state)))
7024                         manage_dm_interrupts(adev, acrtc, false);
7025         }
7026         /*
7027          * Add check here for SoCs that support hardware cursor plane, to
7028          * unset legacy_cursor_update
7029          */
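        /*
         * A minimal sketch of the check that the TODO above asks for, kept
         * inside this comment since it is illustrative rather than actual
         * driver code; dc_has_hw_cursor() is a hypothetical capability helper:
         *
         *      if (dc_has_hw_cursor(adev->dm.dc))
         *              state->legacy_cursor_update = false;
         */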
7030
7031         return drm_atomic_helper_commit(dev, state, nonblock);
7032
7033         /* TODO: Handle EINTR, re-enable IRQ */
7034 }
7035
7036 /**
7037  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7038  * @state: The atomic state to commit
7039  *
7040  * This will tell DC to commit the constructed DC state from atomic_check,
7041  * programming the hardware. Any failure here implies a hardware problem, since
7042  * atomic check should have filtered out anything non-kosher.
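 *
 * Roughly, the ordering below is: commit the global DC state, apply HDCP and
 * connector-only stream updates, enable interrupts for CRTCs that went
 * through a modeset, program planes per CRTC, enable interrupts for CRTCs
 * going from zero to n active planes, notify audio, then send the pending
 * events and signal hw_done.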
7043  */
7044 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7045 {
7046         struct drm_device *dev = state->dev;
7047         struct amdgpu_device *adev = dev->dev_private;
7048         struct amdgpu_display_manager *dm = &adev->dm;
7049         struct dm_atomic_state *dm_state;
7050         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7051         uint32_t i, j;
7052         struct drm_crtc *crtc;
7053         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7054         unsigned long flags;
7055         bool wait_for_vblank = true;
7056         struct drm_connector *connector;
7057         struct drm_connector_state *old_con_state, *new_con_state;
7058         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7059         int crtc_disable_count = 0;
7060
7061         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7062
7063         dm_state = dm_atomic_get_new_state(state);
7064         if (dm_state && dm_state->context) {
7065                 dc_state = dm_state->context;
7066         } else {
7067                 /* No state changes, retain current state. */
7068                 dc_state_temp = dc_create_state(dm->dc);
7069                 ASSERT(dc_state_temp);
7070                 dc_state = dc_state_temp;
7071                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7072         }
7073
7074         /* update changed items */
7075         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7076                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7077
7078                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7079                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7080
7081                 DRM_DEBUG_DRIVER(
7082                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7083                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7084                         "connectors_changed:%d\n",
7085                         acrtc->crtc_id,
7086                         new_crtc_state->enable,
7087                         new_crtc_state->active,
7088                         new_crtc_state->planes_changed,
7089                         new_crtc_state->mode_changed,
7090                         new_crtc_state->active_changed,
7091                         new_crtc_state->connectors_changed);
7092
7093                 /* Copy all transient state flags into dc state */
7094                 if (dm_new_crtc_state->stream) {
7095                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7096                                                             dm_new_crtc_state->stream);
7097                 }
7098
7099                 /* Handle the headless hotplug case, updating new_state and
7100                  * aconnector as needed
7101                  */
7102
7103                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7104
7105                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7106
7107                         if (!dm_new_crtc_state->stream) {
7108                                 /*
7109                                  * This could happen because of issues with
7110                                  * userspace notification delivery: userspace
7111                                  * tries to set a mode on a display which is in
7112                                  * fact disconnected, so dc_sink is NULL on the
7113                                  * aconnector. We expect a mode reset to come
7114                                  * soon.
7115                                  *
7116                                  * This can also happen when an unplug happens
7117                                  * while the resume sequence is finishing.
7118                                  *
7119                                  * In either case, we want to pretend we still
7120                                  * have a sink to keep the pipe running, so that
7121                                  * hw state stays consistent with the sw state.
7122                                  */
7123                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7124                                                 __func__, acrtc->base.base.id);
7125                                 continue;
7126                         }
7127
7128                         if (dm_old_crtc_state->stream)
7129                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7130
7131                         pm_runtime_get_noresume(dev->dev);
7132
7133                         acrtc->enabled = true;
7134                         acrtc->hw_mode = new_crtc_state->mode;
7135                         crtc->hwmode = new_crtc_state->mode;
7136                 } else if (modereset_required(new_crtc_state)) {
7137                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7138                         /* i.e. reset mode */
7139                         if (dm_old_crtc_state->stream) {
7140                                 if (dm_old_crtc_state->stream->link->psr_allow_active)
7141                                         amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7142
7143                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7144                         }
7145                 }
7146         } /* for_each_crtc_in_state() */
7147
7148         if (dc_state) {
7149                 dm_enable_per_frame_crtc_master_sync(dc_state);
7150                 mutex_lock(&dm->dc_lock);
7151                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7152                 mutex_unlock(&dm->dc_lock);
7153         }
7154
7155         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7156                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7157
7158                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7159
7160                 if (dm_new_crtc_state->stream != NULL) {
7161                         const struct dc_stream_status *status =
7162                                         dc_stream_get_status(dm_new_crtc_state->stream);
7163
7164                         if (!status)
7165                                 status = dc_stream_get_status_from_state(dc_state,
7166                                                                          dm_new_crtc_state->stream);
7167
7168                         if (!status)
7169                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7170                         else
7171                                 acrtc->otg_inst = status->primary_otg_inst;
7172                 }
7173         }
7174 #ifdef CONFIG_DRM_AMD_DC_HDCP
7175         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7176                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7177                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7178                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7179
7180                 new_crtc_state = NULL;
7181
7182                 if (acrtc)
7183                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7184
7185                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7186
7187                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7188                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7189                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7190                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7191                         continue;
7192                 }
7193
7194                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7195                         hdcp_update_display(
7196                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7197                                 new_con_state->hdcp_content_type,
7198                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7199                                                                                                          : false);
7200         }
7201 #endif
7202
7203         /* Handle connector state changes */
7204         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7205                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7206                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7207                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7208                 struct dc_surface_update dummy_updates[MAX_SURFACES];
7209                 struct dc_stream_update stream_update;
7210                 struct dc_info_packet hdr_packet;
7211                 struct dc_stream_status *status = NULL;
7212                 bool abm_changed, hdr_changed, scaling_changed;
7213
7214                 memset(&dummy_updates, 0, sizeof(dummy_updates));
7215                 memset(&stream_update, 0, sizeof(stream_update));
7216
7217                 if (acrtc) {
7218                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7219                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7220                 }
7221
7222                 /* Skip any modesets/resets */
7223                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7224                         continue;
7225
7226                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7227                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7228
7229                 scaling_changed = is_scaling_state_different(dm_new_con_state,
7230                                                              dm_old_con_state);
7231
7232                 abm_changed = dm_new_crtc_state->abm_level !=
7233                               dm_old_crtc_state->abm_level;
7234
7235                 hdr_changed =
7236                         is_hdr_metadata_different(old_con_state, new_con_state);
7237
7238                 if (!scaling_changed && !abm_changed && !hdr_changed)
7239                         continue;
7240
7241                 stream_update.stream = dm_new_crtc_state->stream;
7242                 if (scaling_changed) {
7243                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7244                                         dm_new_con_state, dm_new_crtc_state->stream);
7245
7246                         stream_update.src = dm_new_crtc_state->stream->src;
7247                         stream_update.dst = dm_new_crtc_state->stream->dst;
7248                 }
7249
7250                 if (abm_changed) {
7251                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7252
7253                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
7254                 }
7255
7256                 if (hdr_changed) {
7257                         fill_hdr_info_packet(new_con_state, &hdr_packet);
7258                         stream_update.hdr_static_metadata = &hdr_packet;
7259                 }
7260
7261                 status = dc_stream_get_status(dm_new_crtc_state->stream);
7262                 WARN_ON(!status);
7263                 WARN_ON(!status->plane_count);
7264
7265                 /*
7266                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
7267                  * Here we create an empty update on each plane.
7268                  * To fix this, DC should permit updating only stream properties.
7269                  */
7270                 for (j = 0; j < status->plane_count; j++)
7271                         dummy_updates[j].surface = status->plane_states[0];
7272
7274                 mutex_lock(&dm->dc_lock);
7275                 dc_commit_updates_for_stream(dm->dc,
7276                                                      dummy_updates,
7277                                                      status->plane_count,
7278                                                      dm_new_crtc_state->stream,
7279                                                      &stream_update,
7280                                                      dc_state);
7281                 mutex_unlock(&dm->dc_lock);
7282         }
7283
7284         /* Count number of newly disabled CRTCs for dropping PM refs later. */
7285         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7286                                       new_crtc_state, i) {
7287                 if (old_crtc_state->active && !new_crtc_state->active)
7288                         crtc_disable_count++;
7289
7290                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7291                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7292
7293                 /* Update freesync active state. */
7294                 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7295
7296                 /* Handle vrr on->off / off->on transitions */
7297                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7298                                                 dm_new_crtc_state);
7299         }
7300
7301         /* Enable interrupts for CRTCs going through a modeset. */
7302         amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7303
7304         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7305                 if (new_crtc_state->async_flip)
7306                         wait_for_vblank = false;
7307
7308         /* update planes when needed per crtc */
7309         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7310                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7311
7312                 if (dm_new_crtc_state->stream)
7313                         amdgpu_dm_commit_planes(state, dc_state, dev,
7314                                                 dm, crtc, wait_for_vblank);
7315         }
7316
7317         /* Enable interrupts for CRTCs going from 0 to n active planes. */
7318         amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7319
7320         /* Update audio instances for each connector. */
7321         amdgpu_dm_commit_audio(dev, state);
7322
7323         /*
7324          * Send a vblank event for every event not handled in the flip path, and
7325          * mark each event consumed for drm_atomic_helper_commit_hw_done().
7326          */
7327         spin_lock_irqsave(&adev->ddev->event_lock, flags);
7328         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7329
7330                 if (new_crtc_state->event)
7331                         drm_send_event_locked(dev, &new_crtc_state->event->base);
7332
7333                 new_crtc_state->event = NULL;
7334         }
7335         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7336
7337         /* Signal HW programming completion */
7338         drm_atomic_helper_commit_hw_done(state);
7339
7340         if (wait_for_vblank)
7341                 drm_atomic_helper_wait_for_flip_done(dev, state);
7342
7343         drm_atomic_helper_cleanup_planes(dev, state);
7344
7345         /*
7346          * Finally, drop a runtime PM reference for each newly disabled CRTC,
7347          * so we can put the GPU into runtime suspend if we're not driving any
7348          * displays anymore
7349          */
7350         for (i = 0; i < crtc_disable_count; i++)
7351                 pm_runtime_put_autosuspend(dev->dev);
7352         pm_runtime_mark_last_busy(dev->dev);
7353
7354         if (dc_state_temp)
7355                 dc_release_state(dc_state_temp);
7356 }
7357
7358
7359 static int dm_force_atomic_commit(struct drm_connector *connector)
7360 {
7361         int ret = 0;
7362         struct drm_device *ddev = connector->dev;
7363         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7364         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7365         struct drm_plane *plane = disconnected_acrtc->base.primary;
7366         struct drm_connector_state *conn_state;
7367         struct drm_crtc_state *crtc_state;
7368         struct drm_plane_state *plane_state;
7369
7370         if (!state)
7371                 return -ENOMEM;
7372
7373         state->acquire_ctx = ddev->mode_config.acquire_ctx;
7374
7375         /* Construct an atomic state to restore the previous display settings */
7376
7377         /*
7378          * Attach connectors to drm_atomic_state
7379          */
7380         conn_state = drm_atomic_get_connector_state(state, connector);
7381
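        /*
         * drm_atomic_get_connector_state() returns an ERR_PTR() on failure;
         * PTR_ERR_OR_ZERO() maps that to a negative errno, or to 0 on success.
         */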
7382         ret = PTR_ERR_OR_ZERO(conn_state);
7383         if (ret)
7384                 goto err;
7385
7386         /* Attach crtc to drm_atomic_state */
7387         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7388
7389         ret = PTR_ERR_OR_ZERO(crtc_state);
7390         if (ret)
7391                 goto err;
7392
7393         /* force a restore */
7394         crtc_state->mode_changed = true;
7395
7396         /* Attach plane to drm_atomic_state */
7397         plane_state = drm_atomic_get_plane_state(state, plane);
7398
7399         ret = PTR_ERR_OR_ZERO(plane_state);
7400         if (ret)
7401                 goto err;
7402
7404         /* Call commit internally with the state we just constructed */
7405         ret = drm_atomic_commit(state);
7406         if (!ret)
7407                 return 0;
7408
7409 err:
7410         DRM_ERROR("Restoring old state failed with %i\n", ret);
7411         drm_atomic_state_put(state);
7412
7413         return ret;
7414 }
7415
7416 /*
7417  * This function handles all the cases when a set-mode call does not come upon
7418  * hotplug. This includes when a display is unplugged and then plugged back into
7419  * the same port, and when running without usermode desktop manager support.
7420  */
7421 void dm_restore_drm_connector_state(struct drm_device *dev,
7422                                     struct drm_connector *connector)
7423 {
7424         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7425         struct amdgpu_crtc *disconnected_acrtc;
7426         struct dm_crtc_state *acrtc_state;
7427
7428         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7429                 return;
7430
7431         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7432         if (!disconnected_acrtc)
7433                 return;
7434
7435         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7436         if (!acrtc_state->stream)
7437                 return;
7438
7439         /*
7440          * If the previous sink has not been released and is different from the
7441          * current one, we deduce that we are in a state where we cannot rely on a
7442          * usermode call to turn the display on, so we do it here.
7443          */
7444         if (acrtc_state->stream->sink != aconnector->dc_sink)
7445                 dm_force_atomic_commit(&aconnector->base);
7446 }
7447
7448 /*
7449  * Grabs all modesetting locks to serialize against any blocking commits, and
7450  * waits for the completion of all non-blocking commits.
7451  */
7452 static int do_aquire_global_lock(struct drm_device *dev,
7453                                  struct drm_atomic_state *state)
7454 {
7455         struct drm_crtc *crtc;
7456         struct drm_crtc_commit *commit;
7457         long ret;
7458
7459         /*
7460          * Adding all modeset locks to acquire_ctx will
7461          * ensure that when the framework releases it, the
7462          * extra locks we are taking here will also get released.
7463          */
7464         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7465         if (ret)
7466                 return ret;
7467
7468         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7469                 spin_lock(&crtc->commit_lock);
7470                 commit = list_first_entry_or_null(&crtc->commit_list,
7471                                 struct drm_crtc_commit, commit_entry);
7472                 if (commit)
7473                         drm_crtc_commit_get(commit);
7474                 spin_unlock(&crtc->commit_lock);
7475
7476                 if (!commit)
7477                         continue;
7478
7479                 /*
7480                  * Make sure all pending HW programming has completed and
7481                  * all page flips are done, waiting up to ten seconds for each.
7482                  */
7483                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7484
7485                 if (ret > 0)
7486                         ret = wait_for_completion_interruptible_timeout(
7487                                         &commit->flip_done, 10*HZ);
7488
7489                 if (ret == 0)
7490                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7491                                   "timed out\n", crtc->base.id, crtc->name);
7492
7493                 drm_crtc_commit_put(commit);
7494         }
7495
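        /*
         * wait_for_completion_interruptible_timeout() returns the remaining
         * jiffies (> 0) on success, 0 on timeout, or a negative error if
         * interrupted; fold any positive remainder into 0 for the caller.
         */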
7496         return ret < 0 ? ret : 0;
7497 }
7498
7499 static void get_freesync_config_for_crtc(
7500         struct dm_crtc_state *new_crtc_state,
7501         struct dm_connector_state *new_con_state)
7502 {
7503         struct mod_freesync_config config = {0};
7504         struct amdgpu_dm_connector *aconnector =
7505                         to_amdgpu_dm_connector(new_con_state->base.connector);
7506         struct drm_display_mode *mode = &new_crtc_state->base.mode;
7507         int vrefresh = drm_mode_vrefresh(mode);
7508
7509         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7510                                         vrefresh >= aconnector->min_vfreq &&
7511                                         vrefresh <= aconnector->max_vfreq;
7512
7513         if (new_crtc_state->vrr_supported) {
7514                 new_crtc_state->stream->ignore_msa_timing_param = true;
7515                 config.state = new_crtc_state->base.vrr_enabled ?
7516                                 VRR_STATE_ACTIVE_VARIABLE :
7517                                 VRR_STATE_INACTIVE;
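                /* mod_freesync takes the refresh range in micro-Hz (Hz * 1,000,000). */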
7518                 config.min_refresh_in_uhz =
7519                                 aconnector->min_vfreq * 1000000;
7520                 config.max_refresh_in_uhz =
7521                                 aconnector->max_vfreq * 1000000;
7522                 config.vsif_supported = true;
7523                 config.btr = true;
7524         }
7525
7526         new_crtc_state->freesync_config = config;
7527 }
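
/*
 * Illustrative numbers only, not taken from any particular panel: a connector
 * reporting a 48-144 Hz FreeSync range yields min_refresh_in_uhz = 48000000
 * and max_refresh_in_uhz = 144000000, and any mode whose vrefresh falls inside
 * that range (e.g. 60 Hz) marks the CRTC state as vrr_supported.
 */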
7528
7529 static void reset_freesync_config_for_crtc(
7530         struct dm_crtc_state *new_crtc_state)
7531 {
7532         new_crtc_state->vrr_supported = false;
7533
7534         memset(&new_crtc_state->vrr_params, 0,
7535                sizeof(new_crtc_state->vrr_params));
7536         memset(&new_crtc_state->vrr_infopacket, 0,
7537                sizeof(new_crtc_state->vrr_infopacket));
7538 }
7539
7540 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7541                                 struct drm_atomic_state *state,
7542                                 struct drm_crtc *crtc,
7543                                 struct drm_crtc_state *old_crtc_state,
7544                                 struct drm_crtc_state *new_crtc_state,
7545                                 bool enable,
7546                                 bool *lock_and_validation_needed)
7547 {
7548         struct dm_atomic_state *dm_state = NULL;
7549         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7550         struct dc_stream_state *new_stream;
7551         int ret = 0;
7552
7553         /*
7554          * TODO: Move this code, which updates the changed items, into
7555          * dm_crtc_atomic_check once we get rid of dc_validation_set.
7556          */
7557         struct amdgpu_crtc *acrtc = NULL;
7558         struct amdgpu_dm_connector *aconnector = NULL;
7559         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7560         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7561
7562         new_stream = NULL;
7563
7564         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7565         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7566         acrtc = to_amdgpu_crtc(crtc);
7567         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7568
7569         /* TODO This hack should go away */
7570         if (aconnector && enable) {
7571                 /* Make sure fake sink is created in plug-in scenario */
7572                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7573                                                             &aconnector->base);
7574                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7575                                                             &aconnector->base);
7576
7577                 if (IS_ERR(drm_new_conn_state)) {
7578                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7579                         goto fail;
7580                 }
7581
7582                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7583                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7584
7585                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7586                         goto skip_modeset;
7587
7588                 new_stream = create_stream_for_sink(aconnector,
7589                                                      &new_crtc_state->mode,
7590                                                     dm_new_conn_state,
7591                                                     dm_old_crtc_state->stream);
7592
7593                 /*
7594                  * We can end up with no stream on ACTION_SET if a display
7595                  * was disconnected during S3. In this case it is not an
7596                  * error: the OS will be updated after detection and
7597                  * will do the right thing on the next atomic commit.
7598                  */
7599
7600                 if (!new_stream) {
7601                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7602                                         __func__, acrtc->base.base.id);
7603                         ret = -ENOMEM;
7604                         goto fail;
7605                 }
7606
7607                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7608
7609                 ret = fill_hdr_info_packet(drm_new_conn_state,
7610                                            &new_stream->hdr_static_metadata);
7611                 if (ret)
7612                         goto fail;
7613
7614                 /*
7615                  * If we already removed the old stream from the context
7616                  * (and set the new stream to NULL) then we can't reuse
7617                  * the old stream even if the stream and scaling are unchanged.
7618                  * We'll hit the BUG_ON and black screen.
7619                  *
7620                  * TODO: Refactor this function to allow this check to work
7621                  * in all conditions.
7622                  */
7623                 if (dm_new_crtc_state->stream &&
7624                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7625                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7626                         new_crtc_state->mode_changed = false;
7627                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
7628                                          new_crtc_state->mode_changed);
7629                 }
7630         }
7631
7632         /* mode_changed flag may get updated above, need to check again */
7633         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7634                 goto skip_modeset;
7635
7636         DRM_DEBUG_DRIVER(
7637                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7638                 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7639                 "connectors_changed:%d\n",
7640                 acrtc->crtc_id,
7641                 new_crtc_state->enable,
7642                 new_crtc_state->active,
7643                 new_crtc_state->planes_changed,
7644                 new_crtc_state->mode_changed,
7645                 new_crtc_state->active_changed,
7646                 new_crtc_state->connectors_changed);
7647
7648         /* Remove stream for any changed/disabled CRTC */
7649         if (!enable) {
7650
7651                 if (!dm_old_crtc_state->stream)
7652                         goto skip_modeset;
7653
7654                 ret = dm_atomic_get_state(state, &dm_state);
7655                 if (ret)
7656                         goto fail;
7657
7658                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7659                                 crtc->base.id);
7660
7661                 /* i.e. reset mode */
7662                 if (dc_remove_stream_from_ctx(
7663                                 dm->dc,
7664                                 dm_state->context,
7665                                 dm_old_crtc_state->stream) != DC_OK) {
7666                         ret = -EINVAL;
7667                         goto fail;
7668                 }
7669
7670                 dc_stream_release(dm_old_crtc_state->stream);
7671                 dm_new_crtc_state->stream = NULL;
7672
7673                 reset_freesync_config_for_crtc(dm_new_crtc_state);
7674
7675                 *lock_and_validation_needed = true;
7676
7677         } else {/* Add stream for any updated/enabled CRTC */
7678                 /*
7679                  * Quick fix to prevent a NULL pointer dereference on new_stream when
7680                  * added MST connectors are not found in the existing crtc_state in chained mode.
7681                  * TODO: dig out the root cause of this.
7682                  */
7683                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7684                         goto skip_modeset;
7685
7686                 if (modereset_required(new_crtc_state))
7687                         goto skip_modeset;
7688
7689                 if (modeset_required(new_crtc_state, new_stream,
7690                                      dm_old_crtc_state->stream)) {
7691
7692                         WARN_ON(dm_new_crtc_state->stream);
7693
7694                         ret = dm_atomic_get_state(state, &dm_state);
7695                         if (ret)
7696                                 goto fail;
7697
7698                         dm_new_crtc_state->stream = new_stream;
7699
7700                         dc_stream_retain(new_stream);
7701
7702                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7703                                                 crtc->base.id);
7704
7705                         if (dc_add_stream_to_ctx(
7706                                         dm->dc,
7707                                         dm_state->context,
7708                                         dm_new_crtc_state->stream) != DC_OK) {
7709                                 ret = -EINVAL;
7710                                 goto fail;
7711                         }
7712
7713                         *lock_and_validation_needed = true;
7714                 }
7715         }
7716
7717 skip_modeset:
7718         /* Release extra reference */
7719         if (new_stream)
7720                 dc_stream_release(new_stream);
7721
7722         /*
7723          * We want to do dc stream updates that do not require a
7724          * full modeset below.
7725          */
7726         if (!(enable && aconnector && new_crtc_state->enable &&
7727               new_crtc_state->active))
7728                 return 0;
7729         /*
7730          * Given the above conditions, the dc state cannot be NULL because:
7731          * 1. We're in the process of enabling CRTCs (the stream has just been
7732          *    added to the dc context, or is already on it),
7733          * 2. the CRTC has a valid connector attached, and
7734          * 3. it is currently active and enabled.
7735          * => The dc stream state currently exists.
7736          */
7737         BUG_ON(dm_new_crtc_state->stream == NULL);
7738
7739         /* Scaling or underscan settings */
7740         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7741                 update_stream_scaling_settings(
7742                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7743
7744         /* ABM settings */
7745         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7746
7747         /*
7748          * Color management settings. We also update color properties
7749          * when a modeset is needed, to ensure it gets reprogrammed.
7750          */
7751         if (dm_new_crtc_state->base.color_mgmt_changed ||
7752             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7753                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7754                 if (ret)
7755                         goto fail;
7756         }
7757
7758         /* Update Freesync settings. */
7759         get_freesync_config_for_crtc(dm_new_crtc_state,
7760                                      dm_new_conn_state);
7761
7762         return ret;
7763
7764 fail:
7765         if (new_stream)
7766                 dc_stream_release(new_stream);
7767         return ret;
7768 }
7769
7770 static bool should_reset_plane(struct drm_atomic_state *state,
7771                                struct drm_plane *plane,
7772                                struct drm_plane_state *old_plane_state,
7773                                struct drm_plane_state *new_plane_state)
7774 {
7775         struct drm_plane *other;
7776         struct drm_plane_state *old_other_state, *new_other_state;
7777         struct drm_crtc_state *new_crtc_state;
7778         int i;
7779
7780         /*
7781          * TODO: Remove this hack once the checks below are sufficient
7782          * to determine when we need to reset all the planes on
7783          * the stream.
7784          */
7785         if (state->allow_modeset)
7786                 return true;
7787
7788         /* Exit early if we know that we're adding or removing the plane. */
7789         if (old_plane_state->crtc != new_plane_state->crtc)
7790                 return true;
7791
7792         /* old crtc == new_crtc == NULL, plane not in context. */
7793         if (!new_plane_state->crtc)
7794                 return false;
7795
7796         new_crtc_state =
7797                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7798
7799         if (!new_crtc_state)
7800                 return true;
7801
7802         /* CRTC Degamma changes currently require us to recreate planes. */
7803         if (new_crtc_state->color_mgmt_changed)
7804                 return true;
7805
7806         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7807                 return true;
7808
7809         /*
7810          * If there are any new primary or overlay planes being added or
7811          * removed then the z-order can potentially change. To ensure
7812          * correct z-order and pipe acquisition the current DC architecture
7813          * requires us to remove and recreate all existing planes.
7814          *
7815          * TODO: Come up with a more elegant solution for this.
7816          */
7817         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7818                 if (other->type == DRM_PLANE_TYPE_CURSOR)
7819                         continue;
7820
7821                 if (old_other_state->crtc != new_plane_state->crtc &&
7822                     new_other_state->crtc != new_plane_state->crtc)
7823                         continue;
7824
7825                 if (old_other_state->crtc != new_other_state->crtc)
7826                         return true;
7827
7828                 /* TODO: Remove this once we can handle fast format changes. */
7829                 if (old_other_state->fb && new_other_state->fb &&
7830                     old_other_state->fb->format != new_other_state->fb->format)
7831                         return true;
7832         }
7833
7834         return false;
7835 }
7836
7837 static int dm_update_plane_state(struct dc *dc,
7838                                  struct drm_atomic_state *state,
7839                                  struct drm_plane *plane,
7840                                  struct drm_plane_state *old_plane_state,
7841                                  struct drm_plane_state *new_plane_state,
7842                                  bool enable,
7843                                  bool *lock_and_validation_needed)
7844 {
7845
7846         struct dm_atomic_state *dm_state = NULL;
7847         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7848         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7849         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7850         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7851         bool needs_reset;
7852         int ret = 0;
7853
7854
7855         new_plane_crtc = new_plane_state->crtc;
7856         old_plane_crtc = old_plane_state->crtc;
7857         dm_new_plane_state = to_dm_plane_state(new_plane_state);
7858         dm_old_plane_state = to_dm_plane_state(old_plane_state);
7859
7860         /* TODO: Implement atomic check for cursor plane */
7861         if (plane->type == DRM_PLANE_TYPE_CURSOR)
7862                 return 0;
7863
7864         needs_reset = should_reset_plane(state, plane, old_plane_state,
7865                                          new_plane_state);
7866
7867         /* Remove any changed/removed planes */
7868         if (!enable) {
7869                 if (!needs_reset)
7870                         return 0;
7871
7872                 if (!old_plane_crtc)
7873                         return 0;
7874
7875                 old_crtc_state = drm_atomic_get_old_crtc_state(
7876                                 state, old_plane_crtc);
7877                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7878
7879                 if (!dm_old_crtc_state->stream)
7880                         return 0;
7881
7882                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7883                                 plane->base.id, old_plane_crtc->base.id);
7884
7885                 ret = dm_atomic_get_state(state, &dm_state);
7886                 if (ret)
7887                         return ret;
7888
7889                 if (!dc_remove_plane_from_context(
7890                                 dc,
7891                                 dm_old_crtc_state->stream,
7892                                 dm_old_plane_state->dc_state,
7893                                 dm_state->context)) {
7894
7895                         ret = -EINVAL;
7896                         return ret;
7897                 }
7898
7900                 dc_plane_state_release(dm_old_plane_state->dc_state);
7901                 dm_new_plane_state->dc_state = NULL;
7902
7903                 *lock_and_validation_needed = true;
7904
7905         } else { /* Add new planes */
7906                 struct dc_plane_state *dc_new_plane_state;
7907
7908                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7909                         return 0;
7910
7911                 if (!new_plane_crtc)
7912                         return 0;
7913
7914                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7915                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7916
7917                 if (!dm_new_crtc_state->stream)
7918                         return 0;
7919
7920                 if (!needs_reset)
7921                         return 0;
7922
7923                 WARN_ON(dm_new_plane_state->dc_state);
7924
7925                 dc_new_plane_state = dc_create_plane_state(dc);
7926                 if (!dc_new_plane_state)
7927                         return -ENOMEM;
7928
7929                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7930                                 plane->base.id, new_plane_crtc->base.id);
7931
7932                 ret = fill_dc_plane_attributes(
7933                         new_plane_crtc->dev->dev_private,
7934                         dc_new_plane_state,
7935                         new_plane_state,
7936                         new_crtc_state);
7937                 if (ret) {
7938                         dc_plane_state_release(dc_new_plane_state);
7939                         return ret;
7940                 }
7941
7942                 ret = dm_atomic_get_state(state, &dm_state);
7943                 if (ret) {
7944                         dc_plane_state_release(dc_new_plane_state);
7945                         return ret;
7946                 }
7947
7948                 /*
7949                  * Any atomic check errors that occur after this will
7950                  * not need a release. The plane state will be attached
7951                  * to the stream, and therefore part of the atomic
7952                  * state. It'll be released when the atomic state is
7953                  * cleaned.
7954                  */
7955                 if (!dc_add_plane_to_context(
7956                                 dc,
7957                                 dm_new_crtc_state->stream,
7958                                 dc_new_plane_state,
7959                                 dm_state->context)) {
7960
7961                         dc_plane_state_release(dc_new_plane_state);
7962                         return -EINVAL;
7963                 }
7964
7965                 dm_new_plane_state->dc_state = dc_new_plane_state;
7966
7967                 /* Tell DC to do a full surface update every time there
7968                  * is a plane change. Inefficient, but works for now.
7969                  */
7970                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7971
7972                 *lock_and_validation_needed = true;
7973         }
7974
7976         return ret;
7977 }
7978
7979 static int
7980 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
7981                                     struct drm_atomic_state *state,
7982                                     enum surface_update_type *out_type)
7983 {
7984         struct dc *dc = dm->dc;
7985         struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
7986         int i, j, num_plane, ret = 0;
7987         struct drm_plane_state *old_plane_state, *new_plane_state;
7988         struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
7989         struct drm_crtc *new_plane_crtc;
7990         struct drm_plane *plane;
7991
7992         struct drm_crtc *crtc;
7993         struct drm_crtc_state *new_crtc_state, *old_crtc_state;
7994         struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
7995         struct dc_stream_status *status = NULL;
7996         enum surface_update_type update_type = UPDATE_TYPE_FAST;
7997         struct surface_info_bundle {
7998                 struct dc_surface_update surface_updates[MAX_SURFACES];
7999                 struct dc_plane_info plane_infos[MAX_SURFACES];
8000                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8001                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8002                 struct dc_stream_update stream_update;
8003         } *bundle;
8004
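        /*
         * The bundle is heap-allocated: with MAX_SURFACES entries in each of
         * the four arrays above, it would be too large for the kernel stack.
         */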
8005         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8006
8007         if (!bundle) {
8008                 DRM_ERROR("Failed to allocate update bundle\n");
8009                 /* Set type to FULL to avoid crashing in DC */
8010                 update_type = UPDATE_TYPE_FULL;
8011                 goto cleanup;
8012         }
8013
8014         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8015
8016                 memset(bundle, 0, sizeof(struct surface_info_bundle));
8017
8018                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8019                 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8020                 num_plane = 0;
8021
8022                 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8023                         update_type = UPDATE_TYPE_FULL;
8024                         goto cleanup;
8025                 }
8026
8027                 if (!new_dm_crtc_state->stream)
8028                         continue;
8029
8030                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8031                         const struct amdgpu_framebuffer *amdgpu_fb =
8032                                 to_amdgpu_framebuffer(new_plane_state->fb);
8033                         struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8034                         struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8035                         struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8036                         uint64_t tiling_flags;
8037
8038                         new_plane_crtc = new_plane_state->crtc;
8039                         new_dm_plane_state = to_dm_plane_state(new_plane_state);
8040                         old_dm_plane_state = to_dm_plane_state(old_plane_state);
8041
8042                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8043                                 continue;
8044
8045                         if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8046                                 update_type = UPDATE_TYPE_FULL;
8047                                 goto cleanup;
8048                         }
8049
8050                         if (crtc != new_plane_crtc)
8051                                 continue;
8052
8053                         bundle->surface_updates[num_plane].surface =
8054                                         new_dm_plane_state->dc_state;
8055
8056                         if (new_crtc_state->mode_changed) {
8057                                 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8058                                 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8059                         }
8060
8061                         if (new_crtc_state->color_mgmt_changed) {
8062                                 bundle->surface_updates[num_plane].gamma =
8063                                                 new_dm_plane_state->dc_state->gamma_correction;
8064                                 bundle->surface_updates[num_plane].in_transfer_func =
8065                                                 new_dm_plane_state->dc_state->in_transfer_func;
8066                                 bundle->stream_update.gamut_remap =
8067                                                 &new_dm_crtc_state->stream->gamut_remap_matrix;
8068                                 bundle->stream_update.output_csc_transform =
8069                                                 &new_dm_crtc_state->stream->csc_color_matrix;
8070                                 bundle->stream_update.out_transfer_func =
8071                                                 new_dm_crtc_state->stream->out_transfer_func;
8072                         }
8073
8074                         ret = fill_dc_scaling_info(new_plane_state,
8075                                                    scaling_info);
8076                         if (ret)
8077                                 goto cleanup;
8078
8079                         bundle->surface_updates[num_plane].scaling_info = scaling_info;
8080
8081                         if (amdgpu_fb) {
8082                                 ret = get_fb_info(amdgpu_fb, &tiling_flags);
8083                                 if (ret)
8084                                         goto cleanup;
8085
8086                                 ret = fill_dc_plane_info_and_addr(
8087                                         dm->adev, new_plane_state, tiling_flags,
8088                                         plane_info,
8089                                         &flip_addr->address);
8090                                 if (ret)
8091                                         goto cleanup;
8092
8093                                 bundle->surface_updates[num_plane].plane_info = plane_info;
8094                                 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8095                         }
8096
8097                         num_plane++;
8098                 }
8099
8100                 if (num_plane == 0)
8101                         continue;
8102
8103                 ret = dm_atomic_get_state(state, &dm_state);
8104                 if (ret)
8105                         goto cleanup;
8106
8107                 old_dm_state = dm_atomic_get_old_state(state);
8108                 if (!old_dm_state) {
8109                         ret = -EINVAL;
8110                         goto cleanup;
8111                 }
8112
8113                 status = dc_stream_get_status_from_state(old_dm_state->context,
8114                                                          new_dm_crtc_state->stream);
8115                 bundle->stream_update.stream = new_dm_crtc_state->stream;
8116                 /*
8117                  * TODO: DC modifies the surface during this call so we need
8118                  * to lock here - find a way to do this without locking.
8119                  */
8120                 mutex_lock(&dm->dc_lock);
8121                 update_type = dc_check_update_surfaces_for_stream(
8122                                 dc, bundle->surface_updates, num_plane,
8123                                 &bundle->stream_update, status);
8124                 mutex_unlock(&dm->dc_lock);
8125
8126                 if (update_type > UPDATE_TYPE_MED) {
8127                         update_type = UPDATE_TYPE_FULL;
8128                         goto cleanup;
8129                 }
8130         }
8131
8132 cleanup:
8133         kfree(bundle);
8134
8135         *out_type = update_type;
8136         return ret;
8137 }
8138
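/*
 * If the given CRTC is driven through a DP MST port, add all CRTCs that may
 * be affected by a DSC reconfiguration on the same MST topology to the
 * atomic state.
 */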
8139 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8140 {
8141         struct drm_connector *connector;
8142         struct drm_connector_state *conn_state;
8143         struct amdgpu_dm_connector *aconnector = NULL;
8144         int i;
8145         for_each_new_connector_in_state(state, connector, conn_state, i) {
8146                 if (conn_state->crtc != crtc)
8147                         continue;
8148
8149                 aconnector = to_amdgpu_dm_connector(connector);
8150                 if (!aconnector->port || !aconnector->mst_port)
8151                         aconnector = NULL;
8152                 else
8153                         break;
8154         }
8155
8156         if (!aconnector)
8157                 return 0;
8158
8159         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8160 }
8161
8162 /**
8163  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8164  * @dev: The DRM device
8165  * @state: The atomic state to commit
8166  *
8167  * Validate that the given atomic state is programmable by DC into hardware.
8168  * This involves constructing a &struct dc_state reflecting the new hardware
8169  * state we wish to commit, then querying DC to see if it is programmable. It's
8170  * important not to modify the existing DC state. Otherwise, atomic_check
8171  * may unexpectedly commit hardware changes.
8172  *
8173  * When validating the DC state, it's important that the right locks are
8174  * acquired. For a full update, which removes, adds, or updates streams on
8175  * one CRTC while flipping on another, acquiring the global lock guarantees
8176  * that any such full-update commit will wait for the completion of any
8177  * outstanding flip using DRM's synchronization events. See
8178  * dm_determine_update_type_for_commit().
8179  *
8180  * Note that DM adds the affected connectors for all CRTCs in state, when that
8181  * might not seem necessary. This is because DC stream creation requires the
8182  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8183  * be possible but non-trivial - a possible TODO item.
8184  *
8185  * Return: 0 on success, or a negative error code if validation failed.
8186  */
8187 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8188                                   struct drm_atomic_state *state)
8189 {
8190         struct amdgpu_device *adev = dev->dev_private;
8191         struct dm_atomic_state *dm_state = NULL;
8192         struct dc *dc = adev->dm.dc;
8193         struct drm_connector *connector;
8194         struct drm_connector_state *old_con_state, *new_con_state;
8195         struct drm_crtc *crtc;
8196         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8197         struct drm_plane *plane;
8198         struct drm_plane_state *old_plane_state, *new_plane_state;
8199         enum surface_update_type update_type = UPDATE_TYPE_FAST;
8200         enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8201
8202         int ret, i;
8203
8204         /*
8205          * This bool will be set to true for any modeset/reset
8206          * or plane update that implies a non-fast surface update.
8207          */
8208         bool lock_and_validation_needed = false;
8209
8210         ret = drm_atomic_helper_check_modeset(dev, state);
8211         if (ret)
8212                 goto fail;
8213
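        /*
         * Only ASICs from Navi onward (CHIP_NAVI10 and newer) support DSC
         * over DP MST; a modeset there may change the DSC bandwidth
         * allocation, so pull every CRTC on the same MST topology into the
         * state for re-validation.
         */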
8214         if (adev->asic_type >= CHIP_NAVI10) {
8215                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8216                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8217                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
8218                                 if (ret)
8219                                         goto fail;
8220                         }
8221                 }
8222         }
8223
8224         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8225                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8226                     !new_crtc_state->color_mgmt_changed &&
8227                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8228                         continue;
8229
8230                 if (!new_crtc_state->enable)
8231                         continue;
8232
8233                 ret = drm_atomic_add_affected_connectors(state, crtc);
8234                 if (ret)
8235                         goto fail;
8236
8237                 ret = drm_atomic_add_affected_planes(state, crtc);
8238                 if (ret)
8239                         goto fail;
8240         }
8241
8242         /*
8243          * Add all primary and overlay planes on the CRTC to the state
8244          * whenever a plane is enabled to maintain correct z-ordering
8245          * and to enable fast surface updates.
8246          */
8247         drm_for_each_crtc(crtc, dev) {
8248                 bool modified = false;
8249
8250                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8251                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8252                                 continue;
8253
8254                         if (new_plane_state->crtc == crtc ||
8255                             old_plane_state->crtc == crtc) {
8256                                 modified = true;
8257                                 break;
8258                         }
8259                 }
8260
8261                 if (!modified)
8262                         continue;
8263
8264                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8265                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8266                                 continue;
8267
8268                         new_plane_state =
8269                                 drm_atomic_get_plane_state(state, plane);
8270
8271                         if (IS_ERR(new_plane_state)) {
8272                                 ret = PTR_ERR(new_plane_state);
8273                                 goto fail;
8274                         }
8275                 }
8276         }
8277
8278         /* Remove existing planes if they are modified */
8279         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8280                 ret = dm_update_plane_state(dc, state, plane,
8281                                             old_plane_state,
8282                                             new_plane_state,
8283                                             false,
8284                                             &lock_and_validation_needed);
8285                 if (ret)
8286                         goto fail;
8287         }
8288
8289         /* Disable all CRTCs that require it */
8290         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8291                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8292                                            old_crtc_state,
8293                                            new_crtc_state,
8294                                            false,
8295                                            &lock_and_validation_needed);
8296                 if (ret)
8297                         goto fail;
8298         }
8299
8300         /* Enable all CRTCs that require it */
8301         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8302                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8303                                            old_crtc_state,
8304                                            new_crtc_state,
8305                                            true,
8306                                            &lock_and_validation_needed);
8307                 if (ret)
8308                         goto fail;
8309         }
8310
8311         /* Add new/modified planes */
8312         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8313                 ret = dm_update_plane_state(dc, state, plane,
8314                                             old_plane_state,
8315                                             new_plane_state,
8316                                             true,
8317                                             &lock_and_validation_needed);
8318                 if (ret)
8319                         goto fail;
8320         }
8321
8322         /* Run this here since we want to validate the streams we created */
8323         ret = drm_atomic_helper_check_planes(dev, state);
8324         if (ret)
8325                 goto fail;
8326
8327         if (state->legacy_cursor_update) {
8328                 /*
8329                  * This is a fast cursor update coming from the plane update
8330                  * helper, check if it can be done asynchronously for better
8331                  * performance.
8332                  */
8333                 state->async_update =
8334                         !drm_atomic_helper_async_check(dev, state);
8335
8336                 /*
8337                  * Skip the remaining global validation if this is an async
8338                  * update. Cursor updates can be done without affecting
8339                  * state or bandwidth calcs and this avoids the performance
8340                  * penalty of locking the private state object and
8341                  * allocating a new dc_state.
8342                  */
8343                 if (state->async_update)
8344                         return 0;
8345         }
8346
8347         /* Check scaling and underscan changes */
8348         /* TODO: Scaling-change validation was removed because a new stream
8349          * cannot be committed into the context without causing a full reset.
8350          * We still need to decide how to handle this.
8351          */
8352         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8353                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8354                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8355                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8356
8357                 /* Skip any modesets/resets */
8358                 if (!acrtc || drm_atomic_crtc_needs_modeset(
8359                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8360                         continue;
8361
8362                 /* Skip anything that is not a scaling or underscan change */
8363                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8364                         continue;
8365
8366                 overall_update_type = UPDATE_TYPE_FULL;
8367                 lock_and_validation_needed = true;
8368         }
8369
8370         ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8371         if (ret)
8372                 goto fail;
8373
8374         if (overall_update_type < update_type)
8375                 overall_update_type = update_type;
8376
8377         /*
8378          * lock_and_validation_needed was an old way to determine whether we
8379          * need to take the global lock. It is left in to check for broken corner cases:
8380          * lock_and_validation_needed true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8381          * lock_and_validation_needed false = UPDATE_TYPE_FAST
8382          */
8383         if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8384                 WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL\n");
8385
8386         if (overall_update_type > UPDATE_TYPE_FAST) {
8387                 ret = dm_atomic_get_state(state, &dm_state);
8388                 if (ret)
8389                         goto fail;
8390
8391                 ret = do_aquire_global_lock(dev, state);
8392                 if (ret)
8393                         goto fail;
8394
8395 #if defined(CONFIG_DRM_AMD_DC_DCN)
8396                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
8397                         ret = -EINVAL;
8398                         goto fail;
                }

8399                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8400                 if (ret)
8401                         goto fail;
8402 #endif
8403
8404                 /*
8405                  * Perform validation of the MST topology in the state:
8406                  * we need to perform the MST atomic check before calling
8407                  * dc_validate_global_state(), or we risk getting stuck in
8408                  * an infinite loop and eventually hanging.
8409                  */
8410                 ret = drm_dp_mst_atomic_check(state);
8411                 if (ret)
8412                         goto fail;
8413
8414                 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8415                         ret = -EINVAL;
8416                         goto fail;
8417                 }
8418         } else {
8419                 /*
8420                  * The commit is a fast update. Fast updates shouldn't change
8421                  * the DC context or affect global validation, and they can
8422                  * have their commit work done in parallel with other commits
8423                  * that don't touch the same resources. If we got a new DC
8424                  * context as part of the DM atomic state from validation, we
8425                  * need to free it and retain the existing one instead.
8426                  */
8427                 struct dm_atomic_state *new_dm_state, *old_dm_state;
8428
8429                 new_dm_state = dm_atomic_get_new_state(state);
8430                 old_dm_state = dm_atomic_get_old_state(state);
8431
8432                 if (new_dm_state && old_dm_state) {
8433                         if (new_dm_state->context)
8434                                 dc_release_state(new_dm_state->context);
8435
8436                         new_dm_state->context = old_dm_state->context;
8437
8438                         if (old_dm_state->context)
8439                                 dc_retain_state(old_dm_state->context);
8440                 }
8441         }
8442
8443         /* Store the overall update type for use later in atomic check. */
8444         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8445                 struct dm_crtc_state *dm_new_crtc_state =
8446                         to_dm_crtc_state(new_crtc_state);
8447
8448                 dm_new_crtc_state->update_type = (int)overall_update_type;
8449         }
8450
8451         /* Must be success: every failure path above jumps to fail */
8452         WARN_ON(ret);
8453         return ret;
8454
8455 fail:
8456         if (ret == -EDEADLK)
8457                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8458         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8459                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8460         else
8461                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8462
8463         return ret;
8464 }
8465
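/*
 * A sink can support variable refresh without relying on MSA timing
 * parameters if it sets DP_MSA_TIMING_PAR_IGNORED in the
 * DP_DOWN_STREAM_PORT_COUNT DPCD register.
 */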
8466 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8467                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
8468 {
8469         uint8_t dpcd_data;
8470         bool capable = false;
8471
8472         if (amdgpu_dm_connector->dc_link &&
8473                 dm_helpers_dp_read_dpcd(
8474                                 NULL,
8475                                 amdgpu_dm_connector->dc_link,
8476                                 DP_DOWN_STREAM_PORT_COUNT,
8477                                 &dpcd_data,
8478                                 sizeof(dpcd_data))) {
8479                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8480         }
8481
8482         return capable;
8483 }
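
/*
 * Scan the EDID detailed timing descriptors for a continuous vertical
 * refresh range and update the connector's FreeSync/VRR capability
 * accordingly.
 */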
8484 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8485                                         struct edid *edid)
8486 {
8487         int i;
8488         bool edid_check_required;
8489         struct detailed_timing *timing;
8490         struct detailed_non_pixel *data;
8491         struct detailed_data_monitor_range *range;
8492         struct amdgpu_dm_connector *amdgpu_dm_connector =
8493                         to_amdgpu_dm_connector(connector);
8494         struct dm_connector_state *dm_con_state = NULL;
8495
8496         struct drm_device *dev = connector->dev;
8497         struct amdgpu_device *adev = dev->dev_private;
8498         bool freesync_capable = false;
8499
8500         if (!connector->state) {
8501                 DRM_ERROR("%s - Connector has no state\n", __func__);
8502                 goto update;
8503         }
8504
8505         if (!edid) {
8506                 dm_con_state = to_dm_connector_state(connector->state);
8507
8508                 amdgpu_dm_connector->min_vfreq = 0;
8509                 amdgpu_dm_connector->max_vfreq = 0;
8510                 amdgpu_dm_connector->pixel_clock_mhz = 0;
8511
8512                 goto update;
8513         }
8514
8515         dm_con_state = to_dm_connector_state(connector->state);
8516
8517         edid_check_required = false;
8518         if (!amdgpu_dm_connector->dc_sink) {
8519                 DRM_ERROR("dc_sink is NULL, could not set up the FreeSync module.\n");
8520                 goto update;
8521         }
8522         if (!adev->dm.freesync_module)
8523                 goto update;
8524         /*
8525          * If the EDID is non-NULL, restrict FreeSync support to DP and eDP sinks.
8526          */
8527         if (edid) {
8528                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8529                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8530                         edid_check_required = is_dp_capable_without_timing_msa(
8531                                                 adev->dm.dc,
8532                                                 amdgpu_dm_connector);
8533                 }
8534         }
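        /*
         * Only trust detailed timing descriptors for the FreeSync range on
         * EDID 1.2 (version 1, revision 2) or newer, per the check below.
         */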
8535         if (edid_check_required && (edid->version > 1 ||
8536            (edid->version == 1 && edid->revision > 1))) {
8537                 for (i = 0; i < 4; i++) {
8538
8539                         timing  = &edid->detailed_timings[i];
8540                         data    = &timing->data.other_data;
8541                         range   = &data->data.range;
8542                         /*
8543                          * Check if monitor has continuous frequency mode
8544                          */
8545                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
8546                                 continue;
8547                         /*
8548                          * Check for the range-limits flag only: if flags == 1,
8549                          * no additional timing information is provided.
8550                          * Default GTF, GTF secondary curve and CVT are not
8551                          * supported.
8552                          */
8553                         if (range->flags != 1)
8554                                 continue;
8555
8556                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8557                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8558                         amdgpu_dm_connector->pixel_clock_mhz =
8559                                 range->pixel_clock_mhz * 10;
8560                         break;
8561                 }
8562
8563                 if (amdgpu_dm_connector->max_vfreq -
8564                     amdgpu_dm_connector->min_vfreq > 10) {
8565
8566                         freesync_capable = true;
8567                 }
8568         }
8569
8570 update:
8571         if (dm_con_state)
8572                 dm_con_state->freesync_capable = freesync_capable;
8573
8574         if (connector->vrr_capable_property)
8575                 drm_connector_set_vrr_capable_property(connector,
8576                                                        freesync_capable);
8577 }
8578
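/*
 * Cache the sink's PSR capability by reading the DP_PSR_SUPPORT DPCD
 * register; only connected eDP links are considered.
 */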
8579 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8580 {
8581         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8582
8583         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8584                 return;
8585         if (link->type == dc_connection_none)
8586                 return;
8587         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8588                                         dpcd_data, sizeof(dpcd_data))) {
8589                 link->psr_feature_enabled = dpcd_data[0] ? true : false;
8590                 DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
8591         }
8592 }
8593
8594 /*
8595  * amdgpu_dm_link_setup_psr() - configure the PSR link
8596  * @stream: stream state
8597  *
8598  * Return: true on success
8599  */
8600 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8601 {
8602         struct dc_link *link = NULL;
8603         struct psr_config psr_config = {0};
8604         struct psr_context psr_context = {0};
8605         struct dc *dc = NULL;
8606         bool ret = false;
8607
8608         if (stream == NULL)
8609                 return false;
8610
8611         link = stream->link;
8612         dc = link->ctx->dc;
8613
8614         psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8615
8616         if (psr_config.psr_version > 0) {
8617                 psr_config.psr_exit_link_training_required = 0x1;
8618                 psr_config.psr_frame_capture_indication_req = 0;
8619                 psr_config.psr_rfb_setup_time = 0x37;
8620                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8621                 psr_config.allow_smu_optimizations = 0x0;
8622
8623                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8624
8625         }
8626         DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_feature_enabled);
8627         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
8628         return ret;
8629 }
8630
8631 /*
8632  * amdgpu_dm_psr_enable() - enable the PSR firmware
8633  * @stream: stream state
8634  *
8635  * Return: true on success
8636  */
8637 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8638 {
8639         struct dc_link *link = stream->link;
8640         unsigned int vsync_rate_hz = 0;
8641         struct dc_static_screen_params params = {0};
8642         /* Calculate the number of static frames before generating an
8643          * interrupt to enter PSR.
8644          */
8645         /* Fail-safe default of 2 static frames */
8646         unsigned int num_frames_static = 2;
8647
8648         DRM_DEBUG_DRIVER("Enabling psr...\n");
8649
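        /*
         * vsync_rate_hz = pixel clock in Hz / (h_total * v_total);
         * stream->timing.pix_clk_100hz is in units of 100 Hz, hence the
         * multiply by 100. Illustrative example (numbers not taken from
         * this driver): a 148.5 MHz pixel clock with v_total = 1125 and
         * h_total = 2200 gives 148500000 / 1125 / 2200 = 60 Hz.
         */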
8650         vsync_rate_hz = div64_u64(div64_u64((
8651                         stream->timing.pix_clk_100hz * 100),
8652                         stream->timing.v_total),
8653                         stream->timing.h_total);
8654
8655         /*
8656          * Round up: calculate the number of frames such that at least
8657          * 30 ms of time has passed.
8658          */
8659         if (vsync_rate_hz != 0) {
8660                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8661                 num_frames_static = (30000 / frame_time_microsec) + 1;
8662         }
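        /*
         * Illustrative example: at 60 Hz, frame_time_microsec is
         * 1000000 / 60 = 16666, so num_frames_static becomes
         * 30000 / 16666 + 1 = 2, i.e. roughly 33 ms of static screen
         * before PSR entry.
         */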
8663
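        /* Updates treated as screen activity for static-screen detection */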
8664         params.triggers.cursor_update = true;
8665         params.triggers.overlay_update = true;
8666         params.triggers.surface_update = true;
8667         params.num_frames = num_frames_static;
8668
8669         dc_stream_set_static_screen_params(link->ctx->dc,
8670                                            &stream, 1,
8671                                            &params);
8672
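        /* Allow PSR to become active; don't block waiting for the transition */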
8673         return dc_link_set_psr_allow_active(link, true, false);
8674 }
8675
8676 /*
8677  * amdgpu_dm_psr_disable() - disable the PSR firmware
8678  * @stream: stream state
8679  *
8680  * Return: true on success
8681  */
8682 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8683 {
8684
8685         DRM_DEBUG_DRIVER("Disabling psr...\n");
8686
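        /* Disallow PSR and wait for the active state to be exited */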
8687         return dc_link_set_psr_allow_active(stream->link, false, true);
8688 }