/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: desired amdgpu device
 * @crtc: which CRTC to get the counter from
 *
 * Return: the vblank counter for the CRTC, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc_state->stream);
        }
}

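/*
 * dm_crtc_get_scanoutpos - report the current scanout position for a CRTC.
 *
 * Queries DC for the vblank start/end lines and the current h/v position,
 * then packs them back into the legacy register format the base driver
 * expects: position = v | (h << 16), vbl = start | (end << 16).
 */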
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc_state->stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

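/*
 * Stub amd_ip_funcs hooks: DM does not participate in idle tracking or
 * soft reset, so these report idle and succeed trivially.
 */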
static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

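/*
 * Map an OTG (output timing generator) instance back to its amdgpu_crtc by
 * walking the DRM CRTC list; falls back to CRTC 0 (with a warning) when the
 * instance is invalid.
 */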
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev->ddev;
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, carrying the amdgpu device and
 *                    IRQ source
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        struct dm_crtc_state *acrtc_state;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                 amdgpu_crtc->pflip_status,
                                 AMDGPU_FLIP_SUBMITTED,
                                 amdgpu_crtc->crtc_id,
                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        if (!e)
                WARN_ON(1);

        acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
        vrr_active = amdgpu_dm_vrr_active(acrtc_state);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one is incremented at the start of the
         * vblank in which the pageflip completed, so last_flip_vblank is the
         * forbidden count for queueing new pageflips if vsync + VRR is
         * enabled.
         */
        amdgpu_crtc->last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}

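/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: interrupt parameters
 *
 * In VRR mode, core vblank handling is deferred to this handler, after the
 * end of the front-porch, where vblank timestamps are valid. BTR is also
 * processed here for pre-DCE12 ASICs.
 */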
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              amdgpu_dm_vrr_active(acrtc_state));

                /* Core vblank handling is done here after the end of the
                 * front-porch in vrr mode, as vblank timestamping only gives
                 * valid results after the front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside the front-porch.
                 */
                if (amdgpu_dm_vrr_active(acrtc_state)) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc_state->stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev->ddev->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc_state->stream,
                                    &acrtc_state->vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc_state->stream,
                                    &acrtc_state->vrr_params.adjust);
                                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: interrupt parameters, carrying the amdgpu device and
 *                    IRQ source
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

        if (acrtc) {
                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              amdgpu_dm_vrr_active(acrtc_state));

                /* Core vblank handling at start of front-porch is only possible
                 * in non-vrr mode, as only then does vblank timestamping give
                 * valid results while in the front-porch. Otherwise defer it
                 * to dm_vupdate_high_irq after the end of the front-porch.
                 */
                if (!amdgpu_dm_vrr_active(acrtc_state))
                        drm_crtc_handle_vblank(&acrtc->base);

                /* The following must happen at the start of vblank, for crc
                 * computation and below-the-range btr support in vrr mode.
                 */
                amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

                if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
                    acrtc_state->vrr_params.supported &&
                    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
                        spin_lock_irqsave(&adev->ddev->event_lock, flags);
                        mod_freesync_handle_v_update(
                                adev->dm.freesync_module,
                                acrtc_state->stream,
                                &acrtc_state->vrr_params);

                        dc_stream_adjust_vmin_vmax(
                                adev->dm.dc,
                                acrtc_state->stream,
                                &acrtc_state->vrr_params.adjust);
                        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                }
        }
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
 * * We are close enough to VUPDATE - the point of no return for hw
 * * We are in the fixed portion of variable front porch when vrr is enabled
 * * We are before VUPDATE, where double-buffered vrr registers are swapped
 *
 * It is therefore the correct place to signal vblank, send user flip events,
 * and update VRR.
 */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

        if (!acrtc)
                return;

        acrtc_state = to_dm_crtc_state(acrtc->base.state);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      amdgpu_dm_vrr_active(acrtc_state),
                      acrtc_state->active_planes);

        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
        drm_crtc_handle_vblank(&acrtc->base);

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        if (acrtc_state->vrr_params.supported &&
            acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(
                        adev->dm.freesync_module,
                        acrtc_state->stream,
                        &acrtc_state->vrr_params);

                dc_stream_adjust_vmin_vmax(
                        adev->dm.dc,
                        acrtc_state->stream,
                        &acrtc_state->vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCN HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc_state->active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
#endif

static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct dm_comressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

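/*
 * DRM audio component glue: allows the HDA driver to query the ELD for a
 * given display audio pin through the drm_audio_component interface.
 */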
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

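/*
 * Bring up the DMUB microcontroller: copy the firmware and VBIOS windows
 * into framebuffer memory, clear the mailbox/tracebuffer/fw-state regions,
 * program the hardware parameters, then wait for the firmware to auto-load
 * and hook the resulting service into DC.
 */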
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load the DMUB firmware's
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * is done here in dm_dmub_hw_init.
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
               fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev->ddev;
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                break;
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        init_data.soc_bounding_box = adev->dm.soc_bounding_box;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize vblank for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif
        /* Guard against a NULL dc: fini also runs on early init failure. */
        if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}


static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

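/*
 * Register accessor callbacks handed to the DMUB service; they route DMUB
 * register I/O through DC's dm_read_reg()/dm_write_reg() helpers.
 */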
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
        struct amdgpu_device *adev = ctx;

        return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
        struct amdgpu_device *adev = ctx;

        return dm_write_reg(adev->dm.dc->ctx, address, value);
}

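/*
 * Software-side DMUB setup: fetch and validate the firmware, create the
 * DMUB service, size its memory regions, back them with a VRAM buffer, and
 * compute the per-window framebuffer info consumed by dm_dmub_hw_init().
 */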
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
        struct dmub_srv_create_params create_params;
        struct dmub_srv_region_params region_params;
        struct dmub_srv_region_info region_info;
        struct dmub_srv_fb_params fb_params;
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
        const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;

        switch (adev->asic_type) {
        case CHIP_RENOIR:
                dmub_asic = DMUB_ASIC_DCN21;
                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
                break;

        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }

        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
        if (r) {
                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
                return 0;
        }

        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
        if (r) {
                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

        /* Set the version before logging it in the PSP path below. */
        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }

        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;

        if (!dmub_srv) {
                DRM_ERROR("Failed to allocate DMUB service!\n");
                return -ENOMEM;
        }

        memset(&create_params, 0, sizeof(create_params));
        create_params.user_ctx = adev;
        create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
        create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
        create_params.asic = dmub_asic;

        /* Create the DMUB service. */
        status = dmub_srv_create(dmub_srv, &create_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error creating DMUB service: %d\n", status);
                return -EINVAL;
        }

        /* Calculate the size of all the regions for the DMUB service. */
        memset(&region_params, 0, sizeof(region_params));

        region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                                        PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
        region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
        region_params.vbios_size = adev->bios_size;
        region_params.fw_bss_data =
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes);

        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);

        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB region info: %d\n", status);
                return -EINVAL;
        }

        /*
         * Allocate a framebuffer based on the total size of all the regions.
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
                return r;

        /* Rebase the regions on the framebuffer address. */
        memset(&fb_params, 0, sizeof(fb_params));
        fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
        fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
        fb_params.region_info = &region_info;

        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
        fb_info = adev->dm.dmub_fb_info;

        if (!fb_info) {
                DRM_ERROR(
                        "Failed to allocate framebuffer info for DMUB service!\n");
                return -ENOMEM;
        }

        status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
                return -EINVAL;
        }

        return 0;
}

static int dm_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = dm_dmub_sw_init(adev);
        if (r)
                return r;

        return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        kfree(adev->dm.dmub_fb_info);
        adev->dm.dmub_fb_info = NULL;

        if (adev->dm.dmub_srv) {
                dmub_srv_destroy(adev->dm.dmub_srv);
                adev->dm.dmub_srv = NULL;
        }

        if (adev->dm.dmub_fw) {
                release_firmware(adev->dm.dmub_fw);
                adev->dm.dmub_fw = NULL;
        }

        if (adev->dm.fw_dmcu) {
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
        }

        return 0;
}

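/*
 * Enable MST topology management on every connector whose link detected an
 * MST branch device; on failure the link is demoted to a single display.
 */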
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        int ret = 0;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type == dc_connection_mst_branch &&
                    aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                         aconnector,
                                         aconnector->base.base.id);

                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
                        if (ret < 0) {
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                aconnector->dc_link->type =
                                        dc_connection_single;
                                break;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        return ret;
}

static int dm_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        bool ret = false;

        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;

        params.set = 0;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;

        /* Min backlight level after ABM reduction; don't allow below 1%:
         * 0xFFFF * 0.01 = 0x28F
         */
        params.min_abm_backlight = 0x28F;

        /* todo will enable for navi10 */
        if (adev->asic_type <= CHIP_RAVEN) {
                ret = dmcu_load_iram(dmcu, params);

                if (!ret)
                        return -EINVAL;
        }

        return detect_mst_link_for_all_connectors(adev->ddev);
}

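/*
 * Suspend or resume the MST topology managers of all MST root connectors.
 * If a manager fails to resume, MST is torn down on that connector and a
 * hotplug event is generated so userspace rescans the topology.
 */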
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_dp_mst_topology_mgr *mgr;
        int ret;
        bool need_hotplug = false;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type != dc_connection_mst_branch ||
                    aconnector->mst_port)
                        continue;

                mgr = &aconnector->mst_mgr;

                if (suspend) {
                        drm_dp_mst_topology_mgr_suspend(mgr);
                } else {
                        ret = drm_dp_mst_topology_mgr_resume(mgr, true);
                        if (ret < 0) {
                                drm_dp_mst_topology_mgr_set_mst(mgr, false);
                                need_hotplug = true;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        if (need_hotplug)
                drm_kms_helper_hotplug_event(dev);
}

1449 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1450 {
1451         struct smu_context *smu = &adev->smu;
1452         int ret = 0;
1453
1454         if (!is_support_sw_smu(adev))
1455                 return 0;
1456
1457         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1458          * depends on the Windows driver dc implementation.
1459          * For Navi1x, the clock settings of the dcn watermarks are fixed and
1460          * should be passed to smu during boot up and on resume from s3.
1461          * Boot up: dc calculates the dcn watermark clock settings within
1462          * dc_create, dcn20_resource_construct,
1463          * then calls the pplib functions below to pass the settings to smu:
1464          * smu_set_watermarks_for_clock_ranges
1465          * smu_set_watermarks_table
1466          * navi10_set_watermarks_table
1467          * smu_write_watermarks_table
1468          *
1469          * For Renoir, the clock settings of the dcn watermarks are also fixed
1470          * values. dc has implemented a different flow for the Windows driver:
1471          * dc_hardware_init / dc_set_power_state
1472          * dcn10_init_hw
1473          * notify_wm_ranges
1474          * set_wm_ranges
1475          * -- Linux
1476          * smu_set_watermarks_for_clock_ranges
1477          * renoir_set_watermarks_table
1478          * smu_write_watermarks_table
1479          *
1480          * For Linux,
1481          * dc_hardware_init -> amdgpu_dm_init
1482          * dc_set_power_state --> dm_resume
1483          *
1484          * Therefore, this function applies to Navi10/12/14 but not to Renoir.
1485          *
1486          */
1487         switch (adev->asic_type) {
1488         case CHIP_NAVI10:
1489         case CHIP_NAVI14:
1490         case CHIP_NAVI12:
1491                 break;
1492         default:
1493                 return 0;
1494         }
1495
1496         mutex_lock(&smu->mutex);
1497
1498         /* pass data to smu controller */
1499         if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1500                         !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1501                 ret = smu_write_watermarks_table(smu);
1502
1503                 if (ret) {
1504                         mutex_unlock(&smu->mutex);
1505                         DRM_ERROR("Failed to update WMTABLE!\n");
1506                         return ret;
1507                 }
1508                 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1509         }
1510
1511         mutex_unlock(&smu->mutex);
1512
1513         return 0;
1514 }
1515
1516 /**
1517  * dm_hw_init() - Initialize DC device
1518  * @handle: The base driver device containing the amdgpu_dm device.
1519  *
1520  * Initialize the &struct amdgpu_display_manager device. This involves calling
1521  * the initializers of each DM component, then populating the struct with them.
1522  *
1523  * Although the function implies hardware initialization, both hardware and
1524  * software are initialized here. Splitting them out to their relevant init
1525  * hooks is a future TODO item.
1526  *
1527  * Some notable things that are initialized here:
1528  *
1529  * - Display Core, both software and hardware
1530  * - DC modules that we need (freesync and color management)
1531  * - DRM software states
1532  * - Interrupt sources and handlers
1533  * - Vblank support
1534  * - Debug FS entries, if enabled
1535  */
1536 static int dm_hw_init(void *handle)
1537 {
1538         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1539         /* Create DAL display manager */
1540         amdgpu_dm_init(adev);
1541         amdgpu_dm_hpd_init(adev);
1542
1543         return 0;
1544 }
1545
1546 /**
1547  * dm_hw_fini() - Teardown DC device
1548  * @handle: The base driver device containing the amdgpu_dm device.
1549  *
1550  * Teardown components within &struct amdgpu_display_manager that require
1551  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1552  * were loaded. Also flush IRQ workqueues and disable them.
1553  */
1554 static int dm_hw_fini(void *handle)
1555 {
1556         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1557
1558         amdgpu_dm_hpd_fini(adev);
1559
1560         amdgpu_dm_irq_fini(adev);
1561         amdgpu_dm_fini(adev);
1562         return 0;
1563 }
1564
1565 static int dm_suspend(void *handle)
1566 {
1567         struct amdgpu_device *adev = handle;
1568         struct amdgpu_display_manager *dm = &adev->dm;
1569         int ret = 0;
1570
1571         WARN_ON(adev->dm.cached_state);
1572         adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1573
1574         s3_handle_mst(adev->ddev, true);
1575
1576         amdgpu_dm_irq_suspend(adev);
1577
1578
1579         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1580
1581         return ret;
1582 }
1583
1584 static struct amdgpu_dm_connector *
1585 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1586                                              struct drm_crtc *crtc)
1587 {
1588         uint32_t i;
1589         struct drm_connector_state *new_con_state;
1590         struct drm_connector *connector;
1591         struct drm_crtc *crtc_from_state;
1592
1593         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1594                 crtc_from_state = new_con_state->crtc;
1595
1596                 if (crtc_from_state == crtc)
1597                         return to_amdgpu_dm_connector(connector);
1598         }
1599
1600         return NULL;
1601 }
1602
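/*
 * Emulate link detection for a forced connector with no physical sink:
 * derive the sink capabilities from the connector signal type, create a
 * virtual sink, and read the local EDID into it.
 */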
1603 static void emulated_link_detect(struct dc_link *link)
1604 {
1605         struct dc_sink_init_data sink_init_data = { 0 };
1606         struct display_sink_capability sink_caps = { 0 };
1607         enum dc_edid_status edid_status;
1608         struct dc_context *dc_ctx = link->ctx;
1609         struct dc_sink *sink = NULL;
1610         struct dc_sink *prev_sink = NULL;
1611
1612         link->type = dc_connection_none;
1613         prev_sink = link->local_sink;
1614
1615         if (prev_sink != NULL)
1616                 dc_sink_retain(prev_sink);
1617
1618         switch (link->connector_signal) {
1619         case SIGNAL_TYPE_HDMI_TYPE_A: {
1620                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1621                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1622                 break;
1623         }
1624
1625         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1626                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1627                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1628                 break;
1629         }
1630
1631         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1632                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1633                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1634                 break;
1635         }
1636
1637         case SIGNAL_TYPE_LVDS: {
1638                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1639                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1640                 break;
1641         }
1642
1643         case SIGNAL_TYPE_EDP: {
1644                 sink_caps.transaction_type =
1645                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1646                 sink_caps.signal = SIGNAL_TYPE_EDP;
1647                 break;
1648         }
1649
1650         case SIGNAL_TYPE_DISPLAY_PORT: {
1651                 sink_caps.transaction_type =
1652                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1653                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1654                 break;
1655         }
1656
1657         default:
1658                 DC_ERROR("Invalid connector type! signal:%d\n",
1659                         link->connector_signal);
1660                 return;
1661         }
1662
1663         sink_init_data.link = link;
1664         sink_init_data.sink_signal = sink_caps.signal;
1665
1666         sink = dc_sink_create(&sink_init_data);
1667         if (!sink) {
1668                 DC_ERROR("Failed to create sink!\n");
1669                 return;
1670         }
1671
1672         /* dc_sink_create returns a new reference */
1673         link->local_sink = sink;
1674
1675         edid_status = dm_helpers_read_local_edid(
1676                         link->ctx,
1677                         link,
1678                         sink);
1679
1680         if (edid_status != EDID_OK)
1681                 DC_ERROR("Failed to read EDID\n");
1682
1683 }
1684
1685 static int dm_resume(void *handle)
1686 {
1687         struct amdgpu_device *adev = handle;
1688         struct drm_device *ddev = adev->ddev;
1689         struct amdgpu_display_manager *dm = &adev->dm;
1690         struct amdgpu_dm_connector *aconnector;
1691         struct drm_connector *connector;
1692         struct drm_connector_list_iter iter;
1693         struct drm_crtc *crtc;
1694         struct drm_crtc_state *new_crtc_state;
1695         struct dm_crtc_state *dm_new_crtc_state;
1696         struct drm_plane *plane;
1697         struct drm_plane_state *new_plane_state;
1698         struct dm_plane_state *dm_new_plane_state;
1699         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1700         enum dc_connection_type new_connection_type = dc_connection_none;
1701         int i, r;
1702
1703         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1704         dc_release_state(dm_state->context);
1705         dm_state->context = dc_create_state(dm->dc);
1706         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1707         dc_resource_state_construct(dm->dc, dm_state->context);
1708
1709         /* Before powering on DC we need to re-initialize DMUB. */
1710         r = dm_dmub_hw_init(adev);
1711         if (r)
1712                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1713
1714         /* power on hardware */
1715         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1716
1717         /* program HPD filter */
1718         dc_resume(dm->dc);
1719
1720         /*
1721          * Enable HPD Rx IRQ early. This should be done before setting the
1722          * mode, as short-pulse interrupts are used for MST.
1723          */
1724         amdgpu_dm_irq_resume_early(adev);
1725
1726         /* On resume we need to rewrite the MSTM control bits to enable MST */
1727         s3_handle_mst(ddev, false);
1728
1729         /* Do detection */
1730         drm_connector_list_iter_begin(ddev, &iter);
1731         drm_for_each_connector_iter(connector, &iter) {
1732                 aconnector = to_amdgpu_dm_connector(connector);
1733
1734                 /*
1735                  * This is the case when traversing through already-created
1736                  * MST connectors; they should be skipped.
1737                  */
1738                 if (aconnector->mst_port)
1739                         continue;
1740
1741                 mutex_lock(&aconnector->hpd_lock);
1742                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1743                         DRM_ERROR("KMS: Failed to detect connector\n");
1744
1745                 if (aconnector->base.force && new_connection_type == dc_connection_none)
1746                         emulated_link_detect(aconnector->dc_link);
1747                 else
1748                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1749
1750                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1751                         aconnector->fake_enable = false;
1752
1753                 if (aconnector->dc_sink)
1754                         dc_sink_release(aconnector->dc_sink);
1755                 aconnector->dc_sink = NULL;
1756                 amdgpu_dm_update_connector_after_detect(aconnector);
1757                 mutex_unlock(&aconnector->hpd_lock);
1758         }
1759         drm_connector_list_iter_end(&iter);
1760
1761         /* Force mode set in atomic commit */
1762         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1763                 new_crtc_state->active_changed = true;
1764
1765         /*
1766          * atomic_check is expected to create the dc states. We need to release
1767          * them here, since they were duplicated as part of the suspend
1768          * procedure.
1769          */
1770         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1771                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1772                 if (dm_new_crtc_state->stream) {
1773                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1774                         dc_stream_release(dm_new_crtc_state->stream);
1775                         dm_new_crtc_state->stream = NULL;
1776                 }
1777         }
1778
1779         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1780                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1781                 if (dm_new_plane_state->dc_state) {
1782                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1783                         dc_plane_state_release(dm_new_plane_state->dc_state);
1784                         dm_new_plane_state->dc_state = NULL;
1785                 }
1786         }
1787
1788         drm_atomic_helper_resume(ddev, dm->cached_state);
1789
1790         dm->cached_state = NULL;
1791
1792         amdgpu_dm_irq_resume_late(adev);
1793
1794         amdgpu_dm_smu_write_watermarks_table(adev);
1795
1796         return 0;
1797 }
1798
1799 /**
1800  * DOC: DM Lifecycle
1801  *
1802  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1803  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1804  * the base driver's device list to be initialized and torn down accordingly.
1805  *
1806  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1807  */
1808
1809 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1810         .name = "dm",
1811         .early_init = dm_early_init,
1812         .late_init = dm_late_init,
1813         .sw_init = dm_sw_init,
1814         .sw_fini = dm_sw_fini,
1815         .hw_init = dm_hw_init,
1816         .hw_fini = dm_hw_fini,
1817         .suspend = dm_suspend,
1818         .resume = dm_resume,
1819         .is_idle = dm_is_idle,
1820         .wait_for_idle = dm_wait_for_idle,
1821         .check_soft_reset = dm_check_soft_reset,
1822         .soft_reset = dm_soft_reset,
1823         .set_clockgating_state = dm_set_clockgating_state,
1824         .set_powergating_state = dm_set_powergating_state,
1825 };
1826
1827 const struct amdgpu_ip_block_version dm_ip_block =
1828 {
1829         .type = AMD_IP_BLOCK_TYPE_DCE,
1830         .major = 1,
1831         .minor = 0,
1832         .rev = 0,
1833         .funcs = &amdgpu_dm_funcs,
1834 };
1835
1836
1837 /**
1838  * DOC: atomic
1839  *
1840  * *WIP*
1841  */
1842
1843 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1844         .fb_create = amdgpu_display_user_framebuffer_create,
1845         .output_poll_changed = drm_fb_helper_output_poll_changed,
1846         .atomic_check = amdgpu_dm_atomic_check,
1847         .atomic_commit = amdgpu_dm_atomic_commit,
1848 };
1849
1850 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1851         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1852 };
1853
1854 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1855 {
1856         u32 max_cll, min_cll, max, min, q, r;
1857         struct amdgpu_dm_backlight_caps *caps;
1858         struct amdgpu_display_manager *dm;
1859         struct drm_connector *conn_base;
1860         struct amdgpu_device *adev;
1861         static const u8 pre_computed_values[] = {
1862                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1863                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1864
1865         if (!aconnector || !aconnector->dc_link)
1866                 return;
1867
1868         conn_base = &aconnector->base;
1869         adev = conn_base->dev->dev_private;
1870         dm = &adev->dm;
1871         caps = &dm->backlight_caps;
1872         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1873         caps->aux_support = false;
1874         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1875         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1876
1877         if (caps->ext_caps->bits.oled == 1 ||
1878             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1879             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1880                 caps->aux_support = true;
1881
1882         /* From the specification (CTA-861-G), for calculating the maximum
1883          * luminance we need to use:
1884          *      Luminance = 50*2**(CV/32)
1885          * where CV is a one-byte value.
1886          * Evaluating this expression directly would need floating-point
1887          * precision; to avoid that complexity, we take advantage of the fact
1888          * that CV is divided by a constant. By Euclid's division algorithm, CV
1889          * can be written as CV = 32*q + r. Substituting CV in the Luminance
1890          * expression gives 50*(2**q)*(2**(r/32)), hence we only need to
1891          * pre-compute the values of 50*(2**(r/32)). For pre-computing them
1892          * we used the following Ruby line:
1893          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1894          * The results of the above expression can be verified against
1895          * pre_computed_values.
1896          */
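              /*
               * Illustrative example: max_cll = 70 gives q = 2 and r = 6, so
               * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits,
               * matching the exact value 50 * 2**(70/32) ~= 227.8.
               */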
1897         q = max_cll >> 5;
1898         r = max_cll % 32;
1899         max = (1 << q) * pre_computed_values[r];
1900
1901         // min luminance: maxLum * (CV/255)^2 / 100
1902         q = DIV_ROUND_CLOSEST(min_cll, 255);
1903         min = max * DIV_ROUND_CLOSEST((q * q), 100);
1904
1905         caps->aux_max_input_signal = max;
1906         caps->aux_min_input_signal = min;
1907 }
1908
1909 void amdgpu_dm_update_connector_after_detect(
1910                 struct amdgpu_dm_connector *aconnector)
1911 {
1912         struct drm_connector *connector = &aconnector->base;
1913         struct drm_device *dev = connector->dev;
1914         struct dc_sink *sink;
1915
1916         /* MST handled by drm_mst framework */
1917         if (aconnector->mst_mgr.mst_state)
1918                 return;
1919
1920
1921         sink = aconnector->dc_link->local_sink;
1922         if (sink)
1923                 dc_sink_retain(sink);
1924
1925         /*
1926          * An EDID-managed connector gets its first update only in the mode_valid
1927          * hook; the connector sink is then set to either a fake or a physical sink,
1928          * depending on link status. Skip if this was already done during boot.
1929          */
1930         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1931                         && aconnector->dc_em_sink) {
1932
1933                 /*
1934                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
1935                  * fake a stream, because the connector sink is set to NULL on resume.
1936                  */
1937                 mutex_lock(&dev->mode_config.mutex);
1938
1939                 if (sink) {
1940                         if (aconnector->dc_sink) {
1941                                 amdgpu_dm_update_freesync_caps(connector, NULL);
1942                                 /*
1943                                  * The retain and release below bump up the
1944                                  * sink's refcount, because the link no longer points
1945                                  * to it after disconnect; otherwise the next CRTC-to-connector
1946                                  * reshuffle by the UMD would trigger an unwanted dc_sink release.
1947                                  */
1948                                 dc_sink_release(aconnector->dc_sink);
1949                         }
1950                         aconnector->dc_sink = sink;
1951                         dc_sink_retain(aconnector->dc_sink);
1952                         amdgpu_dm_update_freesync_caps(connector,
1953                                         aconnector->edid);
1954                 } else {
1955                         amdgpu_dm_update_freesync_caps(connector, NULL);
1956                         if (!aconnector->dc_sink) {
1957                                 aconnector->dc_sink = aconnector->dc_em_sink;
1958                                 dc_sink_retain(aconnector->dc_sink);
1959                         }
1960                 }
1961
1962                 mutex_unlock(&dev->mode_config.mutex);
1963
1964                 if (sink)
1965                         dc_sink_release(sink);
1966                 return;
1967         }
1968
1969         /*
1970          * TODO: temporary guard until a proper fix is found.
1971          * If this sink is an MST sink, we should not do anything.
1972          */
1973         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1974                 dc_sink_release(sink);
1975                 return;
1976         }
1977
1978         if (aconnector->dc_sink == sink) {
1979                 /*
1980                  * We got a DP short pulse (Link Loss, DP CTS, etc.).
1981                  * Do nothing.
1982                  */
1983                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1984                                 aconnector->connector_id);
1985                 if (sink)
1986                         dc_sink_release(sink);
1987                 return;
1988         }
1989
1990         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1991                 aconnector->connector_id, aconnector->dc_sink, sink);
1992
1993         mutex_lock(&dev->mode_config.mutex);
1994
1995         /*
1996          * 1. Update status of the drm connector
1997          * 2. Send an event and let userspace tell us what to do
1998          */
1999         if (sink) {
2000                 /*
2001                  * TODO: check if we still need the S3 mode update workaround.
2002                  * If yes, put it here.
2003                  */
2004                 if (aconnector->dc_sink)
2005                         amdgpu_dm_update_freesync_caps(connector, NULL);
2006
2007                 aconnector->dc_sink = sink;
2008                 dc_sink_retain(aconnector->dc_sink);
2009                 if (sink->dc_edid.length == 0) {
2010                         aconnector->edid = NULL;
2011                         drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2012                 } else {
2013                         aconnector->edid =
2014                                 (struct edid *) sink->dc_edid.raw_edid;
2015
2016
2017                         drm_connector_update_edid_property(connector,
2018                                         aconnector->edid);
2019                         drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2020                                             aconnector->edid);
2021                 }
2022                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2023                 update_connector_ext_caps(aconnector);
2024         } else {
2025                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2026                 amdgpu_dm_update_freesync_caps(connector, NULL);
2027                 drm_connector_update_edid_property(connector, NULL);
2028                 aconnector->num_modes = 0;
2029                 dc_sink_release(aconnector->dc_sink);
2030                 aconnector->dc_sink = NULL;
2031                 aconnector->edid = NULL;
2032 #ifdef CONFIG_DRM_AMD_DC_HDCP
2033                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2034                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2035                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2036 #endif
2037         }
2038
2039         mutex_unlock(&dev->mode_config.mutex);
2040
2041         if (sink)
2042                 dc_sink_release(sink);
2043 }
2044
2045 static void handle_hpd_irq(void *param)
2046 {
2047         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2048         struct drm_connector *connector = &aconnector->base;
2049         struct drm_device *dev = connector->dev;
2050         enum dc_connection_type new_connection_type = dc_connection_none;
2051 #ifdef CONFIG_DRM_AMD_DC_HDCP
2052         struct amdgpu_device *adev = dev->dev_private;
2053 #endif
2054
2055         /*
2056          * In case of failure or MST, there is no need to update the connector status
2057          * or notify the OS, since (in the MST case) MST does this in its own context.
2058          */
2059         mutex_lock(&aconnector->hpd_lock);
2060
2061 #ifdef CONFIG_DRM_AMD_DC_HDCP
2062         if (adev->dm.hdcp_workqueue)
2063                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2064 #endif
2065         if (aconnector->fake_enable)
2066                 aconnector->fake_enable = false;
2067
2068         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2069                 DRM_ERROR("KMS: Failed to detect connector\n");
2070
2071         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2072                 emulated_link_detect(aconnector->dc_link);
2073
2074
2075                 drm_modeset_lock_all(dev);
2076                 dm_restore_drm_connector_state(dev, connector);
2077                 drm_modeset_unlock_all(dev);
2078
2079                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2080                         drm_kms_helper_hotplug_event(dev);
2081
2082         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2083                 amdgpu_dm_update_connector_after_detect(aconnector);
2084
2085
2086                 drm_modeset_lock_all(dev);
2087                 dm_restore_drm_connector_state(dev, connector);
2088                 drm_modeset_unlock_all(dev);
2089
2090                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2091                         drm_kms_helper_hotplug_event(dev);
2092         }
2093         mutex_unlock(&aconnector->hpd_lock);
2094
2095 }
2096
2097 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2098 {
2099         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2100         uint8_t dret;
2101         bool new_irq_handled = false;
2102         int dpcd_addr;
2103         int dpcd_bytes_to_read;
2104
2105         const int max_process_count = 30;
2106         int process_count = 0;
2107
2108         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2109
2110         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2111                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2112                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2113                 dpcd_addr = DP_SINK_COUNT;
2114         } else {
2115                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2116                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2117                 dpcd_addr = DP_SINK_COUNT_ESI;
2118         }
2119
2120         dret = drm_dp_dpcd_read(
2121                 &aconnector->dm_dp_aux.aux,
2122                 dpcd_addr,
2123                 esi,
2124                 dpcd_bytes_to_read);
2125
2126         while (dret == dpcd_bytes_to_read &&
2127                 process_count < max_process_count) {
2128                 uint8_t retry;
2129                 dret = 0;
2130
2131                 process_count++;
2132
2133                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2134                 /* handle HPD short pulse irq */
2135                 if (aconnector->mst_mgr.mst_state)
2136                         drm_dp_mst_hpd_irq(
2137                                 &aconnector->mst_mgr,
2138                                 esi,
2139                                 &new_irq_handled);
2140
2141                 if (new_irq_handled) {
2142                         /* ACK at DPCD to notify downstream */
2143                         const int ack_dpcd_bytes_to_write =
2144                                 dpcd_bytes_to_read - 1;
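                             /* esi[0] (the sink count) is not acknowledged;
                              * the write-back starts at dpcd_addr + 1 with
                              * esi[1].
                              */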
2145
2146                         for (retry = 0; retry < 3; retry++) {
2147                                 uint8_t wret;
2148
2149                                 wret = drm_dp_dpcd_write(
2150                                         &aconnector->dm_dp_aux.aux,
2151                                         dpcd_addr + 1,
2152                                         &esi[1],
2153                                         ack_dpcd_bytes_to_write);
2154                                 if (wret == ack_dpcd_bytes_to_write)
2155                                         break;
2156                         }
2157
2158                         /* check if there is new irq to be handled */
2159                         dret = drm_dp_dpcd_read(
2160                                 &aconnector->dm_dp_aux.aux,
2161                                 dpcd_addr,
2162                                 esi,
2163                                 dpcd_bytes_to_read);
2164
2165                         new_irq_handled = false;
2166                 } else {
2167                         break;
2168                 }
2169         }
2170
2171         if (process_count == max_process_count)
2172                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2173 }
2174
2175 static void handle_hpd_rx_irq(void *param)
2176 {
2177         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2178         struct drm_connector *connector = &aconnector->base;
2179         struct drm_device *dev = connector->dev;
2180         struct dc_link *dc_link = aconnector->dc_link;
2181         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2182         enum dc_connection_type new_connection_type = dc_connection_none;
2183 #ifdef CONFIG_DRM_AMD_DC_HDCP
2184         union hpd_irq_data hpd_irq_data;
2185         struct amdgpu_device *adev = dev->dev_private;
2186
2187         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2188 #endif
2189
2190         /*
2191          * TODO: Temporarily add a mutex so that the hpd interrupt does not
2192          * have a gpio conflict; once the i2c helper is implemented, this
2193          * mutex should be retired.
2194          */
2195         if (dc_link->type != dc_connection_mst_branch)
2196                 mutex_lock(&aconnector->hpd_lock);
2197
2198
2199 #ifdef CONFIG_DRM_AMD_DC_HDCP
2200         if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2201 #else
2202         if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2203 #endif
2204                         !is_mst_root_connector) {
2205                 /* Downstream Port status changed. */
2206                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2207                         DRM_ERROR("KMS: Failed to detect connector\n");
2208
2209                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2210                         emulated_link_detect(dc_link);
2211
2212                         if (aconnector->fake_enable)
2213                                 aconnector->fake_enable = false;
2214
2215                         amdgpu_dm_update_connector_after_detect(aconnector);
2216
2217
2218                         drm_modeset_lock_all(dev);
2219                         dm_restore_drm_connector_state(dev, connector);
2220                         drm_modeset_unlock_all(dev);
2221
2222                         drm_kms_helper_hotplug_event(dev);
2223                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2224
2225                         if (aconnector->fake_enable)
2226                                 aconnector->fake_enable = false;
2227
2228                         amdgpu_dm_update_connector_after_detect(aconnector);
2229
2230
2231                         drm_modeset_lock_all(dev);
2232                         dm_restore_drm_connector_state(dev, connector);
2233                         drm_modeset_unlock_all(dev);
2234
2235                         drm_kms_helper_hotplug_event(dev);
2236                 }
2237         }
2238 #ifdef CONFIG_DRM_AMD_DC_HDCP
2239         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2240                 if (adev->dm.hdcp_workqueue)
2241                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2242         }
2243 #endif
2244         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2245             (dc_link->type == dc_connection_mst_branch))
2246                 dm_handle_hpd_rx_irq(aconnector);
2247
2248         if (dc_link->type != dc_connection_mst_branch) {
2249                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2250                 mutex_unlock(&aconnector->hpd_lock);
2251         }
2252 }
2253
2254 static void register_hpd_handlers(struct amdgpu_device *adev)
2255 {
2256         struct drm_device *dev = adev->ddev;
2257         struct drm_connector *connector;
2258         struct amdgpu_dm_connector *aconnector;
2259         const struct dc_link *dc_link;
2260         struct dc_interrupt_params int_params = {0};
2261
2262         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2263         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2264
2265         list_for_each_entry(connector,
2266                         &dev->mode_config.connector_list, head) {
2267
2268                 aconnector = to_amdgpu_dm_connector(connector);
2269                 dc_link = aconnector->dc_link;
2270
2271                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2272                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2273                         int_params.irq_source = dc_link->irq_source_hpd;
2274
2275                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2276                                         handle_hpd_irq,
2277                                         (void *) aconnector);
2278                 }
2279
2280                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2281
2282                         /* Also register for DP short pulse (hpd_rx). */
2283                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2284                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2285
2286                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2287                                         handle_hpd_rx_irq,
2288                                         (void *) aconnector);
2289                 }
2290         }
2291 }
2292
2293 /* Register IRQ sources and initialize IRQ callbacks */
2294 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2295 {
2296         struct dc *dc = adev->dm.dc;
2297         struct common_irq_params *c_irq_params;
2298         struct dc_interrupt_params int_params = {0};
2299         int r;
2300         int i;
2301         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2302
2303         if (adev->asic_type >= CHIP_VEGA10)
2304                 client_id = SOC15_IH_CLIENTID_DCE;
2305
2306         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2307         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2308
2309         /*
2310          * Actions of amdgpu_irq_add_id():
2311          * 1. Register a set() function with base driver.
2312          *    Base driver will call set() function to enable/disable an
2313          *    interrupt in DC hardware.
2314          * 2. Register amdgpu_dm_irq_handler().
2315          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2316          *    coming from DC hardware.
2317          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2318          *    for acknowledging and handling. */
2319
2320         /* Use VBLANK interrupt */
2321         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2322                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2323                 if (r) {
2324                         DRM_ERROR("Failed to add crtc irq id!\n");
2325                         return r;
2326                 }
2327
2328                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2329                 int_params.irq_source =
2330                         dc_interrupt_to_irq_source(dc, i, 0);
2331
2332                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2333
2334                 c_irq_params->adev = adev;
2335                 c_irq_params->irq_src = int_params.irq_source;
2336
2337                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2338                                 dm_crtc_high_irq, c_irq_params);
2339         }
2340
2341         /* Use VUPDATE interrupt */
2342         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2343                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2344                 if (r) {
2345                         DRM_ERROR("Failed to add vupdate irq id!\n");
2346                         return r;
2347                 }
2348
2349                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2350                 int_params.irq_source =
2351                         dc_interrupt_to_irq_source(dc, i, 0);
2352
2353                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2354
2355                 c_irq_params->adev = adev;
2356                 c_irq_params->irq_src = int_params.irq_source;
2357
2358                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2359                                 dm_vupdate_high_irq, c_irq_params);
2360         }
2361
2362         /* Use GRPH_PFLIP interrupt */
2363         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2364                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2365                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2366                 if (r) {
2367                         DRM_ERROR("Failed to add page flip irq id!\n");
2368                         return r;
2369                 }
2370
2371                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2372                 int_params.irq_source =
2373                         dc_interrupt_to_irq_source(dc, i, 0);
2374
2375                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2376
2377                 c_irq_params->adev = adev;
2378                 c_irq_params->irq_src = int_params.irq_source;
2379
2380                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2381                                 dm_pflip_high_irq, c_irq_params);
2382
2383         }
2384
2385         /* HPD */
2386         r = amdgpu_irq_add_id(adev, client_id,
2387                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2388         if (r) {
2389                 DRM_ERROR("Failed to add hpd irq id!\n");
2390                 return r;
2391         }
2392
2393         register_hpd_handlers(adev);
2394
2395         return 0;
2396 }
2397
2398 #if defined(CONFIG_DRM_AMD_DC_DCN)
2399 /* Register IRQ sources and initialize IRQ callbacks */
2400 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2401 {
2402         struct dc *dc = adev->dm.dc;
2403         struct common_irq_params *c_irq_params;
2404         struct dc_interrupt_params int_params = {0};
2405         int r;
2406         int i;
2407
2408         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2409         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2410
2411         /*
2412          * Actions of amdgpu_irq_add_id():
2413          * 1. Register a set() function with base driver.
2414          *    Base driver will call set() function to enable/disable an
2415          *    interrupt in DC hardware.
2416          * 2. Register amdgpu_dm_irq_handler().
2417          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2418          *    coming from DC hardware.
2419          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2420          *    for acknowledging and handling.
2421          */
2422
2423         /* Use VSTARTUP interrupt */
2424         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2425                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2426                         i++) {
2427                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2428
2429                 if (r) {
2430                         DRM_ERROR("Failed to add crtc irq id!\n");
2431                         return r;
2432                 }
2433
2434                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2435                 int_params.irq_source =
2436                         dc_interrupt_to_irq_source(dc, i, 0);
2437
2438                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2439
2440                 c_irq_params->adev = adev;
2441                 c_irq_params->irq_src = int_params.irq_source;
2442
2443                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2444                                 dm_dcn_crtc_high_irq, c_irq_params);
2445         }
2446
2447         /* Use GRPH_PFLIP interrupt */
2448         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2449                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2450                         i++) {
2451                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2452                 if (r) {
2453                         DRM_ERROR("Failed to add page flip irq id!\n");
2454                         return r;
2455                 }
2456
2457                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2458                 int_params.irq_source =
2459                         dc_interrupt_to_irq_source(dc, i, 0);
2460
2461                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2462
2463                 c_irq_params->adev = adev;
2464                 c_irq_params->irq_src = int_params.irq_source;
2465
2466                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2467                                 dm_pflip_high_irq, c_irq_params);
2468
2469         }
2470
2471         /* HPD */
2472         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2473                         &adev->hpd_irq);
2474         if (r) {
2475                 DRM_ERROR("Failed to add hpd irq id!\n");
2476                 return r;
2477         }
2478
2479         register_hpd_handlers(adev);
2480
2481         return 0;
2482 }
2483 #endif
2484
2485 /*
2486  * Acquires the lock for the atomic state object and returns
2487  * the new atomic state.
2488  *
2489  * This should only be called during atomic check.
2490  */
2491 static int dm_atomic_get_state(struct drm_atomic_state *state,
2492                                struct dm_atomic_state **dm_state)
2493 {
2494         struct drm_device *dev = state->dev;
2495         struct amdgpu_device *adev = dev->dev_private;
2496         struct amdgpu_display_manager *dm = &adev->dm;
2497         struct drm_private_state *priv_state;
2498
2499         if (*dm_state)
2500                 return 0;
2501
2502         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2503         if (IS_ERR(priv_state))
2504                 return PTR_ERR(priv_state);
2505
2506         *dm_state = to_dm_atomic_state(priv_state);
2507
2508         return 0;
2509 }
2510
2511 struct dm_atomic_state *
2512 dm_atomic_get_new_state(struct drm_atomic_state *state)
2513 {
2514         struct drm_device *dev = state->dev;
2515         struct amdgpu_device *adev = dev->dev_private;
2516         struct amdgpu_display_manager *dm = &adev->dm;
2517         struct drm_private_obj *obj;
2518         struct drm_private_state *new_obj_state;
2519         int i;
2520
2521         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2522                 if (obj->funcs == dm->atomic_obj.funcs)
2523                         return to_dm_atomic_state(new_obj_state);
2524         }
2525
2526         return NULL;
2527 }
2528
2529 struct dm_atomic_state *
2530 dm_atomic_get_old_state(struct drm_atomic_state *state)
2531 {
2532         struct drm_device *dev = state->dev;
2533         struct amdgpu_device *adev = dev->dev_private;
2534         struct amdgpu_display_manager *dm = &adev->dm;
2535         struct drm_private_obj *obj;
2536         struct drm_private_state *old_obj_state;
2537         int i;
2538
2539         for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2540                 if (obj->funcs == dm->atomic_obj.funcs)
2541                         return to_dm_atomic_state(old_obj_state);
2542         }
2543
2544         return NULL;
2545 }
2546
2547 static struct drm_private_state *
2548 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2549 {
2550         struct dm_atomic_state *old_state, *new_state;
2551
2552         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2553         if (!new_state)
2554                 return NULL;
2555
2556         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2557
2558         old_state = to_dm_atomic_state(obj->state);
2559
2560         if (old_state && old_state->context)
2561                 new_state->context = dc_copy_state(old_state->context);
2562
2563         if (!new_state->context) {
2564                 kfree(new_state);
2565                 return NULL;
2566         }
2567
2568         return &new_state->base;
2569 }
2570
2571 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2572                                     struct drm_private_state *state)
2573 {
2574         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2575
2576         if (dm_state && dm_state->context)
2577                 dc_release_state(dm_state->context);
2578
2579         kfree(dm_state);
2580 }
2581
2582 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2583         .atomic_duplicate_state = dm_atomic_duplicate_state,
2584         .atomic_destroy_state = dm_atomic_destroy_state,
2585 };
2586
2587 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2588 {
2589         struct dm_atomic_state *state;
2590         int r;
2591
2592         adev->mode_info.mode_config_initialized = true;
2593
2594         adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2595         adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2596
2597         adev->ddev->mode_config.max_width = 16384;
2598         adev->ddev->mode_config.max_height = 16384;
2599
2600         adev->ddev->mode_config.preferred_depth = 24;
2601         adev->ddev->mode_config.prefer_shadow = 1;
2602         /* indicates support for immediate flip */
2603         adev->ddev->mode_config.async_page_flip = true;
2604
2605         adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2606
2607         state = kzalloc(sizeof(*state), GFP_KERNEL);
2608         if (!state)
2609                 return -ENOMEM;
2610
2611         state->context = dc_create_state(adev->dm.dc);
2612         if (!state->context) {
2613                 kfree(state);
2614                 return -ENOMEM;
2615         }
2616
2617         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2618
2619         drm_atomic_private_obj_init(adev->ddev,
2620                                     &adev->dm.atomic_obj,
2621                                     &state->base,
2622                                     &dm_atomic_state_funcs);
2623
2624         r = amdgpu_display_modeset_create_props(adev);
2625         if (r)
2626                 return r;
2627
2628         r = amdgpu_dm_audio_init(adev);
2629         if (r)
2630                 return r;
2631
2632         return 0;
2633 }
2634
2635 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2636 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2637 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2638
2639 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2640         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2641
2642 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2643 {
2644 #if defined(CONFIG_ACPI)
2645         struct amdgpu_dm_backlight_caps caps;
2646
2647         if (dm->backlight_caps.caps_valid)
2648                 return;
2649
2650         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2651         if (caps.caps_valid) {
2652                 dm->backlight_caps.caps_valid = true;
2653                 if (caps.aux_support)
2654                         return;
2655                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2656                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2657         } else {
2658                 dm->backlight_caps.min_input_signal =
2659                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2660                 dm->backlight_caps.max_input_signal =
2661                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2662         }
2663 #else
2664         if (dm->backlight_caps.aux_support)
2665                 return;
2666
2667         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2668         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2669 #endif
2670 }
2671
2672 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2673 {
2674         bool rc;
2675
2676         if (!link)
2677                 return 1;
2678
2679         rc = dc_link_set_backlight_level_nits(link, true, brightness,
2680                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2681
2682         return rc ? 0 : 1;
2683 }
2684
2685 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2686                               const uint32_t user_brightness)
2687 {
2688         u32 min, max, conversion_pace;
2689         u32 brightness = user_brightness;
2690
2691         if (!caps)
2692                 goto out;
2693
2694         if (!caps->aux_support) {
2695                 max = caps->max_input_signal;
2696                 min = caps->min_input_signal;
2697                 /*
2698                  * The brightness input is in the range 0-255.
2699                  * It needs to be rescaled to be between the
2700                  * requested min and max input signal.
2701                  * It also needs to be scaled up by 0x101 to
2702                  * match the DC interface, which has a range of
2703                  * 0 to 0xffff.
2704                  */
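                     /*
                      * Illustrative example: with the default min = 12 and
                      * max = 255, user_brightness = 255 maps to
                      * 255 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 0xffff,
                      * and user_brightness = 0 maps to 12 * 0x101 = 0x0c0c.
                      */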
2705                 conversion_pace = 0x101;
2706                 brightness =
2707                         user_brightness
2708                         * conversion_pace
2709                         * (max - min)
2710                         / AMDGPU_MAX_BL_LEVEL
2711                         + min * conversion_pace;
2712         } else {
2713                 /* TODO
2714                  * We are doing a linear interpolation here, which is OK but
2715                  * does not provide the optimal result. We probably want
2716                  * something close to the Perceptual Quantizer (PQ) curve.
2717                  */
2718                 max = caps->aux_max_input_signal;
2719                 min = caps->aux_min_input_signal;
2720
2721                 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2722                                + user_brightness * max;
2723                 // Multiply the value by 1000 since we use millinits
2724                 brightness *= 1000;
2725                 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
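                     /*
                      * Illustrative example: user_brightness = 255 yields
                      * max * 1000 millinits (i.e. max nits), and
                      * user_brightness = 0 yields min * 1000 millinits.
                      */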
2726         }
2727
2728 out:
2729         return brightness;
2730 }
2731
2732 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2733 {
2734         struct amdgpu_display_manager *dm = bl_get_data(bd);
2735         struct amdgpu_dm_backlight_caps caps;
2736         struct dc_link *link = NULL;
2737         u32 brightness;
2738         bool rc;
2739
2740         amdgpu_dm_update_backlight_caps(dm);
2741         caps = dm->backlight_caps;
2742
2743         link = (struct dc_link *)dm->backlight_link;
2744
2745         brightness = convert_brightness(&caps, bd->props.brightness);
2746         // Change brightness based on AUX property
2747         if (caps.aux_support)
2748                 return set_backlight_via_aux(link, brightness);
2749
2750         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2751
2752         return rc ? 0 : 1;
2753 }
2754
2755 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2756 {
2757         struct amdgpu_display_manager *dm = bl_get_data(bd);
2758         int ret = dc_link_get_backlight_level(dm->backlight_link);
2759
2760         if (ret == DC_ERROR_UNEXPECTED)
2761                 return bd->props.brightness;
2762         return ret;
2763 }
2764
2765 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2766         .options = BL_CORE_SUSPENDRESUME,
2767         .get_brightness = amdgpu_dm_backlight_get_brightness,
2768         .update_status  = amdgpu_dm_backlight_update_status,
2769 };
2770
2771 static void
2772 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2773 {
2774         char bl_name[16];
2775         struct backlight_properties props = { 0 };
2776
2777         amdgpu_dm_update_backlight_caps(dm);
2778
2779         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2780         props.brightness = AMDGPU_MAX_BL_LEVEL;
2781         props.type = BACKLIGHT_RAW;
2782
2783         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2784                         dm->adev->ddev->primary->index);
2785
2786         dm->backlight_dev = backlight_device_register(bl_name,
2787                         dm->adev->ddev->dev,
2788                         dm,
2789                         &amdgpu_dm_backlight_ops,
2790                         &props);
2791
2792         if (IS_ERR(dm->backlight_dev))
2793                 DRM_ERROR("DM: Backlight registration failed!\n");
2794         else
2795                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2796 }
2797
2798 #endif
2799
2800 static int initialize_plane(struct amdgpu_display_manager *dm,
2801                             struct amdgpu_mode_info *mode_info, int plane_id,
2802                             enum drm_plane_type plane_type,
2803                             const struct dc_plane_cap *plane_cap)
2804 {
2805         struct drm_plane *plane;
2806         unsigned long possible_crtcs;
2807         int ret = 0;
2808
2809         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2810         if (!plane) {
2811                 DRM_ERROR("KMS: Failed to allocate plane\n");
2812                 return -ENOMEM;
2813         }
2814         plane->type = plane_type;
2815
2816         /*
2817          * HACK: IGT tests expect that the primary plane for a CRTC
2818          * can only have one possible CRTC. Only expose support for
2819          * any CRTC to planes that are not going to be used as a primary
2820          * plane for a CRTC, such as overlay or underlay planes.
2821          */
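              /*
               * For example, plane_id 0 gets mask 0x1 (CRTC 0 only), while
               * plane ids at or beyond max_streams get 0xff (all CRTCs).
               */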
2822         possible_crtcs = 1 << plane_id;
2823         if (plane_id >= dm->dc->caps.max_streams)
2824                 possible_crtcs = 0xff;
2825
2826         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2827
2828         if (ret) {
2829                 DRM_ERROR("KMS: Failed to initialize plane\n");
2830                 kfree(plane);
2831                 return ret;
2832         }
2833
2834         if (mode_info)
2835                 mode_info->planes[plane_id] = plane;
2836
2837         return ret;
2838 }
2839
2840
2841 static void register_backlight_device(struct amdgpu_display_manager *dm,
2842                                       struct dc_link *link)
2843 {
2844 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2845         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2846
2847         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2848             link->type != dc_connection_none) {
2849                 /*
2850                  * Even if registration fails, we should continue with
2851                  * DM initialization because not having a backlight control
2852                  * is better than a black screen.
2853                  */
2854                 amdgpu_dm_register_backlight_device(dm);
2855
2856                 if (dm->backlight_dev)
2857                         dm->backlight_link = link;
2858         }
2859 #endif
2860 }
2861
2862
2863 /*
2864  * In this architecture, the association
2865  * connector -> encoder -> crtc
2866  * is not really required. The crtc and connector will hold the
2867  * display_index as an abstraction to use with the DAL component.
2868  *
2869  * Returns 0 on success
2870  */
2871 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2872 {
2873         struct amdgpu_display_manager *dm = &adev->dm;
2874         int32_t i;
2875         struct amdgpu_dm_connector *aconnector = NULL;
2876         struct amdgpu_encoder *aencoder = NULL;
2877         struct amdgpu_mode_info *mode_info = &adev->mode_info;
2878         uint32_t link_cnt;
2879         int32_t primary_planes;
2880         enum dc_connection_type new_connection_type = dc_connection_none;
2881         const struct dc_plane_cap *plane;
2882
2883         link_cnt = dm->dc->caps.max_links;
2884         if (amdgpu_dm_mode_config_init(dm->adev)) {
2885                 DRM_ERROR("DM: Failed to initialize mode config\n");
2886                 return -EINVAL;
2887         }
2888
2889         /* There is one primary plane per CRTC */
2890         primary_planes = dm->dc->caps.max_streams;
2891         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2892
2893         /*
2894          * Initialize primary planes, implicit planes for legacy IOCTLs.
2895          * Order is reversed to match iteration order in atomic check.
2896          */
2897         for (i = (primary_planes - 1); i >= 0; i--) {
2898                 plane = &dm->dc->caps.planes[i];
2899
2900                 if (initialize_plane(dm, mode_info, i,
2901                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
2902                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
2903                         goto fail;
2904                 }
2905         }
2906
2907         /*
2908          * Initialize overlay planes, index starting after primary planes.
2909          * These planes have a higher DRM index than the primary planes since
2910          * they should be considered as having a higher z-order.
2911          * Order is reversed to match iteration order in atomic check.
2912          *
2913          * Only support DCN for now, and only expose one so we don't encourage
2914          * userspace to use up all the pipes.
2915          */
2916         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2917                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2918
2919                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2920                         continue;
2921
2922                 if (!plane->blends_with_above || !plane->blends_with_below)
2923                         continue;
2924
2925                 if (!plane->pixel_format_support.argb8888)
2926                         continue;
2927
2928                 if (initialize_plane(dm, NULL, primary_planes + i,
2929                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
2930                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2931                         goto fail;
2932                 }
2933
2934                 /* Only create one overlay plane. */
2935                 break;
2936         }
2937
2938         for (i = 0; i < dm->dc->caps.max_streams; i++)
2939                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2940                         DRM_ERROR("KMS: Failed to initialize crtc\n");
2941                         goto fail;
2942                 }
2943
2944         dm->display_indexes_num = dm->dc->caps.max_streams;
2945
2946         /* Loop over all connectors on the board */
2947         for (i = 0; i < link_cnt; i++) {
2948                 struct dc_link *link = NULL;
2949
2950                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2951                         DRM_ERROR(
2952                                 "KMS: Cannot support more than %d display indexes\n",
2953                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
2954                         continue;
2955                 }
2956
2957                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2958                 if (!aconnector)
2959                         goto fail;
2960
2961                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2962                 if (!aencoder)
2963                         goto fail;
2964
2965                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2966                         DRM_ERROR("KMS: Failed to initialize encoder\n");
2967                         goto fail;
2968                 }
2969
2970                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2971                         DRM_ERROR("KMS: Failed to initialize connector\n");
2972                         goto fail;
2973                 }
2974
2975                 link = dc_get_link_at_index(dm->dc, i);
2976
2977                 if (!dc_link_detect_sink(link, &new_connection_type))
2978                         DRM_ERROR("KMS: Failed to detect connector\n");
2979
2980                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2981                         emulated_link_detect(link);
2982                         amdgpu_dm_update_connector_after_detect(aconnector);
2983
2984                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2985                         amdgpu_dm_update_connector_after_detect(aconnector);
2986                         register_backlight_device(dm, link);
2987                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2988                                 amdgpu_dm_set_psr_caps(link);
2989                 }
2990
2991
2992         }
2993
2994         /* Software is initialized. Now we can register interrupt handlers. */
2995         switch (adev->asic_type) {
2996         case CHIP_BONAIRE:
2997         case CHIP_HAWAII:
2998         case CHIP_KAVERI:
2999         case CHIP_KABINI:
3000         case CHIP_MULLINS:
3001         case CHIP_TONGA:
3002         case CHIP_FIJI:
3003         case CHIP_CARRIZO:
3004         case CHIP_STONEY:
3005         case CHIP_POLARIS11:
3006         case CHIP_POLARIS10:
3007         case CHIP_POLARIS12:
3008         case CHIP_VEGAM:
3009         case CHIP_VEGA10:
3010         case CHIP_VEGA12:
3011         case CHIP_VEGA20:
3012                 if (dce110_register_irq_handlers(dm->adev)) {
3013                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3014                         goto fail;
3015                 }
3016                 break;
3017 #if defined(CONFIG_DRM_AMD_DC_DCN)
3018         case CHIP_RAVEN:
3019         case CHIP_NAVI12:
3020         case CHIP_NAVI10:
3021         case CHIP_NAVI14:
3022         case CHIP_RENOIR:
3023                 if (dcn10_register_irq_handlers(dm->adev)) {
3024                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3025                         goto fail;
3026                 }
3027                 break;
3028 #endif
3029         default:
3030                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3031                 goto fail;
3032         }
3033
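        /*
         * Stutter mode lets the display engine fetch in bursts so DRAM can
         * self-refresh in between; honor the PP_STUTTER_MODE feature-mask
         * override on everything except Carrizo and Stoney.
         */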
3034         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3035                 dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
3036
3037         /* No userspace support. */
3038         dm->dc->debug.disable_tri_buf = true;
3039
3040         return 0;
3041 fail:
3042         kfree(aencoder);
3043         kfree(aconnector);
3044
3045         return -EINVAL;
3046 }
3047
3048 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3049 {
3050         drm_mode_config_cleanup(dm->ddev);
3051         drm_atomic_private_obj_fini(&dm->atomic_obj);
3053 }
3054
3055 /******************************************************************************
3056  * amdgpu_display_funcs functions
3057  *****************************************************************************/
3058
3059 /*
3060  * dm_bandwidth_update - program display watermarks
3061  *
3062  * @adev: amdgpu_device pointer
3063  *
3064  * Calculate and program the display watermarks and line buffer allocation.
3065  */
3066 static void dm_bandwidth_update(struct amdgpu_device *adev)
3067 {
3068         /* TODO: implement later */
3069 }
3070
3071 static const struct amdgpu_display_funcs dm_display_funcs = {
3072         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3073         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3074         .backlight_set_level = NULL, /* never called for DC */
3075         .backlight_get_level = NULL, /* never called for DC */
3076         .hpd_sense = NULL,/* called unconditionally */
3077         .hpd_set_polarity = NULL, /* called unconditionally */
3078         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3079         .page_flip_get_scanoutpos =
3080                 dm_crtc_get_scanoutpos,/* called unconditionally */
3081         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3082         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3083 };
3084
3085 #if defined(CONFIG_DEBUG_KERNEL_DC)
3086
3087 static ssize_t s3_debug_store(struct device *device,
3088                               struct device_attribute *attr,
3089                               const char *buf,
3090                               size_t count)
3091 {
3092         int ret;
3093         int s3_state;
3094         struct drm_device *drm_dev = dev_get_drvdata(device);
3095         struct amdgpu_device *adev = drm_dev->dev_private;
3096
3097         ret = kstrtoint(buf, 0, &s3_state);
3098
3099         if (ret == 0) {
3100                 if (s3_state) {
3101                         dm_resume(adev);
3102                         drm_kms_helper_hotplug_event(adev->ddev);
3103                 } else
3104                         dm_suspend(adev);
3105         }
3106
3107         return ret == 0 ? count : 0;
3108 }
3109
3110 DEVICE_ATTR_WO(s3_debug);
3111
3112 #endif
3113
3114 static int dm_early_init(void *handle)
3115 {
3116         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3117
3118         switch (adev->asic_type) {
3119         case CHIP_BONAIRE:
3120         case CHIP_HAWAII:
3121                 adev->mode_info.num_crtc = 6;
3122                 adev->mode_info.num_hpd = 6;
3123                 adev->mode_info.num_dig = 6;
3124                 break;
3125         case CHIP_KAVERI:
3126                 adev->mode_info.num_crtc = 4;
3127                 adev->mode_info.num_hpd = 6;
3128                 adev->mode_info.num_dig = 7;
3129                 break;
3130         case CHIP_KABINI:
3131         case CHIP_MULLINS:
3132                 adev->mode_info.num_crtc = 2;
3133                 adev->mode_info.num_hpd = 6;
3134                 adev->mode_info.num_dig = 6;
3135                 break;
3136         case CHIP_FIJI:
3137         case CHIP_TONGA:
3138                 adev->mode_info.num_crtc = 6;
3139                 adev->mode_info.num_hpd = 6;
3140                 adev->mode_info.num_dig = 7;
3141                 break;
3142         case CHIP_CARRIZO:
3143                 adev->mode_info.num_crtc = 3;
3144                 adev->mode_info.num_hpd = 6;
3145                 adev->mode_info.num_dig = 9;
3146                 break;
3147         case CHIP_STONEY:
3148                 adev->mode_info.num_crtc = 2;
3149                 adev->mode_info.num_hpd = 6;
3150                 adev->mode_info.num_dig = 9;
3151                 break;
3152         case CHIP_POLARIS11:
3153         case CHIP_POLARIS12:
3154                 adev->mode_info.num_crtc = 5;
3155                 adev->mode_info.num_hpd = 5;
3156                 adev->mode_info.num_dig = 5;
3157                 break;
3158         case CHIP_POLARIS10:
3159         case CHIP_VEGAM:
3160                 adev->mode_info.num_crtc = 6;
3161                 adev->mode_info.num_hpd = 6;
3162                 adev->mode_info.num_dig = 6;
3163                 break;
3164         case CHIP_VEGA10:
3165         case CHIP_VEGA12:
3166         case CHIP_VEGA20:
3167                 adev->mode_info.num_crtc = 6;
3168                 adev->mode_info.num_hpd = 6;
3169                 adev->mode_info.num_dig = 6;
3170                 break;
3171 #if defined(CONFIG_DRM_AMD_DC_DCN)
3172         case CHIP_RAVEN:
3173                 adev->mode_info.num_crtc = 4;
3174                 adev->mode_info.num_hpd = 4;
3175                 adev->mode_info.num_dig = 4;
3176                 break;
3177 #endif
3178         case CHIP_NAVI10:
3179         case CHIP_NAVI12:
3180                 adev->mode_info.num_crtc = 6;
3181                 adev->mode_info.num_hpd = 6;
3182                 adev->mode_info.num_dig = 6;
3183                 break;
3184         case CHIP_NAVI14:
3185                 adev->mode_info.num_crtc = 5;
3186                 adev->mode_info.num_hpd = 5;
3187                 adev->mode_info.num_dig = 5;
3188                 break;
3189         case CHIP_RENOIR:
3190                 adev->mode_info.num_crtc = 4;
3191                 adev->mode_info.num_hpd = 4;
3192                 adev->mode_info.num_dig = 4;
3193                 break;
3194         default:
3195                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3196                 return -EINVAL;
3197         }
3198
3199         amdgpu_dm_set_irq_funcs(adev);
3200
3201         if (adev->mode_info.funcs == NULL)
3202                 adev->mode_info.funcs = &dm_display_funcs;
3203
3204         /*
3205          * Note: Do NOT change adev->audio_endpt_rreg and
3206          * adev->audio_endpt_wreg because they are initialised in
3207          * amdgpu_device_init()
3208          */
3209 #if defined(CONFIG_DEBUG_KERNEL_DC)
3210         device_create_file(
3211                 adev->ddev->dev,
3212                 &dev_attr_s3_debug);
3213 #endif
3214
3215         return 0;
3216 }
3217
3218 static bool modeset_required(struct drm_crtc_state *crtc_state,
3219                              struct dc_stream_state *new_stream,
3220                              struct dc_stream_state *old_stream)
3221 {
3222         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3223                 return false;
3224
3225         if (!crtc_state->enable)
3226                 return false;
3227
3228         return crtc_state->active;
3229 }
3230
3231 static bool modereset_required(struct drm_crtc_state *crtc_state)
3232 {
3233         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3234                 return false;
3235
3236         return !crtc_state->enable || !crtc_state->active;
3237 }
3238
3239 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3240 {
3241         drm_encoder_cleanup(encoder);
3242         kfree(encoder);
3243 }
3244
3245 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3246         .destroy = amdgpu_dm_encoder_destroy,
3247 };
3248
3249
3250 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3251                                 struct dc_scaling_info *scaling_info)
3252 {
3253         int scale_w, scale_h;
3254
3255         memset(scaling_info, 0, sizeof(*scaling_info));
3256
3257         /* Source is fixed 16.16 but we ignore mantissa for now... */
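        /* e.g. a src_w of 0x02800000 (640.0 in 16.16) becomes 640 after the shift */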
3258         scaling_info->src_rect.x = state->src_x >> 16;
3259         scaling_info->src_rect.y = state->src_y >> 16;
3260
3261         scaling_info->src_rect.width = state->src_w >> 16;
3262         if (scaling_info->src_rect.width == 0)
3263                 return -EINVAL;
3264
3265         scaling_info->src_rect.height = state->src_h >> 16;
3266         if (scaling_info->src_rect.height == 0)
3267                 return -EINVAL;
3268
3269         scaling_info->dst_rect.x = state->crtc_x;
3270         scaling_info->dst_rect.y = state->crtc_y;
3271
3272         if (state->crtc_w == 0)
3273                 return -EINVAL;
3274
3275         scaling_info->dst_rect.width = state->crtc_w;
3276
3277         if (state->crtc_h == 0)
3278                 return -EINVAL;
3279
3280         scaling_info->dst_rect.height = state->crtc_h;
3281
3282         /* DRM doesn't specify clipping on destination output. */
3283         scaling_info->clip_rect = scaling_info->dst_rect;
3284
3285         /* TODO: Validate scaling per-format with DC plane caps */
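        /*
         * scale_w and scale_h are in 0.001 units, so the 250..16000 bounds
         * below allow scaling ratios between 0.25x and 16x.
         */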
3286         scale_w = scaling_info->dst_rect.width * 1000 /
3287                   scaling_info->src_rect.width;
3288
3289         if (scale_w < 250 || scale_w > 16000)
3290                 return -EINVAL;
3291
3292         scale_h = scaling_info->dst_rect.height * 1000 /
3293                   scaling_info->src_rect.height;
3294
3295         if (scale_h < 250 || scale_h > 16000)
3296                 return -EINVAL;
3297
3298         /*
3299          * The "scaling_quality" can be ignored for now; quality = 0 lets DC
3300          * assume reasonable defaults based on the format.
3301          */
3302
3303         return 0;
3304 }
3305
3306 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3307                        uint64_t *tiling_flags)
3308 {
3309         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3310         int r = amdgpu_bo_reserve(rbo, false);
3311
3312         if (unlikely(r)) {
3313                 /* Don't show error message when returning -ERESTARTSYS */
3314                 if (r != -ERESTARTSYS)
3315                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
3316                 return r;
3317         }
3318
3319         if (tiling_flags)
3320                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3321
3322         amdgpu_bo_unreserve(rbo);
3323
3324         return r;
3325 }
3326
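/*
 * The DCC metadata offset is stored in the tiling flags in units of 256
 * bytes; a zero offset means the buffer has no DCC metadata attached.
 */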
3327 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3328 {
3329         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3330
3331         return offset ? (address + offset * 256) : 0;
3332 }
3333
3334 static int
3335 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3336                           const struct amdgpu_framebuffer *afb,
3337                           const enum surface_pixel_format format,
3338                           const enum dc_rotation_angle rotation,
3339                           const struct plane_size *plane_size,
3340                           const union dc_tiling_info *tiling_info,
3341                           const uint64_t info,
3342                           struct dc_plane_dcc_param *dcc,
3343                           struct dc_plane_address *address)
3344 {
3345         struct dc *dc = adev->dm.dc;
3346         struct dc_dcc_surface_param input;
3347         struct dc_surface_dcc_cap output;
3348         uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3349         uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3350         uint64_t dcc_address;
3351
3352         memset(&input, 0, sizeof(input));
3353         memset(&output, 0, sizeof(output));
3354
3355         if (!offset)
3356                 return 0;
3357
3358         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3359                 return 0;
3360
3361         if (!dc->cap_funcs.get_dcc_compression_cap)
3362                 return -EINVAL;
3363
3364         input.format = format;
3365         input.surface_size.width = plane_size->surface_size.width;
3366         input.surface_size.height = plane_size->surface_size.height;
3367         input.swizzle_mode = tiling_info->gfx9.swizzle;
3368
3369         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3370                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3371         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3372                 input.scan = SCAN_DIRECTION_VERTICAL;
3373
3374         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3375                 return -EINVAL;
3376
3377         if (!output.capable)
3378                 return -EINVAL;
3379
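        /*
         * Reject DCC when the buffer wasn't allocated with independent 64B
         * blocks but the display hardware reports that it requires them.
         */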
3380         if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3381                 return -EINVAL;
3382
3383         dcc->enable = 1;
3384         dcc->meta_pitch =
3385                 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3386         dcc->independent_64b_blks = i64b;
3387
3388         dcc_address = get_dcc_address(afb->address, info);
3389         address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3390         address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3391
3392         return 0;
3393 }
3394
3395 static int
3396 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3397                              const struct amdgpu_framebuffer *afb,
3398                              const enum surface_pixel_format format,
3399                              const enum dc_rotation_angle rotation,
3400                              const uint64_t tiling_flags,
3401                              union dc_tiling_info *tiling_info,
3402                              struct plane_size *plane_size,
3403                              struct dc_plane_dcc_param *dcc,
3404                              struct dc_plane_address *address)
3405 {
3406         const struct drm_framebuffer *fb = &afb->base;
3407         int ret;
3408
3409         memset(tiling_info, 0, sizeof(*tiling_info));
3410         memset(plane_size, 0, sizeof(*plane_size));
3411         memset(dcc, 0, sizeof(*dcc));
3412         memset(address, 0, sizeof(*address));
3413
3414         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3415                 plane_size->surface_size.x = 0;
3416                 plane_size->surface_size.y = 0;
3417                 plane_size->surface_size.width = fb->width;
3418                 plane_size->surface_size.height = fb->height;
3419                 plane_size->surface_pitch =
3420                         fb->pitches[0] / fb->format->cpp[0];
3421
3422                 address->type = PLN_ADDR_TYPE_GRAPHICS;
3423                 address->grph.addr.low_part = lower_32_bits(afb->address);
3424                 address->grph.addr.high_part = upper_32_bits(afb->address);
3425         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3426                 uint64_t chroma_addr = afb->address + fb->offsets[1];
3427
3428                 plane_size->surface_size.x = 0;
3429                 plane_size->surface_size.y = 0;
3430                 plane_size->surface_size.width = fb->width;
3431                 plane_size->surface_size.height = fb->height;
3432                 plane_size->surface_pitch =
3433                         fb->pitches[0] / fb->format->cpp[0];
3434
3435                 plane_size->chroma_size.x = 0;
3436                 plane_size->chroma_size.y = 0;
3437                 /* TODO: set these based on surface format */
3438                 plane_size->chroma_size.width = fb->width / 2;
3439                 plane_size->chroma_size.height = fb->height / 2;
3440
3441                 plane_size->chroma_pitch =
3442                         fb->pitches[1] / fb->format->cpp[1];
3443
3444                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3445                 address->video_progressive.luma_addr.low_part =
3446                         lower_32_bits(afb->address);
3447                 address->video_progressive.luma_addr.high_part =
3448                         upper_32_bits(afb->address);
3449                 address->video_progressive.chroma_addr.low_part =
3450                         lower_32_bits(chroma_addr);
3451                 address->video_progressive.chroma_addr.high_part =
3452                         upper_32_bits(chroma_addr);
3453         }
3454
3455         /* Fill GFX8 params */
3456         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3457                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3458
3459                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3460                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3461                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3462                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3463                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3464
3465                 /* XXX fix me for VI */
3466                 tiling_info->gfx8.num_banks = num_banks;
3467                 tiling_info->gfx8.array_mode =
3468                                 DC_ARRAY_2D_TILED_THIN1;
3469                 tiling_info->gfx8.tile_split = tile_split;
3470                 tiling_info->gfx8.bank_width = bankw;
3471                 tiling_info->gfx8.bank_height = bankh;
3472                 tiling_info->gfx8.tile_aspect = mtaspect;
3473                 tiling_info->gfx8.tile_mode =
3474                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3475         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3476                         == DC_ARRAY_1D_TILED_THIN1) {
3477                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3478         }
3479
3480         tiling_info->gfx8.pipe_config =
3481                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3482
3483         if (adev->asic_type == CHIP_VEGA10 ||
3484             adev->asic_type == CHIP_VEGA12 ||
3485             adev->asic_type == CHIP_VEGA20 ||
3486             adev->asic_type == CHIP_NAVI10 ||
3487             adev->asic_type == CHIP_NAVI14 ||
3488             adev->asic_type == CHIP_NAVI12 ||
3489             adev->asic_type == CHIP_RENOIR ||
3490             adev->asic_type == CHIP_RAVEN) {
3491                 /* Fill GFX9 params */
3492                 tiling_info->gfx9.num_pipes =
3493                         adev->gfx.config.gb_addr_config_fields.num_pipes;
3494                 tiling_info->gfx9.num_banks =
3495                         adev->gfx.config.gb_addr_config_fields.num_banks;
3496                 tiling_info->gfx9.pipe_interleave =
3497                         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3498                 tiling_info->gfx9.num_shader_engines =
3499                         adev->gfx.config.gb_addr_config_fields.num_se;
3500                 tiling_info->gfx9.max_compressed_frags =
3501                         adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3502                 tiling_info->gfx9.num_rb_per_se =
3503                         adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3504                 tiling_info->gfx9.swizzle =
3505                         AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3506                 tiling_info->gfx9.shaderEnable = 1;
3507
3508                 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3509                                                 plane_size, tiling_info,
3510                                                 tiling_flags, dcc, address);
3511                 if (ret)
3512                         return ret;
3513         }
3514
3515         return 0;
3516 }
3517
3518 static void
3519 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3520                                bool *per_pixel_alpha, bool *global_alpha,
3521                                int *global_alpha_value)
3522 {
3523         *per_pixel_alpha = false;
3524         *global_alpha = false;
3525         *global_alpha_value = 0xff;
3526
3527         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3528                 return;
3529
3530         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3531                 static const uint32_t alpha_formats[] = {
3532                         DRM_FORMAT_ARGB8888,
3533                         DRM_FORMAT_RGBA8888,
3534                         DRM_FORMAT_ABGR8888,
3535                 };
3536                 uint32_t format = plane_state->fb->format->format;
3537                 unsigned int i;
3538
3539                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3540                         if (format == alpha_formats[i]) {
3541                                 *per_pixel_alpha = true;
3542                                 break;
3543                         }
3544                 }
3545         }
3546
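        /* DRM plane alpha is 16-bit; DC takes 8 bits, so keep the high byte. */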
3547         if (plane_state->alpha < 0xffff) {
3548                 *global_alpha = true;
3549                 *global_alpha_value = plane_state->alpha >> 8;
3550         }
3551 }
3552
3553 static int
3554 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3555                             const enum surface_pixel_format format,
3556                             enum dc_color_space *color_space)
3557 {
3558         bool full_range;
3559
3560         *color_space = COLOR_SPACE_SRGB;
3561
3562         /* DRM color properties only affect non-RGB formats. */
3563         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3564                 return 0;
3565
3566         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3567
3568         switch (plane_state->color_encoding) {
3569         case DRM_COLOR_YCBCR_BT601:
3570                 if (full_range)
3571                         *color_space = COLOR_SPACE_YCBCR601;
3572                 else
3573                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3574                 break;
3575
3576         case DRM_COLOR_YCBCR_BT709:
3577                 if (full_range)
3578                         *color_space = COLOR_SPACE_YCBCR709;
3579                 else
3580                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3581                 break;
3582
3583         case DRM_COLOR_YCBCR_BT2020:
3584                 if (full_range)
3585                         *color_space = COLOR_SPACE_2020_YCBCR;
3586                 else
3587                         return -EINVAL;
3588                 break;
3589
3590         default:
3591                 return -EINVAL;
3592         }
3593
3594         return 0;
3595 }
3596
3597 static int
3598 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3599                             const struct drm_plane_state *plane_state,
3600                             const uint64_t tiling_flags,
3601                             struct dc_plane_info *plane_info,
3602                             struct dc_plane_address *address)
3603 {
3604         const struct drm_framebuffer *fb = plane_state->fb;
3605         const struct amdgpu_framebuffer *afb =
3606                 to_amdgpu_framebuffer(plane_state->fb);
3607         struct drm_format_name_buf format_name;
3608         int ret;
3609
3610         memset(plane_info, 0, sizeof(*plane_info));
3611
3612         switch (fb->format->format) {
3613         case DRM_FORMAT_C8:
3614                 plane_info->format =
3615                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3616                 break;
3617         case DRM_FORMAT_RGB565:
3618                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3619                 break;
3620         case DRM_FORMAT_XRGB8888:
3621         case DRM_FORMAT_ARGB8888:
3622                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3623                 break;
3624         case DRM_FORMAT_XRGB2101010:
3625         case DRM_FORMAT_ARGB2101010:
3626                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3627                 break;
3628         case DRM_FORMAT_XBGR2101010:
3629         case DRM_FORMAT_ABGR2101010:
3630                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3631                 break;
3632         case DRM_FORMAT_XBGR8888:
3633         case DRM_FORMAT_ABGR8888:
3634                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3635                 break;
3636         case DRM_FORMAT_NV21:
3637                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3638                 break;
3639         case DRM_FORMAT_NV12:
3640                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3641                 break;
3642         default:
3643                 DRM_ERROR(
3644                         "Unsupported screen format %s\n",
3645                         drm_get_format_name(fb->format->format, &format_name));
3646                 return -EINVAL;
3647         }
3648
3649         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3650         case DRM_MODE_ROTATE_0:
3651                 plane_info->rotation = ROTATION_ANGLE_0;
3652                 break;
3653         case DRM_MODE_ROTATE_90:
3654                 plane_info->rotation = ROTATION_ANGLE_90;
3655                 break;
3656         case DRM_MODE_ROTATE_180:
3657                 plane_info->rotation = ROTATION_ANGLE_180;
3658                 break;
3659         case DRM_MODE_ROTATE_270:
3660                 plane_info->rotation = ROTATION_ANGLE_270;
3661                 break;
3662         default:
3663                 plane_info->rotation = ROTATION_ANGLE_0;
3664                 break;
3665         }
3666
3667         plane_info->visible = true;
3668         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3669
3670         plane_info->layer_index = 0;
3671
3672         ret = fill_plane_color_attributes(plane_state, plane_info->format,
3673                                           &plane_info->color_space);
3674         if (ret)
3675                 return ret;
3676
3677         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3678                                            plane_info->rotation, tiling_flags,
3679                                            &plane_info->tiling_info,
3680                                            &plane_info->plane_size,
3681                                            &plane_info->dcc, address);
3682         if (ret)
3683                 return ret;
3684
3685         fill_blending_from_plane_state(
3686                 plane_state, &plane_info->per_pixel_alpha,
3687                 &plane_info->global_alpha, &plane_info->global_alpha_value);
3688
3689         return 0;
3690 }
3691
3692 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3693                                     struct dc_plane_state *dc_plane_state,
3694                                     struct drm_plane_state *plane_state,
3695                                     struct drm_crtc_state *crtc_state)
3696 {
3697         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3698         const struct amdgpu_framebuffer *amdgpu_fb =
3699                 to_amdgpu_framebuffer(plane_state->fb);
3700         struct dc_scaling_info scaling_info;
3701         struct dc_plane_info plane_info;
3702         uint64_t tiling_flags;
3703         int ret;
3704
3705         ret = fill_dc_scaling_info(plane_state, &scaling_info);
3706         if (ret)
3707                 return ret;
3708
3709         dc_plane_state->src_rect = scaling_info.src_rect;
3710         dc_plane_state->dst_rect = scaling_info.dst_rect;
3711         dc_plane_state->clip_rect = scaling_info.clip_rect;
3712         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3713
3714         ret = get_fb_info(amdgpu_fb, &tiling_flags);
3715         if (ret)
3716                 return ret;
3717
3718         ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3719                                           &plane_info,
3720                                           &dc_plane_state->address);
3721         if (ret)
3722                 return ret;
3723
3724         dc_plane_state->format = plane_info.format;
3725         dc_plane_state->color_space = plane_info.color_space;
3727         dc_plane_state->plane_size = plane_info.plane_size;
3728         dc_plane_state->rotation = plane_info.rotation;
3729         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3730         dc_plane_state->stereo_format = plane_info.stereo_format;
3731         dc_plane_state->tiling_info = plane_info.tiling_info;
3732         dc_plane_state->visible = plane_info.visible;
3733         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3734         dc_plane_state->global_alpha = plane_info.global_alpha;
3735         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3736         dc_plane_state->dcc = plane_info.dcc;
3737         dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
3738
3739         /*
3740          * Always set input transfer function, since plane state is refreshed
3741          * every time.
3742          */
3743         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3744         if (ret)
3745                 return ret;
3746
3747         return 0;
3748 }
3749
3750 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3751                                            const struct dm_connector_state *dm_state,
3752                                            struct dc_stream_state *stream)
3753 {
3754         enum amdgpu_rmx_type rmx_type;
3755
3756         struct rect src = { 0 }; /* viewport in composition space */
3757         struct rect dst = { 0 }; /* stream addressable area */
3758
3759         /* no mode. nothing to be done */
3760         if (!mode)
3761                 return;
3762
3763         /* Full screen scaling by default */
3764         src.width = mode->hdisplay;
3765         src.height = mode->vdisplay;
3766         dst.width = stream->timing.h_addressable;
3767         dst.height = stream->timing.v_addressable;
3768
3769         if (dm_state) {
3770                 rmx_type = dm_state->scaling;
3771                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3772                         if (src.width * dst.height <
3773                                         src.height * dst.width) {
3774                                 /* height needs less upscaling/more downscaling */
3775                                 dst.width = src.width *
3776                                                 dst.height / src.height;
3777                         } else {
3778                                 /* width needs less upscaling/more downscaling */
3779                                 dst.height = src.height *
3780                                                 dst.width / src.width;
3781                         }
3782                 } else if (rmx_type == RMX_CENTER) {
3783                         dst = src;
3784                 }
3785
3786                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3787                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3788
3789                 if (dm_state->underscan_enable) {
3790                         dst.x += dm_state->underscan_hborder / 2;
3791                         dst.y += dm_state->underscan_vborder / 2;
3792                         dst.width -= dm_state->underscan_hborder;
3793                         dst.height -= dm_state->underscan_vborder;
3794                 }
3795         }
3796
3797         stream->src = src;
3798         stream->dst = dst;
3799
3800         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
3801                         dst.x, dst.y, dst.width, dst.height);
3802
3803 }
3804
3805 static enum dc_color_depth
3806 convert_color_depth_from_display_info(const struct drm_connector *connector,
3807                                       const struct drm_connector_state *state,
3808                                       bool is_y420)
3809 {
3810         uint8_t bpc;
3811
3812         if (is_y420) {
3813                 bpc = 8;
3814
3815                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3816                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3817                         bpc = 16;
3818                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3819                         bpc = 12;
3820                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3821                         bpc = 10;
3822         } else {
3823                 bpc = (uint8_t)connector->display_info.bpc;
3824                 /* Assume 8 bpc by default if no bpc is specified. */
3825                 bpc = bpc ? bpc : 8;
3826         }
3827
3828         if (!state)
3829                 state = connector->state;
3830
3831         if (state) {
3832                 /*
3833                  * Cap display bpc based on the user requested value.
3834                  *
3835                  * The value for state->max_bpc may not be correctly updated
3836                  * depending on when the connector gets added to the state
3837                  * or if this was called outside of atomic check, so it
3838                  * can't be used directly.
3839                  */
3840                 bpc = min(bpc, state->max_requested_bpc);
3841
3842                 /* Round down to the nearest even number. */
3843                 bpc = bpc - (bpc & 1);
3844         }
3845
3846         switch (bpc) {
3847         case 0:
3848                 /*
3849                  * Temporary workaround: DRM doesn't parse color depth for
3850                  * EDID revisions before 1.4.
3851                  * TODO: Fix EDID parsing
3852                  */
3853                 return COLOR_DEPTH_888;
3854         case 6:
3855                 return COLOR_DEPTH_666;
3856         case 8:
3857                 return COLOR_DEPTH_888;
3858         case 10:
3859                 return COLOR_DEPTH_101010;
3860         case 12:
3861                 return COLOR_DEPTH_121212;
3862         case 14:
3863                 return COLOR_DEPTH_141414;
3864         case 16:
3865                 return COLOR_DEPTH_161616;
3866         default:
3867                 return COLOR_DEPTH_UNDEFINED;
3868         }
3869 }
3870
3871 static enum dc_aspect_ratio
3872 get_aspect_ratio(const struct drm_display_mode *mode_in)
3873 {
3874         /* 1-1 mapping, since both enums follow the HDMI spec. */
3875         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3876 }
3877
3878 static enum dc_color_space
3879 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3880 {
3881         enum dc_color_space color_space = COLOR_SPACE_SRGB;
3882
3883         switch (dc_crtc_timing->pixel_encoding) {
3884         case PIXEL_ENCODING_YCBCR422:
3885         case PIXEL_ENCODING_YCBCR444:
3886         case PIXEL_ENCODING_YCBCR420:
3887         {
3888                 /*
3889                  * According to the HDMI spec, 27.03 MHz is the separation
3890                  * point between HDTV and SDTV; use YCbCr709 above it and
3891                  * YCbCr601 below it.
3892                  */
3893                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3894                         if (dc_crtc_timing->flags.Y_ONLY)
3895                                 color_space =
3896                                         COLOR_SPACE_YCBCR709_LIMITED;
3897                         else
3898                                 color_space = COLOR_SPACE_YCBCR709;
3899                 } else {
3900                         if (dc_crtc_timing->flags.Y_ONLY)
3901                                 color_space =
3902                                         COLOR_SPACE_YCBCR601_LIMITED;
3903                         else
3904                                 color_space = COLOR_SPACE_YCBCR601;
3905                 }
3906
3907         }
3908         break;
3909         case PIXEL_ENCODING_RGB:
3910                 color_space = COLOR_SPACE_SRGB;
3911                 break;
3912
3913         default:
3914                 WARN_ON(1);
3915                 break;
3916         }
3917
3918         return color_space;
3919 }
3920
3921 static bool adjust_colour_depth_from_display_info(
3922         struct dc_crtc_timing *timing_out,
3923         const struct drm_display_info *info)
3924 {
3925         enum dc_color_depth depth = timing_out->display_color_depth;
3926         int normalized_clk;
3927         do {
3928                 normalized_clk = timing_out->pix_clk_100hz / 10;
3929                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3930                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3931                         normalized_clk /= 2;
3932                 /* Adjust the pixel clock per the HDMI spec, scaling by colour depth relative to 24bpp */
3933                 switch (depth) {
3934                 case COLOR_DEPTH_888:
3935                         break;
3936                 case COLOR_DEPTH_101010:
3937                         normalized_clk = (normalized_clk * 30) / 24;
3938                         break;
3939                 case COLOR_DEPTH_121212:
3940                         normalized_clk = (normalized_clk * 36) / 24;
3941                         break;
3942                 case COLOR_DEPTH_161616:
3943                         normalized_clk = (normalized_clk * 48) / 24;
3944                         break;
3945                 default:
3946                         /* The above depths are the only ones valid for HDMI. */
3947                         return false;
3948                 }
3949                 if (normalized_clk <= info->max_tmds_clock) {
3950                         timing_out->display_color_depth = depth;
3951                         return true;
3952                 }
3953         } while (--depth > COLOR_DEPTH_666);
3954         return false;
3955 }
3956
3957 static void fill_stream_properties_from_drm_display_mode(
3958         struct dc_stream_state *stream,
3959         const struct drm_display_mode *mode_in,
3960         const struct drm_connector *connector,
3961         const struct drm_connector_state *connector_state,
3962         const struct dc_stream_state *old_stream)
3963 {
3964         struct dc_crtc_timing *timing_out = &stream->timing;
3965         const struct drm_display_info *info = &connector->display_info;
3966         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3967         struct hdmi_vendor_infoframe hv_frame;
3968         struct hdmi_avi_infoframe avi_frame;
3969
3970         memset(&hv_frame, 0, sizeof(hv_frame));
3971         memset(&avi_frame, 0, sizeof(avi_frame));
3972
3973         timing_out->h_border_left = 0;
3974         timing_out->h_border_right = 0;
3975         timing_out->v_border_top = 0;
3976         timing_out->v_border_bottom = 0;
3977         /* TODO: un-hardcode */
3978         if (drm_mode_is_420_only(info, mode_in)
3979                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3980                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3981         else if (drm_mode_is_420_also(info, mode_in)
3982                         && aconnector->force_yuv420_output)
3983                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3984         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3985                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3986                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3987         else
3988                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3989
3990         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3991         timing_out->display_color_depth = convert_color_depth_from_display_info(
3992                 connector, connector_state,
3993                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
3994         timing_out->scan_type = SCANNING_TYPE_NODATA;
3995         timing_out->hdmi_vic = 0;
3996
3997         if (old_stream) {
3998                 timing_out->vic = old_stream->timing.vic;
3999                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4000                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4001         } else {
4002                 timing_out->vic = drm_match_cea_mode(mode_in);
4003                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4004                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4005                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4006                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4007         }
4008
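        /* For HDMI, take the VIC and HDMI VIC from the infoframes built for this mode. */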
4009         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4010                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4011                 timing_out->vic = avi_frame.video_code;
4012                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4013                 timing_out->hdmi_vic = hv_frame.vic;
4014         }
4015
4016         timing_out->h_addressable = mode_in->crtc_hdisplay;
4017         timing_out->h_total = mode_in->crtc_htotal;
4018         timing_out->h_sync_width =
4019                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4020         timing_out->h_front_porch =
4021                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4022         timing_out->v_total = mode_in->crtc_vtotal;
4023         timing_out->v_addressable = mode_in->crtc_vdisplay;
4024         timing_out->v_front_porch =
4025                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4026         timing_out->v_sync_width =
4027                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4028         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4029         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4030
4031         stream->output_color_space = get_output_color_space(timing_out);
4032
4033         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4034         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4035         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4036                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4037                     drm_mode_is_420_also(info, mode_in) &&
4038                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4039                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4040                         adjust_colour_depth_from_display_info(timing_out, info);
4041                 }
4042         }
4043 }
4044
4045 static void fill_audio_info(struct audio_info *audio_info,
4046                             const struct drm_connector *drm_connector,
4047                             const struct dc_sink *dc_sink)
4048 {
4049         int i = 0;
4050         int cea_revision = 0;
4051         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4052
4053         audio_info->manufacture_id = edid_caps->manufacturer_id;
4054         audio_info->product_id = edid_caps->product_id;
4055
4056         cea_revision = drm_connector->display_info.cea_rev;
4057
4058         strscpy(audio_info->display_name,
4059                 edid_caps->display_name,
4060                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4061
4062         if (cea_revision >= 3) {
4063                 audio_info->mode_count = edid_caps->audio_mode_count;
4064
4065                 for (i = 0; i < audio_info->mode_count; ++i) {
4066                         audio_info->modes[i].format_code =
4067                                         (enum audio_format_code)
4068                                         (edid_caps->audio_modes[i].format_code);
4069                         audio_info->modes[i].channel_count =
4070                                         edid_caps->audio_modes[i].channel_count;
4071                         audio_info->modes[i].sample_rates.all =
4072                                         edid_caps->audio_modes[i].sample_rate;
4073                         audio_info->modes[i].sample_size =
4074                                         edid_caps->audio_modes[i].sample_size;
4075                 }
4076         }
4077
4078         audio_info->flags.all = edid_caps->speaker_flags;
4079
4080         /* TODO: Only progressive mode is checked; check interlaced mode too */
4081         if (drm_connector->latency_present[0]) {
4082                 audio_info->video_latency = drm_connector->video_latency[0];
4083                 audio_info->audio_latency = drm_connector->audio_latency[0];
4084         }
4085
4086         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4087
4088 }
4089
4090 static void
4091 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4092                                       struct drm_display_mode *dst_mode)
4093 {
4094         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4095         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4096         dst_mode->crtc_clock = src_mode->crtc_clock;
4097         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4098         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4099         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4100         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4101         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4102         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4103         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4104         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4105         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4106         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4107         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4108 }
4109
4110 static void
4111 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4112                                         const struct drm_display_mode *native_mode,
4113                                         bool scale_enabled)
4114 {
4115         if (scale_enabled) {
4116                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4117         } else if (native_mode->clock == drm_mode->clock &&
4118                         native_mode->htotal == drm_mode->htotal &&
4119                         native_mode->vtotal == drm_mode->vtotal) {
4120                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4121         } else {
4122                 /* neither scaling nor an amdgpu-inserted mode; nothing to patch */
4123         }
4124 }
4125
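/*
 * Create a virtual sink so a stream can still be constructed when no
 * physical sink is attached, e.g. for a forced-enabled connector.
 */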
4126 static struct dc_sink *
4127 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4128 {
4129         struct dc_sink_init_data sink_init_data = { 0 };
4130         struct dc_sink *sink = NULL;
4131         sink_init_data.link = aconnector->dc_link;
4132         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4133
4134         sink = dc_sink_create(&sink_init_data);
4135         if (!sink) {
4136                 DRM_ERROR("Failed to create sink!\n");
4137                 return NULL;
4138         }
4139         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4140
4141         return sink;
4142 }
4143
4144 static void set_multisync_trigger_params(
4145                 struct dc_stream_state *stream)
4146 {
4147         if (stream->triggered_crtc_reset.enabled) {
4148                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4149                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4150         }
4151 }
4152
4153 static void set_master_stream(struct dc_stream_state *stream_set[],
4154                               int stream_count)
4155 {
4156         int j, highest_rfr = 0, master_stream = 0;
4157
4158         for (j = 0;  j < stream_count; j++) {
4159                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4160                         int refresh_rate = 0;
4161
4162                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4163                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4164                         if (refresh_rate > highest_rfr) {
4165                                 highest_rfr = refresh_rate;
4166                                 master_stream = j;
4167                         }
4168                 }
4169         }
4170         for (j = 0;  j < stream_count; j++) {
4171                 if (stream_set[j])
4172                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4173         }
4174 }
4175
4176 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4177 {
4178         int i = 0;
4179
4180         if (context->stream_count < 2)
4181                 return;
4182         for (i = 0; i < context->stream_count ; i++) {
4183                 if (!context->streams[i])
4184                         continue;
4185                 /*
4186                  * TODO: add a function to read AMD VSDB bits and set
4187                  * crtc_sync_master.multi_sync_enabled flag
4188                  * For now it's set to false
4189                  */
4190                 set_multisync_trigger_params(context->streams[i]);
4191         }
4192         set_master_stream(context->streams, context->stream_count);
4193 }
4194
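/*
 * Build a dc_stream_state for the connector's sink (or a fake virtual sink
 * when none is attached), patch its CRTC timing against the preferred
 * mode, and fill in the scaling, audio, DSC and PSR/VSC state.
 */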
4195 static struct dc_stream_state *
4196 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4197                        const struct drm_display_mode *drm_mode,
4198                        const struct dm_connector_state *dm_state,
4199                        const struct dc_stream_state *old_stream)
4200 {
4201         struct drm_display_mode *preferred_mode = NULL;
4202         struct drm_connector *drm_connector;
4203         const struct drm_connector_state *con_state =
4204                 dm_state ? &dm_state->base : NULL;
4205         struct dc_stream_state *stream = NULL;
4206         struct drm_display_mode mode = *drm_mode;
4207         bool native_mode_found = false;
4208         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4209         int mode_refresh;
4210         int preferred_refresh = 0;
4211 #if defined(CONFIG_DRM_AMD_DC_DCN)
4212         struct dsc_dec_dpcd_caps dsc_caps;
4213 #endif
4214         uint32_t link_bandwidth_kbps;
4215
4216         struct dc_sink *sink = NULL;

4217         if (aconnector == NULL) {
4218                 DRM_ERROR("aconnector is NULL!\n");
4219                 return stream;
4220         }
4221
4222         drm_connector = &aconnector->base;
4223
4224         if (!aconnector->dc_sink) {
4225                 sink = create_fake_sink(aconnector);
4226                 if (!sink)
4227                         return stream;
4228         } else {
4229                 sink = aconnector->dc_sink;
4230                 dc_sink_retain(sink);
4231         }
4232
4233         stream = dc_create_stream_for_sink(sink);
4234
4235         if (stream == NULL) {
4236                 DRM_ERROR("Failed to create stream for sink!\n");
4237                 goto finish;
4238         }
4239
4240         stream->dm_stream_context = aconnector;
4241
4242         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4243                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4244
4245         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4246                 /* Search for preferred mode */
4247                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4248                         native_mode_found = true;
4249                         break;
4250                 }
4251         }
4252         if (!native_mode_found)
4253                 preferred_mode = list_first_entry_or_null(
4254                                 &aconnector->base.modes,
4255                                 struct drm_display_mode,
4256                                 head);
4257
4258         mode_refresh = drm_mode_vrefresh(&mode);
4259
4260         if (preferred_mode == NULL) {
4261                 /*
4262                  * This may not be an error, the use case is when we have no
4263                  * usermode calls to reset and set mode upon hotplug. In this
4264                  * case, we call set mode ourselves to restore the previous mode
4265                  * and the mode list may not have been filled in yet.
4266                  */
4267                 DRM_DEBUG_DRIVER("No preferred mode found\n");
4268         } else {
4269                 decide_crtc_timing_for_drm_display_mode(
4270                                 &mode, preferred_mode,
4271                                 scale);
4272                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4273         }
4274
4275         if (!dm_state)
4276                 drm_mode_set_crtcinfo(&mode, 0);
4277
4278         /*
4279          * If scaling is enabled and refresh rate didn't change,
4280          * we copy the vic and polarities of the old timings.
4281          */
4282         if (!scale || mode_refresh != preferred_refresh)
4283                 fill_stream_properties_from_drm_display_mode(stream,
4284                         &mode, &aconnector->base, con_state, NULL);
4285         else
4286                 fill_stream_properties_from_drm_display_mode(stream,
4287                         &mode, &aconnector->base, con_state, old_stream);
4288
4289         stream->timing.flags.DSC = 0;
4290
4291         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4292 #if defined(CONFIG_DRM_AMD_DC_DCN)
4293                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4294                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4295                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4296                                       &dsc_caps);
4297 #endif
4298                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4299                                                              dc_link_get_link_cap(aconnector->dc_link));
4300
4301 #if defined(CONFIG_DRM_AMD_DC_DCN)
4302                 if (dsc_caps.is_dsc_supported)
4303                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4304                                                   &dsc_caps,
4305                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4306                                                   link_bandwidth_kbps,
4307                                                   &stream->timing,
4308                                                   &stream->timing.dsc_cfg))
4309                                 stream->timing.flags.DSC = 1;
4310 #endif
4311         }
4312
4313         update_stream_scaling_settings(&mode, dm_state, stream);
4314
4315         fill_audio_info(
4316                 &stream->audio_info,
4317                 drm_connector,
4318                 sink);
4319
4320         update_stream_signal(stream, sink);
4321
4322         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4323                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4324         if (stream->link->psr_feature_enabled)  {
4325                 struct dc  *core_dc = stream->link->ctx->dc;
4326
4327                 if (dc_is_dmcu_initialized(core_dc)) {
4328                         struct dmcu *dmcu = core_dc->res_pool->dmcu;
4329
4330                         stream->psr_version = dmcu->dmcu_version.psr_version;
4331
4332                         /*
4333                          * Decide whether the stream supports VSC SDP
4334                          * colorimetry before building the VSC info packet.
4335                          */
4336                         stream->use_vsc_sdp_for_colorimetry = false;
4337                         if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4338                                 stream->use_vsc_sdp_for_colorimetry =
4339                                         aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4340                         } else {
4341                                 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4342                                         stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4343                                         stream->use_vsc_sdp_for_colorimetry = true;
4344                                 }
4345                         }
4346                         mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4347                 }
4348         }
4349 finish:
4350         dc_sink_release(sink);
4351
4352         return stream;
4353 }
4354
4355 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4356 {
4357         drm_crtc_cleanup(crtc);
4358         kfree(crtc);
4359 }
4360
4361 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4362                                   struct drm_crtc_state *state)
4363 {
4364         struct dm_crtc_state *cur = to_dm_crtc_state(state);
4365
4366         /* TODO: Destroy dc_stream objects once the stream object is flattened */
4367         if (cur->stream)
4368                 dc_stream_release(cur->stream);
4369
4371         __drm_atomic_helper_crtc_destroy_state(state);
4372
4374         kfree(state);
4375 }
4376
4377 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4378 {
4379         struct dm_crtc_state *state;
4380
4381         if (crtc->state)
4382                 dm_crtc_destroy_state(crtc, crtc->state);
4383
4384         state = kzalloc(sizeof(*state), GFP_KERNEL);
4385         if (WARN_ON(!state))
4386                 return;
4387
4388         crtc->state = &state->base;
4389         crtc->state->crtc = crtc;
4391 }
4392
4393 static struct drm_crtc_state *
4394 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4395 {
4396         struct dm_crtc_state *state, *cur;
4397
4398         if (WARN_ON(!crtc->state))
4399                 return NULL;
4400
4401         cur = to_dm_crtc_state(crtc->state);
4402
4403         state = kzalloc(sizeof(*state), GFP_KERNEL);
4404         if (!state)
4405                 return NULL;
4406
4407         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4408
4409         if (cur->stream) {
4410                 state->stream = cur->stream;
4411                 dc_stream_retain(state->stream);
4412         }
4413
4414         state->active_planes = cur->active_planes;
4415         state->interrupts_enabled = cur->interrupts_enabled;
4416         state->vrr_params = cur->vrr_params;
4417         state->vrr_infopacket = cur->vrr_infopacket;
4418         state->abm_level = cur->abm_level;
4419         state->vrr_supported = cur->vrr_supported;
4420         state->freesync_config = cur->freesync_config;
4421         state->crc_src = cur->crc_src;
4422         state->cm_has_degamma = cur->cm_has_degamma;
4423         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4424
4425         /* TODO: Duplicate dc_stream once the stream object is flattened */
4426
4427         return &state->base;
4428 }
4429
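/*
 * Enable/disable the VUPDATE interrupt for a CRTC. Only pre-DCN (DCE)
 * hardware needs this, and only while VRR is active (see dm_set_vblank()
 * below).
 */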
4430 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4431 {
4432         enum dc_irq_source irq_source;
4433         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4434         struct amdgpu_device *adev = crtc->dev->dev_private;
4435         int rc;
4436
4437         /* Do not set vupdate for DCN hardware */
4438         if (adev->family > AMDGPU_FAMILY_AI)
4439                 return 0;
4440
4441         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4442
4443         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4444
4445         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4446                          acrtc->crtc_id, enable ? "en" : "dis", rc);
4447         return rc;
4448 }
4449
4450 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4451 {
4452         enum dc_irq_source irq_source;
4453         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4454         struct amdgpu_device *adev = crtc->dev->dev_private;
4455         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4456         int rc = 0;
4457
4458         if (enable) {
4459                 /* vblank irq on -> Only need vupdate irq in vrr mode */
4460                 if (amdgpu_dm_vrr_active(acrtc_state))
4461                         rc = dm_set_vupdate_irq(crtc, true);
4462         } else {
4463                 /* vblank irq off -> vupdate irq off */
4464                 rc = dm_set_vupdate_irq(crtc, false);
4465         }
4466
4467         if (rc)
4468                 return rc;
4469
4470         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4471         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4472 }
4473
4474 static int dm_enable_vblank(struct drm_crtc *crtc)
4475 {
4476         return dm_set_vblank(crtc, true);
4477 }
4478
4479 static void dm_disable_vblank(struct drm_crtc *crtc)
4480 {
4481         dm_set_vblank(crtc, false);
4482 }
4483
4484 /* Only the options currently available to the driver are implemented */
4485 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4486         .reset = dm_crtc_reset_state,
4487         .destroy = amdgpu_dm_crtc_destroy,
4488         .gamma_set = drm_atomic_helper_legacy_gamma_set,
4489         .set_config = drm_atomic_helper_set_config,
4490         .page_flip = drm_atomic_helper_page_flip,
4491         .atomic_duplicate_state = dm_crtc_duplicate_state,
4492         .atomic_destroy_state = dm_crtc_destroy_state,
4493         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4494         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4495         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4496         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4497         .enable_vblank = dm_enable_vblank,
4498         .disable_vblank = dm_disable_vblank,
4499         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4500 };
4501
4502 static enum drm_connector_status
4503 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4504 {
4505         bool connected;
4506         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4507
4508         /*
4509          * Notes:
4510          * 1. This interface is NOT called in context of HPD irq.
4511          * 2. This interface *is called* in the context of a user-mode ioctl,
4512          * which makes it a bad place for *any* MST-related activity.
4513          */
4514
4515         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4516             !aconnector->fake_enable)
4517                 connected = (aconnector->dc_sink != NULL);
4518         else
4519                 connected = (aconnector->base.force == DRM_FORCE_ON);
4520
4521         return (connected ? connector_status_connected :
4522                         connector_status_disconnected);
4523 }
4524
4525 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4526                                             struct drm_connector_state *connector_state,
4527                                             struct drm_property *property,
4528                                             uint64_t val)
4529 {
4530         struct drm_device *dev = connector->dev;
4531         struct amdgpu_device *adev = dev->dev_private;
4532         struct dm_connector_state *dm_old_state =
4533                 to_dm_connector_state(connector->state);
4534         struct dm_connector_state *dm_new_state =
4535                 to_dm_connector_state(connector_state);
4536
4537         int ret = -EINVAL;
4538
4539         if (property == dev->mode_config.scaling_mode_property) {
4540                 enum amdgpu_rmx_type rmx_type;
4541
4542                 switch (val) {
4543                 case DRM_MODE_SCALE_CENTER:
4544                         rmx_type = RMX_CENTER;
4545                         break;
4546                 case DRM_MODE_SCALE_ASPECT:
4547                         rmx_type = RMX_ASPECT;
4548                         break;
4549                 case DRM_MODE_SCALE_FULLSCREEN:
4550                         rmx_type = RMX_FULL;
4551                         break;
4552                 case DRM_MODE_SCALE_NONE:
4553                 default:
4554                         rmx_type = RMX_OFF;
4555                         break;
4556                 }
4557
4558                 if (dm_old_state->scaling == rmx_type)
4559                         return 0;
4560
4561                 dm_new_state->scaling = rmx_type;
4562                 ret = 0;
4563         } else if (property == adev->mode_info.underscan_hborder_property) {
4564                 dm_new_state->underscan_hborder = val;
4565                 ret = 0;
4566         } else if (property == adev->mode_info.underscan_vborder_property) {
4567                 dm_new_state->underscan_vborder = val;
4568                 ret = 0;
4569         } else if (property == adev->mode_info.underscan_property) {
4570                 dm_new_state->underscan_enable = val;
4571                 ret = 0;
4572         } else if (property == adev->mode_info.abm_level_property) {
4573                 dm_new_state->abm_level = val;
4574                 ret = 0;
4575         }
4576
4577         return ret;
4578 }
4579
4580 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4581                                             const struct drm_connector_state *state,
4582                                             struct drm_property *property,
4583                                             uint64_t *val)
4584 {
4585         struct drm_device *dev = connector->dev;
4586         struct amdgpu_device *adev = dev->dev_private;
4587         struct dm_connector_state *dm_state =
4588                 to_dm_connector_state(state);
4589         int ret = -EINVAL;
4590
4591         if (property == dev->mode_config.scaling_mode_property) {
4592                 switch (dm_state->scaling) {
4593                 case RMX_CENTER:
4594                         *val = DRM_MODE_SCALE_CENTER;
4595                         break;
4596                 case RMX_ASPECT:
4597                         *val = DRM_MODE_SCALE_ASPECT;
4598                         break;
4599                 case RMX_FULL:
4600                         *val = DRM_MODE_SCALE_FULLSCREEN;
4601                         break;
4602                 case RMX_OFF:
4603                 default:
4604                         *val = DRM_MODE_SCALE_NONE;
4605                         break;
4606                 }
4607                 ret = 0;
4608         } else if (property == adev->mode_info.underscan_hborder_property) {
4609                 *val = dm_state->underscan_hborder;
4610                 ret = 0;
4611         } else if (property == adev->mode_info.underscan_vborder_property) {
4612                 *val = dm_state->underscan_vborder;
4613                 ret = 0;
4614         } else if (property == adev->mode_info.underscan_property) {
4615                 *val = dm_state->underscan_enable;
4616                 ret = 0;
4617         } else if (property == adev->mode_info.abm_level_property) {
4618                 *val = dm_state->abm_level;
4619                 ret = 0;
4620         }
4621
4622         return ret;
4623 }
4624
4625 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4626 {
4627         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4628
4629         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4630 }
4631
4632 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4633 {
4634         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4635         const struct dc_link *link = aconnector->dc_link;
4636         struct amdgpu_device *adev = connector->dev->dev_private;
4637         struct amdgpu_display_manager *dm = &adev->dm;
4638
4639 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4640         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4641
4642         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4643             link->type != dc_connection_none &&
4644             dm->backlight_dev) {
4645                 backlight_device_unregister(dm->backlight_dev);
4646                 dm->backlight_dev = NULL;
4647         }
4648 #endif
4649
4650         if (aconnector->dc_em_sink)
4651                 dc_sink_release(aconnector->dc_em_sink);
4652         aconnector->dc_em_sink = NULL;
4653         if (aconnector->dc_sink)
4654                 dc_sink_release(aconnector->dc_sink);
4655         aconnector->dc_sink = NULL;
4656
4657         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4658         drm_connector_unregister(connector);
4659         drm_connector_cleanup(connector);
4660         if (aconnector->i2c) {
4661                 i2c_del_adapter(&aconnector->i2c->base);
4662                 kfree(aconnector->i2c);
4663         }
4664
4665         kfree(connector);
4666 }
4667
4668 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4669 {
4670         struct dm_connector_state *state =
4671                 to_dm_connector_state(connector->state);
4672
4673         if (connector->state)
4674                 __drm_atomic_helper_connector_destroy_state(connector->state);
4675
4676         kfree(state);
4677
4678         state = kzalloc(sizeof(*state), GFP_KERNEL);
4679
4680         if (state) {
4681                 state->scaling = RMX_OFF;
4682                 state->underscan_enable = false;
4683                 state->underscan_hborder = 0;
4684                 state->underscan_vborder = 0;
4685                 state->base.max_requested_bpc = 8;
4686                 state->vcpi_slots = 0;
4687                 state->pbn = 0;
4688                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4689                         state->abm_level = amdgpu_dm_abm_level;
4690
4691                 __drm_atomic_helper_connector_reset(connector, &state->base);
4692         }
4693 }
4694
4695 struct drm_connector_state *
4696 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4697 {
4698         struct dm_connector_state *state =
4699                 to_dm_connector_state(connector->state);
4700
4701         struct dm_connector_state *new_state =
4702                         kmemdup(state, sizeof(*state), GFP_KERNEL);
4703
4704         if (!new_state)
4705                 return NULL;
4706
4707         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4708
4709         new_state->freesync_capable = state->freesync_capable;
4710         new_state->abm_level = state->abm_level;
4711         new_state->scaling = state->scaling;
4712         new_state->underscan_enable = state->underscan_enable;
4713         new_state->underscan_hborder = state->underscan_hborder;
4714         new_state->underscan_vborder = state->underscan_vborder;
4715         new_state->vcpi_slots = state->vcpi_slots;
4716         new_state->pbn = state->pbn;
4717         return &new_state->base;
4718 }
4719
4720 static int
4721 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4722 {
4723         struct amdgpu_dm_connector *amdgpu_dm_connector =
4724                 to_amdgpu_dm_connector(connector);
4725
4726 #if defined(CONFIG_DEBUG_FS)
4727         connector_debugfs_init(amdgpu_dm_connector);
4728 #endif
4729
4730         return 0;
4731 }
4732
4733 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4734         .reset = amdgpu_dm_connector_funcs_reset,
4735         .detect = amdgpu_dm_connector_detect,
4736         .fill_modes = drm_helper_probe_single_connector_modes,
4737         .destroy = amdgpu_dm_connector_destroy,
4738         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4739         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4740         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4741         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4742         .late_register = amdgpu_dm_connector_late_register,
4743         .early_unregister = amdgpu_dm_connector_unregister
4744 };
4745
4746 static int get_modes(struct drm_connector *connector)
4747 {
4748         return amdgpu_dm_connector_get_modes(connector);
4749 }
4750
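/*
 * Create an emulated sink from the EDID blob attached to the connector
 * (e.g. one supplied via EDID firmware); if no blob is present, force the
 * connector off.
 */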
4751 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4752 {
4753         struct dc_sink_init_data init_params = {
4754                         .link = aconnector->dc_link,
4755                         .sink_signal = SIGNAL_TYPE_VIRTUAL
4756         };
4757         struct edid *edid;
4758
4759         if (!aconnector->base.edid_blob_ptr) {
4760                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4761                                 aconnector->base.name);
4762
4763                 aconnector->base.force = DRM_FORCE_OFF;
4764                 aconnector->base.override_edid = false;
4765                 return;
4766         }
4767
4768         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4769
4770         aconnector->edid = edid;
4771
4772         aconnector->dc_em_sink = dc_link_add_remote_sink(
4773                 aconnector->dc_link,
4774                 (uint8_t *)edid,
4775                 (edid->extensions + 1) * EDID_LENGTH,
4776                 &init_params);
4777
4778         if (aconnector->base.force == DRM_FORCE_ON) {
4779                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4780                 aconnector->dc_link->local_sink :
4781                 aconnector->dc_em_sink;
4782                 dc_sink_retain(aconnector->dc_sink);
4783         }
4784 }
4785
4786 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4787 {
4788         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4789
4790         /*
4791          * In case of a headless boot with force on for a DP managed connector,
4792          * those settings have to be != 0 to get an initial modeset.
4793          */
4794         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4795                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4796                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4797         }
4798
4799
4800         aconnector->base.override_edid = true;
4801         create_eml_sink(aconnector);
4802 }
4803
4804 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4805                                    struct drm_display_mode *mode)
4806 {
4807         int result = MODE_ERROR;
4808         struct dc_sink *dc_sink;
4809         struct amdgpu_device *adev = connector->dev->dev_private;
4810         /* TODO: Unhardcode stream count */
4811         struct dc_stream_state *stream;
4812         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4813         enum dc_status dc_result = DC_OK;
4814
4815         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4816                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4817                 return result;
4818
4819         /*
4820          * Only run this the first time mode_valid is called, to initialize
4821          * EDID mgmt.
4822          */
4823         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4824                 !aconnector->dc_em_sink)
4825                 handle_edid_mgmt(aconnector);
4826
4827         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4828
4829         if (dc_sink == NULL) {
4830                 DRM_ERROR("dc_sink is NULL!\n");
4831                 goto fail;
4832         }
4833
4834         stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4835         if (stream == NULL) {
4836                 DRM_ERROR("Failed to create stream for sink!\n");
4837                 goto fail;
4838         }
4839
4840         dc_result = dc_validate_stream(adev->dm.dc, stream);
4841
4842         if (dc_result == DC_OK)
4843                 result = MODE_OK;
4844         else
4845                 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4846                               mode->hdisplay,
4847                               mode->vdisplay,
4848                               mode->clock,
4849                               dc_result);
4850
4851         dc_stream_release(stream);
4852
4853 fail:
4854         /* TODO: error handling */
4855         return result;
4856 }
4857
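/*
 * Pack the connector's HDR static metadata into a DC info packet.
 * hdmi_drm_infoframe_pack_only() emits 4 header bytes followed by the
 * 26-byte Dynamic Range and Mastering payload; the header is then
 * rewritten for the transport: an HDMI infoframe header (type 0x87,
 * version 1, length 26, plus the checksum byte) or a DP SDP header,
 * which carries no checksum byte.
 */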
4858 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4859                                 struct dc_info_packet *out)
4860 {
4861         struct hdmi_drm_infoframe frame;
4862         unsigned char buf[30]; /* 26 + 4 */
4863         ssize_t len;
4864         int ret, i;
4865
4866         memset(out, 0, sizeof(*out));
4867
4868         if (!state->hdr_output_metadata)
4869                 return 0;
4870
4871         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4872         if (ret)
4873                 return ret;
4874
4875         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4876         if (len < 0)
4877                 return (int)len;
4878
4879         /* Static metadata is a fixed 26 bytes + 4 byte header. */
4880         if (len != 30)
4881                 return -EINVAL;
4882
4883         /* Prepare the infopacket for DC. */
4884         switch (state->connector->connector_type) {
4885         case DRM_MODE_CONNECTOR_HDMIA:
4886                 out->hb0 = 0x87; /* type */
4887                 out->hb1 = 0x01; /* version */
4888                 out->hb2 = 0x1A; /* length */
4889                 out->sb[0] = buf[3]; /* checksum */
4890                 i = 1;
4891                 break;
4892
4893         case DRM_MODE_CONNECTOR_DisplayPort:
4894         case DRM_MODE_CONNECTOR_eDP:
4895                 out->hb0 = 0x00; /* sdp id, zero */
4896                 out->hb1 = 0x87; /* type */
4897                 out->hb2 = 0x1D; /* payload len - 1 */
4898                 out->hb3 = (0x13 << 2); /* sdp version */
4899                 out->sb[0] = 0x01; /* version */
4900                 out->sb[1] = 0x1A; /* length */
4901                 i = 2;
4902                 break;
4903
4904         default:
4905                 return -EINVAL;
4906         }
4907
4908         memcpy(&out->sb[i], &buf[4], 26);
4909         out->valid = true;
4910
4911         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4912                        sizeof(out->sb), false);
4913
4914         return 0;
4915 }
4916
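/*
 * Report whether the HDR metadata changed: true when entering or leaving
 * HDR, when the blob length changed, or when the blob contents differ.
 */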
4917 static bool
4918 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4919                           const struct drm_connector_state *new_state)
4920 {
4921         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4922         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4923
4924         if (old_blob != new_blob) {
4925                 if (old_blob && new_blob &&
4926                     old_blob->length == new_blob->length)
4927                         return memcmp(old_blob->data, new_blob->data,
4928                                       old_blob->length);
4929
4930                 return true;
4931         }
4932
4933         return false;
4934 }
4935
4936 static int
4937 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4938                                  struct drm_atomic_state *state)
4939 {
4940         struct drm_connector_state *new_con_state =
4941                 drm_atomic_get_new_connector_state(state, conn);
4942         struct drm_connector_state *old_con_state =
4943                 drm_atomic_get_old_connector_state(state, conn);
4944         struct drm_crtc *crtc = new_con_state->crtc;
4945         struct drm_crtc_state *new_crtc_state;
4946         int ret;
4947
4948         if (!crtc)
4949                 return 0;
4950
4951         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4952                 struct dc_info_packet hdr_infopacket;
4953
4954                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4955                 if (ret)
4956                         return ret;
4957
4958                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4959                 if (IS_ERR(new_crtc_state))
4960                         return PTR_ERR(new_crtc_state);
4961
4962                 /*
4963                  * DC considers the stream backends changed if the
4964                  * static metadata changes. Forcing the modeset also
4965                  * gives a simple way for userspace to switch from
4966                  * 8bpc to 10bpc when setting the metadata to enter
4967                  * or exit HDR.
4968                  *
4969                  * Changing the static metadata after it's been
4970                  * set is permissible, however. So only force a
4971                  * modeset if we're entering or exiting HDR.
4972                  */
4973                 new_crtc_state->mode_changed =
4974                         !old_con_state->hdr_output_metadata ||
4975                         !new_con_state->hdr_output_metadata;
4976         }
4977
4978         return 0;
4979 }
4980
4981 static const struct drm_connector_helper_funcs
4982 amdgpu_dm_connector_helper_funcs = {
4983         /*
4984          * If hotplugging a second, bigger display in FB console mode, bigger
4985          * resolution modes will be filtered by drm_mode_validate_size(), and
4986          * those modes are missing after the user starts lightdm. So we need
4987          * to renew the modes list in the get_modes callback, not just return
4988          * the modes count.
4988          */
4989         .get_modes = get_modes,
4990         .mode_valid = amdgpu_dm_connector_mode_valid,
4991         .atomic_check = amdgpu_dm_connector_atomic_check,
4992 };
4993
4994 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4995 {
4996 }
4997
4998 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
4999 {
5000         struct drm_device *dev = new_crtc_state->crtc->dev;
5001         struct drm_plane *plane;
5002
5003         drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5004                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5005                         return true;
5006         }
5007
5008         return false;
5009 }
5010
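/*
 * Count the non-cursor planes that will be enabled on the CRTC. Planes
 * absent from the atomic state passed validation earlier and keep their
 * enabled status; planes present in the state count only if they have a
 * framebuffer bound.
 */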
5011 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5012 {
5013         struct drm_atomic_state *state = new_crtc_state->state;
5014         struct drm_plane *plane;
5015         int num_active = 0;
5016
5017         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5018                 struct drm_plane_state *new_plane_state;
5019
5020                 /* Cursor planes are "fake". */
5021                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5022                         continue;
5023
5024                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5025
5026                 if (!new_plane_state) {
5027                         /*
5028                          * The plane is enabled on the CRTC and hasn't changed
5029                          * state. This means that it previously passed
5030                          * validation and is therefore enabled.
5031                          */
5032                         num_active += 1;
5033                         continue;
5034                 }
5035
5036                 /* We need a framebuffer to be considered enabled. */
5037                 num_active += (new_plane_state->fb != NULL);
5038         }
5039
5040         return num_active;
5041 }
5042
5043 /*
5044  * Sets whether interrupts should be enabled on a specific CRTC.
5045  * We require that the stream be enabled and that there exist active
5046  * DC planes on the stream.
5047  */
5048 static void
5049 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5050                                struct drm_crtc_state *new_crtc_state)
5051 {
5052         struct dm_crtc_state *dm_new_crtc_state =
5053                 to_dm_crtc_state(new_crtc_state);
5054
5055         dm_new_crtc_state->active_planes = 0;
5056         dm_new_crtc_state->interrupts_enabled = false;
5057
5058         if (!dm_new_crtc_state->stream)
5059                 return;
5060
5061         dm_new_crtc_state->active_planes =
5062                 count_crtc_active_planes(new_crtc_state);
5063
5064         dm_new_crtc_state->interrupts_enabled =
5065                 dm_new_crtc_state->active_planes > 0;
5066 }
5067
5068 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5069                                        struct drm_crtc_state *state)
5070 {
5071         struct amdgpu_device *adev = crtc->dev->dev_private;
5072         struct dc *dc = adev->dm.dc;
5073         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5074         int ret = -EINVAL;
5075
5076         /*
5077          * Update interrupt state for the CRTC. This needs to happen whenever
5078          * the CRTC has changed or whenever any of its planes have changed.
5079          * Atomic check satisfies both of these requirements since the CRTC
5080          * is added to the state by DRM during drm_atomic_helper_check_planes.
5081          */
5082         dm_update_crtc_interrupt_state(crtc, state);
5083
5084         if (unlikely(!dm_crtc_state->stream &&
5085                      modeset_required(state, NULL, dm_crtc_state->stream))) {
5086                 WARN_ON(1);
5087                 return ret;
5088         }
5089
5090         /* In some use cases, like reset, no stream is attached */
5091         if (!dm_crtc_state->stream)
5092                 return 0;
5093
5094         /*
5095          * We want at least one hardware plane enabled to use
5096          * the stream with a cursor enabled.
5097          */
5098         if (state->enable && state->active &&
5099             does_crtc_have_active_cursor(state) &&
5100             dm_crtc_state->active_planes == 0)
5101                 return -EINVAL;
5102
5103         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5104                 return 0;
5105
5106         return ret;
5107 }
5108
5109 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5110                                       const struct drm_display_mode *mode,
5111                                       struct drm_display_mode *adjusted_mode)
5112 {
5113         return true;
5114 }
5115
5116 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5117         .disable = dm_crtc_helper_disable,
5118         .atomic_check = dm_crtc_helper_atomic_check,
5119         .mode_fixup = dm_crtc_helper_mode_fixup,
5120         .get_scanout_position = amdgpu_crtc_get_scanout_position,
5121 };
5122
5123 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5124 {
5126 }
5127
5128 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5129 {
5130         switch (display_color_depth) {
5131         case COLOR_DEPTH_666:
5132                 return 6;
5133         case COLOR_DEPTH_888:
5134                 return 8;
5135         case COLOR_DEPTH_101010:
5136                 return 10;
5137         case COLOR_DEPTH_121212:
5138                 return 12;
5139         case COLOR_DEPTH_141414:
5140                 return 14;
5141         case COLOR_DEPTH_161616:
5142                 return 16;
5143         default:
5144                 break;
5145         }
5146         return 0;
5147 }
5148
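/*
 * For MST connectors, derive the stream's PBN from the adjusted mode's
 * pixel clock and effective bpp, then reserve VCPI slots for it in the
 * MST topology.
 */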
5149 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5150                                           struct drm_crtc_state *crtc_state,
5151                                           struct drm_connector_state *conn_state)
5152 {
5153         struct drm_atomic_state *state = crtc_state->state;
5154         struct drm_connector *connector = conn_state->connector;
5155         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5156         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5157         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5158         struct drm_dp_mst_topology_mgr *mst_mgr;
5159         struct drm_dp_mst_port *mst_port;
5160         enum dc_color_depth color_depth;
5161         int clock, bpp = 0;
5162         bool is_y420 = false;
5163
5164         if (!aconnector->port || !aconnector->dc_sink)
5165                 return 0;
5166
5167         mst_port = aconnector->port;
5168         mst_mgr = &aconnector->mst_port->mst_mgr;
5169
5170         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5171                 return 0;
5172
5173         if (!state->duplicated) {
5174                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5175                                 aconnector->force_yuv420_output;
5176                 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5177                                                                     is_y420);
5178                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5179                 clock = adjusted_mode->clock;
5180                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5181         }
5182         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5183                                                                            mst_mgr,
5184                                                                            mst_port,
5185                                                                            dm_new_connector_state->pbn,
5186                                                                            0);
5187         if (dm_new_connector_state->vcpi_slots < 0) {
5188                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5189                 return dm_new_connector_state->vcpi_slots;
5190         }
5191         return 0;
5192 }
5193
5194 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5195         .disable = dm_encoder_helper_disable,
5196         .atomic_check = dm_encoder_helper_atomic_check
5197 };
5198
5199 #if defined(CONFIG_DRM_AMD_DC_DCN)
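/*
 * Revisit MST VCPI allocations once DSC decisions are known: streams with
 * DSC enabled recompute their PBN from the compressed bits_per_pixel and
 * pixel clock, while the rest keep the uncompressed PBN computed in
 * dm_encoder_helper_atomic_check().
 */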
5200 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5201                                             struct dc_state *dc_state)
5202 {
5203         struct dc_stream_state *stream = NULL;
5204         struct drm_connector *connector;
5205         struct drm_connector_state *new_con_state, *old_con_state;
5206         struct amdgpu_dm_connector *aconnector;
5207         struct dm_connector_state *dm_conn_state;
5208         int i, j, clock, bpp;
5209         int vcpi, pbn_div, pbn = 0;
5210
5211         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5212
5213                 aconnector = to_amdgpu_dm_connector(connector);
5214
5215                 if (!aconnector->port)
5216                         continue;
5217
5218                 if (!new_con_state || !new_con_state->crtc)
5219                         continue;
5220
5221                 dm_conn_state = to_dm_connector_state(new_con_state);
5222
5223                 for (j = 0; j < dc_state->stream_count; j++) {
5224                         stream = dc_state->streams[j];
5225                         if (!stream)
5226                                 continue;
5227
5228                         if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5229                                 break;
5230
5231                         stream = NULL;
5232                 }
5233
5234                 if (!stream)
5235                         continue;
5236
5237                 if (stream->timing.flags.DSC != 1) {
5238                         drm_dp_mst_atomic_enable_dsc(state,
5239                                                      aconnector->port,
5240                                                      dm_conn_state->pbn,
5241                                                      0,
5242                                                      false);
5243                         continue;
5244                 }
5245
5246                 pbn_div = dm_mst_get_pbn_divider(stream->link);
5247                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5248                 clock = stream->timing.pix_clk_100hz / 10;
5249                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5250                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5251                                                     aconnector->port,
5252                                                     pbn, pbn_div,
5253                                                     true);
5254                 if (vcpi < 0)
5255                         return vcpi;
5256
5257                 dm_conn_state->pbn = pbn;
5258                 dm_conn_state->vcpi_slots = vcpi;
5259         }
5260         return 0;
5261 }
5262 #endif
5263
5264 static void dm_drm_plane_reset(struct drm_plane *plane)
5265 {
5266         struct dm_plane_state *amdgpu_state = NULL;
5267
5268         if (plane->state)
5269                 plane->funcs->atomic_destroy_state(plane, plane->state);
5270
5271         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5272         WARN_ON(amdgpu_state == NULL);
5273
5274         if (amdgpu_state)
5275                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5276 }
5277
5278 static struct drm_plane_state *
5279 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5280 {
5281         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5282
5283         old_dm_plane_state = to_dm_plane_state(plane->state);
5284         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5285         if (!dm_plane_state)
5286                 return NULL;
5287
5288         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5289
5290         if (old_dm_plane_state->dc_state) {
5291                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5292                 dc_plane_state_retain(dm_plane_state->dc_state);
5293         }
5294
5295         return &dm_plane_state->base;
5296 }
5297
5298 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5299                                 struct drm_plane_state *state)
5300 {
5301         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5302
5303         if (dm_plane_state->dc_state)
5304                 dc_plane_state_release(dm_plane_state->dc_state);
5305
5306         drm_atomic_helper_plane_destroy_state(plane, state);
5307 }
5308
5309 static const struct drm_plane_funcs dm_plane_funcs = {
5310         .update_plane   = drm_atomic_helper_update_plane,
5311         .disable_plane  = drm_atomic_helper_disable_plane,
5312         .destroy        = drm_primary_helper_destroy,
5313         .reset = dm_drm_plane_reset,
5314         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5315         .atomic_destroy_state = dm_drm_plane_destroy_state,
5316 };
5317
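/*
 * prepare_fb: reserve and pin the framebuffer BO (VRAM for cursor planes,
 * any supported domain otherwise), bind it into GART, and record its GPU
 * address and tiling info in the DC plane state.
 */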
5318 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5319                                       struct drm_plane_state *new_state)
5320 {
5321         struct amdgpu_framebuffer *afb;
5322         struct drm_gem_object *obj;
5323         struct amdgpu_device *adev;
5324         struct amdgpu_bo *rbo;
5325         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5326         struct list_head list;
5327         struct ttm_validate_buffer tv;
5328         struct ww_acquire_ctx ticket;
5329         uint64_t tiling_flags;
5330         uint32_t domain;
5331         int r;
5332
5333         dm_plane_state_old = to_dm_plane_state(plane->state);
5334         dm_plane_state_new = to_dm_plane_state(new_state);
5335
5336         if (!new_state->fb) {
5337                 DRM_DEBUG_DRIVER("No FB bound\n");
5338                 return 0;
5339         }
5340
5341         afb = to_amdgpu_framebuffer(new_state->fb);
5342         obj = new_state->fb->obj[0];
5343         rbo = gem_to_amdgpu_bo(obj);
5344         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5345         INIT_LIST_HEAD(&list);
5346
5347         tv.bo = &rbo->tbo;
5348         tv.num_shared = 1;
5349         list_add(&tv.head, &list);
5350
5351         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5352         if (r) {
5353                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5354                 return r;
5355         }
5356
5357         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5358                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5359         else
5360                 domain = AMDGPU_GEM_DOMAIN_VRAM;
5361
5362         r = amdgpu_bo_pin(rbo, domain);
5363         if (unlikely(r != 0)) {
5364                 if (r != -ERESTARTSYS)
5365                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5366                 ttm_eu_backoff_reservation(&ticket, &list);
5367                 return r;
5368         }
5369
5370         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5371         if (unlikely(r != 0)) {
5372                 amdgpu_bo_unpin(rbo);
5373                 ttm_eu_backoff_reservation(&ticket, &list);
5374                 DRM_ERROR("%p bind failed\n", rbo);
5375                 return r;
5376         }
5377
5378         amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5379
5380         ttm_eu_backoff_reservation(&ticket, &list);
5381
5382         afb->address = amdgpu_bo_gpu_offset(rbo);
5383
5384         amdgpu_bo_ref(rbo);
5385
5386         if (dm_plane_state_new->dc_state &&
5387                         dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5388                 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5389
5390                 fill_plane_buffer_attributes(
5391                         adev, afb, plane_state->format, plane_state->rotation,
5392                         tiling_flags, &plane_state->tiling_info,
5393                         &plane_state->plane_size, &plane_state->dcc,
5394                         &plane_state->address);
5395         }
5396
5397         return 0;
5398 }
5399
5400 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5401                                        struct drm_plane_state *old_state)
5402 {
5403         struct amdgpu_bo *rbo;
5404         int r;
5405
5406         if (!old_state->fb)
5407                 return;
5408
5409         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5410         r = amdgpu_bo_reserve(rbo, false);
5411         if (unlikely(r)) {
5412                 DRM_ERROR("failed to reserve rbo before unpin\n");
5413                 return;
5414         }
5415
5416         amdgpu_bo_unpin(rbo);
5417         amdgpu_bo_unreserve(rbo);
5418         amdgpu_bo_unref(&rbo);
5419 }
5420
5421 static int dm_plane_atomic_check(struct drm_plane *plane,
5422                                  struct drm_plane_state *state)
5423 {
5424         struct amdgpu_device *adev = plane->dev->dev_private;
5425         struct dc *dc = adev->dm.dc;
5426         struct dm_plane_state *dm_plane_state;
5427         struct dc_scaling_info scaling_info;
5428         int ret;
5429
5430         dm_plane_state = to_dm_plane_state(state);
5431
5432         if (!dm_plane_state->dc_state)
5433                 return 0;
5434
5435         ret = fill_dc_scaling_info(state, &scaling_info);
5436         if (ret)
5437                 return ret;
5438
5439         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5440                 return 0;
5441
5442         return -EINVAL;
5443 }
5444
5445 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5446                                        struct drm_plane_state *new_plane_state)
5447 {
5448         /* Only support async updates on cursor planes. */
5449         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5450                 return -EINVAL;
5451
5452         return 0;
5453 }
5454
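/*
 * Async (cursor) update: fold the new framebuffer and src/crtc coordinates
 * directly into the current plane state and program the cursor, without
 * going through a full atomic commit.
 */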
5455 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5456                                          struct drm_plane_state *new_state)
5457 {
5458         struct drm_plane_state *old_state =
5459                 drm_atomic_get_old_plane_state(new_state->state, plane);
5460
5461         swap(plane->state->fb, new_state->fb);
5462
5463         plane->state->src_x = new_state->src_x;
5464         plane->state->src_y = new_state->src_y;
5465         plane->state->src_w = new_state->src_w;
5466         plane->state->src_h = new_state->src_h;
5467         plane->state->crtc_x = new_state->crtc_x;
5468         plane->state->crtc_y = new_state->crtc_y;
5469         plane->state->crtc_w = new_state->crtc_w;
5470         plane->state->crtc_h = new_state->crtc_h;
5471
5472         handle_cursor_update(plane, old_state);
5473 }
5474
5475 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5476         .prepare_fb = dm_plane_helper_prepare_fb,
5477         .cleanup_fb = dm_plane_helper_cleanup_fb,
5478         .atomic_check = dm_plane_atomic_check,
5479         .atomic_async_check = dm_plane_atomic_async_check,
5480         .atomic_async_update = dm_plane_atomic_async_update
5481 };
5482
5483 /*
5484  * TODO: these are currently initialized to rgb formats only.
5485  * For future use cases we should either initialize them dynamically based on
5486  * plane capabilities, or initialize this array to all formats, so internal drm
5487  * check will succeed, and let DC implement proper check
5488  */
5489 static const uint32_t rgb_formats[] = {
5490         DRM_FORMAT_XRGB8888,
5491         DRM_FORMAT_ARGB8888,
5492         DRM_FORMAT_RGBA8888,
5493         DRM_FORMAT_XRGB2101010,
5494         DRM_FORMAT_XBGR2101010,
5495         DRM_FORMAT_ARGB2101010,
5496         DRM_FORMAT_ABGR2101010,
5497         DRM_FORMAT_XBGR8888,
5498         DRM_FORMAT_ABGR8888,
5499         DRM_FORMAT_RGB565,
5500 };
5501
5502 static const uint32_t overlay_formats[] = {
5503         DRM_FORMAT_XRGB8888,
5504         DRM_FORMAT_ARGB8888,
5505         DRM_FORMAT_RGBA8888,
5506         DRM_FORMAT_XBGR8888,
5507         DRM_FORMAT_ABGR8888,
5508         DRM_FORMAT_RGB565
5509 };
5510
5511 static const u32 cursor_formats[] = {
5512         DRM_FORMAT_ARGB8888
5513 };
5514
5515 static int get_plane_formats(const struct drm_plane *plane,
5516                              const struct dc_plane_cap *plane_cap,
5517                              uint32_t *formats, int max_formats)
5518 {
5519         int i, num_formats = 0;
5520
5521         /*
5522          * TODO: Query support for each group of formats directly from
5523          * DC plane caps. This will require adding more formats to the
5524          * caps list.
5525          */
5526
5527         switch (plane->type) {
5528         case DRM_PLANE_TYPE_PRIMARY:
5529                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5530                         if (num_formats >= max_formats)
5531                                 break;
5532
5533                         formats[num_formats++] = rgb_formats[i];
5534                 }
5535
5536                 if (plane_cap && plane_cap->pixel_format_support.nv12)
5537                         formats[num_formats++] = DRM_FORMAT_NV12;
5538                 break;
5539
5540         case DRM_PLANE_TYPE_OVERLAY:
5541                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5542                         if (num_formats >= max_formats)
5543                                 break;
5544
5545                         formats[num_formats++] = overlay_formats[i];
5546                 }
5547                 break;
5548
5549         case DRM_PLANE_TYPE_CURSOR:
5550                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5551                         if (num_formats >= max_formats)
5552                                 break;
5553
5554                         formats[num_formats++] = cursor_formats[i];
5555                 }
5556                 break;
5557         }
5558
5559         return num_formats;
5560 }
5561
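/*
 * Register a DRM plane, exposing the format list allowed by the DC plane
 * caps plus alpha/blend properties for per-pixel-alpha overlays and YCbCr
 * color properties for NV12-capable primary planes.
 */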
5562 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5563                                 struct drm_plane *plane,
5564                                 unsigned long possible_crtcs,
5565                                 const struct dc_plane_cap *plane_cap)
5566 {
5567         uint32_t formats[32];
5568         int num_formats;
        int res;
5570
5571         num_formats = get_plane_formats(plane, plane_cap, formats,
5572                                         ARRAY_SIZE(formats));
5573
5574         res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5575                                        &dm_plane_funcs, formats, num_formats,
5576                                        NULL, plane->type, NULL);
5577         if (res)
5578                 return res;
5579
5580         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5581             plane_cap && plane_cap->per_pixel_alpha) {
5582                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5583                                           BIT(DRM_MODE_BLEND_PREMULTI);
5584
5585                 drm_plane_create_alpha_property(plane);
5586                 drm_plane_create_blend_mode_property(plane, blend_caps);
5587         }
5588
5589         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5590             plane_cap && plane_cap->pixel_format_support.nv12) {
5591                 /* This only affects YUV formats. */
5592                 drm_plane_create_color_properties(
5593                         plane,
5594                         BIT(DRM_COLOR_YCBCR_BT601) |
5595                         BIT(DRM_COLOR_YCBCR_BT709),
5596                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5597                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5598                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5599         }
5600
5601         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5602
5603         /* Create (reset) the plane state */
5604         if (plane->funcs->reset)
5605                 plane->funcs->reset(plane);
5606
5607         return 0;
5608 }
5609
5610 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5611                                struct drm_plane *plane,
5612                                uint32_t crtc_index)
5613 {
5614         struct amdgpu_crtc *acrtc = NULL;
5615         struct drm_plane *cursor_plane;
5616
5617         int res = -ENOMEM;
5618
5619         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5620         if (!cursor_plane)
5621                 goto fail;
5622
5623         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
        res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;

5626         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5627         if (!acrtc)
5628                 goto fail;
5629
5630         res = drm_crtc_init_with_planes(
5631                         dm->ddev,
5632                         &acrtc->base,
5633                         plane,
5634                         cursor_plane,
5635                         &amdgpu_dm_crtc_funcs, NULL);
5636
5637         if (res)
5638                 goto fail;
5639
5640         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5641
        /* Create (reset) the crtc state */
5643         if (acrtc->base.funcs->reset)
5644                 acrtc->base.funcs->reset(&acrtc->base);
5645
5646         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5647         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5648
5649         acrtc->crtc_id = crtc_index;
5650         acrtc->base.enabled = false;
5651         acrtc->otg_inst = -1;
5652
5653         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5654         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5655                                    true, MAX_COLOR_LUT_ENTRIES);
5656         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5657
5658         return 0;
5659
5660 fail:
5661         kfree(acrtc);
5662         kfree(cursor_plane);
5663         return res;
5664 }
5665
5666
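/* Map a DC signal type onto the DRM connector type exposed to userspace. */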
5667 static int to_drm_connector_type(enum signal_type st)
5668 {
5669         switch (st) {
5670         case SIGNAL_TYPE_HDMI_TYPE_A:
5671                 return DRM_MODE_CONNECTOR_HDMIA;
5672         case SIGNAL_TYPE_EDP:
5673                 return DRM_MODE_CONNECTOR_eDP;
5674         case SIGNAL_TYPE_LVDS:
5675                 return DRM_MODE_CONNECTOR_LVDS;
5676         case SIGNAL_TYPE_RGB:
5677                 return DRM_MODE_CONNECTOR_VGA;
5678         case SIGNAL_TYPE_DISPLAY_PORT:
5679         case SIGNAL_TYPE_DISPLAY_PORT_MST:
5680                 return DRM_MODE_CONNECTOR_DisplayPort;
5681         case SIGNAL_TYPE_DVI_DUAL_LINK:
5682         case SIGNAL_TYPE_DVI_SINGLE_LINK:
5683                 return DRM_MODE_CONNECTOR_DVID;
5684         case SIGNAL_TYPE_VIRTUAL:
5685                 return DRM_MODE_CONNECTOR_VIRTUAL;
5686
5687         default:
5688                 return DRM_MODE_CONNECTOR_Unknown;
5689         }
5690 }
5691
5692 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5693 {
5694         struct drm_encoder *encoder;
5695
5696         /* There is only one encoder per connector */
5697         drm_connector_for_each_possible_encoder(connector, encoder)
5698                 return encoder;
5699
5700         return NULL;
5701 }
5702
5703 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5704 {
5705         struct drm_encoder *encoder;
5706         struct amdgpu_encoder *amdgpu_encoder;
5707
5708         encoder = amdgpu_dm_connector_to_encoder(connector);
5709
5710         if (encoder == NULL)
5711                 return;
5712
5713         amdgpu_encoder = to_amdgpu_encoder(encoder);
5714
5715         amdgpu_encoder->native_mode.clock = 0;
5716
5717         if (!list_empty(&connector->probed_modes)) {
5718                 struct drm_display_mode *preferred_mode = NULL;
5719
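                /*
                 * The probed modes were just sorted by drm_mode_sort(), so a
                 * preferred mode, if any, is at the head of the list; only
                 * that first entry needs to be checked before breaking out.
                 */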
5720                 list_for_each_entry(preferred_mode,
5721                                     &connector->probed_modes,
5722                                     head) {
5723                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5724                                 amdgpu_encoder->native_mode = *preferred_mode;
5725
5726                         break;
5727                 }
5728
5729         }
5730 }
5731
5732 static struct drm_display_mode *
5733 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5734                              char *name,
5735                              int hdisplay, int vdisplay)
5736 {
5737         struct drm_device *dev = encoder->dev;
5738         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5739         struct drm_display_mode *mode = NULL;
5740         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5741
5742         mode = drm_mode_duplicate(dev, native_mode);
5743
5744         if (mode == NULL)
5745                 return NULL;
5746
5747         mode->hdisplay = hdisplay;
5748         mode->vdisplay = vdisplay;
5749         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5750         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5751
5752         return mode;
5753
5754 }
5755
5756 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5757                                                  struct drm_connector *connector)
5758 {
5759         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5760         struct drm_display_mode *mode = NULL;
5761         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5762         struct amdgpu_dm_connector *amdgpu_dm_connector =
5763                                 to_amdgpu_dm_connector(connector);
5764         int i;
5765         int n;
5766         struct mode_size {
5767                 char name[DRM_DISPLAY_MODE_LEN];
5768                 int w;
5769                 int h;
5770         } common_modes[] = {
5771                 {  "640x480",  640,  480},
5772                 {  "800x600",  800,  600},
5773                 { "1024x768", 1024,  768},
5774                 { "1280x720", 1280,  720},
5775                 { "1280x800", 1280,  800},
5776                 {"1280x1024", 1280, 1024},
5777                 { "1440x900", 1440,  900},
5778                 {"1680x1050", 1680, 1050},
5779                 {"1600x1200", 1600, 1200},
5780                 {"1920x1080", 1920, 1080},
5781                 {"1920x1200", 1920, 1200}
5782         };
5783
5784         n = ARRAY_SIZE(common_modes);
5785
5786         for (i = 0; i < n; i++) {
5787                 struct drm_display_mode *curmode = NULL;
5788                 bool mode_existed = false;
5789
5790                 if (common_modes[i].w > native_mode->hdisplay ||
5791                     common_modes[i].h > native_mode->vdisplay ||
5792                    (common_modes[i].w == native_mode->hdisplay &&
5793                     common_modes[i].h == native_mode->vdisplay))
5794                         continue;
5795
5796                 list_for_each_entry(curmode, &connector->probed_modes, head) {
5797                         if (common_modes[i].w == curmode->hdisplay &&
5798                             common_modes[i].h == curmode->vdisplay) {
5799                                 mode_existed = true;
5800                                 break;
5801                         }
5802                 }
5803
5804                 if (mode_existed)
5805                         continue;
5806
                mode = amdgpu_dm_create_common_mode(encoder,
                                common_modes[i].name, common_modes[i].w,
                                common_modes[i].h);
                if (!mode)
                        continue;

                drm_mode_probed_add(connector, mode);
                amdgpu_dm_connector->num_modes++;
5812         }
5813 }
5814
5815 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5816                                               struct edid *edid)
5817 {
5818         struct amdgpu_dm_connector *amdgpu_dm_connector =
5819                         to_amdgpu_dm_connector(connector);
5820
5821         if (edid) {
5822                 /* empty probed_modes */
5823                 INIT_LIST_HEAD(&connector->probed_modes);
5824                 amdgpu_dm_connector->num_modes =
5825                                 drm_add_edid_modes(connector, edid);
5826
                /*
                 * Sort the probed modes before calling
                 * amdgpu_dm_get_native_mode(), since an EDID can have more
                 * than one preferred mode and modes later in the probed list
                 * can be of a higher, preferred resolution. For example:
                 * 3840x2160 in the base EDID preferred timing, and 4096x2160
                 * as the preferred resolution in a DisplayID extension block
                 * that follows.
                 */
5835                 drm_mode_sort(&connector->probed_modes);
5836                 amdgpu_dm_get_native_mode(connector);
5837         } else {
5838                 amdgpu_dm_connector->num_modes = 0;
5839         }
5840 }
5841
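/*
 * .get_modes hook: prefer modes from a valid EDID and append the scaled
 * common modes; fall back to the no-EDID 640x480 set otherwise.
 */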
5842 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5843 {
5844         struct amdgpu_dm_connector *amdgpu_dm_connector =
5845                         to_amdgpu_dm_connector(connector);
5846         struct drm_encoder *encoder;
5847         struct edid *edid = amdgpu_dm_connector->edid;
5848
5849         encoder = amdgpu_dm_connector_to_encoder(connector);
5850
5851         if (!edid || !drm_edid_is_valid(edid)) {
5852                 amdgpu_dm_connector->num_modes =
5853                                 drm_add_modes_noedid(connector, 640, 480);
5854         } else {
5855                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5856                 amdgpu_dm_connector_add_common_modes(encoder, connector);
5857         }
5858         amdgpu_dm_fbc_init(connector);
5859
5860         return amdgpu_dm_connector->num_modes;
5861 }
5862
5863 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5864                                      struct amdgpu_dm_connector *aconnector,
5865                                      int connector_type,
5866                                      struct dc_link *link,
5867                                      int link_index)
5868 {
5869         struct amdgpu_device *adev = dm->ddev->dev_private;
5870
5871         /*
5872          * Some of the properties below require access to state, like bpc.
5873          * Allocate some default initial connector state with our reset helper.
5874          */
5875         if (aconnector->base.funcs->reset)
5876                 aconnector->base.funcs->reset(&aconnector->base);
5877
5878         aconnector->connector_id = link_index;
5879         aconnector->dc_link = link;
5880         aconnector->base.interlace_allowed = false;
5881         aconnector->base.doublescan_allowed = false;
5882         aconnector->base.stereo_allowed = false;
5883         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5884         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5885         aconnector->audio_inst = -1;
5886         mutex_init(&aconnector->hpd_lock);
5887
        /*
         * Configure HPD hot-plug support. The default value of
         * connector->polled is 0, which means HPD hot plug is not supported.
         */
5892         switch (connector_type) {
5893         case DRM_MODE_CONNECTOR_HDMIA:
5894                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.hdmi_ycbcr420_supported;
5897                 break;
5898         case DRM_MODE_CONNECTOR_DisplayPort:
5899                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.dp_ycbcr420_supported;
5902                 break;
5903         case DRM_MODE_CONNECTOR_DVID:
5904                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5905                 break;
5906         default:
5907                 break;
5908         }
5909
5910         drm_object_attach_property(&aconnector->base.base,
5911                                 dm->ddev->mode_config.scaling_mode_property,
5912                                 DRM_MODE_SCALE_NONE);
5913
5914         drm_object_attach_property(&aconnector->base.base,
5915                                 adev->mode_info.underscan_property,
5916                                 UNDERSCAN_OFF);
5917         drm_object_attach_property(&aconnector->base.base,
5918                                 adev->mode_info.underscan_hborder_property,
5919                                 0);
5920         drm_object_attach_property(&aconnector->base.base,
5921                                 adev->mode_info.underscan_vborder_property,
5922                                 0);
5923
5924         drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5925
5926         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5927         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5928         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5929
5930         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5931             dc_is_dmcu_initialized(adev->dm.dc)) {
5932                 drm_object_attach_property(&aconnector->base.base,
5933                                 adev->mode_info.abm_level_property, 0);
5934         }
5935
5936         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5937             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5938             connector_type == DRM_MODE_CONNECTOR_eDP) {
5939                 drm_object_attach_property(
5940                         &aconnector->base.base,
5941                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
5942
5943                 drm_connector_attach_vrr_capable_property(
5944                         &aconnector->base);
5945 #ifdef CONFIG_DRM_AMD_DC_HDCP
5946                 if (adev->dm.hdcp_workqueue)
5947                         drm_connector_attach_content_protection_property(&aconnector->base, true);
5948 #endif
5949         }
5950 }
5951
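/*
 * Translate a Linux i2c_msg transfer into a DC i2c_command and submit it
 * over this connector's DDC channel.
 */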
5952 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5953                               struct i2c_msg *msgs, int num)
5954 {
5955         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5956         struct ddc_service *ddc_service = i2c->ddc_service;
5957         struct i2c_command cmd;
5958         int i;
5959         int result = -EIO;
5960
5961         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5962
5963         if (!cmd.payloads)
5964                 return result;
5965
5966         cmd.number_of_payloads = num;
5967         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5968         cmd.speed = 100;
5969
5970         for (i = 0; i < num; i++) {
5971                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5972                 cmd.payloads[i].address = msgs[i].addr;
5973                 cmd.payloads[i].length = msgs[i].len;
5974                 cmd.payloads[i].data = msgs[i].buf;
5975         }
5976
5977         if (dc_submit_i2c(
5978                         ddc_service->ctx->dc,
5979                         ddc_service->ddc_pin->hw_info.ddc_channel,
5980                         &cmd))
5981                 result = num;
5982
5983         kfree(cmd.payloads);
5984         return result;
5985 }
5986
5987 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
5988 {
5989         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
5990 }
5991
5992 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
5993         .master_xfer = amdgpu_dm_i2c_xfer,
5994         .functionality = amdgpu_dm_i2c_func,
5995 };
5996
5997 static struct amdgpu_i2c_adapter *
5998 create_i2c(struct ddc_service *ddc_service,
5999            int link_index,
6000            int *res)
6001 {
6002         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6003         struct amdgpu_i2c_adapter *i2c;
6004
6005         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6006         if (!i2c)
6007                 return NULL;
6008         i2c->base.owner = THIS_MODULE;
6009         i2c->base.class = I2C_CLASS_DDC;
6010         i2c->base.dev.parent = &adev->pdev->dev;
6011         i2c->base.algo = &amdgpu_dm_i2c_algo;
6012         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6013         i2c_set_adapdata(&i2c->base, i2c);
6014         i2c->ddc_service = ddc_service;
6015         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6016
6017         return i2c;
6018 }
6019
6020
6021 /*
6022  * Note: this function assumes that dc_link_detect() was called for the
6023  * dc_link which will be represented by this aconnector.
6024  */
6025 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6026                                     struct amdgpu_dm_connector *aconnector,
6027                                     uint32_t link_index,
6028                                     struct amdgpu_encoder *aencoder)
6029 {
6030         int res = 0;
6031         int connector_type;
6032         struct dc *dc = dm->dc;
6033         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6034         struct amdgpu_i2c_adapter *i2c;
6035
6036         link->priv = aconnector;
6037
6038         DRM_DEBUG_DRIVER("%s()\n", __func__);
6039
6040         i2c = create_i2c(link->ddc, link->link_index, &res);
6041         if (!i2c) {
6042                 DRM_ERROR("Failed to create i2c adapter data\n");
6043                 return -ENOMEM;
6044         }
6045
6046         aconnector->i2c = i2c;
6047         res = i2c_add_adapter(&i2c->base);
6048
6049         if (res) {
6050                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6051                 goto out_free;
6052         }
6053
6054         connector_type = to_drm_connector_type(link->connector_signal);
6055
6056         res = drm_connector_init_with_ddc(
6057                         dm->ddev,
6058                         &aconnector->base,
6059                         &amdgpu_dm_connector_funcs,
6060                         connector_type,
6061                         &i2c->base);
6062
6063         if (res) {
6064                 DRM_ERROR("connector_init failed\n");
6065                 aconnector->connector_id = -1;
6066                 goto out_free;
6067         }
6068
6069         drm_connector_helper_add(
6070                         &aconnector->base,
6071                         &amdgpu_dm_connector_helper_funcs);
6072
6073         amdgpu_dm_connector_init_helper(
6074                 dm,
6075                 aconnector,
6076                 connector_type,
6077                 link,
6078                 link_index);
6079
6080         drm_connector_attach_encoder(
6081                 &aconnector->base, &aencoder->base);
6082
        if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
            connector_type == DRM_MODE_CONNECTOR_eDP)
6085                 amdgpu_dm_initialize_dp_connector(dm, aconnector);
6086
6087 out_free:
6088         if (res) {
6089                 kfree(i2c);
6090                 aconnector->i2c = NULL;
6091         }
6092         return res;
6093 }
6094
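/*
 * Return a bitmask with one bit set per available CRTC (at most six), i.e.
 * the set of CRTCs an encoder may be routed to.
 */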
6095 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6096 {
6097         switch (adev->mode_info.num_crtc) {
6098         case 1:
6099                 return 0x1;
6100         case 2:
6101                 return 0x3;
6102         case 3:
6103                 return 0x7;
6104         case 4:
6105                 return 0xf;
6106         case 5:
6107                 return 0x1f;
6108         case 6:
6109         default:
6110                 return 0x3f;
6111         }
6112 }
6113
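/* One encoder is created per link; it may drive any of the CRTCs. */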
6114 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6115                                   struct amdgpu_encoder *aencoder,
6116                                   uint32_t link_index)
6117 {
6118         struct amdgpu_device *adev = dev->dev_private;
6119
6120         int res = drm_encoder_init(dev,
6121                                    &aencoder->base,
6122                                    &amdgpu_dm_encoder_funcs,
6123                                    DRM_MODE_ENCODER_TMDS,
6124                                    NULL);
6125
6126         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6127
6128         if (!res)
6129                 aencoder->encoder_id = link_index;
6130         else
6131                 aencoder->encoder_id = -1;
6132
6133         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6134
6135         return res;
6136 }
6137
6138 static void manage_dm_interrupts(struct amdgpu_device *adev,
6139                                  struct amdgpu_crtc *acrtc,
6140                                  bool enable)
6141 {
        /*
         * This is not a correct translation, but it works as long as the
         * VBLANK irq constant is the same as the PFLIP one.
         */
6146         int irq_type =
6147                 amdgpu_display_crtc_idx_to_irq_type(
6148                         adev,
6149                         acrtc->crtc_id);
6150
6151         if (enable) {
6152                 drm_crtc_vblank_on(&acrtc->base);
6153                 amdgpu_irq_get(
6154                         adev,
6155                         &adev->pageflip_irq,
6156                         irq_type);
        } else {
6159                 amdgpu_irq_put(
6160                         adev,
6161                         &adev->pageflip_irq,
6162                         irq_type);
6163                 drm_crtc_vblank_off(&acrtc->base);
6164         }
6165 }
6166
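/*
 * Compare the scaling/underscan settings of two connector states; any
 * difference means the stream's scaling has to be reprogrammed.
 */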
6167 static bool
6168 is_scaling_state_different(const struct dm_connector_state *dm_state,
6169                            const struct dm_connector_state *old_dm_state)
6170 {
6171         if (dm_state->scaling != old_dm_state->scaling)
6172                 return true;
6173         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6174                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6175                         return true;
6176         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6177                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6178                         return true;
6179         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6180                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6181                 return true;
6182         return false;
6183 }
6184
6185 #ifdef CONFIG_DRM_AMD_DC_HDCP
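/*
 * Decide whether HDCP needs to be enabled, disabled, or left alone for this
 * connector, normalizing a few transient state transitions along the way.
 */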
6186 static bool is_content_protection_different(struct drm_connector_state *state,
6187                                             const struct drm_connector_state *old_state,
6188                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6189 {
6190         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6191
6192         if (old_state->hdcp_content_type != state->hdcp_content_type &&
6193             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6194                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6195                 return true;
6196         }
6197
        /* CP is being re-enabled, ignore this */
6199         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6200             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6201                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6202                 return false;
6203         }
6204
6205         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6206         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6207             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6208                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6209
        /*
         * Check that something is actually connected and enabled; otherwise
         * we would start HDCP with nothing connected/enabled (hot-plug,
         * headless S3, DPMS).
         */
6213         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6214             aconnector->dc_sink != NULL)
6215                 return true;
6216
6217         if (old_state->content_protection == state->content_protection)
6218                 return false;
6219
6220         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6221                 return true;
6222
6223         return false;
6224 }
6225
6226 #endif
6227 static void remove_stream(struct amdgpu_device *adev,
6228                           struct amdgpu_crtc *acrtc,
6229                           struct dc_stream_state *stream)
6230 {
        /* This is the update-mode case: just mark the CRTC as disabled */
6232
6233         acrtc->otg_inst = -1;
6234         acrtc->enabled = false;
6235 }
6236
6237 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6238                                struct dc_cursor_position *position)
6239 {
6240         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6241         int x, y;
6242         int xorigin = 0, yorigin = 0;
6243
6244         position->enable = false;
6245         position->x = 0;
6246         position->y = 0;
6247
6248         if (!crtc || !plane->state->fb)
6249                 return 0;
6250
6251         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6252             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6253                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6254                           __func__,
6255                           plane->state->crtc_w,
6256                           plane->state->crtc_h);
6257                 return -EINVAL;
6258         }
6259
6260         x = plane->state->crtc_x;
6261         y = plane->state->crtc_y;
6262
6263         if (x <= -amdgpu_crtc->max_cursor_width ||
6264             y <= -amdgpu_crtc->max_cursor_height)
6265                 return 0;
6266
6267         if (crtc->primary->state) {
                /* avivo cursors are offset into the total surface */
6269                 x += crtc->primary->state->src_x >> 16;
6270                 y += crtc->primary->state->src_y >> 16;
6271         }
6272
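        /*
         * DC cannot take a negative cursor position; when the cursor hangs
         * off the top/left edge, clamp the position to 0 and shift the
         * hotspot instead.
         */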
6273         if (x < 0) {
6274                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6275                 x = 0;
6276         }
6277         if (y < 0) {
6278                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6279                 y = 0;
6280         }
6281         position->enable = true;
6282         position->x = x;
6283         position->y = y;
6284         position->x_hotspot = xorigin;
6285         position->y_hotspot = yorigin;
6286
6287         return 0;
6288 }
6289
6290 static void handle_cursor_update(struct drm_plane *plane,
6291                                  struct drm_plane_state *old_plane_state)
6292 {
6293         struct amdgpu_device *adev = plane->dev->dev_private;
6294         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6295         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6296         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6297         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6298         uint64_t address = afb ? afb->address : 0;
6299         struct dc_cursor_position position;
6300         struct dc_cursor_attributes attributes;
6301         int ret;
6302
6303         if (!plane->state->fb && !old_plane_state->fb)
6304                 return;
6305
6306         DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6307                          __func__,
6308                          amdgpu_crtc->crtc_id,
6309                          plane->state->crtc_w,
6310                          plane->state->crtc_h);
6311
6312         ret = get_cursor_position(plane, crtc, &position);
6313         if (ret)
6314                 return;
6315
6316         if (!position.enable) {
6317                 /* turn off cursor */
6318                 if (crtc_state && crtc_state->stream) {
6319                         mutex_lock(&adev->dm.dc_lock);
6320                         dc_stream_set_cursor_position(crtc_state->stream,
6321                                                       &position);
6322                         mutex_unlock(&adev->dm.dc_lock);
6323                 }
6324                 return;
6325         }
6326
6327         amdgpu_crtc->cursor_width = plane->state->crtc_w;
6328         amdgpu_crtc->cursor_height = plane->state->crtc_h;
6329
6330         memset(&attributes, 0, sizeof(attributes));
6331         attributes.address.high_part = upper_32_bits(address);
6332         attributes.address.low_part  = lower_32_bits(address);
6333         attributes.width             = plane->state->crtc_w;
6334         attributes.height            = plane->state->crtc_h;
6335         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6336         attributes.rotation_angle    = 0;
6337         attributes.attribute_flags.value = 0;
6338
6339         attributes.pitch = attributes.width;
6340
6341         if (crtc_state->stream) {
6342                 mutex_lock(&adev->dm.dc_lock);
6343                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6344                                                          &attributes))
6345                         DRM_ERROR("DC failed to set cursor attributes\n");
6346
6347                 if (!dc_stream_set_cursor_position(crtc_state->stream,
6348                                                    &position))
6349                         DRM_ERROR("DC failed to set cursor position\n");
6350                 mutex_unlock(&adev->dm.dc_lock);
6351         }
6352 }
6353
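/*
 * Hand the pending pageflip event over to the IRQ handler. Must be called
 * with the device's event_lock held.
 */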
6354 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6355 {
6357         assert_spin_locked(&acrtc->base.dev->event_lock);
6358         WARN_ON(acrtc->event);
6359
6360         acrtc->event = acrtc->base.state->event;
6361
6362         /* Set the flip status */
6363         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6364
6365         /* Mark this event as consumed */
6366         acrtc->base.state->event = NULL;
6367
6368         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6369                                                  acrtc->crtc_id);
6370 }
6371
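/*
 * Rebuild the VRR adjust ranges and infopacket for a stream around a flip,
 * recording on the CRTC state whether either of them changed.
 */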
6372 static void update_freesync_state_on_stream(
6373         struct amdgpu_display_manager *dm,
6374         struct dm_crtc_state *new_crtc_state,
6375         struct dc_stream_state *new_stream,
6376         struct dc_plane_state *surface,
6377         u32 flip_timestamp_in_us)
6378 {
6379         struct mod_vrr_params vrr_params;
6380         struct dc_info_packet vrr_infopacket = {0};
6381         struct amdgpu_device *adev = dm->adev;
6382         unsigned long flags;
6383
6384         if (!new_stream)
6385                 return;
6386
6387         /*
6388          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6389          * For now it's sufficient to just guard against these conditions.
6390          */
6391
6392         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6393                 return;
6394
6395         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6396         vrr_params = new_crtc_state->vrr_params;
6397
6398         if (surface) {
6399                 mod_freesync_handle_preflip(
6400                         dm->freesync_module,
6401                         surface,
6402                         new_stream,
6403                         flip_timestamp_in_us,
6404                         &vrr_params);
6405
6406                 if (adev->family < AMDGPU_FAMILY_AI &&
6407                     amdgpu_dm_vrr_active(new_crtc_state)) {
6408                         mod_freesync_handle_v_update(dm->freesync_module,
6409                                                      new_stream, &vrr_params);
6410
6411                         /* Need to call this before the frame ends. */
6412                         dc_stream_adjust_vmin_vmax(dm->dc,
6413                                                    new_crtc_state->stream,
6414                                                    &vrr_params.adjust);
6415                 }
6416         }
6417
6418         mod_freesync_build_vrr_infopacket(
6419                 dm->freesync_module,
6420                 new_stream,
6421                 &vrr_params,
6422                 PACKET_TYPE_VRR,
6423                 TRANSFER_FUNC_UNKNOWN,
6424                 &vrr_infopacket);
6425
6426         new_crtc_state->freesync_timing_changed |=
6427                 (memcmp(&new_crtc_state->vrr_params.adjust,
6428                         &vrr_params.adjust,
6429                         sizeof(vrr_params.adjust)) != 0);
6430
6431         new_crtc_state->freesync_vrr_info_changed |=
6432                 (memcmp(&new_crtc_state->vrr_infopacket,
6433                         &vrr_infopacket,
6434                         sizeof(vrr_infopacket)) != 0);
6435
6436         new_crtc_state->vrr_params = vrr_params;
6437         new_crtc_state->vrr_infopacket = vrr_infopacket;
6438
6439         new_stream->adjust = new_crtc_state->vrr_params.adjust;
6440         new_stream->vrr_infopacket = vrr_infopacket;
6441
6442         if (new_crtc_state->freesync_vrr_info_changed)
                DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
6444                               new_crtc_state->base.crtc->base.id,
6445                               (int)new_crtc_state->base.vrr_enabled,
6446                               (int)vrr_params.state);
6447
6448         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6449 }
6450
6451 static void pre_update_freesync_state_on_stream(
6452         struct amdgpu_display_manager *dm,
6453         struct dm_crtc_state *new_crtc_state)
6454 {
6455         struct dc_stream_state *new_stream = new_crtc_state->stream;
6456         struct mod_vrr_params vrr_params;
6457         struct mod_freesync_config config = new_crtc_state->freesync_config;
6458         struct amdgpu_device *adev = dm->adev;
6459         unsigned long flags;
6460
6461         if (!new_stream)
6462                 return;
6463
6464         /*
6465          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6466          * For now it's sufficient to just guard against these conditions.
6467          */
6468         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6469                 return;
6470
6471         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6472         vrr_params = new_crtc_state->vrr_params;
6473
6474         if (new_crtc_state->vrr_supported &&
6475             config.min_refresh_in_uhz &&
6476             config.max_refresh_in_uhz) {
6477                 config.state = new_crtc_state->base.vrr_enabled ?
6478                         VRR_STATE_ACTIVE_VARIABLE :
6479                         VRR_STATE_INACTIVE;
6480         } else {
6481                 config.state = VRR_STATE_UNSUPPORTED;
6482         }
6483
6484         mod_freesync_build_vrr_params(dm->freesync_module,
6485                                       new_stream,
6486                                       &config, &vrr_params);
6487
6488         new_crtc_state->freesync_timing_changed |=
6489                 (memcmp(&new_crtc_state->vrr_params.adjust,
6490                         &vrr_params.adjust,
6491                         sizeof(vrr_params.adjust)) != 0);
6492
6493         new_crtc_state->vrr_params = vrr_params;
6494         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6495 }
6496
6497 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6498                                             struct dm_crtc_state *new_state)
6499 {
6500         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6501         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6502
6503         if (!old_vrr_active && new_vrr_active) {
                /* Transition VRR inactive -> active:
                 * While VRR is active, we must not disable the vblank irq, as
                 * a re-enable after a disable could compute bogus vblank/pflip
                 * timestamps if the re-enable happens inside the display
                 * front porch.
                 *
                 * We also need the vupdate irq for the actual core vblank
                 * handling at end of vblank.
                 */
6512                 dm_set_vupdate_irq(new_state->base.crtc, true);
6513                 drm_crtc_vblank_get(new_state->base.crtc);
6514                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6515                                  __func__, new_state->base.crtc->base.id);
6516         } else if (old_vrr_active && !new_vrr_active) {
6517                 /* Transition VRR active -> inactive:
6518                  * Allow vblank irq disable again for fixed refresh rate.
6519                  */
6520                 dm_set_vupdate_irq(new_state->base.crtc, false);
6521                 drm_crtc_vblank_put(new_state->base.crtc);
6522                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6523                                  __func__, new_state->base.crtc->base.id);
6524         }
6525 }
6526
6527 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6528 {
6529         struct drm_plane *plane;
6530         struct drm_plane_state *old_plane_state, *new_plane_state;
6531         int i;
6532
6533         /*
6534          * TODO: Make this per-stream so we don't issue redundant updates for
6535          * commits with multiple streams.
6536          */
6537         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6538                                        new_plane_state, i)
6539                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6540                         handle_cursor_update(plane, old_plane_state);
6541 }
6542
6543 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6544                                     struct dc_state *dc_state,
6545                                     struct drm_device *dev,
6546                                     struct amdgpu_display_manager *dm,
6547                                     struct drm_crtc *pcrtc,
6548                                     bool wait_for_vblank)
6549 {
6550         uint32_t i;
6551         uint64_t timestamp_ns;
6552         struct drm_plane *plane;
6553         struct drm_plane_state *old_plane_state, *new_plane_state;
6554         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6555         struct drm_crtc_state *new_pcrtc_state =
6556                         drm_atomic_get_new_crtc_state(state, pcrtc);
6557         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6558         struct dm_crtc_state *dm_old_crtc_state =
6559                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6560         int planes_count = 0, vpos, hpos;
6561         long r;
6562         unsigned long flags;
6563         struct amdgpu_bo *abo;
6564         uint64_t tiling_flags;
6565         uint32_t target_vblank, last_flip_vblank;
6566         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6567         bool pflip_present = false;
6568         struct {
6569                 struct dc_surface_update surface_updates[MAX_SURFACES];
6570                 struct dc_plane_info plane_infos[MAX_SURFACES];
6571                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6572                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6573                 struct dc_stream_update stream_update;
6574         } *bundle;
6575
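        /* The update bundle is likely too large for the stack; heap-allocate it. */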
6576         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6577
6578         if (!bundle) {
6579                 dm_error("Failed to allocate update bundle\n");
6580                 goto cleanup;
6581         }
6582
6583         /*
6584          * Disable the cursor first if we're disabling all the planes.
6585          * It'll remain on the screen after the planes are re-enabled
6586          * if we don't.
6587          */
6588         if (acrtc_state->active_planes == 0)
6589                 amdgpu_dm_commit_cursors(state);
6590
6591         /* update planes when needed */
6592         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6593                 struct drm_crtc *crtc = new_plane_state->crtc;
6594                 struct drm_crtc_state *new_crtc_state;
6595                 struct drm_framebuffer *fb = new_plane_state->fb;
6596                 bool plane_needs_flip;
6597                 struct dc_plane_state *dc_plane;
6598                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6599
6600                 /* Cursor plane is handled after stream updates */
6601                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6602                         continue;
6603
6604                 if (!fb || !crtc || pcrtc != crtc)
6605                         continue;
6606
6607                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6608                 if (!new_crtc_state->active)
6609                         continue;
6610
6611                 dc_plane = dm_new_plane_state->dc_state;
6612
6613                 bundle->surface_updates[planes_count].surface = dc_plane;
6614                 if (new_pcrtc_state->color_mgmt_changed) {
6615                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6616                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6617                 }
6618
6619                 fill_dc_scaling_info(new_plane_state,
6620                                      &bundle->scaling_infos[planes_count]);
6621
6622                 bundle->surface_updates[planes_count].scaling_info =
6623                         &bundle->scaling_infos[planes_count];
6624
6625                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6626
6627                 pflip_present = pflip_present || plane_needs_flip;
6628
6629                 if (!plane_needs_flip) {
6630                         planes_count += 1;
6631                         continue;
6632                 }
6633
6634                 abo = gem_to_amdgpu_bo(fb->obj[0]);
6635
6636                 /*
6637                  * Wait for all fences on this FB. Do limited wait to avoid
6638                  * deadlock during GPU reset when this fence will not signal
6639                  * but we hold reservation lock for the BO.
6640                  */
6641                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6642                                                         false,
6643                                                         msecs_to_jiffies(5000));
6644                 if (unlikely(r <= 0))
                        DRM_ERROR("Waiting for fences timed out!\n");
6646
                /*
                 * TODO: amdgpu_bo_reserve() might fail, so it is better not
                 * to rely on it here; wait explicitly on the fences instead.
                 * In general this should only be done for a blocking commit,
                 * as per the framework helpers.
                 */
6653                 r = amdgpu_bo_reserve(abo, true);
6654                 if (unlikely(r != 0))
6655                         DRM_ERROR("failed to reserve buffer before flip\n");
6656
6657                 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6658
6659                 amdgpu_bo_unreserve(abo);
6660
6661                 fill_dc_plane_info_and_addr(
6662                         dm->adev, new_plane_state, tiling_flags,
6663                         &bundle->plane_infos[planes_count],
6664                         &bundle->flip_addrs[planes_count].address);
6665
6666                 bundle->surface_updates[planes_count].plane_info =
6667                         &bundle->plane_infos[planes_count];
6668
                /*
                 * Only allow immediate flips for fast updates that don't
                 * change FB pitch, DCC state, rotation or mirroring.
                 */
6673                 bundle->flip_addrs[planes_count].flip_immediate =
6674                         crtc->state->async_flip &&
6675                         acrtc_state->update_type == UPDATE_TYPE_FAST;
6676
6677                 timestamp_ns = ktime_get_ns();
6678                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6679                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6680                 bundle->surface_updates[planes_count].surface = dc_plane;
6681
6682                 if (!bundle->surface_updates[planes_count].surface) {
6683                         DRM_ERROR("No surface for CRTC: id=%d\n",
6684                                         acrtc_attach->crtc_id);
6685                         continue;
6686                 }
6687
6688                 if (plane == pcrtc->primary)
6689                         update_freesync_state_on_stream(
6690                                 dm,
6691                                 acrtc_state,
6692                                 acrtc_state->stream,
6693                                 dc_plane,
6694                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6695
6696                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6697                                  __func__,
6698                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6699                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6700
6701                 planes_count += 1;
6702
6703         }
6704
6705         if (pflip_present) {
6706                 if (!vrr_active) {
6707                         /* Use old throttling in non-vrr fixed refresh rate mode
6708                          * to keep flip scheduling based on target vblank counts
6709                          * working in a backwards compatible way, e.g., for
6710                          * clients using the GLX_OML_sync_control extension or
6711                          * DRI3/Present extension with defined target_msc.
6712                          */
6713                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
                } else {
6716                         /* For variable refresh rate mode only:
6717                          * Get vblank of last completed flip to avoid > 1 vrr
6718                          * flips per video frame by use of throttling, but allow
6719                          * flip programming anywhere in the possibly large
6720                          * variable vrr vblank interval for fine-grained flip
6721                          * timing control and more opportunity to avoid stutter
6722                          * on late submission of flips.
6723                          */
6724                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6725                         last_flip_vblank = acrtc_attach->last_flip_vblank;
6726                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6727                 }
6728
6729                 target_vblank = last_flip_vblank + wait_for_vblank;
6730
6731                 /*
6732                  * Wait until we're out of the vertical blank period before the one
6733                  * targeted by the flip
6734                  */
6735                 while ((acrtc_attach->enabled &&
6736                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6737                                                             0, &vpos, &hpos, NULL,
6738                                                             NULL, &pcrtc->hwmode)
6739                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6740                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6741                         (int)(target_vblank -
6742                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6743                         usleep_range(1000, 1100);
6744                 }
6745
6746                 if (acrtc_attach->base.state->event) {
6747                         drm_crtc_vblank_get(pcrtc);
6748
6749                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6750
6751                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6752                         prepare_flip_isr(acrtc_attach);
6753
6754                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6755                 }
6756
6757                 if (acrtc_state->stream) {
6758                         if (acrtc_state->freesync_vrr_info_changed)
6759                                 bundle->stream_update.vrr_infopacket =
6760                                         &acrtc_state->stream->vrr_infopacket;
6761                 }
6762         }
6763
6764         /* Update the planes if changed or disable if we don't have any. */
6765         if ((planes_count || acrtc_state->active_planes == 0) &&
6766                 acrtc_state->stream) {
6767                 bundle->stream_update.stream = acrtc_state->stream;
6768                 if (new_pcrtc_state->mode_changed) {
6769                         bundle->stream_update.src = acrtc_state->stream->src;
6770                         bundle->stream_update.dst = acrtc_state->stream->dst;
6771                 }
6772
6773                 if (new_pcrtc_state->color_mgmt_changed) {
6774                         /*
6775                          * TODO: This isn't fully correct since we've actually
6776                          * already modified the stream in place.
6777                          */
6778                         bundle->stream_update.gamut_remap =
6779                                 &acrtc_state->stream->gamut_remap_matrix;
6780                         bundle->stream_update.output_csc_transform =
6781                                 &acrtc_state->stream->csc_color_matrix;
6782                         bundle->stream_update.out_transfer_func =
6783                                 acrtc_state->stream->out_transfer_func;
6784                 }
6785
6786                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
6787                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6788                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
6789
6790                 /*
6791                  * If FreeSync state on the stream has changed then we need to
6792                  * re-adjust the min/max bounds now that DC doesn't handle this
6793                  * as part of commit.
6794                  */
6795                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6796                     amdgpu_dm_vrr_active(acrtc_state)) {
6797                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6798                         dc_stream_adjust_vmin_vmax(
6799                                 dm->dc, acrtc_state->stream,
6800                                 &acrtc_state->vrr_params.adjust);
6801                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6802                 }
6803                 mutex_lock(&dm->dc_lock);
6804                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6805                                 acrtc_state->stream->link->psr_allow_active)
6806                         amdgpu_dm_psr_disable(acrtc_state->stream);
6807
6808                 dc_commit_updates_for_stream(dm->dc,
6809                                                      bundle->surface_updates,
6810                                                      planes_count,
6811                                                      acrtc_state->stream,
6812                                                      &bundle->stream_update,
6813                                                      dc_state);
6814
6815                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6816                                                 acrtc_state->stream->psr_version &&
6817                                                 !acrtc_state->stream->link->psr_feature_enabled)
6818                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
6819                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6820                                                 acrtc_state->stream->link->psr_feature_enabled &&
6821                                                 !acrtc_state->stream->link->psr_allow_active) {
6822                         amdgpu_dm_psr_enable(acrtc_state->stream);
6823                 }
6824
6825                 mutex_unlock(&dm->dc_lock);
6826         }
6827
6828         /*
6829          * Update cursor state *after* programming all the planes.
6830          * This avoids redundant programming in the case where we're going
6831          * to be disabling a single plane - those pipes are being disabled.
6832          */
6833         if (acrtc_state->active_planes)
6834                 amdgpu_dm_commit_cursors(state);
6835
6836 cleanup:
6837         kfree(bundle);
6838 }
6839
6840 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6841                                    struct drm_atomic_state *state)
6842 {
6843         struct amdgpu_device *adev = dev->dev_private;
6844         struct amdgpu_dm_connector *aconnector;
6845         struct drm_connector *connector;
6846         struct drm_connector_state *old_con_state, *new_con_state;
6847         struct drm_crtc_state *new_crtc_state;
6848         struct dm_crtc_state *new_dm_crtc_state;
6849         const struct dc_stream_status *status;
6850         int i, inst;
6851
6852         /* Notify device removals. */
6853         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6854                 if (old_con_state->crtc != new_con_state->crtc) {
6855                         /* CRTC changes require notification. */
6856                         goto notify;
6857                 }
6858
6859                 if (!new_con_state->crtc)
6860                         continue;
6861
6862                 new_crtc_state = drm_atomic_get_new_crtc_state(
6863                         state, new_con_state->crtc);
6864
6865                 if (!new_crtc_state)
6866                         continue;
6867
6868                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6869                         continue;
6870
6871         notify:
6872                 aconnector = to_amdgpu_dm_connector(connector);
6873
6874                 mutex_lock(&adev->dm.audio_lock);
6875                 inst = aconnector->audio_inst;
6876                 aconnector->audio_inst = -1;
6877                 mutex_unlock(&adev->dm.audio_lock);
6878
6879                 amdgpu_dm_audio_eld_notify(adev, inst);
6880         }
6881
6882         /* Notify audio device additions. */
6883         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6884                 if (!new_con_state->crtc)
6885                         continue;
6886
6887                 new_crtc_state = drm_atomic_get_new_crtc_state(
6888                         state, new_con_state->crtc);
6889
6890                 if (!new_crtc_state)
6891                         continue;
6892
6893                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6894                         continue;
6895
6896                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6897                 if (!new_dm_crtc_state->stream)
6898                         continue;
6899
6900                 status = dc_stream_get_status(new_dm_crtc_state->stream);
6901                 if (!status)
6902                         continue;
6903
6904                 aconnector = to_amdgpu_dm_connector(connector);
6905
6906                 mutex_lock(&adev->dm.audio_lock);
6907                 inst = status->audio_inst;
6908                 aconnector->audio_inst = inst;
6909                 mutex_unlock(&adev->dm.audio_lock);
6910
6911                 amdgpu_dm_audio_eld_notify(adev, inst);
6912         }
6913 }
6914
6915 /*
6916  * Enable interrupts on CRTCs that are newly active, undergone
6917  * a modeset, or have active planes again.
6918  *
6919  * Done in two passes, based on the for_modeset flag:
6920  * Pass 1: For CRTCs going through modeset
6921  * Pass 2: For CRTCs going from 0 to n active planes
6922  *
6923  * Interrupts can only be enabled after the planes are programmed,
6924  * so this requires a two-pass approach since we don't want to
6925  * just defer the interrupts until after commit planes every time.
6926  */
6927 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6928                                              struct drm_atomic_state *state,
6929                                              bool for_modeset)
6930 {
6931         struct amdgpu_device *adev = dev->dev_private;
6932         struct drm_crtc *crtc;
6933         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6934         int i;
6935 #ifdef CONFIG_DEBUG_FS
6936         enum amdgpu_dm_pipe_crc_source source;
6937 #endif
6938
6939         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6940                                       new_crtc_state, i) {
6941                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6942                 struct dm_crtc_state *dm_new_crtc_state =
6943                         to_dm_crtc_state(new_crtc_state);
6944                 struct dm_crtc_state *dm_old_crtc_state =
6945                         to_dm_crtc_state(old_crtc_state);
6946                 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6947                 bool run_pass;
6948
6949                 run_pass = (for_modeset && modeset) ||
6950                            (!for_modeset && !modeset &&
6951                             !dm_old_crtc_state->interrupts_enabled);
6952
6953                 if (!run_pass)
6954                         continue;
6955
6956                 if (!dm_new_crtc_state->interrupts_enabled)
6957                         continue;
6958
6959                 manage_dm_interrupts(adev, acrtc, true);
6960
6961 #ifdef CONFIG_DEBUG_FS
6962                 /* The stream has changed so CRC capture needs to re-enabled. */
6963                 source = dm_new_crtc_state->crc_src;
6964                 if (amdgpu_dm_is_valid_crc_source(source)) {
6965                         amdgpu_dm_crtc_configure_crc_source(
6966                                 crtc, dm_new_crtc_state,
6967                                 dm_new_crtc_state->crc_src);
6968                 }
6969 #endif
6970         }
6971 }
6972
6973 /*
6974  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6975  * @crtc_state: the DRM CRTC state
6976  * @stream_state: the DC stream state.
6977  *
6978  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6979  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6980  */
6981 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6982                                                 struct dc_stream_state *stream_state)
6983 {
6984         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
6985 }
6986
6987 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
6988                                    struct drm_atomic_state *state,
6989                                    bool nonblock)
6990 {
6991         struct drm_crtc *crtc;
6992         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6993         struct amdgpu_device *adev = dev->dev_private;
6994         int i;
6995
6996         /*
6997          * We evade vblank and pflip interrupts on CRTCs that are undergoing
6998          * a modeset, being disabled, or have no active planes.
6999          *
7000          * It's done in atomic commit rather than commit tail for now since
7001          * some of these interrupt handlers access the current CRTC state and
7002          * potentially the stream pointer itself.
7003          *
7004          * Since the atomic state is swapped within atomic commit and not within
7005          * commit tail this would leave to new state (that hasn't been committed yet)
7006          * being accesssed from within the handlers.
7007          *
7008          * TODO: Fix this so we can do this in commit tail and not have to block
7009          * in atomic check.
7010          */
7011         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7012                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7013                 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7014                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7015
7016                 if (dm_old_crtc_state->interrupts_enabled &&
7017                     (!dm_new_crtc_state->interrupts_enabled ||
7018                      drm_atomic_crtc_needs_modeset(new_crtc_state)))
7019                         manage_dm_interrupts(adev, acrtc, false);
7020         }
7021         /*
7022          * Add check here for SoC's that support hardware cursor plane, to
7023          * unset legacy_cursor_update
7024          */
7025
7026         return drm_atomic_helper_commit(dev, state, nonblock);
7027
7028         /*TODO Handle EINTR, reenable IRQ*/
7029 }
7030
7031 /**
7032  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7033  * @state: The atomic state to commit
7034  *
7035  * This will tell DC to commit the constructed DC state from atomic_check,
7036  * programming the hardware. Any failures here implies a hardware failure, since
7037  * atomic check should have filtered anything non-kosher.
7038  */
7039 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7040 {
7041         struct drm_device *dev = state->dev;
7042         struct amdgpu_device *adev = dev->dev_private;
7043         struct amdgpu_display_manager *dm = &adev->dm;
7044         struct dm_atomic_state *dm_state;
7045         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7046         uint32_t i, j;
7047         struct drm_crtc *crtc;
7048         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7049         unsigned long flags;
7050         bool wait_for_vblank = true;
7051         struct drm_connector *connector;
7052         struct drm_connector_state *old_con_state, *new_con_state;
7053         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7054         int crtc_disable_count = 0;
7055
7056         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7057
7058         dm_state = dm_atomic_get_new_state(state);
7059         if (dm_state && dm_state->context) {
7060                 dc_state = dm_state->context;
7061         } else {
7062                 /* No state changes, retain current state. */
7063                 dc_state_temp = dc_create_state(dm->dc);
7064                 ASSERT(dc_state_temp);
7065                 dc_state = dc_state_temp;
7066                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7067         }
7068
7069         /* update changed items */
7070         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7071                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7072
7073                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7074                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7075
7076                 DRM_DEBUG_DRIVER(
7077                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7078                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7079                         "connectors_changed:%d\n",
7080                         acrtc->crtc_id,
7081                         new_crtc_state->enable,
7082                         new_crtc_state->active,
7083                         new_crtc_state->planes_changed,
7084                         new_crtc_state->mode_changed,
7085                         new_crtc_state->active_changed,
7086                         new_crtc_state->connectors_changed);
7087
7088                 /* Copy all transient state flags into dc state */
7089                 if (dm_new_crtc_state->stream) {
7090                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7091                                                             dm_new_crtc_state->stream);
7092                 }
7093
7094                 /* handles headless hotplug case, updating new_state and
7095                  * aconnector as needed
7096                  */
7097
7098                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7099
7100                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7101
7102                         if (!dm_new_crtc_state->stream) {
7103                                 /*
7104                                  * this could happen because of issues with
7105                                  * userspace notifications delivery.
7106                                  * In this case userspace tries to set mode on
7107                                  * display which is disconnected in fact.
7108                                  * dc_sink is NULL in this case on aconnector.
7109                                  * We expect reset mode will come soon.
7110                                  *
7111                                  * This can also happen when unplug is done
7112                                  * during resume sequence ended
7113                                  *
7114                                  * In this case, we want to pretend we still
7115                                  * have a sink to keep the pipe running so that
7116                                  * hw state is consistent with the sw state
7117                                  */
7118                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7119                                                 __func__, acrtc->base.base.id);
7120                                 continue;
7121                         }
7122
7123                         if (dm_old_crtc_state->stream)
7124                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7125
7126                         pm_runtime_get_noresume(dev->dev);
7127
7128                         acrtc->enabled = true;
7129                         acrtc->hw_mode = new_crtc_state->mode;
7130                         crtc->hwmode = new_crtc_state->mode;
7131                 } else if (modereset_required(new_crtc_state)) {
7132                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7133                         /* i.e. reset mode */
7134                         if (dm_old_crtc_state->stream) {
7135                                 if (dm_old_crtc_state->stream->link->psr_allow_active)
7136                                         amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7137
7138                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7139                         }
7140                 }
7141         } /* for_each_crtc_in_state() */
7142
7143         if (dc_state) {
7144                 dm_enable_per_frame_crtc_master_sync(dc_state);
7145                 mutex_lock(&dm->dc_lock);
7146                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7147                 mutex_unlock(&dm->dc_lock);
7148         }
7149
7150         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7151                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7152
7153                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7154
7155                 if (dm_new_crtc_state->stream != NULL) {
7156                         const struct dc_stream_status *status =
7157                                         dc_stream_get_status(dm_new_crtc_state->stream);
7158
7159                         if (!status)
7160                                 status = dc_stream_get_status_from_state(dc_state,
7161                                                                          dm_new_crtc_state->stream);
7162
7163                         if (!status)
7164                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7165                         else
7166                                 acrtc->otg_inst = status->primary_otg_inst;
7167                 }
7168         }
7169 #ifdef CONFIG_DRM_AMD_DC_HDCP
7170         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7171                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7172                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7173                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7174
7175                 new_crtc_state = NULL;
7176
7177                 if (acrtc)
7178                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7179
7180                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7181
7182                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7183                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7184                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7185                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7186                         continue;
7187                 }
7188
7189                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7190                         hdcp_update_display(
7191                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7192                                 new_con_state->hdcp_content_type,
7193                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7194                                                                                                          : false);
7195         }
7196 #endif
7197
7198         /* Handle connector state changes */
7199         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7200                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7201                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7202                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7203                 struct dc_surface_update dummy_updates[MAX_SURFACES];
7204                 struct dc_stream_update stream_update;
7205                 struct dc_info_packet hdr_packet;
7206                 struct dc_stream_status *status = NULL;
7207                 bool abm_changed, hdr_changed, scaling_changed;
7208
7209                 memset(&dummy_updates, 0, sizeof(dummy_updates));
7210                 memset(&stream_update, 0, sizeof(stream_update));
7211
7212                 if (acrtc) {
7213                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7214                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7215                 }
7216
7217                 /* Skip any modesets/resets */
7218                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7219                         continue;
7220
7221                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7222                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7223
7224                 scaling_changed = is_scaling_state_different(dm_new_con_state,
7225                                                              dm_old_con_state);
7226
7227                 abm_changed = dm_new_crtc_state->abm_level !=
7228                               dm_old_crtc_state->abm_level;
7229
7230                 hdr_changed =
7231                         is_hdr_metadata_different(old_con_state, new_con_state);
7232
7233                 if (!scaling_changed && !abm_changed && !hdr_changed)
7234                         continue;
7235
7236                 stream_update.stream = dm_new_crtc_state->stream;
7237                 if (scaling_changed) {
7238                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7239                                         dm_new_con_state, dm_new_crtc_state->stream);
7240
7241                         stream_update.src = dm_new_crtc_state->stream->src;
7242                         stream_update.dst = dm_new_crtc_state->stream->dst;
7243                 }
7244
7245                 if (abm_changed) {
7246                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7247
7248                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
7249                 }
7250
7251                 if (hdr_changed) {
7252                         fill_hdr_info_packet(new_con_state, &hdr_packet);
7253                         stream_update.hdr_static_metadata = &hdr_packet;
7254                 }
7255
7256                 status = dc_stream_get_status(dm_new_crtc_state->stream);
7257                 WARN_ON(!status);
7258                 WARN_ON(!status->plane_count);
7259
7260                 /*
7261                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
7262                  * Here we create an empty update on each plane.
7263                  * To fix this, DC should permit updating only stream properties.
7264                  */
7265                 for (j = 0; j < status->plane_count; j++)
7266                         dummy_updates[j].surface = status->plane_states[0];
7267
7268
7269                 mutex_lock(&dm->dc_lock);
7270                 dc_commit_updates_for_stream(dm->dc,
7271                                                      dummy_updates,
7272                                                      status->plane_count,
7273                                                      dm_new_crtc_state->stream,
7274                                                      &stream_update,
7275                                                      dc_state);
7276                 mutex_unlock(&dm->dc_lock);
7277         }
7278
7279         /* Count number of newly disabled CRTCs for dropping PM refs later. */
7280         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7281                                       new_crtc_state, i) {
7282                 if (old_crtc_state->active && !new_crtc_state->active)
7283                         crtc_disable_count++;
7284
7285                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7286                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7287
7288                 /* Update freesync active state. */
7289                 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7290
7291                 /* Handle vrr on->off / off->on transitions */
7292                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7293                                                 dm_new_crtc_state);
7294         }
7295
7296         /* Enable interrupts for CRTCs going through a modeset. */
7297         amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7298
7299         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7300                 if (new_crtc_state->async_flip)
7301                         wait_for_vblank = false;
7302
7303         /* update planes when needed per crtc*/
7304         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7305                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7306
7307                 if (dm_new_crtc_state->stream)
7308                         amdgpu_dm_commit_planes(state, dc_state, dev,
7309                                                 dm, crtc, wait_for_vblank);
7310         }
7311
7312         /* Enable interrupts for CRTCs going from 0 to n active planes. */
7313         amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7314
7315         /* Update audio instances for each connector. */
7316         amdgpu_dm_commit_audio(dev, state);
7317
7318         /*
7319          * send vblank event on all events not handled in flip and
7320          * mark consumed event for drm_atomic_helper_commit_hw_done
7321          */
7322         spin_lock_irqsave(&adev->ddev->event_lock, flags);
7323         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7324
7325                 if (new_crtc_state->event)
7326                         drm_send_event_locked(dev, &new_crtc_state->event->base);
7327
7328                 new_crtc_state->event = NULL;
7329         }
7330         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7331
7332         /* Signal HW programming completion */
7333         drm_atomic_helper_commit_hw_done(state);
7334
7335         if (wait_for_vblank)
7336                 drm_atomic_helper_wait_for_flip_done(dev, state);
7337
7338         drm_atomic_helper_cleanup_planes(dev, state);
7339
7340         /*
7341          * Finally, drop a runtime PM reference for each newly disabled CRTC,
7342          * so we can put the GPU into runtime suspend if we're not driving any
7343          * displays anymore
7344          */
7345         for (i = 0; i < crtc_disable_count; i++)
7346                 pm_runtime_put_autosuspend(dev->dev);
7347         pm_runtime_mark_last_busy(dev->dev);
7348
7349         if (dc_state_temp)
7350                 dc_release_state(dc_state_temp);
7351 }
7352
7353
7354 static int dm_force_atomic_commit(struct drm_connector *connector)
7355 {
7356         int ret = 0;
7357         struct drm_device *ddev = connector->dev;
7358         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7359         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7360         struct drm_plane *plane = disconnected_acrtc->base.primary;
7361         struct drm_connector_state *conn_state;
7362         struct drm_crtc_state *crtc_state;
7363         struct drm_plane_state *plane_state;
7364
7365         if (!state)
7366                 return -ENOMEM;
7367
7368         state->acquire_ctx = ddev->mode_config.acquire_ctx;
7369
7370         /* Construct an atomic state to restore previous display setting */
7371
7372         /*
7373          * Attach connectors to drm_atomic_state
7374          */
7375         conn_state = drm_atomic_get_connector_state(state, connector);
7376
7377         ret = PTR_ERR_OR_ZERO(conn_state);
7378         if (ret)
7379                 goto err;
7380
7381         /* Attach crtc to drm_atomic_state*/
7382         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7383
7384         ret = PTR_ERR_OR_ZERO(crtc_state);
7385         if (ret)
7386                 goto err;
7387
7388         /* force a restore */
7389         crtc_state->mode_changed = true;
7390
7391         /* Attach plane to drm_atomic_state */
7392         plane_state = drm_atomic_get_plane_state(state, plane);
7393
7394         ret = PTR_ERR_OR_ZERO(plane_state);
7395         if (ret)
7396                 goto err;
7397
7398
7399         /* Call commit internally with the state we just constructed */
7400         ret = drm_atomic_commit(state);
7401         if (!ret)
7402                 return 0;
7403
7404 err:
7405         DRM_ERROR("Restoring old state failed with %i\n", ret);
7406         drm_atomic_state_put(state);
7407
7408         return ret;
7409 }
7410
7411 /*
7412  * This function handles all cases when set mode does not come upon hotplug.
7413  * This includes when a display is unplugged then plugged back into the
7414  * same port and when running without usermode desktop manager supprot
7415  */
7416 void dm_restore_drm_connector_state(struct drm_device *dev,
7417                                     struct drm_connector *connector)
7418 {
7419         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7420         struct amdgpu_crtc *disconnected_acrtc;
7421         struct dm_crtc_state *acrtc_state;
7422
7423         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7424                 return;
7425
7426         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7427         if (!disconnected_acrtc)
7428                 return;
7429
7430         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7431         if (!acrtc_state->stream)
7432                 return;
7433
7434         /*
7435          * If the previous sink is not released and different from the current,
7436          * we deduce we are in a state where we can not rely on usermode call
7437          * to turn on the display, so we do it here
7438          */
7439         if (acrtc_state->stream->sink != aconnector->dc_sink)
7440                 dm_force_atomic_commit(&aconnector->base);
7441 }
7442
7443 /*
7444  * Grabs all modesetting locks to serialize against any blocking commits,
7445  * Waits for completion of all non blocking commits.
7446  */
7447 static int do_aquire_global_lock(struct drm_device *dev,
7448                                  struct drm_atomic_state *state)
7449 {
7450         struct drm_crtc *crtc;
7451         struct drm_crtc_commit *commit;
7452         long ret;
7453
7454         /*
7455          * Adding all modeset locks to aquire_ctx will
7456          * ensure that when the framework release it the
7457          * extra locks we are locking here will get released to
7458          */
7459         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7460         if (ret)
7461                 return ret;
7462
7463         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7464                 spin_lock(&crtc->commit_lock);
7465                 commit = list_first_entry_or_null(&crtc->commit_list,
7466                                 struct drm_crtc_commit, commit_entry);
7467                 if (commit)
7468                         drm_crtc_commit_get(commit);
7469                 spin_unlock(&crtc->commit_lock);
7470
7471                 if (!commit)
7472                         continue;
7473
7474                 /*
7475                  * Make sure all pending HW programming completed and
7476                  * page flips done
7477                  */
7478                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7479
7480                 if (ret > 0)
7481                         ret = wait_for_completion_interruptible_timeout(
7482                                         &commit->flip_done, 10*HZ);
7483
7484                 if (ret == 0)
7485                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7486                                   "timed out\n", crtc->base.id, crtc->name);
7487
7488                 drm_crtc_commit_put(commit);
7489         }
7490
7491         return ret < 0 ? ret : 0;
7492 }
7493
7494 static void get_freesync_config_for_crtc(
7495         struct dm_crtc_state *new_crtc_state,
7496         struct dm_connector_state *new_con_state)
7497 {
7498         struct mod_freesync_config config = {0};
7499         struct amdgpu_dm_connector *aconnector =
7500                         to_amdgpu_dm_connector(new_con_state->base.connector);
7501         struct drm_display_mode *mode = &new_crtc_state->base.mode;
7502         int vrefresh = drm_mode_vrefresh(mode);
7503
7504         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7505                                         vrefresh >= aconnector->min_vfreq &&
7506                                         vrefresh <= aconnector->max_vfreq;
7507
7508         if (new_crtc_state->vrr_supported) {
7509                 new_crtc_state->stream->ignore_msa_timing_param = true;
7510                 config.state = new_crtc_state->base.vrr_enabled ?
7511                                 VRR_STATE_ACTIVE_VARIABLE :
7512                                 VRR_STATE_INACTIVE;
7513                 config.min_refresh_in_uhz =
7514                                 aconnector->min_vfreq * 1000000;
7515                 config.max_refresh_in_uhz =
7516                                 aconnector->max_vfreq * 1000000;
7517                 config.vsif_supported = true;
7518                 config.btr = true;
7519         }
7520
7521         new_crtc_state->freesync_config = config;
7522 }
7523
7524 static void reset_freesync_config_for_crtc(
7525         struct dm_crtc_state *new_crtc_state)
7526 {
7527         new_crtc_state->vrr_supported = false;
7528
7529         memset(&new_crtc_state->vrr_params, 0,
7530                sizeof(new_crtc_state->vrr_params));
7531         memset(&new_crtc_state->vrr_infopacket, 0,
7532                sizeof(new_crtc_state->vrr_infopacket));
7533 }
7534
7535 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7536                                 struct drm_atomic_state *state,
7537                                 struct drm_crtc *crtc,
7538                                 struct drm_crtc_state *old_crtc_state,
7539                                 struct drm_crtc_state *new_crtc_state,
7540                                 bool enable,
7541                                 bool *lock_and_validation_needed)
7542 {
7543         struct dm_atomic_state *dm_state = NULL;
7544         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7545         struct dc_stream_state *new_stream;
7546         int ret = 0;
7547
7548         /*
7549          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7550          * update changed items
7551          */
7552         struct amdgpu_crtc *acrtc = NULL;
7553         struct amdgpu_dm_connector *aconnector = NULL;
7554         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7555         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7556
7557         new_stream = NULL;
7558
7559         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7560         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7561         acrtc = to_amdgpu_crtc(crtc);
7562         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7563
7564         /* TODO This hack should go away */
7565         if (aconnector && enable) {
7566                 /* Make sure fake sink is created in plug-in scenario */
7567                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7568                                                             &aconnector->base);
7569                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7570                                                             &aconnector->base);
7571
7572                 if (IS_ERR(drm_new_conn_state)) {
7573                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7574                         goto fail;
7575                 }
7576
7577                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7578                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7579
7580                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7581                         goto skip_modeset;
7582
7583                 new_stream = create_stream_for_sink(aconnector,
7584                                                      &new_crtc_state->mode,
7585                                                     dm_new_conn_state,
7586                                                     dm_old_crtc_state->stream);
7587
7588                 /*
7589                  * we can have no stream on ACTION_SET if a display
7590                  * was disconnected during S3, in this case it is not an
7591                  * error, the OS will be updated after detection, and
7592                  * will do the right thing on next atomic commit
7593                  */
7594
7595                 if (!new_stream) {
7596                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7597                                         __func__, acrtc->base.base.id);
7598                         ret = -ENOMEM;
7599                         goto fail;
7600                 }
7601
7602                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7603
7604                 ret = fill_hdr_info_packet(drm_new_conn_state,
7605                                            &new_stream->hdr_static_metadata);
7606                 if (ret)
7607                         goto fail;
7608
7609                 /*
7610                  * If we already removed the old stream from the context
7611                  * (and set the new stream to NULL) then we can't reuse
7612                  * the old stream even if the stream and scaling are unchanged.
7613                  * We'll hit the BUG_ON and black screen.
7614                  *
7615                  * TODO: Refactor this function to allow this check to work
7616                  * in all conditions.
7617                  */
7618                 if (dm_new_crtc_state->stream &&
7619                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7620                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7621                         new_crtc_state->mode_changed = false;
7622                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7623                                          new_crtc_state->mode_changed);
7624                 }
7625         }
7626
7627         /* mode_changed flag may get updated above, need to check again */
7628         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7629                 goto skip_modeset;
7630
7631         DRM_DEBUG_DRIVER(
7632                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7633                 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7634                 "connectors_changed:%d\n",
7635                 acrtc->crtc_id,
7636                 new_crtc_state->enable,
7637                 new_crtc_state->active,
7638                 new_crtc_state->planes_changed,
7639                 new_crtc_state->mode_changed,
7640                 new_crtc_state->active_changed,
7641                 new_crtc_state->connectors_changed);
7642
7643         /* Remove stream for any changed/disabled CRTC */
7644         if (!enable) {
7645
7646                 if (!dm_old_crtc_state->stream)
7647                         goto skip_modeset;
7648
7649                 ret = dm_atomic_get_state(state, &dm_state);
7650                 if (ret)
7651                         goto fail;
7652
7653                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7654                                 crtc->base.id);
7655
7656                 /* i.e. reset mode */
7657                 if (dc_remove_stream_from_ctx(
7658                                 dm->dc,
7659                                 dm_state->context,
7660                                 dm_old_crtc_state->stream) != DC_OK) {
7661                         ret = -EINVAL;
7662                         goto fail;
7663                 }
7664
7665                 dc_stream_release(dm_old_crtc_state->stream);
7666                 dm_new_crtc_state->stream = NULL;
7667
7668                 reset_freesync_config_for_crtc(dm_new_crtc_state);
7669
7670                 *lock_and_validation_needed = true;
7671
7672         } else {/* Add stream for any updated/enabled CRTC */
7673                 /*
7674                  * Quick fix to prevent NULL pointer on new_stream when
7675                  * added MST connectors not found in existing crtc_state in the chained mode
7676                  * TODO: need to dig out the root cause of that
7677                  */
7678                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7679                         goto skip_modeset;
7680
7681                 if (modereset_required(new_crtc_state))
7682                         goto skip_modeset;
7683
7684                 if (modeset_required(new_crtc_state, new_stream,
7685                                      dm_old_crtc_state->stream)) {
7686
7687                         WARN_ON(dm_new_crtc_state->stream);
7688
7689                         ret = dm_atomic_get_state(state, &dm_state);
7690                         if (ret)
7691                                 goto fail;
7692
7693                         dm_new_crtc_state->stream = new_stream;
7694
7695                         dc_stream_retain(new_stream);
7696
7697                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7698                                                 crtc->base.id);
7699
7700                         if (dc_add_stream_to_ctx(
7701                                         dm->dc,
7702                                         dm_state->context,
7703                                         dm_new_crtc_state->stream) != DC_OK) {
7704                                 ret = -EINVAL;
7705                                 goto fail;
7706                         }
7707
7708                         *lock_and_validation_needed = true;
7709                 }
7710         }
7711
7712 skip_modeset:
7713         /* Release extra reference */
7714         if (new_stream)
7715                  dc_stream_release(new_stream);
7716
7717         /*
7718          * We want to do dc stream updates that do not require a
7719          * full modeset below.
7720          */
7721         if (!(enable && aconnector && new_crtc_state->enable &&
7722               new_crtc_state->active))
7723                 return 0;
7724         /*
7725          * Given above conditions, the dc state cannot be NULL because:
7726          * 1. We're in the process of enabling CRTCs (just been added
7727          *    to the dc context, or already is on the context)
7728          * 2. Has a valid connector attached, and
7729          * 3. Is currently active and enabled.
7730          * => The dc stream state currently exists.
7731          */
7732         BUG_ON(dm_new_crtc_state->stream == NULL);
7733
7734         /* Scaling or underscan settings */
7735         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7736                 update_stream_scaling_settings(
7737                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7738
7739         /* ABM settings */
7740         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7741
7742         /*
7743          * Color management settings. We also update color properties
7744          * when a modeset is needed, to ensure it gets reprogrammed.
7745          */
7746         if (dm_new_crtc_state->base.color_mgmt_changed ||
7747             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7748                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7749                 if (ret)
7750                         goto fail;
7751         }
7752
7753         /* Update Freesync settings. */
7754         get_freesync_config_for_crtc(dm_new_crtc_state,
7755                                      dm_new_conn_state);
7756
7757         return ret;
7758
7759 fail:
7760         if (new_stream)
7761                 dc_stream_release(new_stream);
7762         return ret;
7763 }
7764
7765 static bool should_reset_plane(struct drm_atomic_state *state,
7766                                struct drm_plane *plane,
7767                                struct drm_plane_state *old_plane_state,
7768                                struct drm_plane_state *new_plane_state)
7769 {
7770         struct drm_plane *other;
7771         struct drm_plane_state *old_other_state, *new_other_state;
7772         struct drm_crtc_state *new_crtc_state;
7773         int i;
7774
7775         /*
7776          * TODO: Remove this hack once the checks below are sufficient
7777          * enough to determine when we need to reset all the planes on
7778          * the stream.
7779          */
7780         if (state->allow_modeset)
7781                 return true;
7782
7783         /* Exit early if we know that we're adding or removing the plane. */
7784         if (old_plane_state->crtc != new_plane_state->crtc)
7785                 return true;
7786
7787         /* old crtc == new_crtc == NULL, plane not in context. */
7788         if (!new_plane_state->crtc)
7789                 return false;
7790
7791         new_crtc_state =
7792                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7793
7794         if (!new_crtc_state)
7795                 return true;
7796
7797         /* CRTC Degamma changes currently require us to recreate planes. */
7798         if (new_crtc_state->color_mgmt_changed)
7799                 return true;
7800
7801         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7802                 return true;
7803
7804         /*
7805          * If there are any new primary or overlay planes being added or
7806          * removed then the z-order can potentially change. To ensure
7807          * correct z-order and pipe acquisition the current DC architecture
7808          * requires us to remove and recreate all existing planes.
7809          *
7810          * TODO: Come up with a more elegant solution for this.
7811          */
7812         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7813                 if (other->type == DRM_PLANE_TYPE_CURSOR)
7814                         continue;
7815
7816                 if (old_other_state->crtc != new_plane_state->crtc &&
7817                     new_other_state->crtc != new_plane_state->crtc)
7818                         continue;
7819
7820                 if (old_other_state->crtc != new_other_state->crtc)
7821                         return true;
7822
7823                 /* TODO: Remove this once we can handle fast format changes. */
7824                 if (old_other_state->fb && new_other_state->fb &&
7825                     old_other_state->fb->format != new_other_state->fb->format)
7826                         return true;
7827         }
7828
7829         return false;
7830 }
7831
7832 static int dm_update_plane_state(struct dc *dc,
7833                                  struct drm_atomic_state *state,
7834                                  struct drm_plane *plane,
7835                                  struct drm_plane_state *old_plane_state,
7836                                  struct drm_plane_state *new_plane_state,
7837                                  bool enable,
7838                                  bool *lock_and_validation_needed)
7839 {
7840
7841         struct dm_atomic_state *dm_state = NULL;
7842         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7843         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7844         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7845         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7846         bool needs_reset;
7847         int ret = 0;
7848
7849
7850         new_plane_crtc = new_plane_state->crtc;
7851         old_plane_crtc = old_plane_state->crtc;
7852         dm_new_plane_state = to_dm_plane_state(new_plane_state);
7853         dm_old_plane_state = to_dm_plane_state(old_plane_state);
7854
7855         /*TODO Implement atomic check for cursor plane */
7856         if (plane->type == DRM_PLANE_TYPE_CURSOR)
7857                 return 0;
7858
7859         needs_reset = should_reset_plane(state, plane, old_plane_state,
7860                                          new_plane_state);
7861
7862         /* Remove any changed/removed planes */
7863         if (!enable) {
7864                 if (!needs_reset)
7865                         return 0;
7866
7867                 if (!old_plane_crtc)
7868                         return 0;
7869
7870                 old_crtc_state = drm_atomic_get_old_crtc_state(
7871                                 state, old_plane_crtc);
7872                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7873
7874                 if (!dm_old_crtc_state->stream)
7875                         return 0;
7876
7877                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7878                                 plane->base.id, old_plane_crtc->base.id);
7879
7880                 ret = dm_atomic_get_state(state, &dm_state);
7881                 if (ret)
7882                         return ret;
7883
7884                 if (!dc_remove_plane_from_context(
7885                                 dc,
7886                                 dm_old_crtc_state->stream,
7887                                 dm_old_plane_state->dc_state,
7888                                 dm_state->context)) {
7889
7890                         ret = EINVAL;
7891                         return ret;
7892                 }
7893
7894
7895                 dc_plane_state_release(dm_old_plane_state->dc_state);
7896                 dm_new_plane_state->dc_state = NULL;
7897
7898                 *lock_and_validation_needed = true;
7899
7900         } else { /* Add new planes */
7901                 struct dc_plane_state *dc_new_plane_state;
7902
7903                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7904                         return 0;
7905
7906                 if (!new_plane_crtc)
7907                         return 0;
7908
7909                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7910                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7911
7912                 if (!dm_new_crtc_state->stream)
7913                         return 0;
7914
7915                 if (!needs_reset)
7916                         return 0;
7917
7918                 WARN_ON(dm_new_plane_state->dc_state);
7919
7920                 dc_new_plane_state = dc_create_plane_state(dc);
7921                 if (!dc_new_plane_state)
7922                         return -ENOMEM;
7923
7924                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7925                                 plane->base.id, new_plane_crtc->base.id);
7926
7927                 ret = fill_dc_plane_attributes(
7928                         new_plane_crtc->dev->dev_private,
7929                         dc_new_plane_state,
7930                         new_plane_state,
7931                         new_crtc_state);
7932                 if (ret) {
7933                         dc_plane_state_release(dc_new_plane_state);
7934                         return ret;
7935                 }
7936
7937                 ret = dm_atomic_get_state(state, &dm_state);
7938                 if (ret) {
7939                         dc_plane_state_release(dc_new_plane_state);
7940                         return ret;
7941                 }
7942
7943                 /*
7944                  * Any atomic check errors that occur after this will
7945                  * not need a release. The plane state will be attached
7946                  * to the stream, and therefore part of the atomic
7947                  * state. It'll be released when the atomic state is
7948                  * cleaned.
7949                  */
7950                 if (!dc_add_plane_to_context(
7951                                 dc,
7952                                 dm_new_crtc_state->stream,
7953                                 dc_new_plane_state,
7954                                 dm_state->context)) {
7955
7956                         dc_plane_state_release(dc_new_plane_state);
7957                         return -EINVAL;
7958                 }
7959
7960                 dm_new_plane_state->dc_state = dc_new_plane_state;
7961
7962                 /* Tell DC to do a full surface update every time there
7963                  * is a plane change. Inefficient, but works for now.
7964                  */
7965                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7966
7967                 *lock_and_validation_needed = true;
7968         }
7969
7970
7971         return ret;
7972 }
7973
7974 static int
7975 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
7976                                     struct drm_atomic_state *state,
7977                                     enum surface_update_type *out_type)
7978 {
7979         struct dc *dc = dm->dc;
7980         struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
7981         int i, j, num_plane, ret = 0;
7982         struct drm_plane_state *old_plane_state, *new_plane_state;
7983         struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
7984         struct drm_crtc *new_plane_crtc;
7985         struct drm_plane *plane;
7986
7987         struct drm_crtc *crtc;
7988         struct drm_crtc_state *new_crtc_state, *old_crtc_state;
7989         struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
7990         struct dc_stream_status *status = NULL;
7991         enum surface_update_type update_type = UPDATE_TYPE_FAST;
7992         struct surface_info_bundle {
7993                 struct dc_surface_update surface_updates[MAX_SURFACES];
7994                 struct dc_plane_info plane_infos[MAX_SURFACES];
7995                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7996                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7997                 struct dc_stream_update stream_update;
7998         } *bundle;
7999
8000         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8001
8002         if (!bundle) {
8003                 DRM_ERROR("Failed to allocate update bundle\n");
8004                 /* Set type to FULL to avoid crashing in DC*/
8005                 update_type = UPDATE_TYPE_FULL;
8006                 goto cleanup;
8007         }
8008
8009         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8010
8011                 memset(bundle, 0, sizeof(struct surface_info_bundle));
8012
8013                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8014                 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8015                 num_plane = 0;
8016
8017                 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8018                         update_type = UPDATE_TYPE_FULL;
8019                         goto cleanup;
8020                 }
8021
8022                 if (!new_dm_crtc_state->stream)
8023                         continue;
8024
8025                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8026                         const struct amdgpu_framebuffer *amdgpu_fb =
8027                                 to_amdgpu_framebuffer(new_plane_state->fb);
8028                         struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8029                         struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8030                         struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8031                         uint64_t tiling_flags;
8032
8033                         new_plane_crtc = new_plane_state->crtc;
8034                         new_dm_plane_state = to_dm_plane_state(new_plane_state);
8035                         old_dm_plane_state = to_dm_plane_state(old_plane_state);
8036
8037                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8038                                 continue;
8039
8040                         if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8041                                 update_type = UPDATE_TYPE_FULL;
8042                                 goto cleanup;
8043                         }
8044
8045                         if (crtc != new_plane_crtc)
8046                                 continue;
8047
8048                         bundle->surface_updates[num_plane].surface =
8049                                         new_dm_plane_state->dc_state;
8050
8051                         if (new_crtc_state->mode_changed) {
8052                                 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8053                                 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8054                         }
8055
8056                         if (new_crtc_state->color_mgmt_changed) {
8057                                 bundle->surface_updates[num_plane].gamma =
8058                                                 new_dm_plane_state->dc_state->gamma_correction;
8059                                 bundle->surface_updates[num_plane].in_transfer_func =
8060                                                 new_dm_plane_state->dc_state->in_transfer_func;
8061                                 bundle->stream_update.gamut_remap =
8062                                                 &new_dm_crtc_state->stream->gamut_remap_matrix;
8063                                 bundle->stream_update.output_csc_transform =
8064                                                 &new_dm_crtc_state->stream->csc_color_matrix;
8065                                 bundle->stream_update.out_transfer_func =
8066                                                 new_dm_crtc_state->stream->out_transfer_func;
8067                         }
8068
8069                         ret = fill_dc_scaling_info(new_plane_state,
8070                                                    scaling_info);
8071                         if (ret)
8072                                 goto cleanup;
8073
8074                         bundle->surface_updates[num_plane].scaling_info = scaling_info;
8075
8076                         if (amdgpu_fb) {
8077                                 ret = get_fb_info(amdgpu_fb, &tiling_flags);
8078                                 if (ret)
8079                                         goto cleanup;
8080
8081                                 ret = fill_dc_plane_info_and_addr(
8082                                         dm->adev, new_plane_state, tiling_flags,
8083                                         plane_info,
8084                                         &flip_addr->address);
8085                                 if (ret)
8086                                         goto cleanup;
8087
8088                                 bundle->surface_updates[num_plane].plane_info = plane_info;
8089                                 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8090                         }
8091
8092                         num_plane++;
8093                 }
8094
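                /* No surface updates were queued for this CRTC; nothing for
                 * DC to classify. */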
8095                 if (num_plane == 0)
8096                         continue;
8097
8098                 ret = dm_atomic_get_state(state, &dm_state);
8099                 if (ret)
8100                         goto cleanup;
8101
8102                 old_dm_state = dm_atomic_get_old_state(state);
8103                 if (!old_dm_state) {
8104                         ret = -EINVAL;
8105                         goto cleanup;
8106                 }
8107
8108                 status = dc_stream_get_status_from_state(old_dm_state->context,
8109                                                          new_dm_crtc_state->stream);
8110                 bundle->stream_update.stream = new_dm_crtc_state->stream;
8111                 /*
8112                  * TODO: DC modifies the surface during this call so we need
8113                  * to lock here - find a way to do this without locking.
8114                  */
8115                 mutex_lock(&dm->dc_lock);
8116                 update_type = dc_check_update_surfaces_for_stream(
8117                                 dc, bundle->surface_updates, num_plane,
8118                                 &bundle->stream_update, status);
8119                 mutex_unlock(&dm->dc_lock);
8120
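                /* UPDATE_TYPE_FULL is the maximum update type; once any
                 * stream requires it, there is no point in checking the
                 * remaining CRTCs. */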
8121                 if (update_type > UPDATE_TYPE_MED) {
8122                         update_type = UPDATE_TYPE_FULL;
8123                         goto cleanup;
8124                 }
8125         }
8126
8127 cleanup:
8128         kfree(bundle);
8129
8130         *out_type = update_type;
8131         return ret;
8132 }
8133
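/*
 * If the CRTC being modeset drives a DP MST connector, pull every other CRTC
 * sharing the same MST topology into the atomic state: changing one stream's
 * DSC configuration can alter the bandwidth available to its siblings, so
 * they must be re-validated together.
 */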
8134 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8135 {
8136         struct drm_connector *connector;
8137         struct drm_connector_state *conn_state;
8138         struct amdgpu_dm_connector *aconnector = NULL;
8139         int i;

8140         for_each_new_connector_in_state(state, connector, conn_state, i) {
8141                 if (conn_state->crtc != crtc)
8142                         continue;
8143
8144                 aconnector = to_amdgpu_dm_connector(connector);
8145                 if (!aconnector->port || !aconnector->mst_port)
8146                         aconnector = NULL;
8147                 else
8148                         break;
8149         }
8150
8151         if (!aconnector)
8152                 return 0;
8153
8154         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8155 }
8156
8157 /**
8158  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8159  * @dev: The DRM device
8160  * @state: The atomic state to commit
8161  *
8162  * Validate that the given atomic state is programmable by DC into hardware.
8163  * This involves constructing a &struct dc_state reflecting the new hardware
8164  * state we wish to commit, then querying DC to see if it is programmable. It's
8165  * important not to modify the existing DC state. Otherwise, atomic_check
8166  * may unexpectedly commit hardware changes.
8167  *
8168  * When validating the DC state, it's important that the right locks are
8169  * acquired. For the full-update case, which removes/adds/updates streams on
8170  * one CRTC while flipping on another, acquiring the global lock guarantees
8171  * that any such full-update commit will wait for completion of any
8172  * outstanding flip using DRM's synchronization events. See
8173  * dm_determine_update_type_for_commit().
8174  *
8175  * Note that DM adds the affected connectors for all CRTCs in state, when that
8176  * might not seem necessary. This is because DC stream creation requires the
8177  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8178  * be possible but non-trivial - a possible TODO item.
8179  *
8180  * Return: 0 on success, negative error code if validation failed.
8181  */
8182 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8183                                   struct drm_atomic_state *state)
8184 {
8185         struct amdgpu_device *adev = dev->dev_private;
8186         struct dm_atomic_state *dm_state = NULL;
8187         struct dc *dc = adev->dm.dc;
8188         struct drm_connector *connector;
8189         struct drm_connector_state *old_con_state, *new_con_state;
8190         struct drm_crtc *crtc;
8191         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8192         struct drm_plane *plane;
8193         struct drm_plane_state *old_plane_state, *new_plane_state;
8194         enum surface_update_type update_type = UPDATE_TYPE_FAST;
8195         enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8196
8197         int ret, i;
8198
8199         /*
8200          * This bool will be set to true for any modeset/reset or plane update
8201          * which implies a non-fast surface update.
8202          */
8203         bool lock_and_validation_needed = false;
8204
8205         ret = drm_atomic_helper_check_modeset(dev, state);
8206         if (ret)
8207                 goto fail;
8208
8209         if (adev->asic_type >= CHIP_NAVI10) {
8210                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8211                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8212                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
8213                                 if (ret)
8214                                         goto fail;
8215                         }
8216                 }
8217         }
8218
8219         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8220                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8221                     !new_crtc_state->color_mgmt_changed &&
8222                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8223                         continue;
8224
8225                 if (!new_crtc_state->enable)
8226                         continue;
8227
8228                 ret = drm_atomic_add_affected_connectors(state, crtc);
8229                 if (ret)
8230                         goto fail;
8231
8232                 ret = drm_atomic_add_affected_planes(state, crtc);
8233                 if (ret)
8234                         goto fail;
8235         }
8236
8237         /*
8238          * Add all primary and overlay planes on the CRTC to the state
8239          * whenever a plane is enabled to maintain correct z-ordering
8240          * and to enable fast surface updates.
8241          */
8242         drm_for_each_crtc(crtc, dev) {
8243                 bool modified = false;
8244
8245                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8246                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8247                                 continue;
8248
8249                         if (new_plane_state->crtc == crtc ||
8250                             old_plane_state->crtc == crtc) {
8251                                 modified = true;
8252                                 break;
8253                         }
8254                 }
8255
8256                 if (!modified)
8257                         continue;
8258
8259                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8260                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8261                                 continue;
8262
8263                         new_plane_state =
8264                                 drm_atomic_get_plane_state(state, plane);
8265
8266                         if (IS_ERR(new_plane_state)) {
8267                                 ret = PTR_ERR(new_plane_state);
8268                                 goto fail;
8269                         }
8270                 }
8271         }
8272
8273         /* Remove existing planes if they are modified */
8274         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8275                 ret = dm_update_plane_state(dc, state, plane,
8276                                             old_plane_state,
8277                                             new_plane_state,
8278                                             false,
8279                                             &lock_and_validation_needed);
8280                 if (ret)
8281                         goto fail;
8282         }
8283
8284         /* Disable all CRTCs that require a disable */
8285         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8286                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8287                                            old_crtc_state,
8288                                            new_crtc_state,
8289                                            false,
8290                                            &lock_and_validation_needed);
8291                 if (ret)
8292                         goto fail;
8293         }
8294
8295         /* Enable all CRTCs that require an enable */
8296         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8297                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8298                                            old_crtc_state,
8299                                            new_crtc_state,
8300                                            true,
8301                                            &lock_and_validation_needed);
8302                 if (ret)
8303                         goto fail;
8304         }
8305
8306         /* Add new/modified planes */
8307         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8308                 ret = dm_update_plane_state(dc, state, plane,
8309                                             old_plane_state,
8310                                             new_plane_state,
8311                                             true,
8312                                             &lock_and_validation_needed);
8313                 if (ret)
8314                         goto fail;
8315         }
8316
8317         /* Run this here since we want to validate the streams we created */
8318         ret = drm_atomic_helper_check_planes(dev, state);
8319         if (ret)
8320                 goto fail;
8321
8322         if (state->legacy_cursor_update) {
8323                 /*
8324                  * This is a fast cursor update coming from the plane update
8325                  * helper, check if it can be done asynchronously for better
8326                  * performance.
8327                  */
8328                 state->async_update =
8329                         !drm_atomic_helper_async_check(dev, state);
8330
8331                 /*
8332                  * Skip the remaining global validation if this is an async
8333                  * update. Cursor updates can be done without affecting
8334                  * state or bandwidth calcs and this avoids the performance
8335                  * penalty of locking the private state object and
8336                  * allocating a new dc_state.
8337                  */
8338                 if (state->async_update)
8339                         return 0;
8340         }
8341
8342         /* Check scaling and underscan changes */
8343         /* TODO: Scaling-change validation was removed due to the inability to
8344          * commit a new stream into the context w/o causing a full reset. Need
8345          * to decide how to handle this.
8346          */
8347         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8348                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8349                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8350                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8351
8352                 /* Skip any modesets/resets */
8353                 if (!acrtc || drm_atomic_crtc_needs_modeset(
8354                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8355                         continue;
8356
8357                 /* Skip anything that is not a scaling or underscan change */
8358                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8359                         continue;
8360
8361                 overall_update_type = UPDATE_TYPE_FULL;
8362                 lock_and_validation_needed = true;
8363         }
8364
8365         ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8366         if (ret)
8367                 goto fail;
8368
8369         if (overall_update_type < update_type)
8370                 overall_update_type = update_type;
8371
8372         /*
8373          * lock_and_validation_needed was an old way to determine if we need to
8374          * set the global lock. Leaving it in to check if we broke any corner
8375          * cases: true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED,
8376          *        false = UPDATE_TYPE_FAST.
8377          */
8378         if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8379                 WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8380
8381         if (overall_update_type > UPDATE_TYPE_FAST) {
8382                 ret = dm_atomic_get_state(state, &dm_state);
8383                 if (ret)
8384                         goto fail;
8385
8386                 ret = do_aquire_global_lock(dev, state);
8387                 if (ret)
8388                         goto fail;
8389
8390 #if defined(CONFIG_DRM_AMD_DC_DCN)
8391                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8392                         goto fail;
8393
8394                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8395                 if (ret)
8396                         goto fail;
8397 #endif
8398
8399                 /*
8400                  * Perform validation of MST topology in the state:
8401                  * We need to perform MST atomic check before calling
8402                  * dc_validate_global_state(), or there is a chance
8403                  * to get stuck in an infinite loop and hang eventually.
8404                  */
8405                 ret = drm_dp_mst_atomic_check(state);
8406                 if (ret)
8407                         goto fail;
8408
8409                 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8410                         ret = -EINVAL;
8411                         goto fail;
8412                 }
8413         } else {
8414                 /*
8415                  * The commit is a fast update. Fast updates shouldn't change
8416                  * the DC context, affect global validation, and can have their
8417                  * commit work done in parallel with other commits not touching
8418                  * the same resource. If we have a new DC context as part of
8419                  * the DM atomic state from validation we need to free it and
8420                  * retain the existing one instead.
8421                  */
8422                 struct dm_atomic_state *new_dm_state, *old_dm_state;
8423
8424                 new_dm_state = dm_atomic_get_new_state(state);
8425                 old_dm_state = dm_atomic_get_old_state(state);
8426
8427                 if (new_dm_state && old_dm_state) {
8428                         if (new_dm_state->context)
8429                                 dc_release_state(new_dm_state->context);
8430
8431                         new_dm_state->context = old_dm_state->context;
8432
8433                         if (old_dm_state->context)
8434                                 dc_retain_state(old_dm_state->context);
8435                 }
8436         }
8437
8438         /* Store the overall update type for use later in atomic check. */
8439         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8440                 struct dm_crtc_state *dm_new_crtc_state =
8441                         to_dm_crtc_state(new_crtc_state);
8442
8443                 dm_new_crtc_state->update_type = (int)overall_update_type;
8444         }
8445
8446         /* Must be success */
8447         WARN_ON(ret);
8448         return ret;
8449
8450 fail:
8451         if (ret == -EDEADLK)
8452                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8453         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8454                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8455         else
8456                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8457
8458         return ret;
8459 }
8460
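/*
 * A sink that sets DP_MSA_TIMING_PAR_IGNORED in its DPCD can time the panel
 * from the received video stream alone, ignoring the MSA timing parameters.
 * This is a prerequisite for variable-refresh (FreeSync) operation over DP,
 * where the actual timing deviates from what the MSA advertises.
 */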
8461 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8462                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
8463 {
8464         uint8_t dpcd_data;
8465         bool capable = false;
8466
8467         if (amdgpu_dm_connector->dc_link &&
8468                 dm_helpers_dp_read_dpcd(
8469                                 NULL,
8470                                 amdgpu_dm_connector->dc_link,
8471                                 DP_DOWN_STREAM_PORT_COUNT,
8472                                 &dpcd_data,
8473                                 sizeof(dpcd_data))) {
8474                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8475         }
8476
8477         return capable;
8478 }
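
/**
 * amdgpu_dm_update_freesync_caps - update a connector's FreeSync capability
 * @connector: DRM connector to update
 * @edid: EDID to parse, or NULL to clear any cached FreeSync state
 *
 * Parse the EDID's detailed timing descriptors for a continuous-frequency
 * monitor range, cache the supported min/max vertical refresh rates on the
 * connector, and mirror the result into the connector's vrr_capable
 * property.
 */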
8479 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8480                                         struct edid *edid)
8481 {
8482         int i;
8483         bool edid_check_required;
8484         struct detailed_timing *timing;
8485         struct detailed_non_pixel *data;
8486         struct detailed_data_monitor_range *range;
8487         struct amdgpu_dm_connector *amdgpu_dm_connector =
8488                         to_amdgpu_dm_connector(connector);
8489         struct dm_connector_state *dm_con_state = NULL;
8490
8491         struct drm_device *dev = connector->dev;
8492         struct amdgpu_device *adev = dev->dev_private;
8493         bool freesync_capable = false;
8494
8495         if (!connector->state) {
8496                 DRM_ERROR("%s - Connector has no state\n", __func__);
8497                 goto update;
8498         }
8499
8500         if (!edid) {
8501                 dm_con_state = to_dm_connector_state(connector->state);
8502
8503                 amdgpu_dm_connector->min_vfreq = 0;
8504                 amdgpu_dm_connector->max_vfreq = 0;
8505                 amdgpu_dm_connector->pixel_clock_mhz = 0;
8506
8507                 goto update;
8508         }
8509
8510         dm_con_state = to_dm_connector_state(connector->state);
8511
8512         edid_check_required = false;
8513         if (!amdgpu_dm_connector->dc_sink) {
8514                 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
8515                 goto update;
8516         }
8517         if (!adev->dm.freesync_module)
8518                 goto update;
8519         /*
8520          * If the EDID is non-NULL, restrict FreeSync support to DP and eDP sinks.
8521          */
8522         if (edid) {
8523                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8524                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8525                         edid_check_required = is_dp_capable_without_timing_msa(
8526                                                 adev->dm.dc,
8527                                                 amdgpu_dm_connector);
8528                 }
8529         }
8530         if (edid_check_required && (edid->version > 1 ||
8531             (edid->version == 1 && edid->revision > 1))) {
8532                 for (i = 0; i < 4; i++) {
8534                         timing  = &edid->detailed_timings[i];
8535                         data    = &timing->data.other_data;
8536                         range   = &data->data.range;
8537                         /*
8538                          * Check if monitor has continuous frequency mode
8539                          */
8540                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
8541                                 continue;
8542                         /*
8543                          * Check for flag range limits only. If flag == 1 then
8544                          * no additional timing information provided.
8545                          * Default GTF, GTF Secondary curve and CVT are not
8546                          * supported
8547                          */
8548                         if (range->flags != 1)
8549                                 continue;
8550
8551                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8552                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8553                         amdgpu_dm_connector->pixel_clock_mhz =
8554                                 range->pixel_clock_mhz * 10;
8555                         break;
8556                 }
8557
8558                 if (amdgpu_dm_connector->max_vfreq -
8559                     amdgpu_dm_connector->min_vfreq > 10)
8560                         freesync_capable = true;
8563         }
8564
8565 update:
8566         if (dm_con_state)
8567                 dm_con_state->freesync_capable = freesync_capable;
8568
8569         if (connector->vrr_capable_property)
8570                 drm_connector_set_vrr_capable_property(connector,
8571                                                        freesync_capable);
8572 }
8573
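/*
 * amdgpu_dm_set_psr_caps() - cache the sink's PSR capability
 * @link: DC link to query
 *
 * Read DP_PSR_SUPPORT from the eDP sink's DPCD and record whether the
 * Panel Self Refresh feature can be enabled on this link.
 */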
8574 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8575 {
8576         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8577
8578         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8579                 return;
8580         if (link->type == dc_connection_none)
8581                 return;
8582         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8583                                         dpcd_data, sizeof(dpcd_data))) {
8584                 link->psr_feature_enabled = dpcd_data[0] ? true : false;
8585                 DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
8586         }
8587 }
8588
8589 /*
8590  * amdgpu_dm_link_setup_psr() - configure psr link
8591  * @stream: stream state
8592  *
8593  * Return: true on success
8594  */
8595 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8596 {
8597         struct dc_link *link = NULL;
8598         struct psr_config psr_config = {0};
8599         struct psr_context psr_context = {0};
8600         struct dc *dc = NULL;
8601         bool ret = false;
8602
8603         if (stream == NULL)
8604                 return false;
8605
8606         link = stream->link;
8607         dc = link->ctx->dc;
8608
8609         psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8610
8611         if (psr_config.psr_version > 0) {
8612                 psr_config.psr_exit_link_training_required = 0x1;
8613                 psr_config.psr_frame_capture_indication_req = 0;
8614                 psr_config.psr_rfb_setup_time = 0x37;
8615                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8616                 psr_config.allow_smu_optimizations = 0x0;
8617
8618                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8619         }
8620
8621         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
8622
8623         return ret;
8624 }
8625
8626 /*
8627  * amdgpu_dm_psr_enable() - enable psr f/w
8628  * @stream: stream state
8629  *
8630  * Return: true on success
8631  */
8632 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8633 {
8634         struct dc_link *link = stream->link;
8635         unsigned int vsync_rate_hz = 0;
8636         struct dc_static_screen_params params = {0};
8637         /* Calculate the number of static frames before generating an
8638          * interrupt to enter PSR; initialize with a fail-safe of 2
8639          * static frames.
8640          */
8641         unsigned int num_frames_static = 2;
8642
8643         DRM_DEBUG_DRIVER("Enabling psr...\n");
8644
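        /*
         * Refresh rate in Hz: the pixel clock (stored in units of 100 Hz,
         * hence the extra factor of 100) divided by the number of pixels
         * per frame (v_total * h_total).
         */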
8645         vsync_rate_hz = div64_u64(div64_u64((
8646                         stream->timing.pix_clk_100hz * 100),
8647                         stream->timing.v_total),
8648                         stream->timing.h_total);
8649
8650         /* Round up: calculate the number of frames such that at least
8651          * 30 ms have passed.
8652          */
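        /*
         * Illustrative example: at 60 Hz, frame_time_microsec is
         * 1000000 / 60 = 16666, giving num_frames_static =
         * 30000 / 16666 + 1 = 2; at 144 Hz (6944 us per frame) it is
         * 30000 / 6944 + 1 = 5.
         */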
8654         if (vsync_rate_hz != 0) {
8655                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8656                 num_frames_static = (30000 / frame_time_microsec) + 1;
8657         }
8658
8659         params.triggers.cursor_update = true;
8660         params.triggers.overlay_update = true;
8661         params.triggers.surface_update = true;
8662         params.num_frames = num_frames_static;
8663
8664         dc_stream_set_static_screen_params(link->ctx->dc,
8665                                            &stream, 1,
8666                                            &params);
8667
8668         return dc_link_set_psr_allow_active(link, true, false);
8669 }
8670
8671 /*
8672  * amdgpu_dm_psr_disable() - disable psr f/w
8673  * @stream:  stream state
8674  *
8675  * Return: true on success
8676  */
8677 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8678 {
8680         DRM_DEBUG_DRIVER("Disabling psr...\n");
8681
8682         return dc_link_set_psr_allow_active(stream->link, false, true);
8683 }