drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

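/*
 * Reflect the detected dongle type in the DRM "subconnector" property of a
 * DisplayPort connector. DRM_MODE_SUBCONNECTOR_Unknown is reported while no
 * sink is attached.
 */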
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: Counter of vertical blanks for the given CRTC, or 0 on error
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

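/*
 * Look up the amdgpu_crtc driving a given OTG (output timing generator)
 * instance. Interrupt sources identify CRTCs by OTG instance, so the IRQ
 * handlers below use this to map an interrupt back to its CRTC.
 */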
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

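/*
 * Helpers to query whether variable refresh rate is active, either from the
 * IRQ-safe per-CRTC interrupt parameters or from a DM CRTC state.
 */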
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, used to derive the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one is incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

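/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: interrupt parameters, used to derive the CRTC instance
 *
 * Tracks the measured refresh rate and, when VRR is active, performs core
 * vblank handling after the end of front-porch, including BTR processing
 * for pre-DCE12 ASICs.
 */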
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * only when done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it can be read by the AUX
 * command issuing thread, and signals the event to wake up that thread.
 */
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
        if (adev->dm.dmub_notify)
                memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
        if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
                complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index from the
 * link index and calls the helper to do the processing.
 */
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
        struct amdgpu_dm_connector *aconnector;
        struct amdgpu_dm_connector *hpd_aconnector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
        uint8_t link_index = 0;
        struct drm_device *dev = adev->dm.ddev;

        if (adev == NULL)
                return;

        if (notify == NULL) {
                DRM_ERROR("DMUB HPD callback notification was NULL");
                return;
        }

        if (notify->link_index > adev->dm.dc->link_count) {
                DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
                return;
        }

        link_index = notify->link_index;
        link = adev->dm.dc->links[link_index];

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
                        hpd_aconnector = aconnector;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (hpd_aconnector) {
                if (notify->type == DMUB_NOTIFICATION_HPD)
                        handle_hpd_irq_helper(hpd_aconnector);
                else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                        handle_hpd_rx_irq(hpd_aconnector);
        }
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether the callback processing should be
 * offloaded to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false if the callback is NULL or
 * the notification type is out of range
 */
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
                                   dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
        if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
                adev->dm.dmub_callback[type] = callback;
                adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
        } else
                return false;

        return true;
}

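/*
 * Worker for HPD notifications that were offloaded from the DMUB outbox
 * interrupt handler; dispatches the saved notification to the registered
 * callback for its type, then frees the work item.
 */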
static void dm_handle_hpd_work(struct work_struct *work)
{
        struct dmub_hpd_work *dmub_hpd_wrk;

        dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

        if (!dmub_hpd_wrk->dmub_notify) {
                DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                return;
        }

        if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
                dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
                                dmub_hpd_wrk->dmub_notify);
        }

        kfree(dmub_hpd_wrk->dmub_notify);
        kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching DMUB notifications and
 * draining the DMUB trace log.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;

        if (dc_enable_dmub_notifications(adev->dm.dc) &&
            irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
                                DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type] == true) {
                                dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                if (!dmub_hpd_wrk) {
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
                                dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
                                        if (plink) {
                                                plink->hpd_status =
                                                        notify.hpd_status == DP_HPD_PLUG;
                                        }
                                }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                         entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                         entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else
                        break;

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

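/*
 * Register the audio component and initialize the audio pins from the DC
 * resource pool so that the HDA driver can query ELD information.
 */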
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

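/**
 * dm_dmub_hw_init() - Initialize DMUB hardware
 * @adev: amdgpu_device pointer
 *
 * Copies the DMUB firmware and VBIOS into the reserved framebuffer windows,
 * clears the mailbox, trace buffer and firmware state regions, programs the
 * hardware parameters and waits for the firmware auto-load to complete.
 *
 * Return: 0 on success (including ASICs without DMUB support), negative
 * errno on failure.
 */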
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;
        struct dc *dc = adev->dm.dc;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        switch (adev->asic_type) {
        case CHIP_YELLOW_CARP:
                if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
                        hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
                        hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
#endif
                }
                break;
        default:
                break;
        }

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
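/*
 * Translate the GMC framebuffer/AGP apertures and the GART page table
 * location into the physical address space configuration consumed by DC,
 * keeping the register-style granularities DC expects (18-bit shifted
 * aperture addresses, 24-bit shifted AGP addresses).
 */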
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        memset(pa_config, 0, sizeof(*pa_config));

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increases the system aperture high address
                 * (add 1) to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
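/**
 * vblank_control_worker() - Deferred vblank enable/disable work
 * @work: work item
 *
 * Tracks the number of CRTCs with vblank interrupts enabled, allows DC idle
 * optimizations (MALL) only when that count drops to zero, and enables or
 * disables PSR on the stream according to the OS vblank requirements.
 */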
static void vblank_control_worker(struct work_struct *work)
{
        struct vblank_control_work *vblank_work =
                container_of(work, struct vblank_control_work, work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        /* Control PSR based on vblank requirements from OS */
        if (vblank_work->stream && vblank_work->stream->link) {
                if (vblank_work->enable) {
                        if (vblank_work->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_disable(vblank_work->stream);
                } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
                           !vblank_work->stream->link->psr_settings.psr_allow_active &&
                           vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
                        amdgpu_dm_psr_enable(vblank_work->stream);
                }
        }

        mutex_unlock(&dm->dc_lock);

        dc_stream_release(vblank_work->stream);

        kfree(vblank_work);
}
#endif

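/*
 * Deferred HPD RX interrupt handling: re-detect the sink and, while holding
 * dc_lock, service automated test requests or recover from link loss for
 * non-eDP links.
 */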
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
        struct hpd_rx_irq_offload_work *offload_work;
        struct amdgpu_dm_connector *aconnector;
        struct dc_link *dc_link;
        struct amdgpu_device *adev;
        enum dc_connection_type new_connection_type = dc_connection_none;
        unsigned long flags;

        offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
        aconnector = offload_work->offload_wq->aconnector;

        if (!aconnector) {
                DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
                goto skip;
        }

        adev = drm_to_adev(aconnector->base.dev);
        dc_link = aconnector->dc_link;

        mutex_lock(&aconnector->hpd_lock);
        if (!dc_link_detect_sink(dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");
        mutex_unlock(&aconnector->hpd_lock);

        if (new_connection_type == dc_connection_none)
                goto skip;

        if (amdgpu_in_reset(adev))
                goto skip;

        mutex_lock(&adev->dm.dc_lock);
        if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
                dc_link_dp_handle_automated_test(dc_link);
        else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
                        hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
                        dc_link_dp_allow_hpd_rx_irq(dc_link)) {
                dc_link_dp_handle_link_loss(dc_link);
                spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
                offload_work->offload_wq->is_handling_link_loss = false;
                spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
        }
        mutex_unlock(&adev->dm.dc_lock);

skip:
        kfree(offload_work);
}

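/*
 * Allocate one single-threaded offload workqueue per link so that HPD RX
 * interrupt handling can be deferred out of interrupt context.
 */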
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
        int max_caps = dc->caps.max_links;
        int i = 0;
        struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

        hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

        if (!hpd_rx_offload_wq)
                return NULL;

        for (i = 0; i < max_caps; i++) {
                hpd_rx_offload_wq[i].wq =
                                    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

                if (hpd_rx_offload_wq[i].wq == NULL) {
                        DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
                        return NULL;
                }

                spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
        }

        return hpd_rx_offload_wq;
}

struct amdgpu_stutter_quirk {
        u16 chip_vendor;
        u16 chip_device;
        u16 subsys_vendor;
        u16 subsys_device;
        u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
        /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
        { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
        { 0, 0, 0, 0, 0 },
};

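/*
 * Check whether the PCI device matches an entry in the stutter quirk list
 * and therefore needs memory stutter mode disabled.
 */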
static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
        const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

        while (p && p->chip_device != 0) {
                if (pdev->vendor == p->chip_vendor &&
                    pdev->device == p->chip_device &&
                    pdev->subsystem_vendor == p->subsys_vendor &&
                    pdev->subsystem_device == p->subsys_device &&
                    pdev->revision == p->revision) {
                        return true;
                }
                ++p;
        }
        return false;
}

1357 static int amdgpu_dm_init(struct amdgpu_device *adev)
1358 {
1359         struct dc_init_data init_data;
1360 #ifdef CONFIG_DRM_AMD_DC_HDCP
1361         struct dc_callback_init init_params;
1362 #endif
1363         int r;
1364
1365         adev->dm.ddev = adev_to_drm(adev);
1366         adev->dm.adev = adev;
1367
1368         /* Zero all the fields */
1369         memset(&init_data, 0, sizeof(init_data));
1370 #ifdef CONFIG_DRM_AMD_DC_HDCP
1371         memset(&init_params, 0, sizeof(init_params));
1372 #endif
1373
1374         mutex_init(&adev->dm.dc_lock);
1375         mutex_init(&adev->dm.audio_lock);
1376 #if defined(CONFIG_DRM_AMD_DC_DCN)
1377         spin_lock_init(&adev->dm.vblank_lock);
1378 #endif
1379
1380         if (amdgpu_dm_irq_init(adev)) {
1381                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1382                 goto error;
1383         }
1384
1385         init_data.asic_id.chip_family = adev->family;
1386
1387         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1388         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1389         init_data.asic_id.chip_id = adev->pdev->device;
1390
1391         init_data.asic_id.vram_width = adev->gmc.vram_width;
1392         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1393         init_data.asic_id.atombios_base_address =
1394                 adev->mode_info.atom_context->bios;
1395
1396         init_data.driver = adev;
1397
1398         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1399
1400         if (!adev->dm.cgs_device) {
1401                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1402                 goto error;
1403         }
1404
1405         init_data.cgs_device = adev->dm.cgs_device;
1406
1407         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1408
1409         switch (adev->asic_type) {
1410         case CHIP_CARRIZO:
1411         case CHIP_STONEY:
1412                 init_data.flags.gpu_vm_support = true;
1413                 break;
1414         default:
1415                 switch (adev->ip_versions[DCE_HWIP][0]) {
1416                 case IP_VERSION(2, 1, 0):
1417                         init_data.flags.gpu_vm_support = true;
1418                         switch (adev->dm.dmcub_fw_version) {
1419                         case 0: /* development */
1420                         case 0x1: /* linux-firmware.git hash 6d9f399 */
1421                         case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1422                                 init_data.flags.disable_dmcu = false;
1423                                 break;
1424                         default:
1425                                 init_data.flags.disable_dmcu = true;
1426                         }
1427                         break;
1428                 case IP_VERSION(1, 0, 0):
1429                 case IP_VERSION(1, 0, 1):
1430                 case IP_VERSION(3, 0, 1):
1431                 case IP_VERSION(3, 1, 2):
1432                 case IP_VERSION(3, 1, 3):
1433                         init_data.flags.gpu_vm_support = true;
1434                         break;
1435                 case IP_VERSION(2, 0, 3):
1436                         init_data.flags.disable_dmcu = true;
1437                         break;
1438                 default:
1439                         break;
1440                 }
1441                 break;
1442         }
1443
1444         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1445                 init_data.flags.fbc_support = true;
1446
1447         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1448                 init_data.flags.multi_mon_pp_mclk_switch = true;
1449
1450         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1451                 init_data.flags.disable_fractional_pwm = true;
1452
1453         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1454                 init_data.flags.edp_no_power_sequencing = true;
1455
1456         init_data.flags.power_down_display_on_boot = true;
1457
1458         if (check_seamless_boot_capability(adev)) {
1459                 init_data.flags.power_down_display_on_boot = false;
1460                 init_data.flags.allow_seamless_boot_optimization = true;
1461                 DRM_INFO("Seamless boot condition check passed\n");
1462         }
1463
1464         INIT_LIST_HEAD(&adev->dm.da_list);
1465         /* Display Core create. */
1466         adev->dm.dc = dc_create(&init_data);
1467
1468         if (adev->dm.dc) {
1469                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1470         } else {
1471                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1472                 goto error;
1473         }
1474
1475         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1476                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1477                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1478         }
1479
1480         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1481                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1482         if (dm_should_disable_stutter(adev->pdev))
1483                 adev->dm.dc->debug.disable_stutter = true;
1484
1485         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1486                 adev->dm.dc->debug.disable_stutter = true;
1487
1488         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1489                 adev->dm.dc->debug.disable_dsc = true;
1490                 adev->dm.dc->debug.disable_dsc_edp = true;
1491         }
1492
1493         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1494                 adev->dm.dc->debug.disable_clock_gate = true;
1495
1496         r = dm_dmub_hw_init(adev);
1497         if (r) {
1498                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1499                 goto error;
1500         }
1501
1502         dc_hardware_init(adev->dm.dc);
1503
1504         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1505         if (!adev->dm.hpd_rx_offload_wq) {
1506                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1507                 goto error;
1508         }
1509
1510 #if defined(CONFIG_DRM_AMD_DC_DCN)
1511         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1512                 struct dc_phy_addr_space_config pa_config;
1513
1514                 mmhub_read_system_context(adev, &pa_config);
1515
1516                 // Call the DC init_memory func
1517                 dc_setup_system_context(adev->dm.dc, &pa_config);
1518         }
1519 #endif
1520
1521         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1522         if (!adev->dm.freesync_module)
1523                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1525         else
1526                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1527                                 adev->dm.freesync_module);
1528
1529         amdgpu_dm_init_color_mod();
1530
1531 #if defined(CONFIG_DRM_AMD_DC_DCN)
1532         if (adev->dm.dc->caps.max_links > 0) {
1533                 adev->dm.vblank_control_workqueue =
1534                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1535                 if (!adev->dm.vblank_control_workqueue)
1536                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1537         }
1538 #endif
1539
1540 #ifdef CONFIG_DRM_AMD_DC_HDCP
1541         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1542                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1543
1544                 if (!adev->dm.hdcp_workqueue)
1545                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1546                 else
1547                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1548
1549                 dc_init_callbacks(adev->dm.dc, &init_params);
1550         }
1551 #endif
1552 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1553         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1554 #endif
1555         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1556                 init_completion(&adev->dm.dmub_aux_transfer_done);
1557                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1558                 if (!adev->dm.dmub_notify) {
1559                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1560                         goto error;
1561                 }
1562
1563                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1564                 if (!adev->dm.delayed_hpd_wq) {
1565                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1566                         goto error;
1567                 }
1568
1569                 amdgpu_dm_outbox_init(adev);
1570 #if defined(CONFIG_DRM_AMD_DC_DCN)
1571                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1572                         dmub_aux_setconfig_callback, false)) {
1573                         DRM_ERROR("amdgpu: failed to register DMUB AUX callback\n");
1574                         goto error;
1575                 }
1576                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1577                         DRM_ERROR("amdgpu: failed to register DMUB HPD callback\n");
1578                         goto error;
1579                 }
1580                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1581                         DRM_ERROR("amdgpu: failed to register DMUB HPD IRQ callback\n");
1582                         goto error;
1583                 }
1584 #endif /* CONFIG_DRM_AMD_DC_DCN */
1585         }
1586
1587         if (amdgpu_dm_initialize_drm_device(adev)) {
1588                 DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1590                 goto error;
1591         }
1592
1593         /* create fake encoders for MST */
1594         dm_dp_create_fake_mst_encoders(adev);
1595
1596         /* TODO: Add_display_info? */
1597
1598         /* TODO use dynamic cursor width */
1599         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1600         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1601
1602         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1603                 DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1605                 goto error;
1606         }
1607
1609         DRM_DEBUG_DRIVER("KMS initialized.\n");
1610
1611         return 0;
1612 error:
1613         amdgpu_dm_fini(adev);
1614
1615         return -EINVAL;
1616 }
1617
1618 static int amdgpu_dm_early_fini(void *handle)
1619 {
1620         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1621
1622         amdgpu_dm_audio_fini(adev);
1623
1624         return 0;
1625 }
1626
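/**
 * amdgpu_dm_fini() - Tear down everything set up by amdgpu_dm_init()
 * @adev: base driver device.
 *
 * Safe to call on a partially initialized DM: resources are checked for
 * existence before being destroyed, in roughly the reverse order of
 * creation.
 */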
1627 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1628 {
1629         int i;
1630
1631 #if defined(CONFIG_DRM_AMD_DC_DCN)
1632         if (adev->dm.vblank_control_workqueue) {
1633                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1634                 adev->dm.vblank_control_workqueue = NULL;
1635         }
1636 #endif
1637
1638         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1639                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1640         }
1641
1642         amdgpu_dm_destroy_drm_device(&adev->dm);
1643
1644 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1645         if (adev->dm.crc_rd_wrk) {
1646                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1647                 kfree(adev->dm.crc_rd_wrk);
1648                 adev->dm.crc_rd_wrk = NULL;
1649         }
1650 #endif
1651 #ifdef CONFIG_DRM_AMD_DC_HDCP
1652         if (adev->dm.hdcp_workqueue) {
1653                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1654                 adev->dm.hdcp_workqueue = NULL;
1655         }
1656
1657         if (adev->dm.dc)
1658                 dc_deinit_callbacks(adev->dm.dc);
1659 #endif
1660
1661         if (adev->dm.dc)
                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1662
         /* kfree(NULL) is safe; dmub_notify only exists when notifications were enabled. */
1664         kfree(adev->dm.dmub_notify);
1665         adev->dm.dmub_notify = NULL;

         if (adev->dm.delayed_hpd_wq) {
1666                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1667                 adev->dm.delayed_hpd_wq = NULL;
1668         }
1669
1670         if (adev->dm.dmub_bo)
1671                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1672                                       &adev->dm.dmub_bo_gpu_addr,
1673                                       &adev->dm.dmub_bo_cpu_addr);
1674
1675         if (adev->dm.hpd_rx_offload_wq) {
1676                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1677                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1678                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1679                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1680                         }
1681                 }
1682
1683                 kfree(adev->dm.hpd_rx_offload_wq);
1684                 adev->dm.hpd_rx_offload_wq = NULL;
1685         }
1686
1687         /* DC Destroy TODO: Replace destroy DAL */
1688         if (adev->dm.dc)
1689                 dc_destroy(&adev->dm.dc);
1690         /*
1691          * TODO: pageflip, vblank interrupt
1692          *
1693          * amdgpu_dm_irq_fini(adev);
1694          */
1695
1696         if (adev->dm.cgs_device) {
1697                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1698                 adev->dm.cgs_device = NULL;
1699         }
1700         if (adev->dm.freesync_module) {
1701                 mod_freesync_destroy(adev->dm.freesync_module);
1702                 adev->dm.freesync_module = NULL;
1703         }
1704
1705         mutex_destroy(&adev->dm.audio_lock);
1706         mutex_destroy(&adev->dm.dc_lock);
1709 }
1710
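/**
 * load_dmcu_fw() - Request and register DMCU firmware for PSP loading
 * @adev: base driver device.
 *
 * Only a few ASICs carry a separate DMCU image; everything else returns 0
 * without loading anything. A missing image is also not an error, since
 * DMCU firmware is optional.
 *
 * Return: 0 on success or when no firmware is required, negative errno on
 * request or validation failure.
 */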
1711 static int load_dmcu_fw(struct amdgpu_device *adev)
1712 {
1713         const char *fw_name_dmcu = NULL;
1714         int r;
1715         const struct dmcu_firmware_header_v1_0 *hdr;
1716
1717         switch (adev->asic_type) {
1718 #if defined(CONFIG_DRM_AMD_DC_SI)
1719         case CHIP_TAHITI:
1720         case CHIP_PITCAIRN:
1721         case CHIP_VERDE:
1722         case CHIP_OLAND:
1723 #endif
1724         case CHIP_BONAIRE:
1725         case CHIP_HAWAII:
1726         case CHIP_KAVERI:
1727         case CHIP_KABINI:
1728         case CHIP_MULLINS:
1729         case CHIP_TONGA:
1730         case CHIP_FIJI:
1731         case CHIP_CARRIZO:
1732         case CHIP_STONEY:
1733         case CHIP_POLARIS11:
1734         case CHIP_POLARIS10:
1735         case CHIP_POLARIS12:
1736         case CHIP_VEGAM:
1737         case CHIP_VEGA10:
1738         case CHIP_VEGA12:
1739         case CHIP_VEGA20:
1740                 return 0;
1741         case CHIP_NAVI12:
1742                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1743                 break;
1744         case CHIP_RAVEN:
1745                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1746                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1747                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1749                 else
1750                         return 0;
1751                 break;
1752         default:
1753                 switch (adev->ip_versions[DCE_HWIP][0]) {
1754                 case IP_VERSION(2, 0, 2):
1755                 case IP_VERSION(2, 0, 3):
1756                 case IP_VERSION(2, 0, 0):
1757                 case IP_VERSION(2, 1, 0):
1758                 case IP_VERSION(3, 0, 0):
1759                 case IP_VERSION(3, 0, 2):
1760                 case IP_VERSION(3, 0, 3):
1761                 case IP_VERSION(3, 0, 1):
1762                 case IP_VERSION(3, 1, 2):
1763                 case IP_VERSION(3, 1, 3):
1764                         return 0;
1765                 default:
1766                         break;
1767                 }
1768                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1769                 return -EINVAL;
1770         }
1771
1772         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1773                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1774                 return 0;
1775         }
1776
1777         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1778         if (r == -ENOENT) {
1779                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1780                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1781                 adev->dm.fw_dmcu = NULL;
1782                 return 0;
1783         }
1784         if (r) {
1785                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1786                         fw_name_dmcu);
1787                 return r;
1788         }
1789
1790         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1791         if (r) {
1792                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1793                         fw_name_dmcu);
1794                 release_firmware(adev->dm.fw_dmcu);
1795                 adev->dm.fw_dmcu = NULL;
1796                 return r;
1797         }
1798
1799         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1800         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1801         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1802         adev->firmware.fw_size +=
1803                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1804
1805         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1806         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1807         adev->firmware.fw_size +=
1808                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1809
1810         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1811
1812         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1813
1814         return 0;
1815 }
1816
1817 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1818 {
1819         struct amdgpu_device *adev = ctx;
1820
1821         return dm_read_reg(adev->dm.dc->ctx, address);
1822 }
1823
1824 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1825                                      uint32_t value)
1826 {
1827         struct amdgpu_device *adev = ctx;
1828
1829         dm_write_reg(adev->dm.dc->ctx, address, value);
1830 }
1831
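/**
 * dm_dmub_sw_init() - Load DMUB firmware and create the DMUB service
 * @adev: base driver device.
 *
 * Picks the DMUB firmware matching the DCN IP version, registers it for PSP
 * loading where applicable, creates the dmub_srv instance and carves its
 * region layout out of a dedicated VRAM buffer.
 *
 * Return: 0 on success (including ASICs without DMUB), negative errno on
 * allocation or DMUB service errors.
 */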
1832 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1833 {
1834         struct dmub_srv_create_params create_params;
1835         struct dmub_srv_region_params region_params;
1836         struct dmub_srv_region_info region_info;
1837         struct dmub_srv_fb_params fb_params;
1838         struct dmub_srv_fb_info *fb_info;
1839         struct dmub_srv *dmub_srv;
1840         const struct dmcub_firmware_header_v1_0 *hdr;
1841         const char *fw_name_dmub;
1842         enum dmub_asic dmub_asic;
1843         enum dmub_status status;
1844         int r;
1845
1846         switch (adev->ip_versions[DCE_HWIP][0]) {
1847         case IP_VERSION(2, 1, 0):
1848                 dmub_asic = DMUB_ASIC_DCN21;
1849                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1850                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1851                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1852                 break;
1853         case IP_VERSION(3, 0, 0):
1854                 dmub_asic = DMUB_ASIC_DCN30;
1855                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
1856                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1857                 else
1858                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1861                 break;
1862         case IP_VERSION(3, 0, 1):
1863                 dmub_asic = DMUB_ASIC_DCN301;
1864                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1865                 break;
1866         case IP_VERSION(3, 0, 2):
1867                 dmub_asic = DMUB_ASIC_DCN302;
1868                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1869                 break;
1870         case IP_VERSION(3, 0, 3):
1871                 dmub_asic = DMUB_ASIC_DCN303;
1872                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1873                 break;
1874         case IP_VERSION(3, 1, 2):
1875         case IP_VERSION(3, 1, 3):
1876                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1877                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1878                 break;
1880         default:
1881                 /* ASIC doesn't support DMUB. */
1882                 return 0;
1883         }
1884
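        /*
         * Note: firmware request/validation failures below are treated as
         * non-fatal; the driver logs an error but continues without DMUB
         * support.
         */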
1885         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1886         if (r) {
1887                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1888                 return 0;
1889         }
1890
1891         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1892         if (r) {
1893                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1894                 return 0;
1895         }
1896
1897         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1898         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1899
1900         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1901                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1902                         AMDGPU_UCODE_ID_DMCUB;
1903                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1904                         adev->dm.dmub_fw;
1905                 adev->firmware.fw_size +=
1906                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1907
1908                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1909                          adev->dm.dmcub_fw_version);
1910         }
1911
1913         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1914         dmub_srv = adev->dm.dmub_srv;
1915
1916         if (!dmub_srv) {
1917                 DRM_ERROR("Failed to allocate DMUB service!\n");
1918                 return -ENOMEM;
1919         }
1920
1921         memset(&create_params, 0, sizeof(create_params));
1922         create_params.user_ctx = adev;
1923         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1924         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1925         create_params.asic = dmub_asic;
1926
1927         /* Create the DMUB service. */
1928         status = dmub_srv_create(dmub_srv, &create_params);
1929         if (status != DMUB_STATUS_OK) {
1930                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1931                 return -EINVAL;
1932         }
1933
1934         /* Calculate the size of all the regions for the DMUB service. */
1935         memset(&region_params, 0, sizeof(region_params));
1936
1937         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1938                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1939         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1940         region_params.vbios_size = adev->bios_size;
1941         region_params.fw_bss_data = region_params.bss_data_size ?
1942                 adev->dm.dmub_fw->data +
1943                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1944                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1945         region_params.fw_inst_const =
1946                 adev->dm.dmub_fw->data +
1947                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1948                 PSP_HEADER_BYTES;
1949
1950         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1951                                            &region_info);
1952
1953         if (status != DMUB_STATUS_OK) {
1954                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1955                 return -EINVAL;
1956         }
1957
1958         /*
1959          * Allocate a framebuffer based on the total size of all the regions.
1960          * TODO: Move this into GART.
1961          */
1962         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1963                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1964                                     &adev->dm.dmub_bo_gpu_addr,
1965                                     &adev->dm.dmub_bo_cpu_addr);
1966         if (r)
1967                 return r;
1968
1969         /* Rebase the regions on the framebuffer address. */
1970         memset(&fb_params, 0, sizeof(fb_params));
1971         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1972         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1973         fb_params.region_info = &region_info;
1974
1975         adev->dm.dmub_fb_info =
1976                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1977         fb_info = adev->dm.dmub_fb_info;
1978
1979         if (!fb_info) {
1980                 DRM_ERROR(
1981                         "Failed to allocate framebuffer info for DMUB service!\n");
1982                 return -ENOMEM;
1983         }
1984
1985         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1986         if (status != DMUB_STATUS_OK) {
1987                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1988                 return -EINVAL;
1989         }
1990
1991         return 0;
1992 }
1993
1994 static int dm_sw_init(void *handle)
1995 {
1996         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1997         int r;
1998
1999         r = dm_dmub_sw_init(adev);
2000         if (r)
2001                 return r;
2002
2003         return load_dmcu_fw(adev);
2004 }
2005
2006 static int dm_sw_fini(void *handle)
2007 {
2008         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2009
2010         kfree(adev->dm.dmub_fb_info);
2011         adev->dm.dmub_fb_info = NULL;
2012
2013         if (adev->dm.dmub_srv) {
2014                 dmub_srv_destroy(adev->dm.dmub_srv);
2015                 adev->dm.dmub_srv = NULL;
2016         }
2017
2018         release_firmware(adev->dm.dmub_fw);
2019         adev->dm.dmub_fw = NULL;
2020
2021         release_firmware(adev->dm.fw_dmcu);
2022         adev->dm.fw_dmcu = NULL;
2023
2024         return 0;
2025 }
2026
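/**
 * detect_mst_link_for_all_connectors() - Start topology management on MST links
 * @dev: DRM device.
 *
 * Walks all connectors and enables the MST topology manager on every MST
 * branch link; a link that fails is demoted to dc_connection_single.
 *
 * Return: 0 on success, or the first error from
 * drm_dp_mst_topology_mgr_set_mst().
 */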
2027 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2028 {
2029         struct amdgpu_dm_connector *aconnector;
2030         struct drm_connector *connector;
2031         struct drm_connector_list_iter iter;
2032         int ret = 0;
2033
2034         drm_connector_list_iter_begin(dev, &iter);
2035         drm_for_each_connector_iter(connector, &iter) {
2036                 aconnector = to_amdgpu_dm_connector(connector);
2037                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2038                     aconnector->mst_mgr.aux) {
2039                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2040                                          aconnector,
2041                                          aconnector->base.base.id);
2042
2043                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2044                         if (ret < 0) {
2045                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2046                                 aconnector->dc_link->type =
2047                                         dc_connection_single;
2048                                 break;
2049                         }
2050                 }
2051         }
2052         drm_connector_list_iter_end(&iter);
2053
2054         return ret;
2055 }
2056
2057 static int dm_late_init(void *handle)
2058 {
2059         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2060
2061         struct dmcu_iram_parameters params;
2062         unsigned int linear_lut[16];
2063         int i;
2064         struct dmcu *dmcu = NULL;
2065
2066         dmcu = adev->dm.dc->res_pool->dmcu;
2067
2068         for (i = 0; i < 16; i++)
2069                 linear_lut[i] = 0xFFFF * i / 15;
2070
2071         params.set = 0;
2072         params.backlight_ramping_override = false;
2073         params.backlight_ramping_start = 0xCCCC;
2074         params.backlight_ramping_reduction = 0xCCCCCCCC;
2075         params.backlight_lut_array_size = 16;
2076         params.backlight_lut_array = linear_lut;
2077
2078         /* Min backlight level after ABM reduction; don't allow below 1%:
2079          * 0xFFFF * 0.01 = 0x28F
2080          */
2081         params.min_abm_backlight = 0x28F;
2082         /* When ABM is implemented in DMCUB (ABM 2.4 and up), the dmcu
2083          * object will be NULL.
2084          */
2086         if (dmcu) {
2087                 if (!dmcu_load_iram(dmcu, params))
2088                         return -EINVAL;
2089         } else if (adev->dm.dc->ctx->dmub_srv) {
2090                 struct dc_link *edp_links[MAX_NUM_EDP];
2091                 int edp_num;
2092
2093                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2094                 for (i = 0; i < edp_num; i++) {
2095                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2096                                 return -EINVAL;
2097                 }
2098         }
2099
2100         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2101 }
2102
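/**
 * s3_handle_mst() - Suspend or resume the MST topology managers
 * @dev: DRM device.
 * @suspend: true to suspend all managers, false to resume them.
 *
 * On resume, a manager that can no longer be revived has MST disabled and a
 * hotplug event is fired so userspace re-probes the affected connectors.
 */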
2103 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2104 {
2105         struct amdgpu_dm_connector *aconnector;
2106         struct drm_connector *connector;
2107         struct drm_connector_list_iter iter;
2108         struct drm_dp_mst_topology_mgr *mgr;
2109         int ret;
2110         bool need_hotplug = false;
2111
2112         drm_connector_list_iter_begin(dev, &iter);
2113         drm_for_each_connector_iter(connector, &iter) {
2114                 aconnector = to_amdgpu_dm_connector(connector);
2115                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2116                     aconnector->mst_port)
2117                         continue;
2118
2119                 mgr = &aconnector->mst_mgr;
2120
2121                 if (suspend) {
2122                         drm_dp_mst_topology_mgr_suspend(mgr);
2123                 } else {
2124                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2125                         if (ret < 0) {
2126                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2127                                 need_hotplug = true;
2128                         }
2129                 }
2130         }
2131         drm_connector_list_iter_end(&iter);
2132
2133         if (need_hotplug)
2134                 drm_kms_helper_hotplug_event(dev);
2135 }
2136
2137 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2138 {
2139         struct smu_context *smu = &adev->smu;
2140         int ret = 0;
2141
2142         if (!is_support_sw_smu(adev))
2143                 return 0;
2144
2145         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2146          * depends on the Windows driver's dc implementation.
2147          * For Navi1x, the clock settings of the dcn watermarks are fixed; the
2148          * settings should be passed to smu during boot up and on resume from s3.
2149          * Boot up: dc calculates the dcn watermark clock settings within
2150          * dc_create (dcn20_resource_construct), then calls the pplib functions
2151          * below to pass the settings to smu:
2152          * smu_set_watermarks_for_clock_ranges
2153          * smu_set_watermarks_table
2154          * navi10_set_watermarks_table
2155          * smu_write_watermarks_table
2156          *
2157          * For Renoir, the clock settings of the dcn watermarks are also fixed
2158          * values. dc implements a different flow for the Windows driver:
2159          * dc_hardware_init / dc_set_power_state
2160          * dcn10_init_hw
2161          * notify_wm_ranges
2162          * set_wm_ranges
2163          * -- Linux
2164          * smu_set_watermarks_for_clock_ranges
2165          * renoir_set_watermarks_table
2166          * smu_write_watermarks_table
2167          *
2168          * For Linux,
2169          * dc_hardware_init -> amdgpu_dm_init
2170          * dc_set_power_state --> dm_resume
2171          *
2172          * Therefore, this function applies to Navi10/12/14 but not to Renoir.
2173          */
2175         switch (adev->ip_versions[DCE_HWIP][0]) {
2176         case IP_VERSION(2, 0, 2):
2177         case IP_VERSION(2, 0, 0):
2178                 break;
2179         default:
2180                 return 0;
2181         }
2182
2183         ret = smu_write_watermarks_table(smu);
2184         if (ret) {
2185                 DRM_ERROR("Failed to update WMTABLE!\n");
2186                 return ret;
2187         }
2188
2189         return 0;
2190 }
2191
2192 /**
2193  * dm_hw_init() - Initialize DC device
2194  * @handle: The base driver device containing the amdgpu_dm device.
2195  *
2196  * Initialize the &struct amdgpu_display_manager device. This involves calling
2197  * the initializers of each DM component, then populating the struct with them.
2198  *
2199  * Although the function implies hardware initialization, both hardware and
2200  * software are initialized here. Splitting them out to their relevant init
2201  * hooks is a future TODO item.
2202  *
2203  * Some notable things that are initialized here:
2204  *
2205  * - Display Core, both software and hardware
2206  * - DC modules that we need (freesync and color management)
2207  * - DRM software states
2208  * - Interrupt sources and handlers
2209  * - Vblank support
2210  * - Debug FS entries, if enabled
2211  */
2212 static int dm_hw_init(void *handle)
2213 {
2214         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
         int r;

2215         /* Create DAL display manager */
2216         r = amdgpu_dm_init(adev);
         if (r)
                 return r;

2217         amdgpu_dm_hpd_init(adev);
2218
2219         return 0;
2220 }
2221
2222 /**
2223  * dm_hw_fini() - Teardown DC device
2224  * @handle: The base driver device containing the amdgpu_dm device.
2225  *
2226  * Teardown components within &struct amdgpu_display_manager that require
2227  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2228  * were loaded. Also flush IRQ workqueues and disable them.
2229  */
2230 static int dm_hw_fini(void *handle)
2231 {
2232         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2233
2234         amdgpu_dm_hpd_fini(adev);
2235
2236         amdgpu_dm_irq_fini(adev);
2237         amdgpu_dm_fini(adev);
2238         return 0;
2239 }
2240
2241
2242 static int dm_enable_vblank(struct drm_crtc *crtc);
2243 static void dm_disable_vblank(struct drm_crtc *crtc);
2244
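/**
 * dm_gpureset_toggle_interrupts() - Toggle pflip/vblank IRQs around GPU reset
 * @adev: base driver device.
 * @state: DC state whose active streams select the CRTCs to touch.
 * @enable: true to (re)enable interrupts, false to disable them.
 *
 * Only CRTCs that still have planes attached in @state are affected.
 */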
2245 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2246                                  struct dc_state *state, bool enable)
2247 {
2248         enum dc_irq_source irq_source;
2249         struct amdgpu_crtc *acrtc;
2250         int rc = -EBUSY;
2251         int i = 0;
2252
2253         for (i = 0; i < state->stream_count; i++) {
2254                 acrtc = get_crtc_by_otg_inst(
2255                                 adev, state->stream_status[i].primary_otg_inst);
2256
2257                 if (acrtc && state->stream_status[i].plane_count != 0) {
2258                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2259                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2260                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2261                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2262                         if (rc)
2263                                 DRM_WARN("Failed to %s pflip interrupts\n",
2264                                          enable ? "enable" : "disable");
2265
2266                         if (enable) {
2267                                 rc = dm_enable_vblank(&acrtc->base);
2268                                 if (rc)
2269                                         DRM_WARN("Failed to enable vblank interrupts\n");
2270                         } else {
2271                                 dm_disable_vblank(&acrtc->base);
2272                         }
2274                 }
2275         }
2277 }
2278
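/**
 * amdgpu_dm_commit_zero_streams() - Commit a DC state with no active streams
 * @dc: Display Core instance.
 *
 * Copies the current state, strips every plane and stream from the copy and
 * commits it, effectively blanking all displays. Used on the suspend side of
 * a GPU reset.
 *
 * Return: DC_OK on success or a dc_status error code.
 */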
2279 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2280 {
2281         struct dc_state *context = NULL;
2282         enum dc_status res = DC_ERROR_UNEXPECTED;
2283         int i;
2284         struct dc_stream_state *del_streams[MAX_PIPES];
2285         int del_streams_count = 0;
2286
2287         memset(del_streams, 0, sizeof(del_streams));
2288
2289         context = dc_create_state(dc);
2290         if (context == NULL)
2291                 goto context_alloc_fail;
2292
2293         dc_resource_state_copy_construct_current(dc, context);
2294
2295         /* First remove from context all streams */
2296         for (i = 0; i < context->stream_count; i++) {
2297                 struct dc_stream_state *stream = context->streams[i];
2298
2299                 del_streams[del_streams_count++] = stream;
2300         }
2301
2302         /* Remove all planes for removed streams and then remove the streams */
2303         for (i = 0; i < del_streams_count; i++) {
2304                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2305                         res = DC_FAIL_DETACH_SURFACES;
2306                         goto fail;
2307                 }
2308
2309                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2310                 if (res != DC_OK)
2311                         goto fail;
2312         }
2313
2314         res = dc_commit_state(dc, context);
2315
2316 fail:
2317         dc_release_state(context);
2318
2319 context_alloc_fail:
2320         return res;
2321 }
2322
2323 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2324 {
2325         int i;
2326
2327         if (dm->hpd_rx_offload_wq) {
2328                 for (i = 0; i < dm->dc->caps.max_links; i++)
2329                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2330         }
2331 }
2332
2333 static int dm_suspend(void *handle)
2334 {
2335         struct amdgpu_device *adev = handle;
2336         struct amdgpu_display_manager *dm = &adev->dm;
2337         int ret = 0;
2338
2339         if (amdgpu_in_reset(adev)) {
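                /*
                 * Hold dc_lock across the entire GPU reset; the matching
                 * mutex_unlock() is in the amdgpu_in_reset() path of
                 * dm_resume().
                 */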
2340                 mutex_lock(&dm->dc_lock);
2341
2342 #if defined(CONFIG_DRM_AMD_DC_DCN)
2343                 dc_allow_idle_optimizations(adev->dm.dc, false);
2344 #endif
2345
2346                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2347
2348                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2349
2350                 amdgpu_dm_commit_zero_streams(dm->dc);
2351
2352                 amdgpu_dm_irq_suspend(adev);
2353
2354                 hpd_rx_irq_work_suspend(dm);
2355
2356                 return ret;
2357         }
2358
2359         WARN_ON(adev->dm.cached_state);
2360         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2361
2362         s3_handle_mst(adev_to_drm(adev), true);
2363
2364         amdgpu_dm_irq_suspend(adev);
2365
2366         hpd_rx_irq_work_suspend(dm);
2367
2368         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2369
2370         return 0;
2371 }
2372
2373 static struct amdgpu_dm_connector *
2374 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2375                                              struct drm_crtc *crtc)
2376 {
2377         uint32_t i;
2378         struct drm_connector_state *new_con_state;
2379         struct drm_connector *connector;
2380         struct drm_crtc *crtc_from_state;
2381
2382         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2383                 crtc_from_state = new_con_state->crtc;
2384
2385                 if (crtc_from_state == crtc)
2386                         return to_amdgpu_dm_connector(connector);
2387         }
2388
2389         return NULL;
2390 }
2391
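/**
 * emulated_link_detect() - Fake a sink for a forcibly enabled connector
 * @link: DC link to attach the emulated sink to.
 *
 * Used when userspace forces a connector on although no sink was detected:
 * creates a sink matching the connector signal type and tries to read a
 * local EDID for it.
 */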
2392 static void emulated_link_detect(struct dc_link *link)
2393 {
2394         struct dc_sink_init_data sink_init_data = { 0 };
2395         struct display_sink_capability sink_caps = { 0 };
2396         enum dc_edid_status edid_status;
2397         struct dc_context *dc_ctx = link->ctx;
2398         struct dc_sink *sink = NULL;
2399         struct dc_sink *prev_sink = NULL;
2400
2401         link->type = dc_connection_none;
2402         prev_sink = link->local_sink;
2403
2404         if (prev_sink)
2405                 dc_sink_release(prev_sink);
2406
2407         switch (link->connector_signal) {
2408         case SIGNAL_TYPE_HDMI_TYPE_A: {
2409                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2410                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2411                 break;
2412         }
2413
2414         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2415                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2416                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2417                 break;
2418         }
2419
2420         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2421                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2422                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2423                 break;
2424         }
2425
2426         case SIGNAL_TYPE_LVDS: {
2427                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2428                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2429                 break;
2430         }
2431
2432         case SIGNAL_TYPE_EDP: {
2433                 sink_caps.transaction_type =
2434                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2435                 sink_caps.signal = SIGNAL_TYPE_EDP;
2436                 break;
2437         }
2438
2439         case SIGNAL_TYPE_DISPLAY_PORT: {
2440                 sink_caps.transaction_type =
2441                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2442                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2443                 break;
2444         }
2445
2446         default:
2447                 DC_ERROR("Invalid connector type! signal:%d\n",
2448                         link->connector_signal);
2449                 return;
2450         }
2451
2452         sink_init_data.link = link;
2453         sink_init_data.sink_signal = sink_caps.signal;
2454
2455         sink = dc_sink_create(&sink_init_data);
2456         if (!sink) {
2457                 DC_ERROR("Failed to create sink!\n");
2458                 return;
2459         }
2460
2461         /* dc_sink_create returns a new reference */
2462         link->local_sink = sink;
2463
2464         edid_status = dm_helpers_read_local_edid(
2465                         link->ctx,
2466                         link,
2467                         sink);
2468
2469         if (edid_status != EDID_OK)
2470                 DC_ERROR("Failed to read EDID");
2472 }
2473
2474 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2475                                      struct amdgpu_display_manager *dm)
2476 {
2477         struct {
2478                 struct dc_surface_update surface_updates[MAX_SURFACES];
2479                 struct dc_plane_info plane_infos[MAX_SURFACES];
2480                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2481                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2482                 struct dc_stream_update stream_update;
2483         } *bundle;
2484         int k, m;
2485
2486         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2487
2488         if (!bundle) {
2489                 dm_error("Failed to allocate update bundle\n");
2490                 goto cleanup;
2491         }
2492
2493         for (k = 0; k < dc_state->stream_count; k++) {
2494                 bundle->stream_update.stream = dc_state->streams[k];
2495
2496                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2497                         bundle->surface_updates[m].surface =
2498                                 dc_state->stream_status[k].plane_states[m];
2499                         bundle->surface_updates[m].surface->force_full_update =
2500                                 true;
2501                 }
2502                 dc_commit_updates_for_stream(
2503                         dm->dc, bundle->surface_updates,
2504                         dc_state->stream_status[k].plane_count,
2505                         dc_state->streams[k], &bundle->stream_update, dc_state);
2506         }
2507
2508 cleanup:
2509         kfree(bundle);
2512 }
2513
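/**
 * dm_set_dpms_off() - Force the stream on a link to DPMS off
 * @link: DC link whose active stream should be turned off.
 * @acrtc_state: CRTC state on which force_dpms_off is recorded.
 *
 * Finds the stream currently driven over @link and commits a stream update
 * with dpms_off set, under the global DC lock.
 */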
2514 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2515 {
2516         struct dc_stream_state *stream_state;
2517         struct amdgpu_dm_connector *aconnector = link->priv;
2518         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2519         struct dc_stream_update stream_update;
2520         bool dpms_off = true;
2521
2522         memset(&stream_update, 0, sizeof(stream_update));
2523         stream_update.dpms_off = &dpms_off;
2524
2525         mutex_lock(&adev->dm.dc_lock);
2526         stream_state = dc_stream_find_from_link(link);
2527
2528         if (stream_state == NULL) {
2529                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2530                 mutex_unlock(&adev->dm.dc_lock);
2531                 return;
2532         }
2533
2534         stream_update.stream = stream_state;
2535         acrtc_state->force_dpms_off = true;
2536         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2537                                      stream_state, &stream_update,
2538                                      stream_state->ctx->dc->current_state);
2539         mutex_unlock(&adev->dm.dc_lock);
2540 }
2541
2542 static int dm_resume(void *handle)
2543 {
2544         struct amdgpu_device *adev = handle;
2545         struct drm_device *ddev = adev_to_drm(adev);
2546         struct amdgpu_display_manager *dm = &adev->dm;
2547         struct amdgpu_dm_connector *aconnector;
2548         struct drm_connector *connector;
2549         struct drm_connector_list_iter iter;
2550         struct drm_crtc *crtc;
2551         struct drm_crtc_state *new_crtc_state;
2552         struct dm_crtc_state *dm_new_crtc_state;
2553         struct drm_plane *plane;
2554         struct drm_plane_state *new_plane_state;
2555         struct dm_plane_state *dm_new_plane_state;
2556         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2557         enum dc_connection_type new_connection_type = dc_connection_none;
2558         struct dc_state *dc_state;
2559         int i, r, j;
2560
2561         if (amdgpu_in_reset(adev)) {
2562                 dc_state = dm->cached_dc_state;
2563
2564                 /*
2565                  * The dc->current_state is backed up into dm->cached_dc_state
2566                  * before we commit 0 streams.
2567                  *
2568                  * DC will clear link encoder assignments on the real state
2569                  * but the changes won't propagate over to the copy we made
2570                  * before the 0 streams commit.
2571                  *
2572                  * DC expects that link encoder assignments are *not* valid
2573                  * when committing a state, so as a workaround it needs to be
2574                  * cleared here.
2575                  */
2576                 link_enc_cfg_init(dm->dc, dc_state);
2577
2578                 amdgpu_dm_outbox_init(adev);
2579
2580                 r = dm_dmub_hw_init(adev);
2581                 if (r)
2582                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2583
2584                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2585                 dc_resume(dm->dc);
2586
2587                 amdgpu_dm_irq_resume_early(adev);
2588
2589                 for (i = 0; i < dc_state->stream_count; i++) {
2590                         dc_state->streams[i]->mode_changed = true;
2591                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2592                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2593                                         = 0xffffffff;
2594                         }
2595                 }
2596
2597                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2598
2599                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2600
2601                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2602
2603                 dc_release_state(dm->cached_dc_state);
2604                 dm->cached_dc_state = NULL;
2605
2606                 amdgpu_dm_irq_resume_late(adev);
2607
2608                 mutex_unlock(&dm->dc_lock);
2609
2610                 return 0;
2611         }
2612         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2613         dc_release_state(dm_state->context);
2614         dm_state->context = dc_create_state(dm->dc);
2615         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2616         dc_resource_state_construct(dm->dc, dm_state->context);
2617
2618         /* Before powering on DC we need to re-initialize DMUB. */
2619         r = dm_dmub_hw_init(adev);
2620         if (r)
2621                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2622
2623         /* power on hardware */
2624         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2625
2626         /* program HPD filter */
2627         dc_resume(dm->dc);
2628
2629         /*
2630          * early enable HPD Rx IRQ, should be done before set mode as short
2631          * pulse interrupts are used for MST
2632          */
2633         amdgpu_dm_irq_resume_early(adev);
2634
2635         /* On resume we need to rewrite the MSTM control bits to enable MST */
2636         s3_handle_mst(ddev, false);
2637
2638         /* Do detection */
2639         drm_connector_list_iter_begin(ddev, &iter);
2640         drm_for_each_connector_iter(connector, &iter) {
2641                 aconnector = to_amdgpu_dm_connector(connector);
2642
2643                 /*
2644                  * this is the case when traversing through already created
2645                  * MST connectors, should be skipped
2646                  */
2647                 if (aconnector->mst_port)
2648                         continue;
2649
2650                 mutex_lock(&aconnector->hpd_lock);
2651                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2652                         DRM_ERROR("KMS: Failed to detect connector\n");
2653
2654                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2655                         emulated_link_detect(aconnector->dc_link);
2656                 else
2657                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2658
2659                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2660                         aconnector->fake_enable = false;
2661
2662                 if (aconnector->dc_sink)
2663                         dc_sink_release(aconnector->dc_sink);
2664                 aconnector->dc_sink = NULL;
2665                 amdgpu_dm_update_connector_after_detect(aconnector);
2666                 mutex_unlock(&aconnector->hpd_lock);
2667         }
2668         drm_connector_list_iter_end(&iter);
2669
2670         /* Force mode set in atomic commit */
2671         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2672                 new_crtc_state->active_changed = true;
2673
2674         /*
2675          * atomic_check is expected to create the dc states. We need to release
2676          * them here, since they were duplicated as part of the suspend
2677          * procedure.
2678          */
2679         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2680                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2681                 if (dm_new_crtc_state->stream) {
2682                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2683                         dc_stream_release(dm_new_crtc_state->stream);
2684                         dm_new_crtc_state->stream = NULL;
2685                 }
2686         }
2687
2688         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2689                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2690                 if (dm_new_plane_state->dc_state) {
2691                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2692                         dc_plane_state_release(dm_new_plane_state->dc_state);
2693                         dm_new_plane_state->dc_state = NULL;
2694                 }
2695         }
2696
2697         drm_atomic_helper_resume(ddev, dm->cached_state);
2698
2699         dm->cached_state = NULL;
2700
2701         amdgpu_dm_irq_resume_late(adev);
2702
2703         amdgpu_dm_smu_write_watermarks_table(adev);
2704
2705         return 0;
2706 }
2707
2708 /**
2709  * DOC: DM Lifecycle
2710  *
2711  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2712  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2713  * the base driver's device list to be initialized and torn down accordingly.
2714  *
2715  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2716  */
2717
2718 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2719         .name = "dm",
2720         .early_init = dm_early_init,
2721         .late_init = dm_late_init,
2722         .sw_init = dm_sw_init,
2723         .sw_fini = dm_sw_fini,
2724         .early_fini = amdgpu_dm_early_fini,
2725         .hw_init = dm_hw_init,
2726         .hw_fini = dm_hw_fini,
2727         .suspend = dm_suspend,
2728         .resume = dm_resume,
2729         .is_idle = dm_is_idle,
2730         .wait_for_idle = dm_wait_for_idle,
2731         .check_soft_reset = dm_check_soft_reset,
2732         .soft_reset = dm_soft_reset,
2733         .set_clockgating_state = dm_set_clockgating_state,
2734         .set_powergating_state = dm_set_powergating_state,
2735 };
2736
2737 const struct amdgpu_ip_block_version dm_ip_block = {
2739         .type = AMD_IP_BLOCK_TYPE_DCE,
2740         .major = 1,
2741         .minor = 0,
2742         .rev = 0,
2743         .funcs = &amdgpu_dm_funcs,
2744 };
2745
2747 /**
2748  * DOC: atomic
2749  *
2750  * *WIP*
2751  */
2752
2753 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2754         .fb_create = amdgpu_display_user_framebuffer_create,
2755         .get_format_info = amd_get_format_info,
2756         .output_poll_changed = drm_fb_helper_output_poll_changed,
2757         .atomic_check = amdgpu_dm_atomic_check,
2758         .atomic_commit = drm_atomic_helper_commit,
2759 };
2760
2761 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2762         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2763 };
2764
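/**
 * update_connector_ext_caps() - Refresh eDP backlight caps from the sink
 * @aconnector: connector whose DPCD extended caps were just read.
 *
 * For eDP links this decides between AUX and PWM backlight control (also
 * honouring the amdgpu_backlight module parameter) and derives the min/max
 * AUX input signal from the sink's max_cll/min_cll HDR metadata.
 */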
2765 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2766 {
2767         u32 max_cll, min_cll, max, min, q, r;
2768         struct amdgpu_dm_backlight_caps *caps;
2769         struct amdgpu_display_manager *dm;
2770         struct drm_connector *conn_base;
2771         struct amdgpu_device *adev;
2772         struct dc_link *link = NULL;
2773         static const u8 pre_computed_values[] = {
2774                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2775                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2776         int i;
2777
2778         if (!aconnector || !aconnector->dc_link)
2779                 return;
2780
2781         link = aconnector->dc_link;
2782         if (link->connector_signal != SIGNAL_TYPE_EDP)
2783                 return;
2784
2785         conn_base = &aconnector->base;
2786         adev = drm_to_adev(conn_base->dev);
2787         dm = &adev->dm;
2788         for (i = 0; i < dm->num_of_edps; i++) {
2789                 if (link == dm->backlight_link[i])
2790                         break;
2791         }
2792         if (i >= dm->num_of_edps)
2793                 return;
2794         caps = &dm->backlight_caps[i];
2795         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2796         caps->aux_support = false;
2797         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2798         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2799
2800         if (caps->ext_caps->bits.oled == 1 /*||
2801             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2802             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2803                 caps->aux_support = true;
2804
2805         if (amdgpu_backlight == 0)
2806                 caps->aux_support = false;
2807         else if (amdgpu_backlight == 1)
2808                 caps->aux_support = true;
2809
2810         /* From the specification (CTA-861-G), the maximum luminance is
2811          * calculated as:
2812          *      Luminance = 50*2**(CV/32)
2813          * where CV is a one-byte value.
2814          * Evaluating this expression directly would need floating-point
2815          * precision; to avoid that complexity, we exploit the fact that CV
2816          * is divided by a constant. By Euclid's division algorithm, CV can
2817          * be written as CV = 32*q + r. Substituting this into the luminance
2818          * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2819          * pre-compute 50*2**(r/32) for each r in 0..31. The values were
2820          * generated with the following Ruby line:
2821          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2822          * The results can be verified against the pre_computed_values
2823          * table above.
2824          */
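        /*
         * Worked example (illustrative, values chosen by the editor): for
         * max_cll = 70, q = 70 >> 5 = 2 and r = 70 % 32 = 6, so the code
         * below computes (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
         * matching 50*2**(70/32) ~= 227.8 from the formula above.
         */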
2825         q = max_cll >> 5;
2826         r = max_cll % 32;
2827         max = (1 << q) * pre_computed_values[r];
2828
2829         // min luminance: maxLum * (CV/255)^2 / 100
2830         q = DIV_ROUND_CLOSEST(min_cll, 255);
2831         min = max * DIV_ROUND_CLOSEST((q * q), 100);
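        /*
         * Editorial note on the integer arithmetic above:
         * DIV_ROUND_CLOSEST(min_cll, 255) is 0 for min_cll < 128 and 1
         * otherwise, so q * q is at most 1 and DIV_ROUND_CLOSEST(q * q, 100)
         * rounds to 0 either way; min therefore ends up as 0 for any 8-bit
         * min_cll value.
         */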
2832
2833         caps->aux_max_input_signal = max;
2834         caps->aux_min_input_signal = min;
2835 }
2836
2837 void amdgpu_dm_update_connector_after_detect(
2838                 struct amdgpu_dm_connector *aconnector)
2839 {
2840         struct drm_connector *connector = &aconnector->base;
2841         struct drm_device *dev = connector->dev;
2842         struct dc_sink *sink;
2843
2844         /* MST handled by drm_mst framework */
2845         if (aconnector->mst_mgr.mst_state)
2846                 return;
2847
2848         sink = aconnector->dc_link->local_sink;
2849         if (sink)
2850                 dc_sink_retain(sink);
2851
2852         /*
2853          * The EDID-managed connector gets its first update only in the mode_valid
2854          * hook; its sink is then set to either the fake or the physical sink,
2855          * depending on link status. Skip if this was already done during boot.
2856          */
2857         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2858                         && aconnector->dc_em_sink) {
2859
2860                 /*
2861                  * For headless S3 resume, use the emulated sink to fake a stream,
2862                  * because the connector's sink is set to NULL on resume.
2863                  */
2864                 mutex_lock(&dev->mode_config.mutex);
2865
2866                 if (sink) {
2867                         if (aconnector->dc_sink) {
2868                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2869                                 /*
2870                  * The retain and release below bump up the sink's refcount
2871                  * because the link no longer points to it after disconnect;
2872                  * otherwise the next crtc-to-connector reshuffle by the UMD
2873                  * would trigger an unwanted dc_sink release.
2874                                  */
2875                                 dc_sink_release(aconnector->dc_sink);
2876                         }
2877                         aconnector->dc_sink = sink;
2878                         dc_sink_retain(aconnector->dc_sink);
2879                         amdgpu_dm_update_freesync_caps(connector,
2880                                         aconnector->edid);
2881                 } else {
2882                         amdgpu_dm_update_freesync_caps(connector, NULL);
2883                         if (!aconnector->dc_sink) {
2884                                 aconnector->dc_sink = aconnector->dc_em_sink;
2885                                 dc_sink_retain(aconnector->dc_sink);
2886                         }
2887                 }
2888
2889                 mutex_unlock(&dev->mode_config.mutex);
2890
2891                 if (sink)
2892                         dc_sink_release(sink);
2893                 return;
2894         }
2895
2896         /*
2897          * TODO: temporary guard until a proper fix is found.
2898          * If this sink is an MST sink, we should not do anything.
2899          */
2900         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2901                 dc_sink_release(sink);
2902                 return;
2903         }
2904
2905         if (aconnector->dc_sink == sink) {
2906                 /*
2907                  * We got a DP short pulse (Link Loss, DP CTS, etc.).
2908                  * Do nothing.
2909                  */
2910                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2911                                 aconnector->connector_id);
2912                 if (sink)
2913                         dc_sink_release(sink);
2914                 return;
2915         }
2916
2917         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2918                 aconnector->connector_id, aconnector->dc_sink, sink);
2919
2920         mutex_lock(&dev->mode_config.mutex);
2921
2922         /*
2923          * 1. Update status of the drm connector
2924          * 2. Send an event and let userspace tell us what to do
2925          */
2926         if (sink) {
2927                 /*
2928                  * TODO: check if we still need the S3 mode update workaround.
2929                  * If yes, put it here.
2930                  */
2931                 if (aconnector->dc_sink) {
2932                         amdgpu_dm_update_freesync_caps(connector, NULL);
2933                         dc_sink_release(aconnector->dc_sink);
2934                 }
2935
2936                 aconnector->dc_sink = sink;
2937                 dc_sink_retain(aconnector->dc_sink);
2938                 if (sink->dc_edid.length == 0) {
2939                         aconnector->edid = NULL;
2940                         if (aconnector->dc_link->aux_mode) {
2941                                 drm_dp_cec_unset_edid(
2942                                         &aconnector->dm_dp_aux.aux);
2943                         }
2944                 } else {
2945                         aconnector->edid =
2946                                 (struct edid *)sink->dc_edid.raw_edid;
2947
2948                         drm_connector_update_edid_property(connector,
2949                                                            aconnector->edid);
2950                         if (aconnector->dc_link->aux_mode)
2951                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2952                                                     aconnector->edid);
2953                 }
2954
2955                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2956                 update_connector_ext_caps(aconnector);
2957         } else {
2958                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2959                 amdgpu_dm_update_freesync_caps(connector, NULL);
2960                 drm_connector_update_edid_property(connector, NULL);
2961                 aconnector->num_modes = 0;
2962                 dc_sink_release(aconnector->dc_sink);
2963                 aconnector->dc_sink = NULL;
2964                 aconnector->edid = NULL;
2965 #ifdef CONFIG_DRM_AMD_DC_HDCP
2966                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
2967                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2968                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2969 #endif
2970         }
2971
2972         mutex_unlock(&dev->mode_config.mutex);
2973
2974         update_subconnector_property(aconnector);
2975
2976         if (sink)
2977                 dc_sink_release(sink);
2978 }
2979
2980 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2981 {
2982         struct drm_connector *connector = &aconnector->base;
2983         struct drm_device *dev = connector->dev;
2984         enum dc_connection_type new_connection_type = dc_connection_none;
2985         struct amdgpu_device *adev = drm_to_adev(dev);
2986         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2987         struct dm_crtc_state *dm_crtc_state = NULL;
2988
2989         if (adev->dm.disable_hpd_irq)
2990                 return;
2991
2992         if (dm_con_state->base.state && dm_con_state->base.crtc)
2993                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2994                                         dm_con_state->base.state,
2995                                         dm_con_state->base.crtc));
2996         /*
2997          * In case of failure, or for MST, there is no need to update the connector
2998          * status or notify the OS, since MST handles this in its own context.
2999          */
3000         mutex_lock(&aconnector->hpd_lock);
3001
3002 #ifdef CONFIG_DRM_AMD_DC_HDCP
3003         if (adev->dm.hdcp_workqueue) {
3004                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3005                 dm_con_state->update_hdcp = true;
3006         }
3007 #endif
3008         if (aconnector->fake_enable)
3009                 aconnector->fake_enable = false;
3010
3011         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3012                 DRM_ERROR("KMS: Failed to detect connector\n");
3013
3014         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3015                 emulated_link_detect(aconnector->dc_link);
3016
3017                 drm_modeset_lock_all(dev);
3018                 dm_restore_drm_connector_state(dev, connector);
3019                 drm_modeset_unlock_all(dev);
3020
3021                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3022                         drm_kms_helper_hotplug_event(dev);
3023
3024         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3025                 if (new_connection_type == dc_connection_none &&
3026                     aconnector->dc_link->type == dc_connection_none &&
3027                     dm_crtc_state)
3028                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3029
3030                 amdgpu_dm_update_connector_after_detect(aconnector);
3031
3032                 drm_modeset_lock_all(dev);
3033                 dm_restore_drm_connector_state(dev, connector);
3034                 drm_modeset_unlock_all(dev);
3035
3036                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3037                         drm_kms_helper_hotplug_event(dev);
3038         }
3039         mutex_unlock(&aconnector->hpd_lock);
3040
3041 }
3042
3043 static void handle_hpd_irq(void *param)
3044 {
3045         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3046
3047         handle_hpd_irq_helper(aconnector);
3048
3049 }
3050
3051 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3052 {
3053         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3054         uint8_t dret;
3055         bool new_irq_handled = false;
3056         int dpcd_addr;
3057         int dpcd_bytes_to_read;
3058
3059         const int max_process_count = 30;
3060         int process_count = 0;
3061
3062         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3063
3064         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3065                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3066                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3067                 dpcd_addr = DP_SINK_COUNT;
3068         } else {
3069                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3070                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3071                 dpcd_addr = DP_SINK_COUNT_ESI;
3072         }
3073
3074         dret = drm_dp_dpcd_read(
3075                 &aconnector->dm_dp_aux.aux,
3076                 dpcd_addr,
3077                 esi,
3078                 dpcd_bytes_to_read);
3079
3080         while (dret == dpcd_bytes_to_read &&
3081                 process_count < max_process_count) {
3082                 uint8_t retry;
3083                 dret = 0;
3084
3085                 process_count++;
3086
3087                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3088                 /* handle HPD short pulse irq */
3089                 if (aconnector->mst_mgr.mst_state)
3090                         drm_dp_mst_hpd_irq(
3091                                 &aconnector->mst_mgr,
3092                                 esi,
3093                                 &new_irq_handled);
3094
3095                 if (new_irq_handled) {
3096                         /* ACK at DPCD to notify downstream */
3097                         const int ack_dpcd_bytes_to_write =
3098                                 dpcd_bytes_to_read - 1;
3099
3100                         for (retry = 0; retry < 3; retry++) {
3101                                 uint8_t wret;
3102
3103                                 wret = drm_dp_dpcd_write(
3104                                         &aconnector->dm_dp_aux.aux,
3105                                         dpcd_addr + 1,
3106                                         &esi[1],
3107                                         ack_dpcd_bytes_to_write);
3108                                 if (wret == ack_dpcd_bytes_to_write)
3109                                         break;
3110                         }
3111
3112                         /* check if there is new irq to be handled */
3113                         dret = drm_dp_dpcd_read(
3114                                 &aconnector->dm_dp_aux.aux,
3115                                 dpcd_addr,
3116                                 esi,
3117                                 dpcd_bytes_to_read);
3118
3119                         new_irq_handled = false;
3120                 } else {
3121                         break;
3122                 }
3123         }
3124
3125         if (process_count == max_process_count)
3126                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3127 }
3128
3129 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3130                                                         union hpd_irq_data hpd_irq_data)
3131 {
3132         struct hpd_rx_irq_offload_work *offload_work =
3133                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3134
3135         if (!offload_work) {
3136                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3137                 return;
3138         }
3139
3140         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3141         offload_work->data = hpd_irq_data;
3142         offload_work->offload_wq = offload_wq;
3143
3144         queue_work(offload_wq->wq, &offload_work->work);
3145         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3146 }
3147
3148 static void handle_hpd_rx_irq(void *param)
3149 {
3150         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3151         struct drm_connector *connector = &aconnector->base;
3152         struct drm_device *dev = connector->dev;
3153         struct dc_link *dc_link = aconnector->dc_link;
3154         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3155         bool result = false;
3156         enum dc_connection_type new_connection_type = dc_connection_none;
3157         struct amdgpu_device *adev = drm_to_adev(dev);
3158         union hpd_irq_data hpd_irq_data;
3159         bool link_loss = false;
3160         bool has_left_work = false;
3161         int idx = aconnector->base.index;
3162         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3163
3164         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3165
3166         if (adev->dm.disable_hpd_irq)
3167                 return;
3168
3169         /*
3170          * TODO: Temporarily take this mutex so the HPD interrupt does not
3171          * run into a GPIO conflict; once an i2c helper is implemented, this
3172          * mutex should be retired.
3173          */
3174         mutex_lock(&aconnector->hpd_lock);
3175
3176         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3177                                                 &link_loss, true, &has_left_work);
3178
3179         if (!has_left_work)
3180                 goto out;
3181
3182         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3183                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3184                 goto out;
3185         }
3186
3187         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3188                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3189                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3190                         dm_handle_mst_sideband_msg(aconnector);
3191                         goto out;
3192                 }
3193
3194                 if (link_loss) {
3195                         bool skip = false;
3196
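                        /*
                         * Descriptive note: this is a test-and-set under
                         * offload_lock. Only the first handler to observe
                         * link loss queues the offload work; concurrent IRQs
                         * see is_handling_link_loss already set and skip
                         * re-queueing.
                         */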
3197                         spin_lock(&offload_wq->offload_lock);
3198                         skip = offload_wq->is_handling_link_loss;
3199
3200                         if (!skip)
3201                                 offload_wq->is_handling_link_loss = true;
3202
3203                         spin_unlock(&offload_wq->offload_lock);
3204
3205                         if (!skip)
3206                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3207
3208                         goto out;
3209                 }
3210         }
3211
3212 out:
3213         if (result && !is_mst_root_connector) {
3214                 /* Downstream Port status changed. */
3215                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3216                         DRM_ERROR("KMS: Failed to detect connector\n");
3217
3218                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3219                         emulated_link_detect(dc_link);
3220
3221                         if (aconnector->fake_enable)
3222                                 aconnector->fake_enable = false;
3223
3224                         amdgpu_dm_update_connector_after_detect(aconnector);
3225
3226
3227                         drm_modeset_lock_all(dev);
3228                         dm_restore_drm_connector_state(dev, connector);
3229                         drm_modeset_unlock_all(dev);
3230
3231                         drm_kms_helper_hotplug_event(dev);
3232                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3233
3234                         if (aconnector->fake_enable)
3235                                 aconnector->fake_enable = false;
3236
3237                         amdgpu_dm_update_connector_after_detect(aconnector);
3238
3239
3240                         drm_modeset_lock_all(dev);
3241                         dm_restore_drm_connector_state(dev, connector);
3242                         drm_modeset_unlock_all(dev);
3243
3244                         drm_kms_helper_hotplug_event(dev);
3245                 }
3246         }
3247 #ifdef CONFIG_DRM_AMD_DC_HDCP
3248         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3249                 if (adev->dm.hdcp_workqueue)
3250                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3251         }
3252 #endif
3253
3254         if (dc_link->type != dc_connection_mst_branch)
3255                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3256
3257         mutex_unlock(&aconnector->hpd_lock);
3258 }
3259
3260 static void register_hpd_handlers(struct amdgpu_device *adev)
3261 {
3262         struct drm_device *dev = adev_to_drm(adev);
3263         struct drm_connector *connector;
3264         struct amdgpu_dm_connector *aconnector;
3265         const struct dc_link *dc_link;
3266         struct dc_interrupt_params int_params = {0};
3267
3268         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3269         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3270
3271         list_for_each_entry(connector,
3272                         &dev->mode_config.connector_list, head) {
3273
3274                 aconnector = to_amdgpu_dm_connector(connector);
3275                 dc_link = aconnector->dc_link;
3276
3277                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3278                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3279                         int_params.irq_source = dc_link->irq_source_hpd;
3280
3281                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3282                                         handle_hpd_irq,
3283                                         (void *) aconnector);
3284                 }
3285
3286                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3287
3288                         /* Also register for DP short pulse (hpd_rx). */
3289                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3290                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3291
3292                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3293                                         handle_hpd_rx_irq,
3294                                         (void *) aconnector);
3295
3296                         if (adev->dm.hpd_rx_offload_wq)
3297                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3298                                         aconnector;
3299                 }
3300         }
3301 }
3302
3303 #if defined(CONFIG_DRM_AMD_DC_SI)
3304 /* Register IRQ sources and initialize IRQ callbacks */
3305 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3306 {
3307         struct dc *dc = adev->dm.dc;
3308         struct common_irq_params *c_irq_params;
3309         struct dc_interrupt_params int_params = {0};
3310         int r;
3311         int i;
3312         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3313
3314         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3315         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3316
3317         /*
3318          * Actions of amdgpu_irq_add_id():
3319          * 1. Register a set() function with base driver.
3320          *    Base driver will call set() function to enable/disable an
3321          *    interrupt in DC hardware.
3322          * 2. Register amdgpu_dm_irq_handler().
3323          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3324          *    coming from DC hardware.
3325          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3326          *    for acknowledging and handling. */
3327
3328         /* Use VBLANK interrupt */
3329         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3330                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3331                 if (r) {
3332                         DRM_ERROR("Failed to add crtc irq id!\n");
3333                         return r;
3334                 }
3335
3336                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3337                 int_params.irq_source =
3338                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3339
3340                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3341
3342                 c_irq_params->adev = adev;
3343                 c_irq_params->irq_src = int_params.irq_source;
3344
3345                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3346                                 dm_crtc_high_irq, c_irq_params);
3347         }
3348
3349         /* Use GRPH_PFLIP interrupt */
3350         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3351                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3352                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3353                 if (r) {
3354                         DRM_ERROR("Failed to add page flip irq id!\n");
3355                         return r;
3356                 }
3357
3358                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3359                 int_params.irq_source =
3360                         dc_interrupt_to_irq_source(dc, i, 0);
3361
3362                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3363
3364                 c_irq_params->adev = adev;
3365                 c_irq_params->irq_src = int_params.irq_source;
3366
3367                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3368                                 dm_pflip_high_irq, c_irq_params);
3369
3370         }
3371
3372         /* HPD */
3373         r = amdgpu_irq_add_id(adev, client_id,
3374                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3375         if (r) {
3376                 DRM_ERROR("Failed to add hpd irq id!\n");
3377                 return r;
3378         }
3379
3380         register_hpd_handlers(adev);
3381
3382         return 0;
3383 }
3384 #endif
3385
3386 /* Register IRQ sources and initialize IRQ callbacks */
3387 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3388 {
3389         struct dc *dc = adev->dm.dc;
3390         struct common_irq_params *c_irq_params;
3391         struct dc_interrupt_params int_params = {0};
3392         int r;
3393         int i;
3394         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3395
3396         if (adev->family >= AMDGPU_FAMILY_AI)
3397                 client_id = SOC15_IH_CLIENTID_DCE;
3398
3399         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3400         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3401
3402         /*
3403          * Actions of amdgpu_irq_add_id():
3404          * 1. Register a set() function with base driver.
3405          *    Base driver will call set() function to enable/disable an
3406          *    interrupt in DC hardware.
3407          * 2. Register amdgpu_dm_irq_handler().
3408          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3409          *    coming from DC hardware.
3410          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3411          *    for acknowledging and handling. */
3412
3413         /* Use VBLANK interrupt */
3414         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3415                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3416                 if (r) {
3417                         DRM_ERROR("Failed to add crtc irq id!\n");
3418                         return r;
3419                 }
3420
3421                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3422                 int_params.irq_source =
3423                         dc_interrupt_to_irq_source(dc, i, 0);
3424
3425                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3426
3427                 c_irq_params->adev = adev;
3428                 c_irq_params->irq_src = int_params.irq_source;
3429
3430                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3431                                 dm_crtc_high_irq, c_irq_params);
3432         }
3433
3434         /* Use VUPDATE interrupt */
3435         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3436                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3437                 if (r) {
3438                         DRM_ERROR("Failed to add vupdate irq id!\n");
3439                         return r;
3440                 }
3441
3442                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3443                 int_params.irq_source =
3444                         dc_interrupt_to_irq_source(dc, i, 0);
3445
3446                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3447
3448                 c_irq_params->adev = adev;
3449                 c_irq_params->irq_src = int_params.irq_source;
3450
3451                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3452                                 dm_vupdate_high_irq, c_irq_params);
3453         }
3454
3455         /* Use GRPH_PFLIP interrupt */
3456         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3457                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3458                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3459                 if (r) {
3460                         DRM_ERROR("Failed to add page flip irq id!\n");
3461                         return r;
3462                 }
3463
3464                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3465                 int_params.irq_source =
3466                         dc_interrupt_to_irq_source(dc, i, 0);
3467
3468                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3469
3470                 c_irq_params->adev = adev;
3471                 c_irq_params->irq_src = int_params.irq_source;
3472
3473                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3474                                 dm_pflip_high_irq, c_irq_params);
3475
3476         }
3477
3478         /* HPD */
3479         r = amdgpu_irq_add_id(adev, client_id,
3480                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3481         if (r) {
3482                 DRM_ERROR("Failed to add hpd irq id!\n");
3483                 return r;
3484         }
3485
3486         register_hpd_handlers(adev);
3487
3488         return 0;
3489 }
3490
3491 #if defined(CONFIG_DRM_AMD_DC_DCN)
3492 /* Register IRQ sources and initialize IRQ callbacks */
3493 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3494 {
3495         struct dc *dc = adev->dm.dc;
3496         struct common_irq_params *c_irq_params;
3497         struct dc_interrupt_params int_params = {0};
3498         int r;
3499         int i;
3500 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3501         static const unsigned int vrtl_int_srcid[] = {
3502                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3503                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3504                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3505                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3506                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3507                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3508         };
3509 #endif
3510
3511         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3512         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3513
3514         /*
3515          * Actions of amdgpu_irq_add_id():
3516          * 1. Register a set() function with base driver.
3517          *    Base driver will call set() function to enable/disable an
3518          *    interrupt in DC hardware.
3519          * 2. Register amdgpu_dm_irq_handler().
3520          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3521          *    coming from DC hardware.
3522          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3523          *    for acknowledging and handling.
3524          */
3525
3526         /* Use VSTARTUP interrupt */
3527         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3528                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3529                         i++) {
3530                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3531
3532                 if (r) {
3533                         DRM_ERROR("Failed to add crtc irq id!\n");
3534                         return r;
3535                 }
3536
3537                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3538                 int_params.irq_source =
3539                         dc_interrupt_to_irq_source(dc, i, 0);
3540
3541                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3542
3543                 c_irq_params->adev = adev;
3544                 c_irq_params->irq_src = int_params.irq_source;
3545
3546                 amdgpu_dm_irq_register_interrupt(
3547                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3548         }
3549
3550         /* Use otg vertical line interrupt */
3551 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3552         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3553                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3554                                 vrtl_int_srcid[i], &adev->vline0_irq);
3555
3556                 if (r) {
3557                         DRM_ERROR("Failed to add vline0 irq id!\n");
3558                         return r;
3559                 }
3560
3561                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3562                 int_params.irq_source =
3563                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3564
3565                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3566                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3567                         break;
3568                 }
3569
3570                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3571                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3572
3573                 c_irq_params->adev = adev;
3574                 c_irq_params->irq_src = int_params.irq_source;
3575
3576                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3577                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3578         }
3579 #endif
3580
3581         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3582          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3583          * to trigger at the end of each vblank, regardless of the state of the lock,
3584          * matching DCE behaviour.
3585          */
3586         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3587              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3588              i++) {
3589                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3590
3591                 if (r) {
3592                         DRM_ERROR("Failed to add vupdate irq id!\n");
3593                         return r;
3594                 }
3595
3596                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3597                 int_params.irq_source =
3598                         dc_interrupt_to_irq_source(dc, i, 0);
3599
3600                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3601
3602                 c_irq_params->adev = adev;
3603                 c_irq_params->irq_src = int_params.irq_source;
3604
3605                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3606                                 dm_vupdate_high_irq, c_irq_params);
3607         }
3608
3609         /* Use GRPH_PFLIP interrupt */
3610         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3611                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3612                         i++) {
3613                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3614                 if (r) {
3615                         DRM_ERROR("Failed to add page flip irq id!\n");
3616                         return r;
3617                 }
3618
3619                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3620                 int_params.irq_source =
3621                         dc_interrupt_to_irq_source(dc, i, 0);
3622
3623                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3624
3625                 c_irq_params->adev = adev;
3626                 c_irq_params->irq_src = int_params.irq_source;
3627
3628                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3629                                 dm_pflip_high_irq, c_irq_params);
3630
3631         }
3632
3633         /* HPD */
3634         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3635                         &adev->hpd_irq);
3636         if (r) {
3637                 DRM_ERROR("Failed to add hpd irq id!\n");
3638                 return r;
3639         }
3640
3641         register_hpd_handlers(adev);
3642
3643         return 0;
3644 }
3645 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3646 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3647 {
3648         struct dc *dc = adev->dm.dc;
3649         struct common_irq_params *c_irq_params;
3650         struct dc_interrupt_params int_params = {0};
3651         int r, i;
3652
3653         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3654         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3655
3656         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3657                         &adev->dmub_outbox_irq);
3658         if (r) {
3659                 DRM_ERROR("Failed to add outbox irq id!\n");
3660                 return r;
3661         }
3662
3663         if (dc->ctx->dmub_srv) {
3664                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3665                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3666                 int_params.irq_source =
3667                         dc_interrupt_to_irq_source(dc, i, 0);
3668
3669                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3670
3671                 c_irq_params->adev = adev;
3672                 c_irq_params->irq_src = int_params.irq_source;
3673
3674                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3675                                 dm_dmub_outbox1_low_irq, c_irq_params);
3676         }
3677
3678         return 0;
3679 }
3680 #endif
3681
3682 /*
3683  * Acquires the lock for the atomic state object and returns
3684  * the new atomic state.
3685  *
3686  * This should only be called during atomic check.
3687  */
3688 static int dm_atomic_get_state(struct drm_atomic_state *state,
3689                                struct dm_atomic_state **dm_state)
3690 {
3691         struct drm_device *dev = state->dev;
3692         struct amdgpu_device *adev = drm_to_adev(dev);
3693         struct amdgpu_display_manager *dm = &adev->dm;
3694         struct drm_private_state *priv_state;
3695
3696         if (*dm_state)
3697                 return 0;
3698
3699         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3700         if (IS_ERR(priv_state))
3701                 return PTR_ERR(priv_state);
3702
3703         *dm_state = to_dm_atomic_state(priv_state);
3704
3705         return 0;
3706 }
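/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 * during atomic check, the caller keeps a NULL-initialized pointer and
 * lets this helper fill it in on first use:
 *
 *      struct dm_atomic_state *dm_state = NULL;
 *      int ret = dm_atomic_get_state(state, &dm_state);
 *
 *      if (ret)
 *              return ret;
 *      // dm_state->context can now be inspected or modified
 */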
3707
3708 static struct dm_atomic_state *
3709 dm_atomic_get_new_state(struct drm_atomic_state *state)
3710 {
3711         struct drm_device *dev = state->dev;
3712         struct amdgpu_device *adev = drm_to_adev(dev);
3713         struct amdgpu_display_manager *dm = &adev->dm;
3714         struct drm_private_obj *obj;
3715         struct drm_private_state *new_obj_state;
3716         int i;
3717
3718         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3719                 if (obj->funcs == dm->atomic_obj.funcs)
3720                         return to_dm_atomic_state(new_obj_state);
3721         }
3722
3723         return NULL;
3724 }
3725
3726 static struct drm_private_state *
3727 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3728 {
3729         struct dm_atomic_state *old_state, *new_state;
3730
3731         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3732         if (!new_state)
3733                 return NULL;
3734
3735         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3736
3737         old_state = to_dm_atomic_state(obj->state);
3738
3739         if (old_state && old_state->context)
3740                 new_state->context = dc_copy_state(old_state->context);
3741
3742         if (!new_state->context) {
3743                 kfree(new_state);
3744                 return NULL;
3745         }
3746
3747         return &new_state->base;
3748 }
3749
3750 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3751                                     struct drm_private_state *state)
3752 {
3753         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3754
3755         if (dm_state && dm_state->context)
3756                 dc_release_state(dm_state->context);
3757
3758         kfree(dm_state);
3759 }
3760
3761 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3762         .atomic_duplicate_state = dm_atomic_duplicate_state,
3763         .atomic_destroy_state = dm_atomic_destroy_state,
3764 };
3765
3766 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3767 {
3768         struct dm_atomic_state *state;
3769         int r;
3770
3771         adev->mode_info.mode_config_initialized = true;
3772
3773         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3774         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3775
3776         adev_to_drm(adev)->mode_config.max_width = 16384;
3777         adev_to_drm(adev)->mode_config.max_height = 16384;
3778
3779         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3780         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3781         /* indicates support for immediate flip */
3782         adev_to_drm(adev)->mode_config.async_page_flip = true;
3783
3784         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3785
3786         state = kzalloc(sizeof(*state), GFP_KERNEL);
3787         if (!state)
3788                 return -ENOMEM;
3789
3790         state->context = dc_create_state(adev->dm.dc);
3791         if (!state->context) {
3792                 kfree(state);
3793                 return -ENOMEM;
3794         }
3795
3796         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3797
3798         drm_atomic_private_obj_init(adev_to_drm(adev),
3799                                     &adev->dm.atomic_obj,
3800                                     &state->base,
3801                                     &dm_atomic_state_funcs);
3802
3803         r = amdgpu_display_modeset_create_props(adev);
3804         if (r) {
3805                 dc_release_state(state->context);
3806                 kfree(state);
3807                 return r;
3808         }
3809
3810         r = amdgpu_dm_audio_init(adev);
3811         if (r) {
3812                 dc_release_state(state->context);
3813                 kfree(state);
3814                 return r;
3815         }
3816
3817         return 0;
3818 }
3819
3820 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3821 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3822 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3823
3824 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3825         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3826
3827 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3828                                             int bl_idx)
3829 {
3830 #if defined(CONFIG_ACPI)
3831         struct amdgpu_dm_backlight_caps caps;
3832
3833         memset(&caps, 0, sizeof(caps));
3834
3835         if (dm->backlight_caps[bl_idx].caps_valid)
3836                 return;
3837
3838         amdgpu_acpi_get_backlight_caps(&caps);
3839         if (caps.caps_valid) {
3840                 dm->backlight_caps[bl_idx].caps_valid = true;
3841                 if (caps.aux_support)
3842                         return;
3843                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3844                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3845         } else {
3846                 dm->backlight_caps[bl_idx].min_input_signal =
3847                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3848                 dm->backlight_caps[bl_idx].max_input_signal =
3849                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3850         }
3851 #else
3852         if (dm->backlight_caps[bl_idx].aux_support)
3853                 return;
3854
3855         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3856         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3857 #endif
3858 }
3859
3860 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3861                                 unsigned *min, unsigned *max)
3862 {
3863         if (!caps)
3864                 return 0;
3865
3866         if (caps->aux_support) {
3867                 // Firmware limits are in nits, DC API wants millinits.
3868                 *max = 1000 * caps->aux_max_input_signal;
3869                 *min = 1000 * caps->aux_min_input_signal;
3870         } else {
3871                 // Firmware limits are 8-bit, PWM control is 16-bit.
3872                 *max = 0x101 * caps->max_input_signal;
3873                 *min = 0x101 * caps->min_input_signal;
3874         }
3875         return 1;
3876 }
3877
3878 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3879                                         uint32_t brightness)
3880 {
3881         unsigned min, max;
3882
3883         if (!get_brightness_range(caps, &min, &max))
3884                 return brightness;
3885
3886         // Rescale 0..255 to min..max
3887         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3888                                        AMDGPU_MAX_BL_LEVEL);
3889 }
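/*
 * Worked example (editorial, assuming the default PWM caps above): with
 * min_input_signal = 12 and max_input_signal = 255, get_brightness_range()
 * yields min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 maps to 3084 + round(62451 * 128 / 255) = 34432.
 */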
3890
3891 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3892                                       uint32_t brightness)
3893 {
3894         unsigned min, max;
3895
3896         if (!get_brightness_range(caps, &min, &max))
3897                 return brightness;
3898
3899         if (brightness < min)
3900                 return 0;
3901         // Rescale min..max to 0..255
3902         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3903                                  max - min);
3904 }
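/*
 * Continuing the example above, the two helpers round-trip within rounding
 * error: convert_brightness_to_user() maps 34432 back to
 * round(255 * (34432 - 3084) / 62451) = 128.
 */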
3905
3906 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3907                                          int bl_idx,
3908                                          u32 user_brightness)
3909 {
3910         struct amdgpu_dm_backlight_caps caps;
3911         struct dc_link *link;
3912         u32 brightness;
3913         bool rc;
3914
3915         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3916         caps = dm->backlight_caps[bl_idx];
3917
3918         dm->brightness[bl_idx] = user_brightness;
3919         /* update scratch register */
3920         if (bl_idx == 0)
3921                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3922         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3923         link = (struct dc_link *)dm->backlight_link[bl_idx];
3924
3925         /* Change brightness based on AUX property */
3926         if (caps.aux_support) {
3927                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3928                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3929                 if (!rc)
3930                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3931         } else {
3932                 rc = dc_link_set_backlight_level(link, brightness, 0);
3933                 if (!rc)
3934                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3935         }
3936
3937         return rc ? 0 : 1;
3938 }
3939
3940 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3941 {
3942         struct amdgpu_display_manager *dm = bl_get_data(bd);
3943         int i;
3944
3945         for (i = 0; i < dm->num_of_edps; i++) {
3946                 if (bd == dm->backlight_dev[i])
3947                         break;
3948         }
3949         if (i >= AMDGPU_DM_MAX_NUM_EDP)
3950                 i = 0;
3951         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3952
3953         return 0;
3954 }
3955
3956 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3957                                          int bl_idx)
3958 {
3959         struct amdgpu_dm_backlight_caps caps;
3960         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3961
3962         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3963         caps = dm->backlight_caps[bl_idx];
3964
3965         if (caps.aux_support) {
3966                 u32 avg, peak;
3967                 bool rc;
3968
3969                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3970                 if (!rc)
3971                         return dm->brightness[bl_idx];
3972                 return convert_brightness_to_user(&caps, avg);
3973         } else {
3974                 int ret = dc_link_get_backlight_level(link);
3975
3976                 if (ret == DC_ERROR_UNEXPECTED)
3977                         return dm->brightness[bl_idx];
3978                 return convert_brightness_to_user(&caps, ret);
3979         }
3980 }
3981
3982 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3983 {
3984         struct amdgpu_display_manager *dm = bl_get_data(bd);
3985         int i;
3986
3987         for (i = 0; i < dm->num_of_edps; i++) {
3988                 if (bd == dm->backlight_dev[i])
3989                         break;
3990         }
3991         if (i >= AMDGPU_DM_MAX_NUM_EDP)
3992                 i = 0;
3993         return amdgpu_dm_backlight_get_level(dm, i);
3994 }
3995
3996 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3997         .options = BL_CORE_SUSPENDRESUME,
3998         .get_brightness = amdgpu_dm_backlight_get_brightness,
3999         .update_status  = amdgpu_dm_backlight_update_status,
4000 };
4001
4002 static void
4003 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4004 {
4005         char bl_name[16];
4006         struct backlight_properties props = { 0 };
4007
4008         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4009         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4010
4011         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4012         props.brightness = AMDGPU_MAX_BL_LEVEL;
4013         props.type = BACKLIGHT_RAW;
4014
4015         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4016                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4017
4018         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4019                                                                        adev_to_drm(dm->adev)->dev,
4020                                                                        dm,
4021                                                                        &amdgpu_dm_backlight_ops,
4022                                                                        &props);
4023
4024         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4025                 DRM_ERROR("DM: Backlight registration failed!\n");
4026         else
4027                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4028 }
4029 #endif
4030
4031 static int initialize_plane(struct amdgpu_display_manager *dm,
4032                             struct amdgpu_mode_info *mode_info, int plane_id,
4033                             enum drm_plane_type plane_type,
4034                             const struct dc_plane_cap *plane_cap)
4035 {
4036         struct drm_plane *plane;
4037         unsigned long possible_crtcs;
4038         int ret = 0;
4039
4040         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4041         if (!plane) {
4042                 DRM_ERROR("KMS: Failed to allocate plane\n");
4043                 return -ENOMEM;
4044         }
4045         plane->type = plane_type;
4046
4047         /*
4048          * HACK: IGT tests expect that the primary plane for a CRTC
4049          * can only have one possible CRTC. Only expose support for
4050          * any CRTC on planes that will not be used as a primary plane
4051          * for a CRTC - i.e. overlay or underlay planes.
4052          */
4053         possible_crtcs = 1 << plane_id;
4054         if (plane_id >= dm->dc->caps.max_streams)
4055                 possible_crtcs = 0xff;
4056
4057         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4058
4059         if (ret) {
4060                 DRM_ERROR("KMS: Failed to initialize plane\n");
4061                 kfree(plane);
4062                 return ret;
4063         }
4064
4065         if (mode_info)
4066                 mode_info->planes[plane_id] = plane;
4067
4068         return ret;
4069 }
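/*
 * Example (illustrative): with dc->caps.max_streams == 4, primary plane 2
 * gets possible_crtcs = 1 << 2 = 0x4 and is bound to CRTC 2 only, while an
 * overlay plane with plane_id >= 4 gets 0xff and may be placed on any CRTC.
 */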
4070
4071
4072 static void register_backlight_device(struct amdgpu_display_manager *dm,
4073                                       struct dc_link *link)
4074 {
4075 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4076         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4077
4078         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4079             link->type != dc_connection_none) {
4080                 /*
4081                  * Even if registration fails, we should continue with
4082                  * DM initialization, because missing backlight control
4083                  * is better than a black screen.
4084                  */
4085                 if (!dm->backlight_dev[dm->num_of_edps])
4086                         amdgpu_dm_register_backlight_device(dm);
4087
4088                 if (dm->backlight_dev[dm->num_of_edps]) {
4089                         dm->backlight_link[dm->num_of_edps] = link;
4090                         dm->num_of_edps++;
4091                 }
4092         }
4093 #endif
4094 }
4095
4096
4097 /*
4098  * In this architecture, the association
4099  * connector -> encoder -> crtc
4100  * is not really required. The crtc and connector will hold the
4101  * display_index as an abstraction to use with the DAL component.
4102  *
4103  * Returns 0 on success
4104  */
4105 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4106 {
4107         struct amdgpu_display_manager *dm = &adev->dm;
4108         int32_t i;
4109         struct amdgpu_dm_connector *aconnector = NULL;
4110         struct amdgpu_encoder *aencoder = NULL;
4111         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4112         uint32_t link_cnt;
4113         int32_t primary_planes;
4114         enum dc_connection_type new_connection_type = dc_connection_none;
4115         const struct dc_plane_cap *plane;
4116         bool psr_feature_enabled = false;
4117
4118         dm->display_indexes_num = dm->dc->caps.max_streams;
4119         /* Update the actual number of CRTCs used */
4120         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4121
4122         link_cnt = dm->dc->caps.max_links;
4123         if (amdgpu_dm_mode_config_init(dm->adev)) {
4124                 DRM_ERROR("DM: Failed to initialize mode config\n");
4125                 return -EINVAL;
4126         }
4127
4128         /* There is one primary plane per CRTC */
4129         primary_planes = dm->dc->caps.max_streams;
4130         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4131
4132         /*
4133          * Initialize primary planes, implicit planes for legacy IOCTLs.
4134          * Order is reversed to match iteration order in atomic check.
4135          */
4136         for (i = (primary_planes - 1); i >= 0; i--) {
4137                 plane = &dm->dc->caps.planes[i];
4138
4139                 if (initialize_plane(dm, mode_info, i,
4140                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4141                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4142                         goto fail;
4143                 }
4144         }
4145
4146         /*
4147          * Initialize overlay planes, index starting after primary planes.
4148          * These planes have a higher DRM index than the primary planes since
4149          * they should be considered as having a higher z-order.
4150          * Order is reversed to match iteration order in atomic check.
4151          *
4152          * Only support DCN for now, and only expose one so we don't encourage
4153          * userspace to use up all the pipes.
4154          */
4155         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4156                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4157
4158                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4159                         continue;
4160
4161                 if (!plane->blends_with_above || !plane->blends_with_below)
4162                         continue;
4163
4164                 if (!plane->pixel_format_support.argb8888)
4165                         continue;
4166
4167                 if (initialize_plane(dm, NULL, primary_planes + i,
4168                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4169                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4170                         goto fail;
4171                 }
4172
4173                 /* Only create one overlay plane. */
4174                 break;
4175         }
4176
4177         for (i = 0; i < dm->dc->caps.max_streams; i++)
4178                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4179                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4180                         goto fail;
4181                 }
4182
4183 #if defined(CONFIG_DRM_AMD_DC_DCN)
4184         /* Use Outbox interrupt */
4185         switch (adev->ip_versions[DCE_HWIP][0]) {
4186         case IP_VERSION(3, 0, 0):
4187         case IP_VERSION(3, 1, 2):
4188         case IP_VERSION(3, 1, 3):
4189         case IP_VERSION(2, 1, 0):
4190                 if (register_outbox_irq_handlers(dm->adev)) {
4191                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4192                         goto fail;
4193                 }
4194                 break;
4195         default:
4196                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4197                               adev->ip_versions[DCE_HWIP][0]);
4198         }
4199
4200         /* Determine whether to enable PSR support by default. */
4201         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4202                 switch (adev->ip_versions[DCE_HWIP][0]) {
4203                 case IP_VERSION(3, 1, 2):
4204                 case IP_VERSION(3, 1, 3):
4205                         psr_feature_enabled = true;
4206                         break;
4207                 default:
4208                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4209                         break;
4210                 }
4211         }
4212 #endif
4213
4214         /* Loop over all connectors on the board */
4215         for (i = 0; i < link_cnt; i++) {
4216                 struct dc_link *link = NULL;
4217
4218                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4219                         DRM_ERROR(
4220                                 "KMS: Cannot support more than %d display indexes\n",
4221                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4222                         continue;
4223                 }
4224
4225                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4226                 if (!aconnector)
4227                         goto fail;
4228
4229                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4230                 if (!aencoder)
4231                         goto fail;
4232
4233                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4234                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4235                         goto fail;
4236                 }
4237
4238                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4239                         DRM_ERROR("KMS: Failed to initialize connector\n");
4240                         goto fail;
4241                 }
4242
4243                 link = dc_get_link_at_index(dm->dc, i);
4244
4245                 if (!dc_link_detect_sink(link, &new_connection_type))
4246                         DRM_ERROR("KMS: Failed to detect connector\n");
4247
4248                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4249                         emulated_link_detect(link);
4250                         amdgpu_dm_update_connector_after_detect(aconnector);
4251
4252                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4253                         amdgpu_dm_update_connector_after_detect(aconnector);
4254                         register_backlight_device(dm, link);
4255                         if (dm->num_of_edps)
4256                                 update_connector_ext_caps(aconnector);
4257                         if (psr_feature_enabled)
4258                                 amdgpu_dm_set_psr_caps(link);
4259                 }
4260
4261
4262         }
4263
4264         /*
4265          * Disable vblank IRQs aggressively for power-saving.
4266          *
4267          * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
4268          * is also supported.
4269          */
4270         adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
4271
4272         /* Software is initialized. Now we can register interrupt handlers. */
4273         switch (adev->asic_type) {
4274 #if defined(CONFIG_DRM_AMD_DC_SI)
4275         case CHIP_TAHITI:
4276         case CHIP_PITCAIRN:
4277         case CHIP_VERDE:
4278         case CHIP_OLAND:
4279                 if (dce60_register_irq_handlers(dm->adev)) {
4280                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4281                         goto fail;
4282                 }
4283                 break;
4284 #endif
4285         case CHIP_BONAIRE:
4286         case CHIP_HAWAII:
4287         case CHIP_KAVERI:
4288         case CHIP_KABINI:
4289         case CHIP_MULLINS:
4290         case CHIP_TONGA:
4291         case CHIP_FIJI:
4292         case CHIP_CARRIZO:
4293         case CHIP_STONEY:
4294         case CHIP_POLARIS11:
4295         case CHIP_POLARIS10:
4296         case CHIP_POLARIS12:
4297         case CHIP_VEGAM:
4298         case CHIP_VEGA10:
4299         case CHIP_VEGA12:
4300         case CHIP_VEGA20:
4301                 if (dce110_register_irq_handlers(dm->adev)) {
4302                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4303                         goto fail;
4304                 }
4305                 break;
4306         default:
4307 #if defined(CONFIG_DRM_AMD_DC_DCN)
4308                 switch (adev->ip_versions[DCE_HWIP][0]) {
4309                 case IP_VERSION(1, 0, 0):
4310                 case IP_VERSION(1, 0, 1):
4311                 case IP_VERSION(2, 0, 2):
4312                 case IP_VERSION(2, 0, 3):
4313                 case IP_VERSION(2, 0, 0):
4314                 case IP_VERSION(2, 1, 0):
4315                 case IP_VERSION(3, 0, 0):
4316                 case IP_VERSION(3, 0, 2):
4317                 case IP_VERSION(3, 0, 3):
4318                 case IP_VERSION(3, 0, 1):
4319                 case IP_VERSION(3, 1, 2):
4320                 case IP_VERSION(3, 1, 3):
4321                         if (dcn10_register_irq_handlers(dm->adev)) {
4322                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4323                                 goto fail;
4324                         }
4325                         break;
4326                 default:
4327                         DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4328                                         adev->ip_versions[DCE_HWIP][0]);
4329                         goto fail;
4330                 }
4331 #endif
4332                 break;
4333         }
4334
4335         return 0;
4336 fail:
4337         kfree(aencoder);
4338         kfree(aconnector);
4339
4340         return -EINVAL;
4341 }
4342
4343 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4344 {
4345         drm_atomic_private_obj_fini(&dm->atomic_obj);
4347 }
4348
4349 /******************************************************************************
4350  * amdgpu_display_funcs functions
4351  *****************************************************************************/
4352
4353 /*
4354  * dm_bandwidth_update - program display watermarks
4355  *
4356  * @adev: amdgpu_device pointer
4357  *
4358  * Calculate and program the display watermarks and line buffer allocation.
4359  */
4360 static void dm_bandwidth_update(struct amdgpu_device *adev)
4361 {
4362         /* TODO: implement later */
4363 }
4364
4365 static const struct amdgpu_display_funcs dm_display_funcs = {
4366         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4367         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4368         .backlight_set_level = NULL, /* never called for DC */
4369         .backlight_get_level = NULL, /* never called for DC */
4370         .hpd_sense = NULL,/* called unconditionally */
4371         .hpd_set_polarity = NULL, /* called unconditionally */
4372         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4373         .page_flip_get_scanoutpos =
4374                 dm_crtc_get_scanoutpos,/* called unconditionally */
4375         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4376         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4377 };
4378
4379 #if defined(CONFIG_DEBUG_KERNEL_DC)
4380
4381 static ssize_t s3_debug_store(struct device *device,
4382                               struct device_attribute *attr,
4383                               const char *buf,
4384                               size_t count)
4385 {
4386         int ret;
4387         int s3_state;
4388         struct drm_device *drm_dev = dev_get_drvdata(device);
4389         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4390
4391         ret = kstrtoint(buf, 0, &s3_state);
4392
4393         if (ret == 0) {
4394                 if (s3_state) {
4395                         dm_resume(adev);
4396                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4397                 } else
4398                         dm_suspend(adev);
4399         }
4400
4401         return ret == 0 ? count : 0;
4402 }
4403
4404 DEVICE_ATTR_WO(s3_debug);
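
/*
 * Usage note (the exact sysfs path is an assumption; it depends on the
 * device the attribute is created on in dm_early_init() below): with
 * CONFIG_DEBUG_KERNEL_DC set, writing a nonzero value, e.g.
 * "echo 1 > /sys/bus/pci/devices/<bdf>/s3_debug", calls dm_resume()
 * and fires a hotplug event, while writing 0 calls dm_suspend().
 */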
4405
4406 #endif
4407
4408 static int dm_early_init(void *handle)
4409 {
4410         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4411
4412         switch (adev->asic_type) {
4413 #if defined(CONFIG_DRM_AMD_DC_SI)
4414         case CHIP_TAHITI:
4415         case CHIP_PITCAIRN:
4416         case CHIP_VERDE:
4417                 adev->mode_info.num_crtc = 6;
4418                 adev->mode_info.num_hpd = 6;
4419                 adev->mode_info.num_dig = 6;
4420                 break;
4421         case CHIP_OLAND:
4422                 adev->mode_info.num_crtc = 2;
4423                 adev->mode_info.num_hpd = 2;
4424                 adev->mode_info.num_dig = 2;
4425                 break;
4426 #endif
4427         case CHIP_BONAIRE:
4428         case CHIP_HAWAII:
4429                 adev->mode_info.num_crtc = 6;
4430                 adev->mode_info.num_hpd = 6;
4431                 adev->mode_info.num_dig = 6;
4432                 break;
4433         case CHIP_KAVERI:
4434                 adev->mode_info.num_crtc = 4;
4435                 adev->mode_info.num_hpd = 6;
4436                 adev->mode_info.num_dig = 7;
4437                 break;
4438         case CHIP_KABINI:
4439         case CHIP_MULLINS:
4440                 adev->mode_info.num_crtc = 2;
4441                 adev->mode_info.num_hpd = 6;
4442                 adev->mode_info.num_dig = 6;
4443                 break;
4444         case CHIP_FIJI:
4445         case CHIP_TONGA:
4446                 adev->mode_info.num_crtc = 6;
4447                 adev->mode_info.num_hpd = 6;
4448                 adev->mode_info.num_dig = 7;
4449                 break;
4450         case CHIP_CARRIZO:
4451                 adev->mode_info.num_crtc = 3;
4452                 adev->mode_info.num_hpd = 6;
4453                 adev->mode_info.num_dig = 9;
4454                 break;
4455         case CHIP_STONEY:
4456                 adev->mode_info.num_crtc = 2;
4457                 adev->mode_info.num_hpd = 6;
4458                 adev->mode_info.num_dig = 9;
4459                 break;
4460         case CHIP_POLARIS11:
4461         case CHIP_POLARIS12:
4462                 adev->mode_info.num_crtc = 5;
4463                 adev->mode_info.num_hpd = 5;
4464                 adev->mode_info.num_dig = 5;
4465                 break;
4466         case CHIP_POLARIS10:
4467         case CHIP_VEGAM:
4468                 adev->mode_info.num_crtc = 6;
4469                 adev->mode_info.num_hpd = 6;
4470                 adev->mode_info.num_dig = 6;
4471                 break;
4472         case CHIP_VEGA10:
4473         case CHIP_VEGA12:
4474         case CHIP_VEGA20:
4475                 adev->mode_info.num_crtc = 6;
4476                 adev->mode_info.num_hpd = 6;
4477                 adev->mode_info.num_dig = 6;
4478                 break;
4479         default:
4480 #if defined(CONFIG_DRM_AMD_DC_DCN)
4481                 switch (adev->ip_versions[DCE_HWIP][0]) {
4482                 case IP_VERSION(2, 0, 2):
4483                 case IP_VERSION(3, 0, 0):
4484                         adev->mode_info.num_crtc = 6;
4485                         adev->mode_info.num_hpd = 6;
4486                         adev->mode_info.num_dig = 6;
4487                         break;
4488                 case IP_VERSION(2, 0, 0):
4489                 case IP_VERSION(3, 0, 2):
4490                         adev->mode_info.num_crtc = 5;
4491                         adev->mode_info.num_hpd = 5;
4492                         adev->mode_info.num_dig = 5;
4493                         break;
4494                 case IP_VERSION(2, 0, 3):
4495                 case IP_VERSION(3, 0, 3):
4496                         adev->mode_info.num_crtc = 2;
4497                         adev->mode_info.num_hpd = 2;
4498                         adev->mode_info.num_dig = 2;
4499                         break;
4500                 case IP_VERSION(1, 0, 0):
4501                 case IP_VERSION(1, 0, 1):
4502                 case IP_VERSION(3, 0, 1):
4503                 case IP_VERSION(2, 1, 0):
4504                 case IP_VERSION(3, 1, 2):
4505                 case IP_VERSION(3, 1, 3):
4506                         adev->mode_info.num_crtc = 4;
4507                         adev->mode_info.num_hpd = 4;
4508                         adev->mode_info.num_dig = 4;
4509                         break;
4510                 default:
4511                         DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4512                                         adev->ip_versions[DCE_HWIP][0]);
4513                         return -EINVAL;
4514                 }
4515 #endif
4516                 break;
4517         }
4518
4519         amdgpu_dm_set_irq_funcs(adev);
4520
4521         if (adev->mode_info.funcs == NULL)
4522                 adev->mode_info.funcs = &dm_display_funcs;
4523
4524         /*
4525          * Note: Do NOT change adev->audio_endpt_rreg and
4526          * adev->audio_endpt_wreg because they are initialised in
4527          * amdgpu_device_init()
4528          */
4529 #if defined(CONFIG_DEBUG_KERNEL_DC)
4530         device_create_file(
4531                 adev_to_drm(adev)->dev,
4532                 &dev_attr_s3_debug);
4533 #endif
4534
4535         return 0;
4536 }
4537
4538 static bool modeset_required(struct drm_crtc_state *crtc_state,
4539                              struct dc_stream_state *new_stream,
4540                              struct dc_stream_state *old_stream)
4541 {
4542         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4543 }
4544
4545 static bool modereset_required(struct drm_crtc_state *crtc_state)
4546 {
4547         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4548 }
4549
4550 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4551 {
4552         drm_encoder_cleanup(encoder);
4553         kfree(encoder);
4554 }
4555
4556 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4557         .destroy = amdgpu_dm_encoder_destroy,
4558 };
4559
4560
4561 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4562                                          struct drm_framebuffer *fb,
4563                                          int *min_downscale, int *max_upscale)
4564 {
4565         struct amdgpu_device *adev = drm_to_adev(dev);
4566         struct dc *dc = adev->dm.dc;
4567         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4568         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4569
4570         switch (fb->format->format) {
4571         case DRM_FORMAT_P010:
4572         case DRM_FORMAT_NV12:
4573         case DRM_FORMAT_NV21:
4574                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4575                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4576                 break;
4577
4578         case DRM_FORMAT_XRGB16161616F:
4579         case DRM_FORMAT_ARGB16161616F:
4580         case DRM_FORMAT_XBGR16161616F:
4581         case DRM_FORMAT_ABGR16161616F:
4582                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4583                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4584                 break;
4585
4586         default:
4587                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4588                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4589                 break;
4590         }
4591
4592         /*
4593          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4594          * scaling factor of 1.0 == 1000 units.
4595          */
4596         if (*max_upscale == 1)
4597                 *max_upscale = 1000;
4598
4599         if (*min_downscale == 1)
4600                 *min_downscale = 1000;
4601 }
4602
4603
4604 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4605                                 const struct drm_plane_state *state,
4606                                 struct dc_scaling_info *scaling_info)
4607 {
4608         int scale_w, scale_h, min_downscale, max_upscale;
4609
4610         memset(scaling_info, 0, sizeof(*scaling_info));
4611
4612         /* Source is 16.16 fixed-point, but we ignore the fractional part for now... */
4613         scaling_info->src_rect.x = state->src_x >> 16;
4614         scaling_info->src_rect.y = state->src_y >> 16;
4615
4616         /*
4617          * For reasons we don't (yet) fully understand, a non-zero
4618          * src_y coordinate into an NV12 buffer can cause a
4619          * system hang on DCN1x.
4620          * To avoid hangs (and maybe be overly cautious)
4621          * let's reject both non-zero src_x and src_y.
4622          *
4623          * We currently know of only one use-case to reproduce a
4624          * scenario with non-zero src_x and src_y for NV12, which
4625          * is to gesture the YouTube Android app into full screen
4626          * on ChromeOS.
4627          */
4628         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4629             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4630             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4631             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4632                 return -EINVAL;
4633
4634         scaling_info->src_rect.width = state->src_w >> 16;
4635         if (scaling_info->src_rect.width == 0)
4636                 return -EINVAL;
4637
4638         scaling_info->src_rect.height = state->src_h >> 16;
4639         if (scaling_info->src_rect.height == 0)
4640                 return -EINVAL;
4641
4642         scaling_info->dst_rect.x = state->crtc_x;
4643         scaling_info->dst_rect.y = state->crtc_y;
4644
4645         if (state->crtc_w == 0)
4646                 return -EINVAL;
4647
4648         scaling_info->dst_rect.width = state->crtc_w;
4649
4650         if (state->crtc_h == 0)
4651                 return -EINVAL;
4652
4653         scaling_info->dst_rect.height = state->crtc_h;
4654
4655         /* DRM doesn't specify clipping on destination output. */
4656         scaling_info->clip_rect = scaling_info->dst_rect;
4657
4658         /* Validate scaling per-format with DC plane caps */
4659         if (state->plane && state->plane->dev && state->fb) {
4660                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4661                                              &min_downscale, &max_upscale);
4662         } else {
4663                 min_downscale = 250;
4664                 max_upscale = 16000;
4665         }
4666
4667         scale_w = scaling_info->dst_rect.width * 1000 /
4668                   scaling_info->src_rect.width;
4669
4670         if (scale_w < min_downscale || scale_w > max_upscale)
4671                 return -EINVAL;
4672
4673         scale_h = scaling_info->dst_rect.height * 1000 /
4674                   scaling_info->src_rect.height;
4675
4676         if (scale_h < min_downscale || scale_h > max_upscale)
4677                 return -EINVAL;
4678
4679         /*
4680          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4681          * assume reasonable defaults based on the format.
4682          */
4683
4684         return 0;
4685 }
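
/*
 * Worked standalone example (not driver code) of the scale validation
 * above: factors are expressed in thousandths, so 1000 == 1.0x, and
 * the fallback caps of 250/16000 allow 0.25x..16x. Compiles as plain
 * userspace C.
 */
#include <stdio.h>

int main(void)
{
        int min_downscale = 250, max_upscale = 16000;
        int src_w = 1920, dst_w = 960;

        /* Same computation as fill_dc_scaling_info() above. */
        int scale_w = dst_w * 1000 / src_w;     /* 500 == 0.5x */

        printf("scale_w=%d within caps: %d\n", scale_w,
               scale_w >= min_downscale && scale_w <= max_upscale);
        return 0;
}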
4686
4687 static void
4688 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4689                                  uint64_t tiling_flags)
4690 {
4691         /* Fill GFX8 params */
4692         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4693                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4694
4695                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4696                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4697                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4698                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4699                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4700
4701                 /* XXX fix me for VI */
4702                 tiling_info->gfx8.num_banks = num_banks;
4703                 tiling_info->gfx8.array_mode =
4704                                 DC_ARRAY_2D_TILED_THIN1;
4705                 tiling_info->gfx8.tile_split = tile_split;
4706                 tiling_info->gfx8.bank_width = bankw;
4707                 tiling_info->gfx8.bank_height = bankh;
4708                 tiling_info->gfx8.tile_aspect = mtaspect;
4709                 tiling_info->gfx8.tile_mode =
4710                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4711         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4712                         == DC_ARRAY_1D_TILED_THIN1) {
4713                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4714         }
4715
4716         tiling_info->gfx8.pipe_config =
4717                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4718 }
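
/*
 * Note: tiling_flags is a packed 64-bit bitfield carried in the BO
 * metadata; AMDGPU_TILING_GET() extracts a single field with a
 * per-field shift and mask (see the AMDGPU_TILING_* definitions in
 * the uapi header include/uapi/drm/amdgpu_drm.h).
 */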
4719
4720 static void
4721 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4722                                   union dc_tiling_info *tiling_info)
4723 {
4724         tiling_info->gfx9.num_pipes =
4725                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4726         tiling_info->gfx9.num_banks =
4727                 adev->gfx.config.gb_addr_config_fields.num_banks;
4728         tiling_info->gfx9.pipe_interleave =
4729                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4730         tiling_info->gfx9.num_shader_engines =
4731                 adev->gfx.config.gb_addr_config_fields.num_se;
4732         tiling_info->gfx9.max_compressed_frags =
4733                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4734         tiling_info->gfx9.num_rb_per_se =
4735                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4736         tiling_info->gfx9.shaderEnable = 1;
4737         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4738                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4739 }
4740
4741 static int
4742 validate_dcc(struct amdgpu_device *adev,
4743              const enum surface_pixel_format format,
4744              const enum dc_rotation_angle rotation,
4745              const union dc_tiling_info *tiling_info,
4746              const struct dc_plane_dcc_param *dcc,
4747              const struct dc_plane_address *address,
4748              const struct plane_size *plane_size)
4749 {
4750         struct dc *dc = adev->dm.dc;
4751         struct dc_dcc_surface_param input;
4752         struct dc_surface_dcc_cap output;
4753
4754         memset(&input, 0, sizeof(input));
4755         memset(&output, 0, sizeof(output));
4756
4757         if (!dcc->enable)
4758                 return 0;
4759
4760         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4761             !dc->cap_funcs.get_dcc_compression_cap)
4762                 return -EINVAL;
4763
4764         input.format = format;
4765         input.surface_size.width = plane_size->surface_size.width;
4766         input.surface_size.height = plane_size->surface_size.height;
4767         input.swizzle_mode = tiling_info->gfx9.swizzle;
4768
4769         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4770                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4771         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4772                 input.scan = SCAN_DIRECTION_VERTICAL;
4773
4774         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4775                 return -EINVAL;
4776
4777         if (!output.capable)
4778                 return -EINVAL;
4779
4780         if (dcc->independent_64b_blks == 0 &&
4781             output.grph.rgb.independent_64b_blks != 0)
4782                 return -EINVAL;
4783
4784         return 0;
4785 }
4786
4787 static bool
4788 modifier_has_dcc(uint64_t modifier)
4789 {
4790         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4791 }
4792
4793 static unsigned
4794 modifier_gfx9_swizzle_mode(uint64_t modifier)
4795 {
4796         if (modifier == DRM_FORMAT_MOD_LINEAR)
4797                 return 0;
4798
4799         return AMD_FMT_MOD_GET(TILE, modifier);
4800 }
4801
4802 static const struct drm_format_info *
4803 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4804 {
4805         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4806 }
4807
4808 static void
4809 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4810                                     union dc_tiling_info *tiling_info,
4811                                     uint64_t modifier)
4812 {
4813         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4814         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4815         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4816         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4817
4818         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4819
4820         if (!IS_AMD_FMT_MOD(modifier))
4821                 return;
4822
4823         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4824         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4825
4826         if (adev->family >= AMDGPU_FAMILY_NV) {
4827                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4828         } else {
4829                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4830
4831                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4832         }
4833 }
4834
4835 enum dm_micro_swizzle {
4836         MICRO_SWIZZLE_Z = 0,
4837         MICRO_SWIZZLE_S = 1,
4838         MICRO_SWIZZLE_D = 2,
4839         MICRO_SWIZZLE_R = 3
4840 };
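
/*
 * Standalone sketch (not driver code): the low two bits of a GFX9+
 * swizzle mode select the micro-tile flavor enumerated above, which
 * is why dm_plane_format_mod_supported() below masks the swizzle
 * with 3. The swizzle value used here is hypothetical.
 */
#include <stdio.h>

int main(void)
{
        static const char * const names[] = { "Z", "S", "D", "R" };
        unsigned int swizzle_mode = 0x1b;       /* hypothetical */
        unsigned int micro = swizzle_mode & 3;

        printf("micro swizzle: %s\n", names[micro]);    /* "R" */
        return 0;
}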
4841
4842 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4843                                           uint32_t format,
4844                                           uint64_t modifier)
4845 {
4846         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4847         const struct drm_format_info *info = drm_format_info(format);
4848         int i;
4849
4850         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4851
4852         if (!info)
4853                 return false;
4854
4855         /*
4856          * We always have to allow these modifiers:
4857          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4858          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4859          */
4860         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4861             modifier == DRM_FORMAT_MOD_INVALID) {
4862                 return true;
4863         }
4864
4865         /* Check that the modifier is on the list of the plane's supported modifiers. */
4866         for (i = 0; i < plane->modifier_count; i++) {
4867                 if (modifier == plane->modifiers[i])
4868                         break;
4869         }
4870         if (i == plane->modifier_count)
4871                 return false;
4872
4873         /*
4874          * For D swizzle the canonical modifier depends on the bpp, so check
4875          * it here.
4876          */
4877         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4878             adev->family >= AMDGPU_FAMILY_NV) {
4879                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4880                         return false;
4881         }
4882
4883         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4884             info->cpp[0] < 8)
4885                 return false;
4886
4887         if (modifier_has_dcc(modifier)) {
4888                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4889                 if (info->cpp[0] != 4)
4890                         return false;
4891                 /* We support multi-planar formats, but not when combined with
4892                  * additional DCC metadata planes. */
4893                 if (info->num_planes > 1)
4894                         return false;
4895         }
4896
4897         return true;
4898 }
4899
4900 static void
4901 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4902 {
4903         if (!*mods)
4904                 return;
4905
4906         if (*cap - *size < 1) {
4907                 uint64_t new_cap = *cap * 2;
4908                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4909
4910                 if (!new_mods) {
4911                         kfree(*mods);
4912                         *mods = NULL;
4913                         return;
4914                 }
4915
4916                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4917                 kfree(*mods);
4918                 *mods = new_mods;
4919                 *cap = new_cap;
4920         }
4921
4922         (*mods)[*size] = mod;
4923         *size += 1;
4924 }
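
/*
 * Standalone userspace sketch (not driver code) of the append helper
 * above: a capacity-doubling array in which an allocation failure
 * frees the list and NULLs the pointer, turning every later append
 * into a no-op so the caller only needs one NULL check at the end.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

void sketch_add(uint64_t **mods, uint64_t *size, uint64_t *cap,
                uint64_t mod)
{
        if (!*mods)
                return; /* an earlier failure poisoned the list */

        if (*cap - *size < 1) {
                uint64_t new_cap = *cap * 2;
                uint64_t *grown = malloc(new_cap * sizeof(uint64_t));

                if (!grown) {
                        free(*mods);
                        *mods = NULL;
                        return;
                }
                memcpy(grown, *mods, *size * sizeof(uint64_t));
                free(*mods);
                *mods = grown;
                *cap = new_cap;
        }
        (*mods)[(*size)++] = mod;
}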
4925
4926 static void
4927 add_gfx9_modifiers(const struct amdgpu_device *adev,
4928                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4929 {
4930         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4931         int pipe_xor_bits = min(8, pipes +
4932                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4933         int bank_xor_bits = min(8 - pipe_xor_bits,
4934                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4935         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4936                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4937
4938
4939         if (adev->family == AMDGPU_FAMILY_RV) {
4940                 /* Raven2 and later */
4941                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4942
4943                 /*
4944                  * No _D DCC swizzles yet because we only allow 32bpp, which
4945                  * doesn't support _D on DCN
4946                  */
4947
4948                 if (has_constant_encode) {
4949                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4950                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4951                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4952                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4953                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4954                                     AMD_FMT_MOD_SET(DCC, 1) |
4955                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4956                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4957                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4958                 }
4959
4960                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4961                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4962                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4963                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4964                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4965                             AMD_FMT_MOD_SET(DCC, 1) |
4966                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4967                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4968                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4969
4970                 if (has_constant_encode) {
4971                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4972                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4973                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4974                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4975                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4976                                     AMD_FMT_MOD_SET(DCC, 1) |
4977                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4978                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4979                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4981                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4982                                     AMD_FMT_MOD_SET(RB, rb) |
4983                                     AMD_FMT_MOD_SET(PIPE, pipes));
4984                 }
4985
4986                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4987                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4988                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4989                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4990                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4991                             AMD_FMT_MOD_SET(DCC, 1) |
4992                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4993                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4994                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4995                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4996                             AMD_FMT_MOD_SET(RB, rb) |
4997                             AMD_FMT_MOD_SET(PIPE, pipes));
4998         }
4999
5000         /*
5001          * Only supported for 64bpp on Raven, will be filtered on format in
5002          * dm_plane_format_mod_supported.
5003          */
5004         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5005                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5006                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5007                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5008                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5009
5010         if (adev->family == AMDGPU_FAMILY_RV) {
5011                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5012                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5013                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5014                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5015                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5016         }
5017
5018         /*
5019          * Only supported for 64bpp on Raven, will be filtered on format in
5020          * dm_plane_format_mod_supported.
5021          */
5022         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5023                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5024                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5025
5026         if (adev->family == AMDGPU_FAMILY_RV) {
5027                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5028                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5029                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5030         }
5031 }
5032
5033 static void
5034 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5035                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5036 {
5037         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5038
5039         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5040                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5041                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5042                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5043                     AMD_FMT_MOD_SET(DCC, 1) |
5044                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5045                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5046                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5047
5048         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5049                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5050                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5051                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5052                     AMD_FMT_MOD_SET(DCC, 1) |
5053                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5054                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5055                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5056                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5057
5058         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5059                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5060                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5061                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5062
5063         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5064                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5065                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5066                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5067
5068
5069         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5070         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5071                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5072                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5073
5074         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5075                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5076                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5077 }
5078
5079 static void
5080 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5081                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5082 {
5083         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5084         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5085
5086         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5087                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5088                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5089                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5090                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5091                     AMD_FMT_MOD_SET(DCC, 1) |
5092                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5093                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5094                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5095                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5096
5097         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5098                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5099                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5100                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5101                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5102                     AMD_FMT_MOD_SET(DCC, 1) |
5103                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5104                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5105                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5106
5107         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5108                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5109                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5110                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5111                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5112                     AMD_FMT_MOD_SET(DCC, 1) |
5113                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5114                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5115                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5116                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5117                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5118
5119         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5120                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5121                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5122                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5123                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5124                     AMD_FMT_MOD_SET(DCC, 1) |
5125                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5126                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5127                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5128                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5129
5130         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5131                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5132                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5133                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5134                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5135
5136         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5137                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5138                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5139                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5140                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5141
5142         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5143         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5144                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5145                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5146
5147         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5148                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5149                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5150 }
5151
5152 static int
5153 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5154 {
5155         uint64_t size = 0, capacity = 128;
5156         *mods = NULL;
5157
5158         /* We have not hooked up any pre-GFX9 modifiers. */
5159         if (adev->family < AMDGPU_FAMILY_AI)
5160                 return 0;
5161
5162         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5163
5164         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5165                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5166                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5167                 return *mods ? 0 : -ENOMEM;
5168         }
5169
5170         switch (adev->family) {
5171         case AMDGPU_FAMILY_AI:
5172         case AMDGPU_FAMILY_RV:
5173                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5174                 break;
5175         case AMDGPU_FAMILY_NV:
5176         case AMDGPU_FAMILY_VGH:
5177         case AMDGPU_FAMILY_YC:
5178                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5179                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5180                 else
5181                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5182                 break;
5183         }
5184
5185         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5186
5187         /* INVALID marks the end of the list. */
5188         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5189
5190         if (!*mods)
5191                 return -ENOMEM;
5192
5193         return 0;
5194 }
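
/*
 * Standalone sketch (not driver code): consuming the modifier list
 * built by get_plane_modifiers(). DRM_FORMAT_MOD_INVALID terminates
 * the list, so a walker needs no separate length. The terminator
 * value below matches DRM_FORMAT_MOD_INVALID from drm_fourcc.h.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MOD_LINEAR  0ULL
#define SKETCH_MOD_INVALID 0x00ffffffffffffffULL

int main(void)
{
        uint64_t mods[] = { 0x123, 0x456, SKETCH_MOD_LINEAR,
                            SKETCH_MOD_INVALID };
        unsigned int n = 0;

        while (mods[n] != SKETCH_MOD_INVALID)
                n++;
        printf("%u modifiers before the terminator\n", n);      /* 3 */
        return 0;
}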
5195
5196 static int
5197 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5198                                           const struct amdgpu_framebuffer *afb,
5199                                           const enum surface_pixel_format format,
5200                                           const enum dc_rotation_angle rotation,
5201                                           const struct plane_size *plane_size,
5202                                           union dc_tiling_info *tiling_info,
5203                                           struct dc_plane_dcc_param *dcc,
5204                                           struct dc_plane_address *address,
5205                                           const bool force_disable_dcc)
5206 {
5207         const uint64_t modifier = afb->base.modifier;
5208         int ret = 0;
5209
5210         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5211         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5212
5213         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5214                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5215                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5216                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5217
5218                 dcc->enable = 1;
5219                 dcc->meta_pitch = afb->base.pitches[1];
5220                 dcc->independent_64b_blks = independent_64b_blks;
5221                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5222                         if (independent_64b_blks && independent_128b_blks)
5223                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5224                         else if (independent_128b_blks)
5225                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5226                         else if (independent_64b_blks && !independent_128b_blks)
5227                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5228                         else
5229                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5230                 } else {
5231                         if (independent_64b_blks)
5232                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5233                         else
5234                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5235                 }
5236
5237                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5238                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5239         }
5240
5241         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5242         if (ret)
5243                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5244
5245         return ret;
5246 }
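
/*
 * Summary of the dcc_ind_blk selection above. On gfx10.3+ (modifier
 * TILE_VERSION == GFX10_RBPLUS):
 *
 *   independent_64b  independent_128b  ->  dcc_ind_blk
 *   ------------------------------------------------------------------
 *         1                 1              hubp_ind_block_64b_no_128bcl
 *         0                 1              hubp_ind_block_128b
 *         1                 0              hubp_ind_block_64b
 *         0                 0              hubp_ind_block_unconstrained
 *
 * Older tile versions only distinguish 64B-independent from
 * unconstrained.
 */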
5247
5248 static int
5249 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5250                              const struct amdgpu_framebuffer *afb,
5251                              const enum surface_pixel_format format,
5252                              const enum dc_rotation_angle rotation,
5253                              const uint64_t tiling_flags,
5254                              union dc_tiling_info *tiling_info,
5255                              struct plane_size *plane_size,
5256                              struct dc_plane_dcc_param *dcc,
5257                              struct dc_plane_address *address,
5258                              bool tmz_surface,
5259                              bool force_disable_dcc)
5260 {
5261         const struct drm_framebuffer *fb = &afb->base;
5262         int ret;
5263
5264         memset(tiling_info, 0, sizeof(*tiling_info));
5265         memset(plane_size, 0, sizeof(*plane_size));
5266         memset(dcc, 0, sizeof(*dcc));
5267         memset(address, 0, sizeof(*address));
5268
5269         address->tmz_surface = tmz_surface;
5270
5271         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5272                 uint64_t addr = afb->address + fb->offsets[0];
5273
5274                 plane_size->surface_size.x = 0;
5275                 plane_size->surface_size.y = 0;
5276                 plane_size->surface_size.width = fb->width;
5277                 plane_size->surface_size.height = fb->height;
5278                 plane_size->surface_pitch =
5279                         fb->pitches[0] / fb->format->cpp[0];
5280
5281                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5282                 address->grph.addr.low_part = lower_32_bits(addr);
5283                 address->grph.addr.high_part = upper_32_bits(addr);
5284         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5285                 uint64_t luma_addr = afb->address + fb->offsets[0];
5286                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5287
5288                 plane_size->surface_size.x = 0;
5289                 plane_size->surface_size.y = 0;
5290                 plane_size->surface_size.width = fb->width;
5291                 plane_size->surface_size.height = fb->height;
5292                 plane_size->surface_pitch =
5293                         fb->pitches[0] / fb->format->cpp[0];
5294
5295                 plane_size->chroma_size.x = 0;
5296                 plane_size->chroma_size.y = 0;
5297                 /* TODO: set these based on surface format */
5298                 plane_size->chroma_size.width = fb->width / 2;
5299                 plane_size->chroma_size.height = fb->height / 2;
5300
5301                 plane_size->chroma_pitch =
5302                         fb->pitches[1] / fb->format->cpp[1];
5303
5304                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5305                 address->video_progressive.luma_addr.low_part =
5306                         lower_32_bits(luma_addr);
5307                 address->video_progressive.luma_addr.high_part =
5308                         upper_32_bits(luma_addr);
5309                 address->video_progressive.chroma_addr.low_part =
5310                         lower_32_bits(chroma_addr);
5311                 address->video_progressive.chroma_addr.high_part =
5312                         upper_32_bits(chroma_addr);
5313         }
5314
5315         if (adev->family >= AMDGPU_FAMILY_AI) {
5316                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5317                                                                 rotation, plane_size,
5318                                                                 tiling_info, dcc,
5319                                                                 address,
5320                                                                 force_disable_dcc);
5321                 if (ret)
5322                         return ret;
5323         } else {
5324                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5325         }
5326
5327         return 0;
5328 }
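
/*
 * Worked example for the semi-planar branch above (assuming a
 * 1920x1080 NV12 framebuffer with both planes pitched at 1920 bytes):
 * cpp[0] is 1, so surface_pitch = 1920 / 1 = 1920 luma pixels per
 * row; cpp[1] is 2 (interleaved CbCr), so chroma_pitch = 1920 / 2 =
 * 960, matching the hardcoded half-size 960x540 chroma plane.
 */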
5329
5330 static void
5331 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5332                                bool *per_pixel_alpha, bool *global_alpha,
5333                                int *global_alpha_value)
5334 {
5335         *per_pixel_alpha = false;
5336         *global_alpha = false;
5337         *global_alpha_value = 0xff;
5338
5339         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5340                 return;
5341
5342         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5343                 static const uint32_t alpha_formats[] = {
5344                         DRM_FORMAT_ARGB8888,
5345                         DRM_FORMAT_RGBA8888,
5346                         DRM_FORMAT_ABGR8888,
5347                 };
5348                 uint32_t format = plane_state->fb->format->format;
5349                 unsigned int i;
5350
5351                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5352                         if (format == alpha_formats[i]) {
5353                                 *per_pixel_alpha = true;
5354                                 break;
5355                         }
5356                 }
5357         }
5358
5359         if (plane_state->alpha < 0xffff) {
5360                 *global_alpha = true;
5361                 *global_alpha_value = plane_state->alpha >> 8;
5362         }
5363 }
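
/*
 * Illustrative sketch (not part of the driver): the DRM plane alpha
 * property is 16 bits wide while DC consumes an 8-bit global alpha, so
 * fill_blending_from_plane_state() simply drops the low byte.
 */
static inline int example_drm_alpha_to_dc_alpha(uint16_t drm_alpha)
{
	/* 0xffff -> 0xff (opaque), 0x8000 -> 0x80 (~50%) */
	return drm_alpha >> 8;
}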
5364
5365 static int
5366 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5367                             const enum surface_pixel_format format,
5368                             enum dc_color_space *color_space)
5369 {
5370         bool full_range;
5371
5372         *color_space = COLOR_SPACE_SRGB;
5373
5374         /* DRM color properties only affect non-RGB formats. */
5375         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5376                 return 0;
5377
5378         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5379
5380         switch (plane_state->color_encoding) {
5381         case DRM_COLOR_YCBCR_BT601:
5382                 if (full_range)
5383                         *color_space = COLOR_SPACE_YCBCR601;
5384                 else
5385                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5386                 break;
5387
5388         case DRM_COLOR_YCBCR_BT709:
5389                 if (full_range)
5390                         *color_space = COLOR_SPACE_YCBCR709;
5391                 else
5392                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5393                 break;
5394
5395         case DRM_COLOR_YCBCR_BT2020:
5396                 if (full_range)
5397                         *color_space = COLOR_SPACE_2020_YCBCR;
5398                 else
5399                         return -EINVAL;
5400                 break;
5401
5402         default:
5403                 return -EINVAL;
5404         }
5405
5406         return 0;
5407 }
5408
5409 static int
5410 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5411                             const struct drm_plane_state *plane_state,
5412                             const uint64_t tiling_flags,
5413                             struct dc_plane_info *plane_info,
5414                             struct dc_plane_address *address,
5415                             bool tmz_surface,
5416                             bool force_disable_dcc)
5417 {
5418         const struct drm_framebuffer *fb = plane_state->fb;
5419         const struct amdgpu_framebuffer *afb =
5420                 to_amdgpu_framebuffer(plane_state->fb);
5421         int ret;
5422
5423         memset(plane_info, 0, sizeof(*plane_info));
5424
5425         switch (fb->format->format) {
5426         case DRM_FORMAT_C8:
5427                 plane_info->format =
5428                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5429                 break;
5430         case DRM_FORMAT_RGB565:
5431                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5432                 break;
5433         case DRM_FORMAT_XRGB8888:
5434         case DRM_FORMAT_ARGB8888:
5435                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5436                 break;
5437         case DRM_FORMAT_XRGB2101010:
5438         case DRM_FORMAT_ARGB2101010:
5439                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5440                 break;
5441         case DRM_FORMAT_XBGR2101010:
5442         case DRM_FORMAT_ABGR2101010:
5443                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5444                 break;
5445         case DRM_FORMAT_XBGR8888:
5446         case DRM_FORMAT_ABGR8888:
5447                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5448                 break;
5449         case DRM_FORMAT_NV21:
5450                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5451                 break;
5452         case DRM_FORMAT_NV12:
5453                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5454                 break;
5455         case DRM_FORMAT_P010:
5456                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5457                 break;
5458         case DRM_FORMAT_XRGB16161616F:
5459         case DRM_FORMAT_ARGB16161616F:
5460                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5461                 break;
5462         case DRM_FORMAT_XBGR16161616F:
5463         case DRM_FORMAT_ABGR16161616F:
5464                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5465                 break;
5466         case DRM_FORMAT_XRGB16161616:
5467         case DRM_FORMAT_ARGB16161616:
5468                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5469                 break;
5470         case DRM_FORMAT_XBGR16161616:
5471         case DRM_FORMAT_ABGR16161616:
5472                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5473                 break;
5474         default:
5475                 DRM_ERROR(
5476                         "Unsupported screen format %p4cc\n",
5477                         &fb->format->format);
5478                 return -EINVAL;
5479         }
5480
5481         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5482         case DRM_MODE_ROTATE_0:
5483                 plane_info->rotation = ROTATION_ANGLE_0;
5484                 break;
5485         case DRM_MODE_ROTATE_90:
5486                 plane_info->rotation = ROTATION_ANGLE_90;
5487                 break;
5488         case DRM_MODE_ROTATE_180:
5489                 plane_info->rotation = ROTATION_ANGLE_180;
5490                 break;
5491         case DRM_MODE_ROTATE_270:
5492                 plane_info->rotation = ROTATION_ANGLE_270;
5493                 break;
5494         default:
5495                 plane_info->rotation = ROTATION_ANGLE_0;
5496                 break;
5497         }
5498
5499         plane_info->visible = true;
5500         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5501
5502         plane_info->layer_index = 0;
5503
5504         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5505                                           &plane_info->color_space);
5506         if (ret)
5507                 return ret;
5508
5509         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5510                                            plane_info->rotation, tiling_flags,
5511                                            &plane_info->tiling_info,
5512                                            &plane_info->plane_size,
5513                                            &plane_info->dcc, address, tmz_surface,
5514                                            force_disable_dcc);
5515         if (ret)
5516                 return ret;
5517
5518         fill_blending_from_plane_state(
5519                 plane_state, &plane_info->per_pixel_alpha,
5520                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5521
5522         return 0;
5523 }
5524
5525 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5526                                     struct dc_plane_state *dc_plane_state,
5527                                     struct drm_plane_state *plane_state,
5528                                     struct drm_crtc_state *crtc_state)
5529 {
5530         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5531         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5532         struct dc_scaling_info scaling_info;
5533         struct dc_plane_info plane_info;
5534         int ret;
5535         bool force_disable_dcc = false;
5536
5537         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5538         if (ret)
5539                 return ret;
5540
5541         dc_plane_state->src_rect = scaling_info.src_rect;
5542         dc_plane_state->dst_rect = scaling_info.dst_rect;
5543         dc_plane_state->clip_rect = scaling_info.clip_rect;
5544         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5545
5546         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5547         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5548                                           afb->tiling_flags,
5549                                           &plane_info,
5550                                           &dc_plane_state->address,
5551                                           afb->tmz_surface,
5552                                           force_disable_dcc);
5553         if (ret)
5554                 return ret;
5555
5556         dc_plane_state->format = plane_info.format;
5557         dc_plane_state->color_space = plane_info.color_space;
5559         dc_plane_state->plane_size = plane_info.plane_size;
5560         dc_plane_state->rotation = plane_info.rotation;
5561         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5562         dc_plane_state->stereo_format = plane_info.stereo_format;
5563         dc_plane_state->tiling_info = plane_info.tiling_info;
5564         dc_plane_state->visible = plane_info.visible;
5565         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5566         dc_plane_state->global_alpha = plane_info.global_alpha;
5567         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5568         dc_plane_state->dcc = plane_info.dcc;
5569         dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
5570         dc_plane_state->flip_int_enabled = true;
5571
5572         /*
5573          * Always set input transfer function, since plane state is refreshed
5574          * every time.
5575          */
5576         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5577         if (ret)
5578                 return ret;
5579
5580         return 0;
5581 }
5582
5583 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5584                                            const struct dm_connector_state *dm_state,
5585                                            struct dc_stream_state *stream)
5586 {
5587         enum amdgpu_rmx_type rmx_type;
5588
5589         struct rect src = { 0 }; /* viewport in composition space */
5590         struct rect dst = { 0 }; /* stream addressable area */
5591
5592         /* no mode. nothing to be done */
5593         if (!mode)
5594                 return;
5595
5596         /* Full screen scaling by default */
5597         src.width = mode->hdisplay;
5598         src.height = mode->vdisplay;
5599         dst.width = stream->timing.h_addressable;
5600         dst.height = stream->timing.v_addressable;
5601
5602         if (dm_state) {
5603                 rmx_type = dm_state->scaling;
5604                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5605                         if (src.width * dst.height <
5606                                         src.height * dst.width) {
5607                                 /* height needs less upscaling/more downscaling */
5608                                 dst.width = src.width *
5609                                                 dst.height / src.height;
5610                         } else {
5611                                 /* width needs less upscaling/more downscaling */
5612                                 dst.height = src.height *
5613                                                 dst.width / src.width;
5614                         }
5615                 } else if (rmx_type == RMX_CENTER) {
5616                         dst = src;
5617                 }
5618
5619                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5620                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5621
5622                 if (dm_state->underscan_enable) {
5623                         dst.x += dm_state->underscan_hborder / 2;
5624                         dst.y += dm_state->underscan_vborder / 2;
5625                         dst.width -= dm_state->underscan_hborder;
5626                         dst.height -= dm_state->underscan_vborder;
5627                 }
5628         }
5629
5630         stream->src = src;
5631         stream->dst = dst;
5632
5633         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5634                       dst.x, dst.y, dst.width, dst.height);
5635
5636 }
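
/*
 * Illustrative worked example (not from the source): scaling a 1280x720
 * source onto a 1920x1200 timing with RMX_ASPECT. src.width * dst.height
 * (1536000) is not less than src.height * dst.width (1382400), so the
 * width branch runs: dst.height = 720 * 1920 / 1280 = 1080, and centering
 * gives dst.x = 0, dst.y = (1200 - 1080) / 2 = 60.
 */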
5637
5638 static enum dc_color_depth
5639 convert_color_depth_from_display_info(const struct drm_connector *connector,
5640                                       bool is_y420, int requested_bpc)
5641 {
5642         uint8_t bpc;
5643
5644         if (is_y420) {
5645                 bpc = 8;
5646
5647                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5648                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5649                         bpc = 16;
5650                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5651                         bpc = 12;
5652                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5653                         bpc = 10;
5654         } else {
5655                 bpc = (uint8_t)connector->display_info.bpc;
5656                 /* Assume 8 bpc by default if no bpc is specified. */
5657                 bpc = bpc ? bpc : 8;
5658         }
5659
5660         if (requested_bpc > 0) {
5661                 /*
5662                  * Cap display bpc based on the user requested value.
5663                  *
5664                  * The value for state->max_bpc may not be correctly updated
5665                  * depending on when the connector gets added to the state
5666                  * or if this was called outside of atomic check, so it
5667                  * can't be used directly.
5668                  */
5669                 bpc = min_t(u8, bpc, requested_bpc);
5670
5671                 /* Round down to the nearest even number. */
5672                 bpc = bpc - (bpc & 1);
5673         }
5674
5675         switch (bpc) {
5676         case 0:
5677                 /*
5678                  * Temporary Work around, DRM doesn't parse color depth for
5679                  * EDID revision before 1.4
5680                  * TODO: Fix edid parsing
5681                  */
5682                 return COLOR_DEPTH_888;
5683         case 6:
5684                 return COLOR_DEPTH_666;
5685         case 8:
5686                 return COLOR_DEPTH_888;
5687         case 10:
5688                 return COLOR_DEPTH_101010;
5689         case 12:
5690                 return COLOR_DEPTH_121212;
5691         case 14:
5692                 return COLOR_DEPTH_141414;
5693         case 16:
5694                 return COLOR_DEPTH_161616;
5695         default:
5696                 return COLOR_DEPTH_UNDEFINED;
5697         }
5698 }
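
/*
 * Illustrative sketch (not part of the driver) of the capping above: a
 * display reporting 12 bpc with a user-requested maximum of 11 yields
 * min(12, 11) = 11, which rounds down to the even value 10, i.e.
 * COLOR_DEPTH_101010.
 */
static inline uint8_t example_cap_bpc(uint8_t display_bpc, int requested_bpc)
{
	uint8_t bpc = display_bpc ? display_bpc : 8;	/* default to 8 bpc */

	if (requested_bpc > 0) {
		bpc = min_t(u8, bpc, requested_bpc);
		bpc = bpc - (bpc & 1);	/* round down to even */
	}
	return bpc;
}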
5699
5700 static enum dc_aspect_ratio
5701 get_aspect_ratio(const struct drm_display_mode *mode_in)
5702 {
5703         /* 1-1 mapping, since both enums follow the HDMI spec. */
5704         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5705 }
5706
5707 static enum dc_color_space
5708 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5709 {
5710         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5711
5712         switch (dc_crtc_timing->pixel_encoding) {
5713         case PIXEL_ENCODING_YCBCR422:
5714         case PIXEL_ENCODING_YCBCR444:
5715         case PIXEL_ENCODING_YCBCR420:
5716         {
5717                 /*
5718                  * 27030 kHz is the separation point between HDTV and SDTV
5719                  * according to the HDMI spec; use YCbCr709 above it and
5720                  * YCbCr601 at or below it.
5721                  */
5722                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5723                         if (dc_crtc_timing->flags.Y_ONLY)
5724                                 color_space =
5725                                         COLOR_SPACE_YCBCR709_LIMITED;
5726                         else
5727                                 color_space = COLOR_SPACE_YCBCR709;
5728                 } else {
5729                         if (dc_crtc_timing->flags.Y_ONLY)
5730                                 color_space =
5731                                         COLOR_SPACE_YCBCR601_LIMITED;
5732                         else
5733                                 color_space = COLOR_SPACE_YCBCR601;
5734                 }
5735
5736         }
5737         break;
5738         case PIXEL_ENCODING_RGB:
5739                 color_space = COLOR_SPACE_SRGB;
5740                 break;
5741
5742         default:
5743                 WARN_ON(1);
5744                 break;
5745         }
5746
5747         return color_space;
5748 }
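
/*
 * Illustrative note (not from the source): the 270300 threshold above is
 * in pix_clk_100hz units, i.e. 27.03 MHz. 480p/576p (27.0 MHz) therefore
 * select YCbCr601, while 720p (74.25 MHz) and 1080p (148.5 MHz) select
 * YCbCr709.
 */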
5749
5750 static bool adjust_colour_depth_from_display_info(
5751         struct dc_crtc_timing *timing_out,
5752         const struct drm_display_info *info)
5753 {
5754         enum dc_color_depth depth = timing_out->display_color_depth;
5755         int normalized_clk;
5756         do {
5757                 normalized_clk = timing_out->pix_clk_100hz / 10;
5758                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5759                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5760                         normalized_clk /= 2;
5761                 /* Adjusting pix clock following on HDMI spec based on colour depth */
5762                 switch (depth) {
5763                 case COLOR_DEPTH_888:
5764                         break;
5765                 case COLOR_DEPTH_101010:
5766                         normalized_clk = (normalized_clk * 30) / 24;
5767                         break;
5768                 case COLOR_DEPTH_121212:
5769                         normalized_clk = (normalized_clk * 36) / 24;
5770                         break;
5771                 case COLOR_DEPTH_161616:
5772                         normalized_clk = (normalized_clk * 48) / 24;
5773                         break;
5774                 default:
5775                         /* The above depths are the only ones valid for HDMI. */
5776                         return false;
5777                 }
5778                 if (normalized_clk <= info->max_tmds_clock) {
5779                         timing_out->display_color_depth = depth;
5780                         return true;
5781                 }
5782         } while (--depth > COLOR_DEPTH_666);
5783         return false;
5784 }
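
/*
 * Illustrative worked example (not from the source): 4K@60 in YCbCr 4:2:0
 * has pix_clk_100hz = 5940000, so normalized_clk starts at 594000 / 2 =
 * 297000 kHz. At 12 bpc that scales to 297000 * 36 / 24 = 445500 kHz and
 * at 10 bpc to 371250 kHz, both above a 340000 kHz max_tmds_clock, so the
 * loop settles on COLOR_DEPTH_888 at 297000 kHz.
 */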
5785
5786 static void fill_stream_properties_from_drm_display_mode(
5787         struct dc_stream_state *stream,
5788         const struct drm_display_mode *mode_in,
5789         const struct drm_connector *connector,
5790         const struct drm_connector_state *connector_state,
5791         const struct dc_stream_state *old_stream,
5792         int requested_bpc)
5793 {
5794         struct dc_crtc_timing *timing_out = &stream->timing;
5795         const struct drm_display_info *info = &connector->display_info;
5796         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5797         struct hdmi_vendor_infoframe hv_frame;
5798         struct hdmi_avi_infoframe avi_frame;
5799
5800         memset(&hv_frame, 0, sizeof(hv_frame));
5801         memset(&avi_frame, 0, sizeof(avi_frame));
5802
5803         timing_out->h_border_left = 0;
5804         timing_out->h_border_right = 0;
5805         timing_out->v_border_top = 0;
5806         timing_out->v_border_bottom = 0;
5807         /* TODO: un-hardcode */
5808         if (drm_mode_is_420_only(info, mode_in)
5809                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5810                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5811         else if (drm_mode_is_420_also(info, mode_in)
5812                         && aconnector->force_yuv420_output)
5813                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5814         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5815                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5816                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5817         else
5818                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5819
5820         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5821         timing_out->display_color_depth = convert_color_depth_from_display_info(
5822                 connector,
5823                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5824                 requested_bpc);
5825         timing_out->scan_type = SCANNING_TYPE_NODATA;
5826         timing_out->hdmi_vic = 0;
5827
5828         if (old_stream) {
5829                 timing_out->vic = old_stream->timing.vic;
5830                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5831                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5832         } else {
5833                 timing_out->vic = drm_match_cea_mode(mode_in);
5834                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5835                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5836                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5837                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5838         }
5839
5840         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5841                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5842                 timing_out->vic = avi_frame.video_code;
5843                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5844                 timing_out->hdmi_vic = hv_frame.vic;
5845         }
5846
5847         if (is_freesync_video_mode(mode_in, aconnector)) {
5848                 timing_out->h_addressable = mode_in->hdisplay;
5849                 timing_out->h_total = mode_in->htotal;
5850                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5851                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5852                 timing_out->v_total = mode_in->vtotal;
5853                 timing_out->v_addressable = mode_in->vdisplay;
5854                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5855                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5856                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5857         } else {
5858                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5859                 timing_out->h_total = mode_in->crtc_htotal;
5860                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5861                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5862                 timing_out->v_total = mode_in->crtc_vtotal;
5863                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5864                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5865                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5866                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5867         }
5868
5869         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5870
5871         stream->output_color_space = get_output_color_space(timing_out);
5872
5873         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5874         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5875         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5876                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5877                     drm_mode_is_420_also(info, mode_in) &&
5878                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5879                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5880                         adjust_colour_depth_from_display_info(timing_out, info);
5881                 }
5882         }
5883 }
5884
5885 static void fill_audio_info(struct audio_info *audio_info,
5886                             const struct drm_connector *drm_connector,
5887                             const struct dc_sink *dc_sink)
5888 {
5889         int i = 0;
5890         int cea_revision = 0;
5891         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5892
5893         audio_info->manufacture_id = edid_caps->manufacturer_id;
5894         audio_info->product_id = edid_caps->product_id;
5895
5896         cea_revision = drm_connector->display_info.cea_rev;
5897
5898         strscpy(audio_info->display_name,
5899                 edid_caps->display_name,
5900                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5901
5902         if (cea_revision >= 3) {
5903                 audio_info->mode_count = edid_caps->audio_mode_count;
5904
5905                 for (i = 0; i < audio_info->mode_count; ++i) {
5906                         audio_info->modes[i].format_code =
5907                                         (enum audio_format_code)
5908                                         (edid_caps->audio_modes[i].format_code);
5909                         audio_info->modes[i].channel_count =
5910                                         edid_caps->audio_modes[i].channel_count;
5911                         audio_info->modes[i].sample_rates.all =
5912                                         edid_caps->audio_modes[i].sample_rate;
5913                         audio_info->modes[i].sample_size =
5914                                         edid_caps->audio_modes[i].sample_size;
5915                 }
5916         }
5917
5918         audio_info->flags.all = edid_caps->speaker_flags;
5919
5920         /* TODO: We only check for the progressive mode, check for interlace mode too */
5921         if (drm_connector->latency_present[0]) {
5922                 audio_info->video_latency = drm_connector->video_latency[0];
5923                 audio_info->audio_latency = drm_connector->audio_latency[0];
5924         }
5925
5926         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5927
5928 }
5929
5930 static void
5931 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5932                                       struct drm_display_mode *dst_mode)
5933 {
5934         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5935         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5936         dst_mode->crtc_clock = src_mode->crtc_clock;
5937         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5938         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5939         dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5940         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5941         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5942         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5943         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5944         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5945         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5946         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5947         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5948 }
5949
5950 static void
5951 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5952                                         const struct drm_display_mode *native_mode,
5953                                         bool scale_enabled)
5954 {
5955         if (scale_enabled) {
5956                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5957         } else if (native_mode->clock == drm_mode->clock &&
5958                         native_mode->htotal == drm_mode->htotal &&
5959                         native_mode->vtotal == drm_mode->vtotal) {
5960                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5961         } else {
5962                 /* no scaling and no amdgpu-inserted mode: nothing to patch */
5963         }
5964 }
5965
5966 static struct dc_sink *
5967 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5968 {
5969         struct dc_sink_init_data sink_init_data = { 0 };
5970         struct dc_sink *sink = NULL;
5971         sink_init_data.link = aconnector->dc_link;
5972         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5973
5974         sink = dc_sink_create(&sink_init_data);
5975         if (!sink) {
5976                 DRM_ERROR("Failed to create sink!\n");
5977                 return NULL;
5978         }
5979         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5980
5981         return sink;
5982 }
5983
5984 static void set_multisync_trigger_params(
5985                 struct dc_stream_state *stream)
5986 {
5987         struct dc_stream_state *master = NULL;
5988
5989         if (stream->triggered_crtc_reset.enabled) {
5990                 master = stream->triggered_crtc_reset.event_source;
5991                 stream->triggered_crtc_reset.event =
5992                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5993                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5994                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5995         }
5996 }
5997
5998 static void set_master_stream(struct dc_stream_state *stream_set[],
5999                               int stream_count)
6000 {
6001         int j, highest_rfr = 0, master_stream = 0;
6002
6003         for (j = 0;  j < stream_count; j++) {
6004                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6005                         int refresh_rate = 0;
6006
6007                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6008                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6009                         if (refresh_rate > highest_rfr) {
6010                                 highest_rfr = refresh_rate;
6011                                 master_stream = j;
6012                         }
6013                 }
6014         }
6015         for (j = 0;  j < stream_count; j++) {
6016                 if (stream_set[j])
6017                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6018         }
6019 }
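
/*
 * Illustrative worked example (not from the source): a CEA 1080p60 stream
 * has pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125, so
 * refresh_rate = (1485000 * 100) / (2200 * 1125) = 60, and that stream
 * would win the master election over any lower-rate stream in the set.
 */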
6020
6021 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6022 {
6023         int i = 0;
6024         struct dc_stream_state *stream;
6025
6026         if (context->stream_count < 2)
6027                 return;
6028         for (i = 0; i < context->stream_count; i++) {
6029                 if (!context->streams[i])
6030                         continue;
6031                 /*
6032                  * TODO: add a function to read AMD VSDB bits and set
6033                  * crtc_sync_master.multi_sync_enabled flag
6034                  * For now it's set to false
6035                  */
6036         }
6037
6038         set_master_stream(context->streams, context->stream_count);
6039
6040         for (i = 0; i < context->stream_count; i++) {
6041                 stream = context->streams[i];
6042
6043                 if (!stream)
6044                         continue;
6045
6046                 set_multisync_trigger_params(stream);
6047         }
6048 }
6049
6050 #if defined(CONFIG_DRM_AMD_DC_DCN)
6051 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6052                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6053                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6054 {
6055         stream->timing.flags.DSC = 0;
6056
6057         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6058                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6059                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6060                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6061                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6062                                       dsc_caps);
6063         }
6064 }
6065
6066 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6067                                     struct dc_sink *sink, struct dc_stream_state *stream,
6068                                     struct dsc_dec_dpcd_caps *dsc_caps,
6069                                     uint32_t max_dsc_target_bpp_limit_override)
6070 {
6071         const struct dc_link_settings *verified_link_cap = NULL;
6072         uint32_t link_bw_in_kbps;
6073         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6074         struct dc *dc = sink->ctx->dc;
6075         struct dc_dsc_bw_range bw_range = {0};
6076         struct dc_dsc_config dsc_cfg = {0};
6077
6078         verified_link_cap = dc_link_get_link_cap(stream->link);
6079         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6080         edp_min_bpp_x16 = 8 * 16;
6081         edp_max_bpp_x16 = 8 * 16;
6082
6083         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6084                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6085
6086         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6087                 edp_min_bpp_x16 = edp_max_bpp_x16;
6088
6089         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6090                                 dc->debug.dsc_min_slice_height_override,
6091                                 edp_min_bpp_x16, edp_max_bpp_x16,
6092                                 dsc_caps,
6093                                 &stream->timing,
6094                                 &bw_range)) {
6095
6096                 if (bw_range.max_kbps < link_bw_in_kbps) {
6097                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6098                                         dsc_caps,
6099                                         dc->debug.dsc_min_slice_height_override,
6100                                         max_dsc_target_bpp_limit_override,
6101                                         0,
6102                                         &stream->timing,
6103                                         &dsc_cfg)) {
6104                                 stream->timing.dsc_cfg = dsc_cfg;
6105                                 stream->timing.flags.DSC = 1;
6106                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6107                         }
6108                         return;
6109                 }
6110         }
6111
6112         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6113                                 dsc_caps,
6114                                 dc->debug.dsc_min_slice_height_override,
6115                                 max_dsc_target_bpp_limit_override,
6116                                 link_bw_in_kbps,
6117                                 &stream->timing,
6118                                 &dsc_cfg)) {
6119                 stream->timing.dsc_cfg = dsc_cfg;
6120                 stream->timing.flags.DSC = 1;
6121         }
6122 }
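
/*
 * Note on units (illustrative, not from the source): DSC target bpp values
 * are carried in 1/16th-of-a-bit steps, so edp_min_bpp_x16 = 8 * 16 above
 * encodes 8.0 bpp, and a value of 136 would encode 8.5 bpp.
 */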
6123
6124 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6125                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6126                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6127 {
6128         struct drm_connector *drm_connector = &aconnector->base;
6129         uint32_t link_bandwidth_kbps;
6130         uint32_t max_dsc_target_bpp_limit_override = 0;
6131         struct dc *dc = sink->ctx->dc;
6132
6133         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6134                                                         dc_link_get_link_cap(aconnector->dc_link));
6135
6136         if (stream->link && stream->link->local_sink)
6137                 max_dsc_target_bpp_limit_override =
6138                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6139
6140         /* Set DSC policy according to dsc_clock_en */
6141         dc_dsc_policy_set_enable_dsc_when_not_needed(
6142                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6143
6144         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6145             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6146
6147                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6148
6149         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6150
6151                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6152                                                 dsc_caps,
6153                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6154                                                 max_dsc_target_bpp_limit_override,
6155                                                 link_bandwidth_kbps,
6156                                                 &stream->timing,
6157                                                 &stream->timing.dsc_cfg)) {
6158                         stream->timing.flags.DSC = 1;
6159                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6160                 }
6161         }
6162
6163         /* Overwrite the stream flag if DSC is enabled through debugfs */
6164         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6165                 stream->timing.flags.DSC = 1;
6166
6167         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6168                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6169
6170         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6171                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6172
6173         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6174                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6175 }
6176 #endif /* CONFIG_DRM_AMD_DC_DCN */
6177
6178 /**
6179  * DOC: FreeSync Video
6180  *
6181  * When a userspace application wants to play a video, the content follows a
6182  * standard format definition that usually specifies the FPS for that format.
6183  * The list below illustrates some common video formats and the FPS
6184  * each one expects:
6185  *
6186  * - TV/NTSC (23.976 FPS)
6187  * - Cinema (24 FPS)
6188  * - TV/PAL (25 FPS)
6189  * - TV/NTSC (29.97 FPS)
6190  * - TV/NTSC (30 FPS)
6191  * - Cinema HFR (48 FPS)
6192  * - TV/PAL (50 FPS)
6193  * - Commonly used (60 FPS)
6194  * - Multiples of 24 (48,72,96,120 FPS)
6195  *
6196  * The list of standard video formats is not huge, so they can be added to
6197  * the connector's mode list beforehand. With that, userspace can leverage
6198  * FreeSync to extend the front porch in order to attain the target refresh
6199  * rate. Such a switch will happen seamlessly, without screen blanking or
6200  * reprogramming of the output in any other way. If the userspace requests a
6201  * modesetting change compatible with FreeSync modes that only differ in the
6202  * refresh rate, DC will skip the full update and avoid blink during the
6203  * transition. For example, the video player can change the modesetting from
6204  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6205  * causing any display blink. The same concept applies to any mode-setting
6206  * change between such compatible modes.
6207  */
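
/*
 * Illustrative sketch (not from the source): with the pixel clock and
 * horizontal timing held fixed, the refresh rate depends only on v_total,
 * so a lower-rate FreeSync video mode is obtained by stretching the
 * vertical front porch. Deriving 48 Hz from a 1080p60 base
 * (148.5 MHz, h_total = 2200, v_total = 1125):
 *
 *	v_total(48 Hz) = 148500000 / (2200 * 48) ~= 1406 lines
 *
 * i.e. roughly 281 extra front-porch lines, with no pixel-clock change
 * that would force a full modeset.
 */
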
6208 static struct drm_display_mode *
6209 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6210                           bool use_probed_modes)
6211 {
6212         struct drm_display_mode *m, *m_pref = NULL;
6213         u16 current_refresh, highest_refresh;
6214         struct list_head *list_head = use_probed_modes ?
6215                                                     &aconnector->base.probed_modes :
6216                                                     &aconnector->base.modes;
6217
6218         if (aconnector->freesync_vid_base.clock != 0)
6219                 return &aconnector->freesync_vid_base;
6220
6221         /* Find the preferred mode */
6222         list_for_each_entry(m, list_head, head) {
6223                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6224                         m_pref = m;
6225                         break;
6226                 }
6227         }
6228
6229         if (!m_pref) {
6230                 /* Probably an EDID with no preferred mode; fall back to the first entry */
6231                 m_pref = list_first_entry_or_null(
6232                         &aconnector->base.modes, struct drm_display_mode, head);
6233                 if (!m_pref) {
6234                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6235                         return NULL;
6236                 }
6237         }
6238
6239         highest_refresh = drm_mode_vrefresh(m_pref);
6240
6241         /*
6242          * Find the mode with highest refresh rate with same resolution.
6243          * For some monitors, preferred mode is not the mode with highest
6244          * supported refresh rate.
6245          */
6246         list_for_each_entry(m, list_head, head) {
6247                 current_refresh  = drm_mode_vrefresh(m);
6248
6249                 if (m->hdisplay == m_pref->hdisplay &&
6250                     m->vdisplay == m_pref->vdisplay &&
6251                     highest_refresh < current_refresh) {
6252                         highest_refresh = current_refresh;
6253                         m_pref = m;
6254                 }
6255         }
6256
6257         aconnector->freesync_vid_base = *m_pref;
6258         return m_pref;
6259 }
6260
6261 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6262                                    struct amdgpu_dm_connector *aconnector)
6263 {
6264         struct drm_display_mode *high_mode;
6265         int timing_diff;
6266
6267         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6268         if (!high_mode || !mode)
6269                 return false;
6270
6271         timing_diff = high_mode->vtotal - mode->vtotal;
6272
6273         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6274             high_mode->hdisplay != mode->hdisplay ||
6275             high_mode->vdisplay != mode->vdisplay ||
6276             high_mode->hsync_start != mode->hsync_start ||
6277             high_mode->hsync_end != mode->hsync_end ||
6278             high_mode->htotal != mode->htotal ||
6279             high_mode->hskew != mode->hskew ||
6280             high_mode->vscan != mode->vscan ||
6281             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6282             high_mode->vsync_end - mode->vsync_end != timing_diff)
6283                 return false;
6284         else
6285                 return true;
6286 }
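
/*
 * Illustrative note (not from the source): the checks above permit exactly
 * one difference between the candidate mode and the high-refresh base
 * mode: a larger v_total with the vsync pulse shifted by the same amount,
 * i.e. the pure front-porch stretch described in the FreeSync Video DOC
 * comment above.
 */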
6287
6288 static struct dc_stream_state *
6289 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6290                        const struct drm_display_mode *drm_mode,
6291                        const struct dm_connector_state *dm_state,
6292                        const struct dc_stream_state *old_stream,
6293                        int requested_bpc)
6294 {
6295         struct drm_display_mode *preferred_mode = NULL;
6296         struct drm_connector *drm_connector;
6297         const struct drm_connector_state *con_state =
6298                 dm_state ? &dm_state->base : NULL;
6299         struct dc_stream_state *stream = NULL;
6300         struct drm_display_mode mode = *drm_mode;
6301         struct drm_display_mode saved_mode;
6302         struct drm_display_mode *freesync_mode = NULL;
6303         bool native_mode_found = false;
6304         bool recalculate_timing = false;
6305         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6306         int mode_refresh;
6307         int preferred_refresh = 0;
6308 #if defined(CONFIG_DRM_AMD_DC_DCN)
6309         struct dsc_dec_dpcd_caps dsc_caps;
6310 #endif
6311         struct dc_sink *sink = NULL;
6312
6313         memset(&saved_mode, 0, sizeof(saved_mode));
6314
6315         if (aconnector == NULL) {
6316                 DRM_ERROR("aconnector is NULL!\n");
6317                 return stream;
6318         }
6319
6320         drm_connector = &aconnector->base;
6321
6322         if (!aconnector->dc_sink) {
6323                 sink = create_fake_sink(aconnector);
6324                 if (!sink)
6325                         return stream;
6326         } else {
6327                 sink = aconnector->dc_sink;
6328                 dc_sink_retain(sink);
6329         }
6330
6331         stream = dc_create_stream_for_sink(sink);
6332
6333         if (stream == NULL) {
6334                 DRM_ERROR("Failed to create stream for sink!\n");
6335                 goto finish;
6336         }
6337
6338         stream->dm_stream_context = aconnector;
6339
6340         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6341                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6342
6343         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6344                 /* Search for preferred mode */
6345                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6346                         native_mode_found = true;
6347                         break;
6348                 }
6349         }
6350         if (!native_mode_found)
6351                 preferred_mode = list_first_entry_or_null(
6352                                 &aconnector->base.modes,
6353                                 struct drm_display_mode,
6354                                 head);
6355
6356         mode_refresh = drm_mode_vrefresh(&mode);
6357
6358         if (preferred_mode == NULL) {
6359                 /*
6360                  * This may not be an error: the use case is when we have no
6361                  * usermode calls to reset and set mode upon hotplug. In this
6362                  * case, we call set mode ourselves to restore the previous mode,
6363                  * and the mode list may not have been filled in yet.
6364                  */
6365                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6366         } else {
6367                 recalculate_timing = amdgpu_freesync_vid_mode &&
6368                                  is_freesync_video_mode(&mode, aconnector);
6369                 if (recalculate_timing) {
6370                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6371                         saved_mode = mode;
6372                         mode = *freesync_mode;
6373                 } else {
6374                         decide_crtc_timing_for_drm_display_mode(
6375                                 &mode, preferred_mode, scale);
6376
6377                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6378                 }
6379         }
6380
6381         if (recalculate_timing)
6382                 drm_mode_set_crtcinfo(&saved_mode, 0);
6383         else if (!dm_state)
6384                 drm_mode_set_crtcinfo(&mode, 0);
6385
6386         /*
6387          * If scaling is enabled and the refresh rate didn't change,
6388          * copy the vic and polarities from the old timing.
6389          */
6390         if (!scale || mode_refresh != preferred_refresh)
6391                 fill_stream_properties_from_drm_display_mode(
6392                         stream, &mode, &aconnector->base, con_state, NULL,
6393                         requested_bpc);
6394         else
6395                 fill_stream_properties_from_drm_display_mode(
6396                         stream, &mode, &aconnector->base, con_state, old_stream,
6397                         requested_bpc);
6398
6399 #if defined(CONFIG_DRM_AMD_DC_DCN)
6400         /* SST DSC determination policy */
6401         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6402         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6403                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6404 #endif
6405
6406         update_stream_scaling_settings(&mode, dm_state, stream);
6407
6408         fill_audio_info(
6409                 &stream->audio_info,
6410                 drm_connector,
6411                 sink);
6412
6413         update_stream_signal(stream, sink);
6414
6415         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6416                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6417
6418         if (stream->link->psr_settings.psr_feature_enabled) {
6419                 /*
6420                  * Decide whether the stream supports VSC SDP colorimetry
6421                  * before building the VSC info packet.
6422                  */
6423                 stream->use_vsc_sdp_for_colorimetry = false;
6424                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6425                         stream->use_vsc_sdp_for_colorimetry =
6426                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6427                 } else {
6428                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6429                                 stream->use_vsc_sdp_for_colorimetry = true;
6430                 }
6431                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6432                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6433
6434         }
6435 finish:
6436         dc_sink_release(sink);
6437
6438         return stream;
6439 }
6440
6441 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6442 {
6443         drm_crtc_cleanup(crtc);
6444         kfree(crtc);
6445 }
6446
6447 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6448                                   struct drm_crtc_state *state)
6449 {
6450         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6451
6452         /* TODO: Destroy dc_stream objects once the stream object is flattened */
6453         if (cur->stream)
6454                 dc_stream_release(cur->stream);
6455
6457         __drm_atomic_helper_crtc_destroy_state(state);
6458
6460         kfree(state);
6461 }
6462
6463 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6464 {
6465         struct dm_crtc_state *state;
6466
6467         if (crtc->state)
6468                 dm_crtc_destroy_state(crtc, crtc->state);
6469
6470         state = kzalloc(sizeof(*state), GFP_KERNEL);
6471         if (WARN_ON(!state))
6472                 return;
6473
6474         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6475 }
6476
6477 static struct drm_crtc_state *
6478 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6479 {
6480         struct dm_crtc_state *state, *cur;
6481
6482         if (WARN_ON(!crtc->state))
6483                 return NULL;
6484
6485         cur = to_dm_crtc_state(crtc->state);
6486
6487         state = kzalloc(sizeof(*state), GFP_KERNEL);
6488         if (!state)
6489                 return NULL;
6490
6491         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6492
6493         if (cur->stream) {
6494                 state->stream = cur->stream;
6495                 dc_stream_retain(state->stream);
6496         }
6497
6498         state->active_planes = cur->active_planes;
6499         state->vrr_infopacket = cur->vrr_infopacket;
6500         state->abm_level = cur->abm_level;
6501         state->vrr_supported = cur->vrr_supported;
6502         state->freesync_config = cur->freesync_config;
6503         state->cm_has_degamma = cur->cm_has_degamma;
6504         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6505         state->force_dpms_off = cur->force_dpms_off;
6506         /* TODO: Duplicate dc_stream once the stream object is flattened */
6507
6508         return &state->base;
6509 }
6510
6511 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6512 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6513 {
6514         crtc_debugfs_init(crtc);
6515
6516         return 0;
6517 }
6518 #endif
6519
6520 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6521 {
6522         enum dc_irq_source irq_source;
6523         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6524         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6525         int rc;
6526
6527         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6528
6529         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6530
6531         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6532                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6533         return rc;
6534 }
6535
6536 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6537 {
6538         enum dc_irq_source irq_source;
6539         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6540         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6541         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6542 #if defined(CONFIG_DRM_AMD_DC_DCN)
6543         struct amdgpu_display_manager *dm = &adev->dm;
6544         struct vblank_control_work *work;
6545 #endif
6546         int rc = 0;
6547
6548         if (enable) {
6549                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6550                 if (amdgpu_dm_vrr_active(acrtc_state))
6551                         rc = dm_set_vupdate_irq(crtc, true);
6552         } else {
6553                 /* vblank irq off -> vupdate irq off */
6554                 rc = dm_set_vupdate_irq(crtc, false);
6555         }
6556
6557         if (rc)
6558                 return rc;
6559
6560         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6561
6562         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6563                 return -EBUSY;
6564
6565         if (amdgpu_in_reset(adev))
6566                 return 0;
6567
6568 #if defined(CONFIG_DRM_AMD_DC_DCN)
6569         if (dm->vblank_control_workqueue) {
6570                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6571                 if (!work)
6572                         return -ENOMEM;
6573
6574                 INIT_WORK(&work->work, vblank_control_worker);
6575                 work->dm = dm;
6576                 work->acrtc = acrtc;
6577                 work->enable = enable;
6578
6579                 if (acrtc_state->stream) {
6580                         dc_stream_retain(acrtc_state->stream);
6581                         work->stream = acrtc_state->stream;
6582                 }
6583
6584                 queue_work(dm->vblank_control_workqueue, &work->work);
6585         }
6586 #endif
6587
6588         return 0;
6589 }
6590
6591 static int dm_enable_vblank(struct drm_crtc *crtc)
6592 {
6593         return dm_set_vblank(crtc, true);
6594 }
6595
6596 static void dm_disable_vblank(struct drm_crtc *crtc)
6597 {
6598         dm_set_vblank(crtc, false);
6599 }
6600
6601 /* Implemented only the options currently available for the driver */
6602 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6603         .reset = dm_crtc_reset_state,
6604         .destroy = amdgpu_dm_crtc_destroy,
6605         .set_config = drm_atomic_helper_set_config,
6606         .page_flip = drm_atomic_helper_page_flip,
6607         .atomic_duplicate_state = dm_crtc_duplicate_state,
6608         .atomic_destroy_state = dm_crtc_destroy_state,
6609         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6610         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6611         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6612         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6613         .enable_vblank = dm_enable_vblank,
6614         .disable_vblank = dm_disable_vblank,
6615         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6616 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6617         .late_register = amdgpu_dm_crtc_late_register,
6618 #endif
6619 };
6620
6621 static enum drm_connector_status
6622 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6623 {
6624         bool connected;
6625         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6626
6627         /*
6628          * Notes:
6629          * 1. This interface is NOT called in context of HPD irq.
6630          * 2. This interface *is called* in the context of a user-mode ioctl,
6631          * which makes it a bad place for *any* MST-related activity.
6632          */
6633
6634         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6635             !aconnector->fake_enable)
6636                 connected = (aconnector->dc_sink != NULL);
6637         else
6638                 connected = (aconnector->base.force == DRM_FORCE_ON);
6639
6640         update_subconnector_property(aconnector);
6641
6642         return (connected ? connector_status_connected :
6643                         connector_status_disconnected);
6644 }
6645
6646 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6647                                             struct drm_connector_state *connector_state,
6648                                             struct drm_property *property,
6649                                             uint64_t val)
6650 {
6651         struct drm_device *dev = connector->dev;
6652         struct amdgpu_device *adev = drm_to_adev(dev);
6653         struct dm_connector_state *dm_old_state =
6654                 to_dm_connector_state(connector->state);
6655         struct dm_connector_state *dm_new_state =
6656                 to_dm_connector_state(connector_state);
6657
6658         int ret = -EINVAL;
6659
6660         if (property == dev->mode_config.scaling_mode_property) {
6661                 enum amdgpu_rmx_type rmx_type;
6662
6663                 switch (val) {
6664                 case DRM_MODE_SCALE_CENTER:
6665                         rmx_type = RMX_CENTER;
6666                         break;
6667                 case DRM_MODE_SCALE_ASPECT:
6668                         rmx_type = RMX_ASPECT;
6669                         break;
6670                 case DRM_MODE_SCALE_FULLSCREEN:
6671                         rmx_type = RMX_FULL;
6672                         break;
6673                 case DRM_MODE_SCALE_NONE:
6674                 default:
6675                         rmx_type = RMX_OFF;
6676                         break;
6677                 }
6678
6679                 if (dm_old_state->scaling == rmx_type)
6680                         return 0;
6681
6682                 dm_new_state->scaling = rmx_type;
6683                 ret = 0;
6684         } else if (property == adev->mode_info.underscan_hborder_property) {
6685                 dm_new_state->underscan_hborder = val;
6686                 ret = 0;
6687         } else if (property == adev->mode_info.underscan_vborder_property) {
6688                 dm_new_state->underscan_vborder = val;
6689                 ret = 0;
6690         } else if (property == adev->mode_info.underscan_property) {
6691                 dm_new_state->underscan_enable = val;
6692                 ret = 0;
6693         } else if (property == adev->mode_info.abm_level_property) {
6694                 dm_new_state->abm_level = val;
6695                 ret = 0;
6696         }
6697
6698         return ret;
6699 }
6700
6701 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6702                                             const struct drm_connector_state *state,
6703                                             struct drm_property *property,
6704                                             uint64_t *val)
6705 {
6706         struct drm_device *dev = connector->dev;
6707         struct amdgpu_device *adev = drm_to_adev(dev);
6708         struct dm_connector_state *dm_state =
6709                 to_dm_connector_state(state);
6710         int ret = -EINVAL;
6711
6712         if (property == dev->mode_config.scaling_mode_property) {
6713                 switch (dm_state->scaling) {
6714                 case RMX_CENTER:
6715                         *val = DRM_MODE_SCALE_CENTER;
6716                         break;
6717                 case RMX_ASPECT:
6718                         *val = DRM_MODE_SCALE_ASPECT;
6719                         break;
6720                 case RMX_FULL:
6721                         *val = DRM_MODE_SCALE_FULLSCREEN;
6722                         break;
6723                 case RMX_OFF:
6724                 default:
6725                         *val = DRM_MODE_SCALE_NONE;
6726                         break;
6727                 }
6728                 ret = 0;
6729         } else if (property == adev->mode_info.underscan_hborder_property) {
6730                 *val = dm_state->underscan_hborder;
6731                 ret = 0;
6732         } else if (property == adev->mode_info.underscan_vborder_property) {
6733                 *val = dm_state->underscan_vborder;
6734                 ret = 0;
6735         } else if (property == adev->mode_info.underscan_property) {
6736                 *val = dm_state->underscan_enable;
6737                 ret = 0;
6738         } else if (property == adev->mode_info.abm_level_property) {
6739                 *val = dm_state->abm_level;
6740                 ret = 0;
6741         }
6742
6743         return ret;
6744 }
6745
6746 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6747 {
6748         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6749
6750         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6751 }
6752
6753 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6754 {
6755         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6756         const struct dc_link *link = aconnector->dc_link;
6757         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6758         struct amdgpu_display_manager *dm = &adev->dm;
6759         int i;
6760
6761         /*
6762          * Call this only if mst_mgr was initialized before, since it's not
6763          * done for all connector types.
6764          */
6765         if (aconnector->mst_mgr.dev)
6766                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6767
6768 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6769         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6770         for (i = 0; i < dm->num_of_edps; i++) {
6771                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6772                         backlight_device_unregister(dm->backlight_dev[i]);
6773                         dm->backlight_dev[i] = NULL;
6774                 }
6775         }
6776 #endif
6777
6778         if (aconnector->dc_em_sink)
6779                 dc_sink_release(aconnector->dc_em_sink);
6780         aconnector->dc_em_sink = NULL;
6781         if (aconnector->dc_sink)
6782                 dc_sink_release(aconnector->dc_sink);
6783         aconnector->dc_sink = NULL;
6784
6785         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6786         drm_connector_unregister(connector);
6787         drm_connector_cleanup(connector);
6788         if (aconnector->i2c) {
6789                 i2c_del_adapter(&aconnector->i2c->base);
6790                 kfree(aconnector->i2c);
6791         }
6792         kfree(aconnector->dm_dp_aux.aux.name);
6793
6794         kfree(connector);
6795 }
6796
6797 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6798 {
6799         struct dm_connector_state *state =
6800                 to_dm_connector_state(connector->state);
6801
6802         if (connector->state)
6803                 __drm_atomic_helper_connector_destroy_state(connector->state);
6804
6805         kfree(state);
6806
6807         state = kzalloc(sizeof(*state), GFP_KERNEL);
6808
6809         if (state) {
6810                 state->scaling = RMX_OFF;
6811                 state->underscan_enable = false;
6812                 state->underscan_hborder = 0;
6813                 state->underscan_vborder = 0;
6814                 state->base.max_requested_bpc = 8;
6815                 state->vcpi_slots = 0;
6816                 state->pbn = 0;
6817                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6818                         state->abm_level = amdgpu_dm_abm_level;
6819
6820                 __drm_atomic_helper_connector_reset(connector, &state->base);
6821         }
6822 }
6823
6824 struct drm_connector_state *
6825 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6826 {
6827         struct dm_connector_state *state =
6828                 to_dm_connector_state(connector->state);
6829
6830         struct dm_connector_state *new_state =
6831                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6832
6833         if (!new_state)
6834                 return NULL;
6835
6836         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6837
6838         new_state->freesync_capable = state->freesync_capable;
6839         new_state->abm_level = state->abm_level;
6840         new_state->scaling = state->scaling;
6841         new_state->underscan_enable = state->underscan_enable;
6842         new_state->underscan_hborder = state->underscan_hborder;
6843         new_state->underscan_vborder = state->underscan_vborder;
6844         new_state->vcpi_slots = state->vcpi_slots;
6845         new_state->pbn = state->pbn;
6846         return &new_state->base;
6847 }
6848
6849 static int
6850 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6851 {
6852         struct amdgpu_dm_connector *amdgpu_dm_connector =
6853                 to_amdgpu_dm_connector(connector);
6854         int r;
6855
6856         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6857             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6858                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6859                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6860                 if (r)
6861                         return r;
6862         }
6863
6864 #if defined(CONFIG_DEBUG_FS)
6865         connector_debugfs_init(amdgpu_dm_connector);
6866 #endif
6867
6868         return 0;
6869 }
6870
6871 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6872         .reset = amdgpu_dm_connector_funcs_reset,
6873         .detect = amdgpu_dm_connector_detect,
6874         .fill_modes = drm_helper_probe_single_connector_modes,
6875         .destroy = amdgpu_dm_connector_destroy,
6876         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6877         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6878         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6879         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6880         .late_register = amdgpu_dm_connector_late_register,
6881         .early_unregister = amdgpu_dm_connector_unregister
6882 };
6883
6884 static int get_modes(struct drm_connector *connector)
6885 {
6886         return amdgpu_dm_connector_get_modes(connector);
6887 }
6888
6889 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6890 {
6891         struct dc_sink_init_data init_params = {
6892                         .link = aconnector->dc_link,
6893                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6894         };
6895         struct edid *edid;
6896
6897         if (!aconnector->base.edid_blob_ptr) {
6898                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6899                                 aconnector->base.name);
6900
6901                 aconnector->base.force = DRM_FORCE_OFF;
6902                 aconnector->base.override_edid = false;
6903                 return;
6904         }
6905
6906         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6907
6908         aconnector->edid = edid;
6909
6910         aconnector->dc_em_sink = dc_link_add_remote_sink(
6911                 aconnector->dc_link,
6912                 (uint8_t *)edid,
6913                 (edid->extensions + 1) * EDID_LENGTH,
6914                 &init_params);
6915
6916         if (aconnector->base.force == DRM_FORCE_ON) {
6917                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6918                         aconnector->dc_link->local_sink :
6919                         aconnector->dc_em_sink;
6920                 dc_sink_retain(aconnector->dc_sink);
6921         }
6922 }
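
/*
 * Note on the EDID length passed to dc_link_add_remote_sink() above: an
 * EDID is a 128-byte (EDID_LENGTH) base block followed by
 * edid->extensions extension blocks of the same size, hence
 * (edid->extensions + 1) * EDID_LENGTH. For example, a typical EDID with
 * one CEA-861 extension block is (1 + 1) * 128 = 256 bytes.
 */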
6923
6924 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6925 {
6926         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6927
6928         /*
6929          * In case of a headless boot with force on for a DP managed
6930          * connector, these settings have to be != 0 to get an initial modeset
6931          */
6932         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6933                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6934                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6935         }
6936
6937
6938         aconnector->base.override_edid = true;
6939         create_eml_sink(aconnector);
6940 }
6941
6942 static struct dc_stream_state *
6943 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6944                                 const struct drm_display_mode *drm_mode,
6945                                 const struct dm_connector_state *dm_state,
6946                                 const struct dc_stream_state *old_stream)
6947 {
6948         struct drm_connector *connector = &aconnector->base;
6949         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6950         struct dc_stream_state *stream;
6951         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6952         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6953         enum dc_status dc_result = DC_OK;
6954
6955         do {
6956                 stream = create_stream_for_sink(aconnector, drm_mode,
6957                                                 dm_state, old_stream,
6958                                                 requested_bpc);
6959                 if (stream == NULL) {
6960                         DRM_ERROR("Failed to create stream for sink!\n");
6961                         break;
6962                 }
6963
6964                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6965
6966                 if (dc_result != DC_OK) {
6967                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6968                                       drm_mode->hdisplay,
6969                                       drm_mode->vdisplay,
6970                                       drm_mode->clock,
6971                                       dc_result,
6972                                       dc_status_to_str(dc_result));
6973
6974                         dc_stream_release(stream);
6975                         stream = NULL;
6976                         requested_bpc -= 2; /* lower bpc to retry validation */
6977                 }
6978
6979         } while (stream == NULL && requested_bpc >= 6);
6980
6981         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6982                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6983
6984                 aconnector->force_yuv420_output = true;
6985                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6986                                                 dm_state, old_stream);
6987                 aconnector->force_yuv420_output = false;
6988         }
6989
6990         return stream;
6991 }
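
/*
 * Illustrative walk-through of the validation retry ladder above, for a
 * sink that requested 10 bpc on a link without the bandwidth for it:
 *
 *	requested_bpc = 10 -> dc_validate_stream() rejects the stream
 *	requested_bpc = 8  -> rejected again
 *	requested_bpc = 6  -> DC_OK, stream is accepted at reduced depth
 *
 * If even 6 bpc fails with DC_FAIL_ENC_VALIDATE, a single recursive retry
 * is made with force_yuv420_output set, since YCbCr 4:2:0 subsampling
 * roughly halves the bandwidth the mode needs on the link.
 */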
6992
6993 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6994                                    struct drm_display_mode *mode)
6995 {
6996         int result = MODE_ERROR;
6997         struct dc_sink *dc_sink;
6998         /* TODO: Unhardcode stream count */
6999         struct dc_stream_state *stream;
7000         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7001
7002         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7003                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7004                 return result;
7005
7006         /*
7007          * Only run this the first time mode_valid is called, to initialize
7008          * EDID mgmt
7009          */
7010         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7011                 !aconnector->dc_em_sink)
7012                 handle_edid_mgmt(aconnector);
7013
7014         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7015
7016         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7017                                 aconnector->base.force != DRM_FORCE_ON) {
7018                 DRM_ERROR("dc_sink is NULL!\n");
7019                 goto fail;
7020         }
7021
7022         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7023         if (stream) {
7024                 dc_stream_release(stream);
7025                 result = MODE_OK;
7026         }
7027
7028 fail:
7029         /* TODO: error handling */
7030         return result;
7031 }
7032
7033 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7034                                 struct dc_info_packet *out)
7035 {
7036         struct hdmi_drm_infoframe frame;
7037         unsigned char buf[30]; /* 26 + 4 */
7038         ssize_t len;
7039         int ret, i;
7040
7041         memset(out, 0, sizeof(*out));
7042
7043         if (!state->hdr_output_metadata)
7044                 return 0;
7045
7046         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7047         if (ret)
7048                 return ret;
7049
7050         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7051         if (len < 0)
7052                 return (int)len;
7053
7054         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7055         if (len != 30)
7056                 return -EINVAL;
7057
7058         /* Prepare the infopacket for DC. */
7059         switch (state->connector->connector_type) {
7060         case DRM_MODE_CONNECTOR_HDMIA:
7061                 out->hb0 = 0x87; /* type */
7062                 out->hb1 = 0x01; /* version */
7063                 out->hb2 = 0x1A; /* length */
7064                 out->sb[0] = buf[3]; /* checksum */
7065                 i = 1;
7066                 break;
7067
7068         case DRM_MODE_CONNECTOR_DisplayPort:
7069         case DRM_MODE_CONNECTOR_eDP:
7070                 out->hb0 = 0x00; /* sdp id, zero */
7071                 out->hb1 = 0x87; /* type */
7072                 out->hb2 = 0x1D; /* payload len - 1 */
7073                 out->hb3 = (0x13 << 2); /* sdp version */
7074                 out->sb[0] = 0x01; /* version */
7075                 out->sb[1] = 0x1A; /* length */
7076                 i = 2;
7077                 break;
7078
7079         default:
7080                 return -EINVAL;
7081         }
7082
7083         memcpy(&out->sb[i], &buf[4], 26);
7084         out->valid = true;
7085
7086         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7087                        sizeof(out->sb), false);
7088
7089         return 0;
7090 }
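
/*
 * Packing summary for fill_hdr_info_packet(): the Dynamic Range and
 * Mastering InfoFrame (CTA-861-G) packed by hdmi_drm_infoframe_pack_only()
 * is a 4-byte header plus a 26-byte payload, which is why len must be
 * exactly 30. For HDMI the DC packet reuses the native header (type 0x87,
 * version 0x01, length 0x1A) and stores the checksum in sb[0], so the
 * payload is copied starting at sb[1]; for DP the same payload is wrapped
 * in an SDP with its own header bytes, so it starts at sb[2] instead.
 */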
7091
7092 static int
7093 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7094                                  struct drm_atomic_state *state)
7095 {
7096         struct drm_connector_state *new_con_state =
7097                 drm_atomic_get_new_connector_state(state, conn);
7098         struct drm_connector_state *old_con_state =
7099                 drm_atomic_get_old_connector_state(state, conn);
7100         struct drm_crtc *crtc = new_con_state->crtc;
7101         struct drm_crtc_state *new_crtc_state;
7102         int ret;
7103
7104         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7105
7106         if (!crtc)
7107                 return 0;
7108
7109         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7110                 struct dc_info_packet hdr_infopacket;
7111
7112                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7113                 if (ret)
7114                         return ret;
7115
7116                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7117                 if (IS_ERR(new_crtc_state))
7118                         return PTR_ERR(new_crtc_state);
7119
7120                 /*
7121                  * DC considers the stream backends changed if the
7122                  * static metadata changes. Forcing the modeset also
7123                  * gives a simple way for userspace to switch from
7124                  * 8bpc to 10bpc when setting the metadata to enter
7125                  * or exit HDR.
7126                  *
7127                  * Changing the static metadata after it's been
7128                  * set is permissible, however. So only force a
7129                  * modeset if we're entering or exiting HDR.
7130                  */
7131                 new_crtc_state->mode_changed =
7132                         !old_con_state->hdr_output_metadata ||
7133                         !new_con_state->hdr_output_metadata;
7134         }
7135
7136         return 0;
7137 }
7138
7139 static const struct drm_connector_helper_funcs
7140 amdgpu_dm_connector_helper_funcs = {
7141         /*
7142          * If hotplugging a second, bigger display in FB console mode, bigger
7143          * resolution modes will be filtered by drm_mode_validate_size(), and
7144          * those modes are missing after the user starts lightdm. So we need to
7145          * renew the modes list in the get_modes callback, not just return the modes count.
7146          */
7147         .get_modes = get_modes,
7148         .mode_valid = amdgpu_dm_connector_mode_valid,
7149         .atomic_check = amdgpu_dm_connector_atomic_check,
7150 };
7151
7152 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7153 {
7154 }
7155
7156 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7157 {
7158         struct drm_atomic_state *state = new_crtc_state->state;
7159         struct drm_plane *plane;
7160         int num_active = 0;
7161
7162         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7163                 struct drm_plane_state *new_plane_state;
7164
7165                 /* Cursor planes are "fake". */
7166                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7167                         continue;
7168
7169                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7170
7171                 if (!new_plane_state) {
7172                         /*
7173                          * The plane is enabled on the CRTC and hasn't changed
7174                          * state. This means that it previously passed
7175                          * validation and is therefore enabled.
7176                          */
7177                         num_active += 1;
7178                         continue;
7179                 }
7180
7181                 /* We need a framebuffer to be considered enabled. */
7182                 num_active += (new_plane_state->fb != NULL);
7183         }
7184
7185         return num_active;
7186 }
7187
7188 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7189                                          struct drm_crtc_state *new_crtc_state)
7190 {
7191         struct dm_crtc_state *dm_new_crtc_state =
7192                 to_dm_crtc_state(new_crtc_state);
7193
7194         dm_new_crtc_state->active_planes = 0;
7195
7196         if (!dm_new_crtc_state->stream)
7197                 return;
7198
7199         dm_new_crtc_state->active_planes =
7200                 count_crtc_active_planes(new_crtc_state);
7201 }
7202
7203 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7204                                        struct drm_atomic_state *state)
7205 {
7206         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7207                                                                           crtc);
7208         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7209         struct dc *dc = adev->dm.dc;
7210         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7211         int ret = -EINVAL;
7212
7213         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7214
7215         dm_update_crtc_active_planes(crtc, crtc_state);
7216
7217         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7218                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7219                 return ret;
7220         }
7221
7222         /*
7223          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7224          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7225          * planes are disabled, which is not supported by the hardware. And there is legacy
7226          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7227          */
7228         if (crtc_state->enable &&
7229             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7230                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7231                 return -EINVAL;
7232         }
7233
7234         /* In some use cases, like reset, no stream is attached */
7235         if (!dm_crtc_state->stream)
7236                 return 0;
7237
7238         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7239                 return 0;
7240
7241         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7242         return ret;
7243 }
7244
7245 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7246                                       const struct drm_display_mode *mode,
7247                                       struct drm_display_mode *adjusted_mode)
7248 {
7249         return true;
7250 }
7251
7252 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7253         .disable = dm_crtc_helper_disable,
7254         .atomic_check = dm_crtc_helper_atomic_check,
7255         .mode_fixup = dm_crtc_helper_mode_fixup,
7256         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7257 };
7258
7259 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7260 {
7261
7262 }
7263
7264 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7265 {
7266         switch (display_color_depth) {
7267         case COLOR_DEPTH_666:
7268                 return 6;
7269         case COLOR_DEPTH_888:
7270                 return 8;
7271         case COLOR_DEPTH_101010:
7272                 return 10;
7273         case COLOR_DEPTH_121212:
7274                 return 12;
7275         case COLOR_DEPTH_141414:
7276                 return 14;
7277         case COLOR_DEPTH_161616:
7278                 return 16;
7279         default:
7280                 break;
7281         }
7282         return 0;
7283 }
7284
7285 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7286                                           struct drm_crtc_state *crtc_state,
7287                                           struct drm_connector_state *conn_state)
7288 {
7289         struct drm_atomic_state *state = crtc_state->state;
7290         struct drm_connector *connector = conn_state->connector;
7291         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7292         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7293         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7294         struct drm_dp_mst_topology_mgr *mst_mgr;
7295         struct drm_dp_mst_port *mst_port;
7296         enum dc_color_depth color_depth;
7297         int clock, bpp = 0;
7298         bool is_y420 = false;
7299
7300         if (!aconnector->port || !aconnector->dc_sink)
7301                 return 0;
7302
7303         mst_port = aconnector->port;
7304         mst_mgr = &aconnector->mst_port->mst_mgr;
7305
7306         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7307                 return 0;
7308
7309         if (!state->duplicated) {
7310                 int max_bpc = conn_state->max_requested_bpc;
7311                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7312                                 aconnector->force_yuv420_output;
7313                 color_depth = convert_color_depth_from_display_info(connector,
7314                                                                     is_y420,
7315                                                                     max_bpc);
7316                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7317                 clock = adjusted_mode->clock;
7318                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7319         }
7320         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7321                                                                            mst_mgr,
7322                                                                            mst_port,
7323                                                                            dm_new_connector_state->pbn,
7324                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7325         if (dm_new_connector_state->vcpi_slots < 0) {
7326                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7327                 return dm_new_connector_state->vcpi_slots;
7328         }
7329         return 0;
7330 }
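
/*
 * Worked example for the PBN computation above (numbers are illustrative
 * approximations): a 1920x1080@60 mode has clock = 148500 kHz; at 8 bpc,
 * bpp = 8 * 3 = 24, and drm_dp_calc_pbn_mode() evaluates roughly
 *
 *	PBN = ceil(148500 * 24 * 64 * 1.006 / (8 * 54 * 10^6)) = 532
 *
 * On an HBR2 x4 link, dm_mst_get_pbn_divider() yields about 40 PBN per
 * time slot, so drm_dp_atomic_find_vcpi_slots() would reserve around
 * ceil(532 / 40) = 14 of the 64 MTP time slots for this stream.
 */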
7331
7332 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7333         .disable = dm_encoder_helper_disable,
7334         .atomic_check = dm_encoder_helper_atomic_check
7335 };
7336
7337 #if defined(CONFIG_DRM_AMD_DC_DCN)
7338 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7339                                             struct dc_state *dc_state,
7340                                             struct dsc_mst_fairness_vars *vars)
7341 {
7342         struct dc_stream_state *stream = NULL;
7343         struct drm_connector *connector;
7344         struct drm_connector_state *new_con_state;
7345         struct amdgpu_dm_connector *aconnector;
7346         struct dm_connector_state *dm_conn_state;
7347         int i, j;
7348         int vcpi, pbn_div, pbn = 0, slot_num = 0;
7349
7350         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7351
7352                 aconnector = to_amdgpu_dm_connector(connector);
7353
7354                 if (!aconnector->port)
7355                         continue;
7356
7357                 if (!new_con_state || !new_con_state->crtc)
7358                         continue;
7359
7360                 dm_conn_state = to_dm_connector_state(new_con_state);
7361
7362                 for (j = 0; j < dc_state->stream_count; j++) {
7363                         stream = dc_state->streams[j];
7364                         if (!stream)
7365                                 continue;
7366
7367                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7368                                 break;
7369
7370                         stream = NULL;
7371                 }
7372
7373                 if (!stream)
7374                         continue;
7375
7376                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7377                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7378                 for (j = 0; j < dc_state->stream_count; j++) {
7379                         if (vars[j].aconnector == aconnector) {
7380                                 pbn = vars[j].pbn;
7381                                 break;
7382                         }
7383                 }
7384
7385                 if (j == dc_state->stream_count)
7386                         continue;
7387
7388                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7389
7390                 if (stream->timing.flags.DSC != 1) {
7391                         dm_conn_state->pbn = pbn;
7392                         dm_conn_state->vcpi_slots = slot_num;
7393
7394                         drm_dp_mst_atomic_enable_dsc(state,
7395                                                      aconnector->port,
7396                                                      dm_conn_state->pbn,
7397                                                      0,
7398                                                      false);
7399                         continue;
7400                 }
7401
7402                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7403                                                     aconnector->port,
7404                                                     pbn, pbn_div,
7405                                                     true);
7406                 if (vcpi < 0)
7407                         return vcpi;
7408
7409                 dm_conn_state->pbn = pbn;
7410                 dm_conn_state->vcpi_slots = vcpi;
7411         }
7412         return 0;
7413 }
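
/*
 * Slot-accounting sketch for the loop above (illustrative numbers): a
 * stream with DSC enabled uses the compressed pbn computed by
 * compute_mst_dsc_configs_for_state(), so with a typical ~3:1 DSC ratio
 * the 532-PBN example stream from dm_encoder_helper_atomic_check() would
 * shrink to roughly 180 PBN and DIV_ROUND_UP(pbn, pbn_div) to about 5
 * slots. Streams without DSC keep their uncompressed pbn and merely have
 * DSC explicitly disabled on their port.
 */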
7414 #endif
7415
7416 static void dm_drm_plane_reset(struct drm_plane *plane)
7417 {
7418         struct dm_plane_state *amdgpu_state = NULL;
7419
7420         if (plane->state)
7421                 plane->funcs->atomic_destroy_state(plane, plane->state);
7422
7423         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7424         WARN_ON(amdgpu_state == NULL);
7425
7426         if (amdgpu_state)
7427                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7428 }
7429
7430 static struct drm_plane_state *
7431 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7432 {
7433         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7434
7435         old_dm_plane_state = to_dm_plane_state(plane->state);
7436         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7437         if (!dm_plane_state)
7438                 return NULL;
7439
7440         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7441
7442         if (old_dm_plane_state->dc_state) {
7443                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7444                 dc_plane_state_retain(dm_plane_state->dc_state);
7445         }
7446
7447         return &dm_plane_state->base;
7448 }
7449
7450 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7451                                 struct drm_plane_state *state)
7452 {
7453         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7454
7455         if (dm_plane_state->dc_state)
7456                 dc_plane_state_release(dm_plane_state->dc_state);
7457
7458         drm_atomic_helper_plane_destroy_state(plane, state);
7459 }
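
/*
 * Reference-counting invariant for the three plane-state helpers above:
 *
 *	dm_drm_plane_reset()           -> dc_state starts out NULL
 *	dm_drm_plane_duplicate_state() -> shares dc_state, dc_plane_state_retain()
 *	dm_drm_plane_destroy_state()   -> dc_plane_state_release()
 *
 * Every retain is balanced by exactly one release, so the dc_plane_state
 * stays alive as long as any duplicated DRM plane state still refers to it.
 */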
7460
7461 static const struct drm_plane_funcs dm_plane_funcs = {
7462         .update_plane   = drm_atomic_helper_update_plane,
7463         .disable_plane  = drm_atomic_helper_disable_plane,
7464         .destroy        = drm_primary_helper_destroy,
7465         .reset = dm_drm_plane_reset,
7466         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7467         .atomic_destroy_state = dm_drm_plane_destroy_state,
7468         .format_mod_supported = dm_plane_format_mod_supported,
7469 };
7470
7471 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7472                                       struct drm_plane_state *new_state)
7473 {
7474         struct amdgpu_framebuffer *afb;
7475         struct drm_gem_object *obj;
7476         struct amdgpu_device *adev;
7477         struct amdgpu_bo *rbo;
7478         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7479         struct list_head list;
7480         struct ttm_validate_buffer tv;
7481         struct ww_acquire_ctx ticket;
7482         uint32_t domain;
7483         int r;
7484
7485         if (!new_state->fb) {
7486                 DRM_DEBUG_KMS("No FB bound\n");
7487                 return 0;
7488         }
7489
7490         afb = to_amdgpu_framebuffer(new_state->fb);
7491         obj = new_state->fb->obj[0];
7492         rbo = gem_to_amdgpu_bo(obj);
7493         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7494         INIT_LIST_HEAD(&list);
7495
7496         tv.bo = &rbo->tbo;
7497         tv.num_shared = 1;
7498         list_add(&tv.head, &list);
7499
7500         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7501         if (r) {
7502                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
7503                 return r;
7504         }
7505
7506         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7507                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7508         else
7509                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7510
7511         r = amdgpu_bo_pin(rbo, domain);
7512         if (unlikely(r != 0)) {
7513                 if (r != -ERESTARTSYS)
7514                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7515                 ttm_eu_backoff_reservation(&ticket, &list);
7516                 return r;
7517         }
7518
7519         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7520         if (unlikely(r != 0)) {
7521                 amdgpu_bo_unpin(rbo);
7522                 ttm_eu_backoff_reservation(&ticket, &list);
7523                 DRM_ERROR("%p bind failed\n", rbo);
7524                 return r;
7525         }
7526
7527         ttm_eu_backoff_reservation(&ticket, &list);
7528
7529         afb->address = amdgpu_bo_gpu_offset(rbo);
7530
7531         amdgpu_bo_ref(rbo);
7532
7533         /*
7534          * We don't do surface updates on planes that have been newly created,
7535          * but we also don't have the afb->address during atomic check.
7536          *
7537          * Fill in buffer attributes depending on the address here, but only on
7538          * newly created planes since they're not being used by DC yet and this
7539          * won't modify global state.
7540          */
7541         dm_plane_state_old = to_dm_plane_state(plane->state);
7542         dm_plane_state_new = to_dm_plane_state(new_state);
7543
7544         if (dm_plane_state_new->dc_state &&
7545             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7546                 struct dc_plane_state *plane_state =
7547                         dm_plane_state_new->dc_state;
7548                 bool force_disable_dcc = !plane_state->dcc.enable;
7549
7550                 fill_plane_buffer_attributes(
7551                         adev, afb, plane_state->format, plane_state->rotation,
7552                         afb->tiling_flags,
7553                         &plane_state->tiling_info, &plane_state->plane_size,
7554                         &plane_state->dcc, &plane_state->address,
7555                         afb->tmz_surface, force_disable_dcc);
7556         }
7557
7558         return 0;
7559 }
7560
7561 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7562                                        struct drm_plane_state *old_state)
7563 {
7564         struct amdgpu_bo *rbo;
7565         int r;
7566
7567         if (!old_state->fb)
7568                 return;
7569
7570         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7571         r = amdgpu_bo_reserve(rbo, false);
7572         if (unlikely(r)) {
7573                 DRM_ERROR("failed to reserve rbo before unpin\n");
7574                 return;
7575         }
7576
7577         amdgpu_bo_unpin(rbo);
7578         amdgpu_bo_unreserve(rbo);
7579         amdgpu_bo_unref(&rbo);
7580 }
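
/*
 * How prepare_fb() and cleanup_fb() pair up for one framebuffer:
 *
 *	prepare_fb: ttm_eu_reserve_buffers() -> amdgpu_bo_pin()
 *	            -> amdgpu_ttm_alloc_gart() -> ttm_eu_backoff_reservation()
 *	            -> amdgpu_bo_ref()
 *	cleanup_fb: amdgpu_bo_reserve() -> amdgpu_bo_unpin()
 *	            -> amdgpu_bo_unreserve() -> amdgpu_bo_unref()
 *
 * The pin and the extra reference taken while the buffer is scanned out
 * are both dropped once the old plane state is cleaned up.
 */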
7581
7582 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7583                                        struct drm_crtc_state *new_crtc_state)
7584 {
7585         struct drm_framebuffer *fb = state->fb;
7586         int min_downscale, max_upscale;
7587         int min_scale = 0;
7588         int max_scale = INT_MAX;
7589
7590         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7591         if (fb && state->crtc) {
7592                 /* Validate viewport to cover the case when only the position changes */
7593                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7594                         int viewport_width = state->crtc_w;
7595                         int viewport_height = state->crtc_h;
7596
7597                         if (state->crtc_x < 0)
7598                                 viewport_width += state->crtc_x;
7599                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7600                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7601
7602                         if (state->crtc_y < 0)
7603                                 viewport_height += state->crtc_y;
7604                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7605                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7606
7607                         if (viewport_width < 0 || viewport_height < 0) {
7608                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7609                                 return -EINVAL;
7610                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7611                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7612                                 return -EINVAL;
7613                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7614                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7615                                 return -EINVAL;
7616                         }
7617
7618                 }
7619
7620                 /* Get min/max allowed scaling factors from plane caps. */
7621                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7622                                              &min_downscale, &max_upscale);
7623                 /*
7624                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7625                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7626                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7627                  */
7628                 min_scale = (1000 << 16) / max_upscale;
7629                 max_scale = (1000 << 16) / min_downscale;
7630         }
7631
7632         return drm_atomic_helper_check_plane_state(
7633                 state, new_crtc_state, min_scale, max_scale, true, true);
7634 }
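
/*
 * Fixed-point conversion example for the limits above, with illustrative
 * caps of max_upscale = 2000 and min_downscale = 250 (2.0x up / 0.25x down
 * in DC's 1.0 == 1000 convention):
 *
 *	min_scale = (1000 << 16) / 2000 = 0x08000  (0.5 in 16.16, i.e. 2x upscale)
 *	max_scale = (1000 << 16) /  250 = 0x40000  (4.0 in 16.16, i.e. 4x downscale)
 *
 * Because drm expresses scaling as src/dst while DC uses dst/src, the
 * smaller drm ratio corresponds to the larger DC upscale factor.
 */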
7635
7636 static int dm_plane_atomic_check(struct drm_plane *plane,
7637                                  struct drm_atomic_state *state)
7638 {
7639         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7640                                                                                  plane);
7641         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7642         struct dc *dc = adev->dm.dc;
7643         struct dm_plane_state *dm_plane_state;
7644         struct dc_scaling_info scaling_info;
7645         struct drm_crtc_state *new_crtc_state;
7646         int ret;
7647
7648         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7649
7650         dm_plane_state = to_dm_plane_state(new_plane_state);
7651
7652         if (!dm_plane_state->dc_state)
7653                 return 0;
7654
7655         new_crtc_state =
7656                 drm_atomic_get_new_crtc_state(state,
7657                                               new_plane_state->crtc);
7658         if (!new_crtc_state)
7659                 return -EINVAL;
7660
7661         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7662         if (ret)
7663                 return ret;
7664
7665         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7666         if (ret)
7667                 return ret;
7668
7669         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7670                 return 0;
7671
7672         return -EINVAL;
7673 }
7674
7675 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7676                                        struct drm_atomic_state *state)
7677 {
7678         /* Only support async updates on cursor planes. */
7679         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7680                 return -EINVAL;
7681
7682         return 0;
7683 }
7684
7685 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7686                                          struct drm_atomic_state *state)
7687 {
7688         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7689                                                                            plane);
7690         struct drm_plane_state *old_state =
7691                 drm_atomic_get_old_plane_state(state, plane);
7692
7693         trace_amdgpu_dm_atomic_update_cursor(new_state);
7694
7695         swap(plane->state->fb, new_state->fb);
7696
7697         plane->state->src_x = new_state->src_x;
7698         plane->state->src_y = new_state->src_y;
7699         plane->state->src_w = new_state->src_w;
7700         plane->state->src_h = new_state->src_h;
7701         plane->state->crtc_x = new_state->crtc_x;
7702         plane->state->crtc_y = new_state->crtc_y;
7703         plane->state->crtc_w = new_state->crtc_w;
7704         plane->state->crtc_h = new_state->crtc_h;
7705
7706         handle_cursor_update(plane, old_state);
7707 }
7708
7709 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7710         .prepare_fb = dm_plane_helper_prepare_fb,
7711         .cleanup_fb = dm_plane_helper_cleanup_fb,
7712         .atomic_check = dm_plane_atomic_check,
7713         .atomic_async_check = dm_plane_atomic_async_check,
7714         .atomic_async_update = dm_plane_atomic_async_update
7715 };
7716
7717 /*
7718  * TODO: these are currently initialized to rgb formats only.
7719  * For future use cases we should either initialize them dynamically based on
7720  * plane capabilities, or initialize this array to all formats, so the internal
7721  * drm check will succeed, and let DC implement the proper check
7722  */
7723 static const uint32_t rgb_formats[] = {
7724         DRM_FORMAT_XRGB8888,
7725         DRM_FORMAT_ARGB8888,
7726         DRM_FORMAT_RGBA8888,
7727         DRM_FORMAT_XRGB2101010,
7728         DRM_FORMAT_XBGR2101010,
7729         DRM_FORMAT_ARGB2101010,
7730         DRM_FORMAT_ABGR2101010,
7731         DRM_FORMAT_XRGB16161616,
7732         DRM_FORMAT_XBGR16161616,
7733         DRM_FORMAT_ARGB16161616,
7734         DRM_FORMAT_ABGR16161616,
7735         DRM_FORMAT_XBGR8888,
7736         DRM_FORMAT_ABGR8888,
7737         DRM_FORMAT_RGB565,
7738 };
7739
7740 static const uint32_t overlay_formats[] = {
7741         DRM_FORMAT_XRGB8888,
7742         DRM_FORMAT_ARGB8888,
7743         DRM_FORMAT_RGBA8888,
7744         DRM_FORMAT_XBGR8888,
7745         DRM_FORMAT_ABGR8888,
7746         DRM_FORMAT_RGB565
7747 };
7748
7749 static const u32 cursor_formats[] = {
7750         DRM_FORMAT_ARGB8888
7751 };
7752
7753 static int get_plane_formats(const struct drm_plane *plane,
7754                              const struct dc_plane_cap *plane_cap,
7755                              uint32_t *formats, int max_formats)
7756 {
7757         int i, num_formats = 0;
7758
7759         /*
7760          * TODO: Query support for each group of formats directly from
7761          * DC plane caps. This will require adding more formats to the
7762          * caps list.
7763          */
7764
7765         switch (plane->type) {
7766         case DRM_PLANE_TYPE_PRIMARY:
7767                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7768                         if (num_formats >= max_formats)
7769                                 break;
7770
7771                         formats[num_formats++] = rgb_formats[i];
7772                 }
7773
7774                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7775                         formats[num_formats++] = DRM_FORMAT_NV12;
7776                 if (plane_cap && plane_cap->pixel_format_support.p010)
7777                         formats[num_formats++] = DRM_FORMAT_P010;
7778                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7779                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7780                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7781                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7782                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7783                 }
7784                 break;
7785
7786         case DRM_PLANE_TYPE_OVERLAY:
7787                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7788                         if (num_formats >= max_formats)
7789                                 break;
7790
7791                         formats[num_formats++] = overlay_formats[i];
7792                 }
7793                 break;
7794
7795         case DRM_PLANE_TYPE_CURSOR:
7796                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7797                         if (num_formats >= max_formats)
7798                                 break;
7799
7800                         formats[num_formats++] = cursor_formats[i];
7801                 }
7802                 break;
7803         }
7804
7805         return num_formats;
7806 }
7807
7808 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7809                                 struct drm_plane *plane,
7810                                 unsigned long possible_crtcs,
7811                                 const struct dc_plane_cap *plane_cap)
7812 {
7813         uint32_t formats[32];
7814         int num_formats;
7815         int res = -EPERM;
7816         unsigned int supported_rotations;
7817         uint64_t *modifiers = NULL;
7818
7819         num_formats = get_plane_formats(plane, plane_cap, formats,
7820                                         ARRAY_SIZE(formats));
7821
7822         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7823         if (res)
7824                 return res;
7825
7826         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7827                                        &dm_plane_funcs, formats, num_formats,
7828                                        modifiers, plane->type, NULL);
7829         kfree(modifiers);
7830         if (res)
7831                 return res;
7832
7833         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7834             plane_cap && plane_cap->per_pixel_alpha) {
7835                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7836                                           BIT(DRM_MODE_BLEND_PREMULTI);
7837
7838                 drm_plane_create_alpha_property(plane);
7839                 drm_plane_create_blend_mode_property(plane, blend_caps);
7840         }
7841
7842         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7843             plane_cap &&
7844             (plane_cap->pixel_format_support.nv12 ||
7845              plane_cap->pixel_format_support.p010)) {
7846                 /* This only affects YUV formats. */
7847                 drm_plane_create_color_properties(
7848                         plane,
7849                         BIT(DRM_COLOR_YCBCR_BT601) |
7850                         BIT(DRM_COLOR_YCBCR_BT709) |
7851                         BIT(DRM_COLOR_YCBCR_BT2020),
7852                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7853                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7854                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7855         }
7856
7857         supported_rotations =
7858                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7859                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7860
7861         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7862             plane->type != DRM_PLANE_TYPE_CURSOR)
7863                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7864                                                    supported_rotations);
7865
7866         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7867
7868         /* Create (reset) the plane state */
7869         if (plane->funcs->reset)
7870                 plane->funcs->reset(plane);
7871
7872         return 0;
7873 }
7874
7875 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7876                                struct drm_plane *plane,
7877                                uint32_t crtc_index)
7878 {
7879         struct amdgpu_crtc *acrtc = NULL;
7880         struct drm_plane *cursor_plane;
7881
7882         int res = -ENOMEM;
7883
7884         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7885         if (!cursor_plane)
7886                 goto fail;
7887
7888         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7889         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7890
7891         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7892         if (!acrtc)
7893                 goto fail;
7894
7895         res = drm_crtc_init_with_planes(
7896                         dm->ddev,
7897                         &acrtc->base,
7898                         plane,
7899                         cursor_plane,
7900                         &amdgpu_dm_crtc_funcs, NULL);
7901
7902         if (res)
7903                 goto fail;
7904
7905         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7906
7907         /* Create (reset) the plane state */
7908         if (acrtc->base.funcs->reset)
7909                 acrtc->base.funcs->reset(&acrtc->base);
7910
7911         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7912         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7913
7914         acrtc->crtc_id = crtc_index;
7915         acrtc->base.enabled = false;
7916         acrtc->otg_inst = -1;
7917
7918         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7919         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7920                                    true, MAX_COLOR_LUT_ENTRIES);
7921         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7922
7923         return 0;
7924
7925 fail:
7926         kfree(acrtc);
7927         kfree(cursor_plane);
7928         return res;
7929 }
7930
7931
7932 static int to_drm_connector_type(enum signal_type st)
7933 {
7934         switch (st) {
7935         case SIGNAL_TYPE_HDMI_TYPE_A:
7936                 return DRM_MODE_CONNECTOR_HDMIA;
7937         case SIGNAL_TYPE_EDP:
7938                 return DRM_MODE_CONNECTOR_eDP;
7939         case SIGNAL_TYPE_LVDS:
7940                 return DRM_MODE_CONNECTOR_LVDS;
7941         case SIGNAL_TYPE_RGB:
7942                 return DRM_MODE_CONNECTOR_VGA;
7943         case SIGNAL_TYPE_DISPLAY_PORT:
7944         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7945                 return DRM_MODE_CONNECTOR_DisplayPort;
7946         case SIGNAL_TYPE_DVI_DUAL_LINK:
7947         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7948                 return DRM_MODE_CONNECTOR_DVID;
7949         case SIGNAL_TYPE_VIRTUAL:
7950                 return DRM_MODE_CONNECTOR_VIRTUAL;
7951
7952         default:
7953                 return DRM_MODE_CONNECTOR_Unknown;
7954         }
7955 }
7956
7957 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7958 {
7959         struct drm_encoder *encoder;
7960
7961         /* There is only one encoder per connector */
7962         drm_connector_for_each_possible_encoder(connector, encoder)
7963                 return encoder;
7964
7965         return NULL;
7966 }
7967
7968 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7969 {
7970         struct drm_encoder *encoder;
7971         struct amdgpu_encoder *amdgpu_encoder;
7972
7973         encoder = amdgpu_dm_connector_to_encoder(connector);
7974
7975         if (encoder == NULL)
7976                 return;
7977
7978         amdgpu_encoder = to_amdgpu_encoder(encoder);
7979
7980         amdgpu_encoder->native_mode.clock = 0;
7981
7982         if (!list_empty(&connector->probed_modes)) {
7983                 struct drm_display_mode *preferred_mode = NULL;
7984
7985                 list_for_each_entry(preferred_mode,
7986                                     &connector->probed_modes,
7987                                     head) {
7988                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7989                                 amdgpu_encoder->native_mode = *preferred_mode;
7990
7991                         break;
7992                 }
7994         }
7995 }
7996
7997 static struct drm_display_mode *
7998 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7999                              char *name,
8000                              int hdisplay, int vdisplay)
8001 {
8002         struct drm_device *dev = encoder->dev;
8003         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8004         struct drm_display_mode *mode = NULL;
8005         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8006
8007         mode = drm_mode_duplicate(dev, native_mode);
8008
8009         if (mode == NULL)
8010                 return NULL;
8011
8012         mode->hdisplay = hdisplay;
8013         mode->vdisplay = vdisplay;
8014         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8015         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8016
8017         return mode;
8019 }
8020
8021 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8022                                                  struct drm_connector *connector)
8023 {
8024         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8025         struct drm_display_mode *mode = NULL;
8026         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8027         struct amdgpu_dm_connector *amdgpu_dm_connector =
8028                                 to_amdgpu_dm_connector(connector);
8029         int i;
8030         int n;
8031         struct mode_size {
8032                 char name[DRM_DISPLAY_MODE_LEN];
8033                 int w;
8034                 int h;
8035         } common_modes[] = {
8036                 {  "640x480",  640,  480},
8037                 {  "800x600",  800,  600},
8038                 { "1024x768", 1024,  768},
8039                 { "1280x720", 1280,  720},
8040                 { "1280x800", 1280,  800},
8041                 {"1280x1024", 1280, 1024},
8042                 { "1440x900", 1440,  900},
8043                 {"1680x1050", 1680, 1050},
8044                 {"1600x1200", 1600, 1200},
8045                 {"1920x1080", 1920, 1080},
8046                 {"1920x1200", 1920, 1200}
8047         };
8048
8049         n = ARRAY_SIZE(common_modes);
8050
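        /*
         * Add only common modes strictly smaller than the panel's native
         * mode; the native size itself is already in the probed list, and
         * modes that duplicate an existing probed mode are skipped as well.
         */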
8051         for (i = 0; i < n; i++) {
8052                 struct drm_display_mode *curmode = NULL;
8053                 bool mode_existed = false;
8054
8055                 if (common_modes[i].w > native_mode->hdisplay ||
8056                     common_modes[i].h > native_mode->vdisplay ||
8057                    (common_modes[i].w == native_mode->hdisplay &&
8058                     common_modes[i].h == native_mode->vdisplay))
8059                         continue;
8060
8061                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8062                         if (common_modes[i].w == curmode->hdisplay &&
8063                             common_modes[i].h == curmode->vdisplay) {
8064                                 mode_existed = true;
8065                                 break;
8066                         }
8067                 }
8068
8069                 if (mode_existed)
8070                         continue;
8071
8072                 mode = amdgpu_dm_create_common_mode(encoder,
8073                                 common_modes[i].name, common_modes[i].w,
8074                                 common_modes[i].h);
8075                 drm_mode_probed_add(connector, mode);
8076                 amdgpu_dm_connector->num_modes++;
8077         }
8078 }
8079
8080 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8081 {
8082         struct drm_encoder *encoder;
8083         struct amdgpu_encoder *amdgpu_encoder;
8084         const struct drm_display_mode *native_mode;
8085
8086         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8087             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8088                 return;
8089
8090         encoder = amdgpu_dm_connector_to_encoder(connector);
8091         if (!encoder)
8092                 return;
8093
8094         amdgpu_encoder = to_amdgpu_encoder(encoder);
8095
8096         native_mode = &amdgpu_encoder->native_mode;
8097         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8098                 return;
8099
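        /*
         * The actual orientation comes from the DMI-based quirk table in
         * the DRM helper; we pass DRM_MODE_PANEL_ORIENTATION_UNKNOWN as
         * the fallback plus the native panel size the quirk lookup is
         * keyed on.
         */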
8100         drm_connector_set_panel_orientation_with_quirk(connector,
8101                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8102                                                        native_mode->hdisplay,
8103                                                        native_mode->vdisplay);
8104 }
8105
8106 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8107                                               struct edid *edid)
8108 {
8109         struct amdgpu_dm_connector *amdgpu_dm_connector =
8110                         to_amdgpu_dm_connector(connector);
8111
8112         if (edid) {
8113                 /* empty probed_modes */
8114                 INIT_LIST_HEAD(&connector->probed_modes);
8115                 amdgpu_dm_connector->num_modes =
8116                                 drm_add_edid_modes(connector, edid);
8117
                /*
                 * Sort the probed modes before calling
                 * amdgpu_dm_get_native_mode(), since an EDID can contain
                 * more than one preferred mode. A mode later in the probed
                 * list may have a higher, preferred resolution: for example,
                 * 3840x2160 in the base EDID preferred timing and 4096x2160
                 * in a DisplayID extension block.
                 */
8126                 drm_mode_sort(&connector->probed_modes);
8127                 amdgpu_dm_get_native_mode(connector);
8128
8129                 /* Freesync capabilities are reset by calling
8130                  * drm_add_edid_modes() and need to be
8131                  * restored here.
8132                  */
8133                 amdgpu_dm_update_freesync_caps(connector, edid);
8134
8135                 amdgpu_set_panel_orientation(connector);
8136         } else {
8137                 amdgpu_dm_connector->num_modes = 0;
8138         }
8139 }
8140
8141 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8142                               struct drm_display_mode *mode)
8143 {
8144         struct drm_display_mode *m;
8145
        list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8147                 if (drm_mode_equal(m, mode))
8148                         return true;
8149         }
8150
8151         return false;
8152 }
8153
8154 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8155 {
8156         const struct drm_display_mode *m;
8157         struct drm_display_mode *new_mode;
8158         uint i;
8159         uint32_t new_modes_count = 0;
8160
8161         /* Standard FPS values
8162          *
8163          * 23.976       - TV/NTSC
8164          * 24           - Cinema
8165          * 25           - TV/PAL
8166          * 29.97        - TV/NTSC
8167          * 30           - TV/NTSC
8168          * 48           - Cinema HFR
8169          * 50           - TV/PAL
8170          * 60           - Commonly used
8171          * 48,72,96,120 - Multiples of 24
8172          */
8173         static const uint32_t common_rates[] = {
8174                 23976, 24000, 25000, 29970, 30000,
8175                 48000, 50000, 60000, 72000, 96000, 120000
8176         };
8177
        /*
         * Find the mode with the highest refresh rate at the same
         * resolution as the preferred mode; some monitors report a
         * preferred mode with a lower refresh rate than the highest
         * one they support.
         */
8183
8184         m = get_highest_refresh_rate_mode(aconnector, true);
8185         if (!m)
8186                 return 0;
8187
8188         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8189                 uint64_t target_vtotal, target_vtotal_diff;
8190                 uint64_t num, den;
8191
8192                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8193                         continue;
8194
8195                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8196                     common_rates[i] > aconnector->max_vfreq * 1000)
8197                         continue;
8198
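                /*
                 * Retarget the refresh rate by stretching vtotal while the
                 * pixel clock and htotal stay fixed, solving
                 * refresh = clock / (htotal * vtotal) for vtotal.
                 * Worked example (illustrative numbers): a mode with
                 * clock = 297000 kHz, htotal = 2200 and vtotal = 1125
                 * refreshes at 120 Hz; to hit 60000 mHz we need
                 * target_vtotal = 297000000000 / (60000 * 2200) = 2250,
                 * i.e. target_vtotal_diff = 1125 extra blank lines.
                 */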
8199                 num = (unsigned long long)m->clock * 1000 * 1000;
8200                 den = common_rates[i] * (unsigned long long)m->htotal;
8201                 target_vtotal = div_u64(num, den);
8202                 target_vtotal_diff = target_vtotal - m->vtotal;
8203
8204                 /* Check for illegal modes */
8205                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8206                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8207                     m->vtotal + target_vtotal_diff < m->vsync_end)
8208                         continue;
8209
8210                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8211                 if (!new_mode)
8212                         goto out;
8213
8214                 new_mode->vtotal += (u16)target_vtotal_diff;
8215                 new_mode->vsync_start += (u16)target_vtotal_diff;
8216                 new_mode->vsync_end += (u16)target_vtotal_diff;
8217                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8218                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8219
8220                 if (!is_duplicate_mode(aconnector, new_mode)) {
8221                         drm_mode_probed_add(&aconnector->base, new_mode);
8222                         new_modes_count += 1;
                } else {
                        drm_mode_destroy(aconnector->base.dev, new_mode);
                }
8225         }
8226  out:
8227         return new_modes_count;
8228 }
8229
8230 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8231                                                    struct edid *edid)
8232 {
8233         struct amdgpu_dm_connector *amdgpu_dm_connector =
8234                 to_amdgpu_dm_connector(connector);
8235
8236         if (!(amdgpu_freesync_vid_mode && edid))
8237                 return;
8238
8239         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8240                 amdgpu_dm_connector->num_modes +=
8241                         add_fs_modes(amdgpu_dm_connector);
8242 }
8243
8244 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8245 {
8246         struct amdgpu_dm_connector *amdgpu_dm_connector =
8247                         to_amdgpu_dm_connector(connector);
8248         struct drm_encoder *encoder;
8249         struct edid *edid = amdgpu_dm_connector->edid;
8250
8251         encoder = amdgpu_dm_connector_to_encoder(connector);
8252
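        /*
         * Without a valid EDID, fall back to the DRM core's set of
         * EDID-less default modes, capped at 640x480.
         */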
8253         if (!drm_edid_is_valid(edid)) {
8254                 amdgpu_dm_connector->num_modes =
8255                                 drm_add_modes_noedid(connector, 640, 480);
8256         } else {
8257                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8258                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8259                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8260         }
8261         amdgpu_dm_fbc_init(connector);
8262
8263         return amdgpu_dm_connector->num_modes;
8264 }
8265
8266 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8267                                      struct amdgpu_dm_connector *aconnector,
8268                                      int connector_type,
8269                                      struct dc_link *link,
8270                                      int link_index)
8271 {
8272         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8273
8274         /*
8275          * Some of the properties below require access to state, like bpc.
8276          * Allocate some default initial connector state with our reset helper.
8277          */
8278         if (aconnector->base.funcs->reset)
8279                 aconnector->base.funcs->reset(&aconnector->base);
8280
8281         aconnector->connector_id = link_index;
8282         aconnector->dc_link = link;
8283         aconnector->base.interlace_allowed = false;
8284         aconnector->base.doublescan_allowed = false;
8285         aconnector->base.stereo_allowed = false;
8286         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8287         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8288         aconnector->audio_inst = -1;
8289         mutex_init(&aconnector->hpd_lock);
8290
        /*
         * Configure hot-plug detection support. connector->polled defaults
         * to 0, which means HPD hot plug is not supported.
         */
8295         switch (connector_type) {
8296         case DRM_MODE_CONNECTOR_HDMIA:
8297                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.hdmi_ycbcr420_supported;
8300                 break;
8301         case DRM_MODE_CONNECTOR_DisplayPort:
8302                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8303                 if (link->is_dig_mapping_flexible &&
8304                     link->dc->res_pool->funcs->link_encs_assign) {
8305                         link->link_enc =
8306                                 link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8307                         if (!link->link_enc)
8308                                 link->link_enc =
8309                                         link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8310                 }
8311
                if (link->link_enc)
                        aconnector->base.ycbcr_420_allowed =
                                link->link_enc->features.dp_ycbcr420_supported;
8315                 break;
8316         case DRM_MODE_CONNECTOR_DVID:
8317                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8318                 break;
8319         default:
8320                 break;
8321         }
8322
8323         drm_object_attach_property(&aconnector->base.base,
8324                                 dm->ddev->mode_config.scaling_mode_property,
8325                                 DRM_MODE_SCALE_NONE);
8326
8327         drm_object_attach_property(&aconnector->base.base,
8328                                 adev->mode_info.underscan_property,
8329                                 UNDERSCAN_OFF);
8330         drm_object_attach_property(&aconnector->base.base,
8331                                 adev->mode_info.underscan_hborder_property,
8332                                 0);
8333         drm_object_attach_property(&aconnector->base.base,
8334                                 adev->mode_info.underscan_vborder_property,
8335                                 0);
8336
8337         if (!aconnector->mst_port)
8338                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8339
        /* This defaults to the max of the range, but we want 8 bpc for non-eDP. */
8341         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8342         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8343
8344         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8345             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8346                 drm_object_attach_property(&aconnector->base.base,
8347                                 adev->mode_info.abm_level_property, 0);
8348         }
8349
8350         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8351             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8352             connector_type == DRM_MODE_CONNECTOR_eDP) {
8353                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8354
8355                 if (!aconnector->mst_port)
8356                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8357
8358 #ifdef CONFIG_DRM_AMD_DC_HDCP
8359                 if (adev->dm.hdcp_workqueue)
8360                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8361 #endif
8362         }
8363 }
8364
8365 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8366                               struct i2c_msg *msgs, int num)
8367 {
8368         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8369         struct ddc_service *ddc_service = i2c->ddc_service;
8370         struct i2c_command cmd;
8371         int i;
8372         int result = -EIO;
8373
8374         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8375
8376         if (!cmd.payloads)
8377                 return result;
8378
8379         cmd.number_of_payloads = num;
8380         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8381         cmd.speed = 100;
8382
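        /*
         * Mirror each i2c_msg into a DC i2c_payload 1:1: the I2C_M_RD flag
         * selects the transfer direction, while address, length and buffer
         * are passed through unchanged. Per i2c convention, success returns
         * the number of messages processed; failure returns -EIO.
         */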
8383         for (i = 0; i < num; i++) {
8384                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8385                 cmd.payloads[i].address = msgs[i].addr;
8386                 cmd.payloads[i].length = msgs[i].len;
8387                 cmd.payloads[i].data = msgs[i].buf;
8388         }
8389
8390         if (dc_submit_i2c(
8391                         ddc_service->ctx->dc,
8392                         ddc_service->ddc_pin->hw_info.ddc_channel,
8393                         &cmd))
8394                 result = num;
8395
8396         kfree(cmd.payloads);
8397         return result;
8398 }
8399
8400 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8401 {
8402         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8403 }
8404
8405 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8406         .master_xfer = amdgpu_dm_i2c_xfer,
8407         .functionality = amdgpu_dm_i2c_func,
8408 };
8409
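/*
 * Wrap a DC DDC service in a Linux i2c_adapter so that EDID reads and
 * other DDC traffic from the DRM core are routed through DC's I2C
 * engine. The caller is responsible for registering the adapter with
 * i2c_add_adapter() and for freeing it on failure.
 */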
8410 static struct amdgpu_i2c_adapter *
8411 create_i2c(struct ddc_service *ddc_service,
8412            int link_index,
8413            int *res)
8414 {
8415         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8416         struct amdgpu_i2c_adapter *i2c;
8417
8418         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8419         if (!i2c)
8420                 return NULL;
8421         i2c->base.owner = THIS_MODULE;
8422         i2c->base.class = I2C_CLASS_DDC;
8423         i2c->base.dev.parent = &adev->pdev->dev;
8424         i2c->base.algo = &amdgpu_dm_i2c_algo;
8425         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8426         i2c_set_adapdata(&i2c->base, i2c);
8427         i2c->ddc_service = ddc_service;
8428         if (i2c->ddc_service->ddc_pin)
8429                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8430
8431         return i2c;
8432 }
8433
8434
8435 /*
8436  * Note: this function assumes that dc_link_detect() was called for the
8437  * dc_link which will be represented by this aconnector.
8438  */
8439 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8440                                     struct amdgpu_dm_connector *aconnector,
8441                                     uint32_t link_index,
8442                                     struct amdgpu_encoder *aencoder)
8443 {
8444         int res = 0;
8445         int connector_type;
8446         struct dc *dc = dm->dc;
8447         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8448         struct amdgpu_i2c_adapter *i2c;
8449
8450         link->priv = aconnector;
8451
8452         DRM_DEBUG_DRIVER("%s()\n", __func__);
8453
8454         i2c = create_i2c(link->ddc, link->link_index, &res);
8455         if (!i2c) {
8456                 DRM_ERROR("Failed to create i2c adapter data\n");
8457                 return -ENOMEM;
8458         }
8459
8460         aconnector->i2c = i2c;
8461         res = i2c_add_adapter(&i2c->base);
8462
8463         if (res) {
8464                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8465                 goto out_free;
8466         }
8467
8468         connector_type = to_drm_connector_type(link->connector_signal);
8469
8470         res = drm_connector_init_with_ddc(
8471                         dm->ddev,
8472                         &aconnector->base,
8473                         &amdgpu_dm_connector_funcs,
8474                         connector_type,
8475                         &i2c->base);
8476
8477         if (res) {
8478                 DRM_ERROR("connector_init failed\n");
8479                 aconnector->connector_id = -1;
8480                 goto out_free;
8481         }
8482
8483         drm_connector_helper_add(
8484                         &aconnector->base,
8485                         &amdgpu_dm_connector_helper_funcs);
8486
8487         amdgpu_dm_connector_init_helper(
8488                 dm,
8489                 aconnector,
8490                 connector_type,
8491                 link,
8492                 link_index);
8493
8494         drm_connector_attach_encoder(
8495                 &aconnector->base, &aencoder->base);
8496
        if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
            connector_type == DRM_MODE_CONNECTOR_eDP)
8499                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8500
8501 out_free:
8502         if (res) {
8503                 kfree(i2c);
8504                 aconnector->i2c = NULL;
8505         }
8506         return res;
8507 }
8508
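/*
 * Build the possible_crtcs mask for DM encoders: a bitmask with the low
 * adev->mode_info.num_crtc bits set (e.g. 4 CRTCs -> 0xf), meaning every
 * encoder can be routed to every CRTC.
 */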
8509 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8510 {
8511         switch (adev->mode_info.num_crtc) {
8512         case 1:
8513                 return 0x1;
8514         case 2:
8515                 return 0x3;
8516         case 3:
8517                 return 0x7;
8518         case 4:
8519                 return 0xf;
8520         case 5:
8521                 return 0x1f;
8522         case 6:
8523         default:
8524                 return 0x3f;
8525         }
8526 }
8527
8528 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8529                                   struct amdgpu_encoder *aencoder,
8530                                   uint32_t link_index)
8531 {
8532         struct amdgpu_device *adev = drm_to_adev(dev);
8533
8534         int res = drm_encoder_init(dev,
8535                                    &aencoder->base,
8536                                    &amdgpu_dm_encoder_funcs,
8537                                    DRM_MODE_ENCODER_TMDS,
8538                                    NULL);
8539
8540         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8541
8542         if (!res)
8543                 aencoder->encoder_id = link_index;
8544         else
8545                 aencoder->encoder_id = -1;
8546
8547         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8548
8549         return res;
8550 }
8551
8552 static void manage_dm_interrupts(struct amdgpu_device *adev,
8553                                  struct amdgpu_crtc *acrtc,
8554                                  bool enable)
8555 {
8556         /*
8557          * We have no guarantee that the frontend index maps to the same
8558          * backend index - some even map to more than one.
8559          *
8560          * TODO: Use a different interrupt or check DC itself for the mapping.
8561          */
8562         int irq_type =
8563                 amdgpu_display_crtc_idx_to_irq_type(
8564                         adev,
8565                         acrtc->crtc_id);
8566
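        /*
         * The disable path below mirrors the enable path in reverse:
         * interrupts are released before vblank handling is shut down,
         * just as vblank handling is turned on before the interrupts
         * are requested.
         */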
8567         if (enable) {
8568                 drm_crtc_vblank_on(&acrtc->base);
8569                 amdgpu_irq_get(
8570                         adev,
8571                         &adev->pageflip_irq,
8572                         irq_type);
8573 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8574                 amdgpu_irq_get(
8575                         adev,
8576                         &adev->vline0_irq,
8577                         irq_type);
8578 #endif
8579         } else {
8580 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8581                 amdgpu_irq_put(
8582                         adev,
8583                         &adev->vline0_irq,
8584                         irq_type);
8585 #endif
8586                 amdgpu_irq_put(
8587                         adev,
8588                         &adev->pageflip_irq,
8589                         irq_type);
8590                 drm_crtc_vblank_off(&acrtc->base);
8591         }
8592 }
8593
8594 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8595                                       struct amdgpu_crtc *acrtc)
8596 {
8597         int irq_type =
8598                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8599
        /*
         * Read the current IRQ state and forcibly reapply the setting
         * to hardware.
         */
8604         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8605 }
8606
8607 static bool
8608 is_scaling_state_different(const struct dm_connector_state *dm_state,
8609                            const struct dm_connector_state *old_dm_state)
8610 {
8611         if (dm_state->scaling != old_dm_state->scaling)
8612                 return true;
8613         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8614                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8615                         return true;
        } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8617                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8618                         return true;
8619         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8620                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8621                 return true;
8622         return false;
8623 }
8624
8625 #ifdef CONFIG_DRM_AMD_DC_HDCP
8626 static bool is_content_protection_different(struct drm_connector_state *state,
8627                                             const struct drm_connector_state *old_state,
8628                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8629 {
8630         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8631         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8632
8633         /* Handle: Type0/1 change */
8634         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8635             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8636                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8637                 return true;
8638         }
8639
        /* CP is being re-enabled, ignore this.
         *
         * Handles:     ENABLED -> DESIRED
         */
8644         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8645             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8646                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8647                 return false;
8648         }
8649
        /* S3 resume case: the old state is always UNDESIRED (0) while the
         * restored state is ENABLED.
         *
         * Handles:     UNDESIRED -> ENABLED
         */
8654         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8655             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8656                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8657
8658         /* Stream removed and re-enabled
8659          *
8660          * Can sometimes overlap with the HPD case,
8661          * thus set update_hdcp to false to avoid
8662          * setting HDCP multiple times.
8663          *
8664          * Handles:     DESIRED -> DESIRED (Special case)
8665          */
8666         if (!(old_state->crtc && old_state->crtc->enabled) &&
8667                 state->crtc && state->crtc->enabled &&
8668                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8669                 dm_con_state->update_hdcp = false;
8670                 return true;
8671         }
8672
8673         /* Hot-plug, headless s3, dpms
8674          *
8675          * Only start HDCP if the display is connected/enabled.
8676          * update_hdcp flag will be set to false until the next
8677          * HPD comes in.
8678          *
8679          * Handles:     DESIRED -> DESIRED (Special case)
8680          */
8681         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8682             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8683                 dm_con_state->update_hdcp = false;
8684                 return true;
8685         }
8686
8687         /*
8688          * Handles:     UNDESIRED -> UNDESIRED
8689          *              DESIRED -> DESIRED
8690          *              ENABLED -> ENABLED
8691          */
8692         if (old_state->content_protection == state->content_protection)
8693                 return false;
8694
8695         /*
8696          * Handles:     UNDESIRED -> DESIRED
8697          *              DESIRED -> UNDESIRED
8698          *              ENABLED -> UNDESIRED
8699          */
8700         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8701                 return true;
8702
8703         /*
8704          * Handles:     DESIRED -> ENABLED
8705          */
8706         return false;
8707 }
8708
8709 #endif
8710 static void remove_stream(struct amdgpu_device *adev,
8711                           struct amdgpu_crtc *acrtc,
8712                           struct dc_stream_state *stream)
8713 {
        /*
         * Update-mode case: the stream is being removed from this CRTC,
         * so mark the CRTC disabled and detach it from its OTG instance.
         */
8715
8716         acrtc->otg_inst = -1;
8717         acrtc->enabled = false;
8718 }
8719
8720 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8721                                struct dc_cursor_position *position)
8722 {
8723         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8724         int x, y;
8725         int xorigin = 0, yorigin = 0;
8726
8727         if (!crtc || !plane->state->fb)
8728                 return 0;
8729
8730         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8731             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8732                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8733                           __func__,
8734                           plane->state->crtc_w,
8735                           plane->state->crtc_h);
8736                 return -EINVAL;
8737         }
8738
8739         x = plane->state->crtc_x;
8740         y = plane->state->crtc_y;
8741
8742         if (x <= -amdgpu_crtc->max_cursor_width ||
8743             y <= -amdgpu_crtc->max_cursor_height)
8744                 return 0;
8745
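        /*
         * A cursor partially off the top/left edge cannot be given a
         * negative position here; clamp the position to 0 and shift the
         * hotspot instead. E.g. (illustrative) x = -10 becomes x = 0 with
         * x_hotspot = 10, keeping the visible part of the cursor aligned.
         */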
8746         if (x < 0) {
8747                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8748                 x = 0;
8749         }
8750         if (y < 0) {
8751                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8752                 y = 0;
8753         }
8754         position->enable = true;
8755         position->translate_by_source = true;
8756         position->x = x;
8757         position->y = y;
8758         position->x_hotspot = xorigin;
8759         position->y_hotspot = yorigin;
8760
8761         return 0;
8762 }
8763
8764 static void handle_cursor_update(struct drm_plane *plane,
8765                                  struct drm_plane_state *old_plane_state)
8766 {
8767         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8768         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8769         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8770         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8771         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8772         uint64_t address = afb ? afb->address : 0;
8773         struct dc_cursor_position position = {0};
8774         struct dc_cursor_attributes attributes;
8775         int ret;
8776
8777         if (!plane->state->fb && !old_plane_state->fb)
8778                 return;
8779
8780         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8781                       __func__,
8782                       amdgpu_crtc->crtc_id,
8783                       plane->state->crtc_w,
8784                       plane->state->crtc_h);
8785
8786         ret = get_cursor_position(plane, crtc, &position);
8787         if (ret)
8788                 return;
8789
8790         if (!position.enable) {
8791                 /* turn off cursor */
8792                 if (crtc_state && crtc_state->stream) {
8793                         mutex_lock(&adev->dm.dc_lock);
8794                         dc_stream_set_cursor_position(crtc_state->stream,
8795                                                       &position);
8796                         mutex_unlock(&adev->dm.dc_lock);
8797                 }
8798                 return;
8799         }
8800
8801         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8802         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8803
8804         memset(&attributes, 0, sizeof(attributes));
8805         attributes.address.high_part = upper_32_bits(address);
8806         attributes.address.low_part  = lower_32_bits(address);
8807         attributes.width             = plane->state->crtc_w;
8808         attributes.height            = plane->state->crtc_h;
8809         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8810         attributes.rotation_angle    = 0;
8811         attributes.attribute_flags.value = 0;
8812
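        /* DC expects the pitch in pixels; DRM stores it in bytes. */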
8813         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8814
8815         if (crtc_state->stream) {
8816                 mutex_lock(&adev->dm.dc_lock);
8817                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8818                                                          &attributes))
8819                         DRM_ERROR("DC failed to set cursor attributes\n");
8820
8821                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8822                                                    &position))
8823                         DRM_ERROR("DC failed to set cursor position\n");
8824                 mutex_unlock(&adev->dm.dc_lock);
8825         }
8826 }
8827
8828 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8829 {
8831         assert_spin_locked(&acrtc->base.dev->event_lock);
8832         WARN_ON(acrtc->event);
8833
8834         acrtc->event = acrtc->base.state->event;
8835
8836         /* Set the flip status */
8837         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8838
8839         /* Mark this event as consumed */
8840         acrtc->base.state->event = NULL;
8841
8842         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8843                      acrtc->crtc_id);
8844 }
8845
8846 static void update_freesync_state_on_stream(
8847         struct amdgpu_display_manager *dm,
8848         struct dm_crtc_state *new_crtc_state,
8849         struct dc_stream_state *new_stream,
8850         struct dc_plane_state *surface,
8851         u32 flip_timestamp_in_us)
8852 {
8853         struct mod_vrr_params vrr_params;
8854         struct dc_info_packet vrr_infopacket = {0};
8855         struct amdgpu_device *adev = dm->adev;
8856         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8857         unsigned long flags;
8858         bool pack_sdp_v1_3 = false;
8859
8860         if (!new_stream)
8861                 return;
8862
8863         /*
8864          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8865          * For now it's sufficient to just guard against these conditions.
8866          */
8867
8868         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8869                 return;
8870
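        /*
         * vrr_params lives in dm_irq_params and is also accessed from the
         * interrupt handlers, so every access must hold the event_lock.
         */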
8871         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8872         vrr_params = acrtc->dm_irq_params.vrr_params;
8873
8874         if (surface) {
8875                 mod_freesync_handle_preflip(
8876                         dm->freesync_module,
8877                         surface,
8878                         new_stream,
8879                         flip_timestamp_in_us,
8880                         &vrr_params);
8881
8882                 if (adev->family < AMDGPU_FAMILY_AI &&
8883                     amdgpu_dm_vrr_active(new_crtc_state)) {
8884                         mod_freesync_handle_v_update(dm->freesync_module,
8885                                                      new_stream, &vrr_params);
8886
8887                         /* Need to call this before the frame ends. */
8888                         dc_stream_adjust_vmin_vmax(dm->dc,
8889                                                    new_crtc_state->stream,
8890                                                    &vrr_params.adjust);
8891                 }
8892         }
8893
8894         mod_freesync_build_vrr_infopacket(
8895                 dm->freesync_module,
8896                 new_stream,
8897                 &vrr_params,
8898                 PACKET_TYPE_VRR,
8899                 TRANSFER_FUNC_UNKNOWN,
8900                 &vrr_infopacket,
8901                 pack_sdp_v1_3);
8902
8903         new_crtc_state->freesync_timing_changed |=
8904                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8905                         &vrr_params.adjust,
8906                         sizeof(vrr_params.adjust)) != 0);
8907
8908         new_crtc_state->freesync_vrr_info_changed |=
8909                 (memcmp(&new_crtc_state->vrr_infopacket,
8910                         &vrr_infopacket,
8911                         sizeof(vrr_infopacket)) != 0);
8912
8913         acrtc->dm_irq_params.vrr_params = vrr_params;
8914         new_crtc_state->vrr_infopacket = vrr_infopacket;
8915
8916         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8917         new_stream->vrr_infopacket = vrr_infopacket;
8918
8919         if (new_crtc_state->freesync_vrr_info_changed)
8920                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8921                               new_crtc_state->base.crtc->base.id,
8922                               (int)new_crtc_state->base.vrr_enabled,
8923                               (int)vrr_params.state);
8924
8925         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8926 }
8927
8928 static void update_stream_irq_parameters(
8929         struct amdgpu_display_manager *dm,
8930         struct dm_crtc_state *new_crtc_state)
8931 {
8932         struct dc_stream_state *new_stream = new_crtc_state->stream;
8933         struct mod_vrr_params vrr_params;
8934         struct mod_freesync_config config = new_crtc_state->freesync_config;
8935         struct amdgpu_device *adev = dm->adev;
8936         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8937         unsigned long flags;
8938
8939         if (!new_stream)
8940                 return;
8941
8942         /*
8943          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8944          * For now it's sufficient to just guard against these conditions.
8945          */
8946         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8947                 return;
8948
8949         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8950         vrr_params = acrtc->dm_irq_params.vrr_params;
8951
8952         if (new_crtc_state->vrr_supported &&
8953             config.min_refresh_in_uhz &&
8954             config.max_refresh_in_uhz) {
                /*
                 * If a FreeSync-compatible mode was set, config.state has
                 * already been set in atomic check.
                 */
8959                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8960                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8961                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8962                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8963                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8964                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8965                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8966                 } else {
8967                         config.state = new_crtc_state->base.vrr_enabled ?
8968                                                      VRR_STATE_ACTIVE_VARIABLE :
8969                                                      VRR_STATE_INACTIVE;
8970                 }
8971         } else {
8972                 config.state = VRR_STATE_UNSUPPORTED;
8973         }
8974
8975         mod_freesync_build_vrr_params(dm->freesync_module,
8976                                       new_stream,
8977                                       &config, &vrr_params);
8978
8979         new_crtc_state->freesync_timing_changed |=
8980                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8981                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8982
8983         new_crtc_state->freesync_config = config;
8984         /* Copy state for access from DM IRQ handler */
8985         acrtc->dm_irq_params.freesync_config = config;
8986         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8987         acrtc->dm_irq_params.vrr_params = vrr_params;
8988         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8989 }
8990
8991 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8992                                             struct dm_crtc_state *new_state)
8993 {
8994         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8995         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8996
8997         if (!old_vrr_active && new_vrr_active) {
                /* Transition VRR inactive -> active:
                 * While VRR is active, we must not disable vblank irq, as a
                 * reenable after disable would compute bogus vblank/pflip
                 * timestamps if the disable happened inside the display
                 * front porch.
                 *
                 * We also need the vupdate irq for the actual core vblank
                 * handling at end of vblank.
                 */
9006                 dm_set_vupdate_irq(new_state->base.crtc, true);
9007                 drm_crtc_vblank_get(new_state->base.crtc);
9008                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9009                                  __func__, new_state->base.crtc->base.id);
9010         } else if (old_vrr_active && !new_vrr_active) {
9011                 /* Transition VRR active -> inactive:
9012                  * Allow vblank irq disable again for fixed refresh rate.
9013                  */
9014                 dm_set_vupdate_irq(new_state->base.crtc, false);
9015                 drm_crtc_vblank_put(new_state->base.crtc);
9016                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9017                                  __func__, new_state->base.crtc->base.id);
9018         }
9019 }
9020
9021 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9022 {
9023         struct drm_plane *plane;
9024         struct drm_plane_state *old_plane_state;
9025         int i;
9026
9027         /*
9028          * TODO: Make this per-stream so we don't issue redundant updates for
9029          * commits with multiple streams.
9030          */
9031         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9032                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9033                         handle_cursor_update(plane, old_plane_state);
9034 }
9035
9036 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9037                                     struct dc_state *dc_state,
9038                                     struct drm_device *dev,
9039                                     struct amdgpu_display_manager *dm,
9040                                     struct drm_crtc *pcrtc,
9041                                     bool wait_for_vblank)
9042 {
9043         uint32_t i;
9044         uint64_t timestamp_ns;
9045         struct drm_plane *plane;
9046         struct drm_plane_state *old_plane_state, *new_plane_state;
9047         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9048         struct drm_crtc_state *new_pcrtc_state =
9049                         drm_atomic_get_new_crtc_state(state, pcrtc);
9050         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9051         struct dm_crtc_state *dm_old_crtc_state =
9052                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9053         int planes_count = 0, vpos, hpos;
9054         long r;
9055         unsigned long flags;
9056         struct amdgpu_bo *abo;
9057         uint32_t target_vblank, last_flip_vblank;
9058         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9059         bool pflip_present = false;
9060         struct {
9061                 struct dc_surface_update surface_updates[MAX_SURFACES];
9062                 struct dc_plane_info plane_infos[MAX_SURFACES];
9063                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9064                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9065                 struct dc_stream_update stream_update;
9066         } *bundle;
9067
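        /*
         * The bundle aggregates per-plane surface updates, plane infos,
         * scaling infos and flip addresses plus one stream update. With
         * MAX_SURFACES entries it is too large to put on the kernel
         * stack, hence the kzalloc().
         */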
9068         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9069
9070         if (!bundle) {
9071                 dm_error("Failed to allocate update bundle\n");
9072                 goto cleanup;
9073         }
9074
9075         /*
9076          * Disable the cursor first if we're disabling all the planes.
9077          * It'll remain on the screen after the planes are re-enabled
9078          * if we don't.
9079          */
9080         if (acrtc_state->active_planes == 0)
9081                 amdgpu_dm_commit_cursors(state);
9082
9083         /* update planes when needed */
9084         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9085                 struct drm_crtc *crtc = new_plane_state->crtc;
9086                 struct drm_crtc_state *new_crtc_state;
9087                 struct drm_framebuffer *fb = new_plane_state->fb;
9088                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9089                 bool plane_needs_flip;
9090                 struct dc_plane_state *dc_plane;
9091                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9092
9093                 /* Cursor plane is handled after stream updates */
9094                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9095                         continue;
9096
9097                 if (!fb || !crtc || pcrtc != crtc)
9098                         continue;
9099
9100                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9101                 if (!new_crtc_state->active)
9102                         continue;
9103
9104                 dc_plane = dm_new_plane_state->dc_state;
9105
9106                 bundle->surface_updates[planes_count].surface = dc_plane;
9107                 if (new_pcrtc_state->color_mgmt_changed) {
9108                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9109                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9110                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9111                 }
9112
9113                 fill_dc_scaling_info(dm->adev, new_plane_state,
9114                                      &bundle->scaling_infos[planes_count]);
9115
9116                 bundle->surface_updates[planes_count].scaling_info =
9117                         &bundle->scaling_infos[planes_count];
9118
9119                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9120
9121                 pflip_present = pflip_present || plane_needs_flip;
9122
9123                 if (!plane_needs_flip) {
9124                         planes_count += 1;
9125                         continue;
9126                 }
9127
9128                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9129
                /*
                 * Wait for all fences on this FB, with a bounded timeout to
                 * avoid deadlocking during GPU reset, when the fence may
                 * never signal while we hold the BO's reservation lock.
                 */
9135                 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9136                                           msecs_to_jiffies(5000));
9137                 if (unlikely(r <= 0))
9138                         DRM_ERROR("Waiting for fences timed out!");
9139
9140                 fill_dc_plane_info_and_addr(
9141                         dm->adev, new_plane_state,
9142                         afb->tiling_flags,
9143                         &bundle->plane_infos[planes_count],
9144                         &bundle->flip_addrs[planes_count].address,
9145                         afb->tmz_surface, false);
9146
9147                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9148                                  new_plane_state->plane->index,
9149                                  bundle->plane_infos[planes_count].dcc.enable);
9150
9151                 bundle->surface_updates[planes_count].plane_info =
9152                         &bundle->plane_infos[planes_count];
9153
9154                 /*
9155                  * Only allow immediate flips for fast updates that don't
                 * change FB pitch, DCC state, rotation or mirroring.
9157                  */
9158                 bundle->flip_addrs[planes_count].flip_immediate =
9159                         crtc->state->async_flip &&
9160                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9161
9162                 timestamp_ns = ktime_get_ns();
9163                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9164                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9165                 bundle->surface_updates[planes_count].surface = dc_plane;
9166
9167                 if (!bundle->surface_updates[planes_count].surface) {
9168                         DRM_ERROR("No surface for CRTC: id=%d\n",
9169                                         acrtc_attach->crtc_id);
9170                         continue;
9171                 }
9172
9173                 if (plane == pcrtc->primary)
9174                         update_freesync_state_on_stream(
9175                                 dm,
9176                                 acrtc_state,
9177                                 acrtc_state->stream,
9178                                 dc_plane,
9179                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9180
9181                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9182                                  __func__,
9183                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9184                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9185
9186                 planes_count += 1;
9187
9188         }
9189
9190         if (pflip_present) {
9191                 if (!vrr_active) {
9192                         /* Use old throttling in non-vrr fixed refresh rate mode
9193                          * to keep flip scheduling based on target vblank counts
9194                          * working in a backwards compatible way, e.g., for
9195                          * clients using the GLX_OML_sync_control extension or
9196                          * DRI3/Present extension with defined target_msc.
9197                          */
9198                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
                } else {
9201                         /* For variable refresh rate mode only:
9202                          * Get vblank of last completed flip to avoid > 1 vrr
9203                          * flips per video frame by use of throttling, but allow
9204                          * flip programming anywhere in the possibly large
9205                          * variable vrr vblank interval for fine-grained flip
9206                          * timing control and more opportunity to avoid stutter
9207                          * on late submission of flips.
9208                          */
9209                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9210                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9211                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9212                 }
9213
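                /*
                 * wait_for_vblank is used as arithmetic 0/1 here: the flip
                 * is either throttled to the vblank following the last
                 * completed flip (+1) or allowed immediately (+0).
                 */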
9214                 target_vblank = last_flip_vblank + wait_for_vblank;
9215
9216                 /*
9217                  * Wait until we're out of the vertical blank period before the one
9218                  * targeted by the flip
9219                  */
9220                 while ((acrtc_attach->enabled &&
9221                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9222                                                             0, &vpos, &hpos, NULL,
9223                                                             NULL, &pcrtc->hwmode)
9224                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9225                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9226                         (int)(target_vblank -
9227                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9228                         usleep_range(1000, 1100);
9229                 }
9230
                /*
                 * Prepare the flip event for the pageflip interrupt to
                 * handle.
                 *
                 * This only works in the case where we've already turned
                 * on the appropriate hardware blocks (e.g. HUBP) so in the
                 * transition case from 0 -> n planes we have to skip a
                 * hardware generated event and rely on sending it from
                 * software.
                 */
9239                 if (acrtc_attach->base.state->event &&
9240                     acrtc_state->active_planes > 0 &&
9241                     !acrtc_state->force_dpms_off) {
9242                         drm_crtc_vblank_get(pcrtc);
9243
9244                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9245
9246                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9247                         prepare_flip_isr(acrtc_attach);
9248
9249                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9250                 }
9251
9252                 if (acrtc_state->stream) {
9253                         if (acrtc_state->freesync_vrr_info_changed)
9254                                 bundle->stream_update.vrr_infopacket =
9255                                         &acrtc_state->stream->vrr_infopacket;
9256                 }
9257         }
9258
9259         /* Update the planes if changed or disable if we don't have any. */
9260         if ((planes_count || acrtc_state->active_planes == 0) &&
9261                 acrtc_state->stream) {
9262 #if defined(CONFIG_DRM_AMD_DC_DCN)
9263                 /*
9264                  * If PSR or idle optimizations are enabled then flush out
9265                  * any pending work before hardware programming.
9266                  */
9267                 if (dm->vblank_control_workqueue)
9268                         flush_workqueue(dm->vblank_control_workqueue);
9269 #endif
9270
9271                 bundle->stream_update.stream = acrtc_state->stream;
9272                 if (new_pcrtc_state->mode_changed) {
9273                         bundle->stream_update.src = acrtc_state->stream->src;
9274                         bundle->stream_update.dst = acrtc_state->stream->dst;
9275                 }
9276
9277                 if (new_pcrtc_state->color_mgmt_changed) {
9278                         /*
9279                          * TODO: This isn't fully correct since we've actually
9280                          * already modified the stream in place.
9281                          */
9282                         bundle->stream_update.gamut_remap =
9283                                 &acrtc_state->stream->gamut_remap_matrix;
9284                         bundle->stream_update.output_csc_transform =
9285                                 &acrtc_state->stream->csc_color_matrix;
9286                         bundle->stream_update.out_transfer_func =
9287                                 acrtc_state->stream->out_transfer_func;
9288                 }
9289
9290                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9291                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9292                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9293
9294                 /*
9295                  * If FreeSync state on the stream has changed then we need to
9296                  * re-adjust the min/max bounds now that DC doesn't handle this
9297                  * as part of commit.
9298                  */
9299                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9300                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9301                         dc_stream_adjust_vmin_vmax(
9302                                 dm->dc, acrtc_state->stream,
9303                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9304                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9305                 }
9306                 mutex_lock(&dm->dc_lock);
9307                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9308                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9309                         amdgpu_dm_psr_disable(acrtc_state->stream);
9310
9311                 dc_commit_updates_for_stream(dm->dc,
9312                                              bundle->surface_updates,
9313                                              planes_count,
9314                                              acrtc_state->stream,
9315                                              &bundle->stream_update,
9316                                              dc_state);
9317
9318                 /**
9319                  * Enable or disable the interrupts on the backend.
9320                  *
9321                  * Most pipes are put into power gating when unused.
9322                  *
9323                  * When power gating is enabled on a pipe we lose the
9324                  * interrupt enablement state when power gating is disabled.
9325                  *
9326                  * So we need to update the IRQ control state in hardware
9327                  * whenever the pipe turns on (since it could be previously
9328                  * power gated) or off (since some pipes can't be power gated
9329                  * on some ASICs).
9330                  */
9331                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9332                         dm_update_pflip_irq_state(drm_to_adev(dev),
9333                                                   acrtc_attach);
9334
9335                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9336                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9337                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9338                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9339
9340                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9341                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9342                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9343                         struct amdgpu_dm_connector *aconn =
9344                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9345
9346                         if (aconn->psr_skip_count > 0)
9347                                 aconn->psr_skip_count--;
9348
9349                         /* Allow PSR when skip count is 0. */
9350                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9351                 } else {
9352                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9353                 }
9354
9355                 mutex_unlock(&dm->dc_lock);
9356         }
9357
9358         /*
9359          * Update cursor state *after* programming all the planes.
9360          * This avoids redundant programming in the case where we're going
9361          * to be disabling a single plane - those pipes are being disabled.
9362          */
9363         if (acrtc_state->active_planes)
9364                 amdgpu_dm_commit_cursors(state);
9365
9366 cleanup:
9367         kfree(bundle);
9368 }
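
/*
 * Illustrative sketch, not part of the driver: the VRR flip-throttling wait
 * performed in amdgpu_dm_commit_planes() above, reduced to its essentials.
 * The callbacks are hypothetical stand-ins for amdgpu_get_vblank_counter_kms()
 * and the scanout-position query; only the control flow is meant to match.
 */
static void example_wait_for_target_vblank(void *crtc, u32 target_vblank,
                                           u32 (*vblank_counter)(void *crtc),
                                           bool (*in_vblank)(void *crtc))
{
        /*
         * Poll roughly once per millisecond until the scanout has left the
         * vblank period preceding the target. The subtraction is done on
         * unsigned values and cast to int so counter wraparound is handled.
         */
        while (in_vblank(crtc) &&
               (int)(target_vblank - vblank_counter(crtc)) > 0)
                usleep_range(1000, 1100);
}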
9369
9370 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9371                                    struct drm_atomic_state *state)
9372 {
9373         struct amdgpu_device *adev = drm_to_adev(dev);
9374         struct amdgpu_dm_connector *aconnector;
9375         struct drm_connector *connector;
9376         struct drm_connector_state *old_con_state, *new_con_state;
9377         struct drm_crtc_state *new_crtc_state;
9378         struct dm_crtc_state *new_dm_crtc_state;
9379         const struct dc_stream_status *status;
9380         int i, inst;
9381
9382         /* Notify audio device removals. */
9383         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9384                 if (old_con_state->crtc != new_con_state->crtc) {
9385                         /* CRTC changes require notification. */
9386                         goto notify;
9387                 }
9388
9389                 if (!new_con_state->crtc)
9390                         continue;
9391
9392                 new_crtc_state = drm_atomic_get_new_crtc_state(
9393                         state, new_con_state->crtc);
9394
9395                 if (!new_crtc_state)
9396                         continue;
9397
9398                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9399                         continue;
9400
9401         notify:
9402                 aconnector = to_amdgpu_dm_connector(connector);
9403
9404                 mutex_lock(&adev->dm.audio_lock);
9405                 inst = aconnector->audio_inst;
9406                 aconnector->audio_inst = -1;
9407                 mutex_unlock(&adev->dm.audio_lock);
9408
9409                 amdgpu_dm_audio_eld_notify(adev, inst);
9410         }
9411
9412         /* Notify audio device additions. */
9413         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9414                 if (!new_con_state->crtc)
9415                         continue;
9416
9417                 new_crtc_state = drm_atomic_get_new_crtc_state(
9418                         state, new_con_state->crtc);
9419
9420                 if (!new_crtc_state)
9421                         continue;
9422
9423                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9424                         continue;
9425
9426                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9427                 if (!new_dm_crtc_state->stream)
9428                         continue;
9429
9430                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9431                 if (!status)
9432                         continue;
9433
9434                 aconnector = to_amdgpu_dm_connector(connector);
9435
9436                 mutex_lock(&adev->dm.audio_lock);
9437                 inst = status->audio_inst;
9438                 aconnector->audio_inst = inst;
9439                 mutex_unlock(&adev->dm.audio_lock);
9440
9441                 amdgpu_dm_audio_eld_notify(adev, inst);
9442         }
9443 }
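
/*
 * Illustrative sketch, not part of the driver: the locking pattern used by
 * both loops in amdgpu_dm_commit_audio() above, shown for the addition path
 * (the removal path instead captures the old instance before clearing it to
 * -1). The audio instance is only touched under audio_lock, while the ELD
 * notification, which may sleep, is issued after the lock is dropped.
 */
static void example_audio_update_and_notify(struct amdgpu_device *adev,
                                            struct amdgpu_dm_connector *aconnector,
                                            int new_inst)
{
        int inst;

        mutex_lock(&adev->dm.audio_lock);
        inst = new_inst;                /* HW audio instance for this stream */
        aconnector->audio_inst = inst;
        mutex_unlock(&adev->dm.audio_lock);

        amdgpu_dm_audio_eld_notify(adev, inst);
}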
9444
9445 /**
9446  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9447  * @crtc_state: the DRM CRTC state
9448  * @stream_state: the DC stream state.
9449  *
9450  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9451  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9452  */
9453 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9454                                                 struct dc_stream_state *stream_state)
9455 {
9456         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9457 }
9458
9459 /**
9460  * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
9461  * @state: The atomic state to commit
9462  *
9463  * This will tell DC to commit the constructed DC state from atomic_check,
9464  * programming the hardware. Any failure here implies a hardware failure, since
9465  * atomic check should have filtered out anything non-kosher.
9466  */
9467 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9468 {
9469         struct drm_device *dev = state->dev;
9470         struct amdgpu_device *adev = drm_to_adev(dev);
9471         struct amdgpu_display_manager *dm = &adev->dm;
9472         struct dm_atomic_state *dm_state;
9473         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9474         uint32_t i, j;
9475         struct drm_crtc *crtc;
9476         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9477         unsigned long flags;
9478         bool wait_for_vblank = true;
9479         struct drm_connector *connector;
9480         struct drm_connector_state *old_con_state, *new_con_state;
9481         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9482         int crtc_disable_count = 0;
9483         bool mode_set_reset_required = false;
9484
9485         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9486
9487         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9488
9489         dm_state = dm_atomic_get_new_state(state);
9490         if (dm_state && dm_state->context) {
9491                 dc_state = dm_state->context;
9492         } else {
9493                 /* No state changes, retain current state. */
9494                 dc_state_temp = dc_create_state(dm->dc);
9495                 ASSERT(dc_state_temp);
9496                 dc_state = dc_state_temp;
9497                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9498         }
9499
9500         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9501                                        new_crtc_state, i) {
9502                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9503
9504                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9505
9506                 if (old_crtc_state->active &&
9507                     (!new_crtc_state->active ||
9508                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9509                         manage_dm_interrupts(adev, acrtc, false);
9510                         dc_stream_release(dm_old_crtc_state->stream);
9511                 }
9512         }
9513
9514         drm_atomic_helper_calc_timestamping_constants(state);
9515
9516         /* update changed items */
9517         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9518                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9519
9520                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9521                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9522
9523                 DRM_DEBUG_ATOMIC(
9524                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9525                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9526                         "connectors_changed:%d\n",
9527                         acrtc->crtc_id,
9528                         new_crtc_state->enable,
9529                         new_crtc_state->active,
9530                         new_crtc_state->planes_changed,
9531                         new_crtc_state->mode_changed,
9532                         new_crtc_state->active_changed,
9533                         new_crtc_state->connectors_changed);
9534
9535                 /* Disable cursor if disabling crtc */
9536                 if (old_crtc_state->active && !new_crtc_state->active) {
9537                         struct dc_cursor_position position;
9538
9539                         memset(&position, 0, sizeof(position));
9540                         mutex_lock(&dm->dc_lock);
9541                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9542                         mutex_unlock(&dm->dc_lock);
9543                 }
9544
9545                 /* Copy all transient state flags into dc state */
9546                 if (dm_new_crtc_state->stream) {
9547                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9548                                                             dm_new_crtc_state->stream);
9549                 }
9550
9551                 /* Handle the headless hotplug case, updating new_state
9552                  * and aconnector as needed.
9553                  */
9554
9555                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9556
9557                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9558
9559                         if (!dm_new_crtc_state->stream) {
9560                                  * This could happen because of issues with
9561                                  * userspace notification delivery.
9562                                  * In this case userspace tries to set a mode
9563                                  * on a display which is in fact disconnected.
9564                                  * dc_sink is NULL on the aconnector here and
9565                                  * we expect a mode reset to come soon.
9566                                  *
9567                                  * This can also happen when an unplug occurs
9568                                  * during the resume sequence.
9569                                  *
9570                                  * In either case, we want to pretend we still
9571                                  * have a sink to keep the pipe running so that
9572                                  * hw state is consistent with the sw state.
9573                                  */
9574                                  */
9575                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9576                                                 __func__, acrtc->base.base.id);
9577                                 continue;
9578                         }
9579
9580                         if (dm_old_crtc_state->stream)
9581                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9582
9583                         pm_runtime_get_noresume(dev->dev);
9584
9585                         acrtc->enabled = true;
9586                         acrtc->hw_mode = new_crtc_state->mode;
9587                         crtc->hwmode = new_crtc_state->mode;
9588                         mode_set_reset_required = true;
9589                 } else if (modereset_required(new_crtc_state)) {
9590                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9591                         /* i.e. reset mode */
9592                         if (dm_old_crtc_state->stream)
9593                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9594
9595                         mode_set_reset_required = true;
9596                 }
9597         } /* for_each_crtc_in_state() */
9598
9599         if (dc_state) {
9600                 /* If there was a mode set or reset, disable eDP PSR */
9601                 if (mode_set_reset_required) {
9602 #if defined(CONFIG_DRM_AMD_DC_DCN)
9603                         if (dm->vblank_control_workqueue)
9604                                 flush_workqueue(dm->vblank_control_workqueue);
9605 #endif
9606                         amdgpu_dm_psr_disable_all(dm);
9607                 }
9608
9609                 dm_enable_per_frame_crtc_master_sync(dc_state);
9610                 mutex_lock(&dm->dc_lock);
9611                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9612 #if defined(CONFIG_DRM_AMD_DC_DCN)
9613                 /* Allow idle optimization when vblank count is 0 for display off */
9614                 if (dm->active_vblank_irq_count == 0)
9615                         dc_allow_idle_optimizations(dm->dc, true);
9616 #endif
9617                 mutex_unlock(&dm->dc_lock);
9618         }
9619
9620         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9621                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9622
9623                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9624
9625                 if (dm_new_crtc_state->stream != NULL) {
9626                         const struct dc_stream_status *status =
9627                                         dc_stream_get_status(dm_new_crtc_state->stream);
9628
9629                         if (!status)
9630                                 status = dc_stream_get_status_from_state(dc_state,
9631                                                                          dm_new_crtc_state->stream);
9632                         if (!status)
9633                                 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
9634                         else
9635                                 acrtc->otg_inst = status->primary_otg_inst;
9636                 }
9637         }
9638 #ifdef CONFIG_DRM_AMD_DC_HDCP
9639         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9640                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9641                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9642                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9643
9644                 new_crtc_state = NULL;
9645
9646                 if (acrtc)
9647                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9648
9649                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9650
9651                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9652                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9653                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9654                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9655                         dm_new_con_state->update_hdcp = true;
9656                         continue;
9657                 }
9658
9659                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9660                         hdcp_update_display(
9661                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9662                                 new_con_state->hdcp_content_type,
9663                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9664         }
9665 #endif
9666
9667         /* Handle connector state changes */
9668         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9669                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9670                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9671                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9672                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9673                 struct dc_stream_update stream_update;
9674                 struct dc_info_packet hdr_packet;
9675                 struct dc_stream_status *status = NULL;
9676                 bool abm_changed, hdr_changed, scaling_changed;
9677
9678                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9679                 memset(&stream_update, 0, sizeof(stream_update));
9680
9681                 if (acrtc) {
9682                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9683                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9684                 }
9685
9686                 /* Skip any modesets/resets */
9687                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9688                         continue;
9689
9690                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9691                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9692
9693                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9694                                                              dm_old_con_state);
9695
9696                 abm_changed = dm_new_crtc_state->abm_level !=
9697                               dm_old_crtc_state->abm_level;
9698
9699                 hdr_changed =
9700                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9701
9702                 if (!scaling_changed && !abm_changed && !hdr_changed)
9703                         continue;
9704
9705                 stream_update.stream = dm_new_crtc_state->stream;
9706                 if (scaling_changed) {
9707                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9708                                         dm_new_con_state, dm_new_crtc_state->stream);
9709
9710                         stream_update.src = dm_new_crtc_state->stream->src;
9711                         stream_update.dst = dm_new_crtc_state->stream->dst;
9712                 }
9713
9714                 if (abm_changed) {
9715                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9716
9717                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9718                 }
9719
9720                 if (hdr_changed) {
9721                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9722                         stream_update.hdr_static_metadata = &hdr_packet;
9723                 }
9724
9725                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9726
9727                 if (WARN_ON(!status))
9728                         continue;
9729
9730                 WARN_ON(!status->plane_count);
9731
9732                 /*
9733                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9734                  * Here we create an empty update on each plane.
9735                  * To fix this, DC should permit updating only stream properties.
9736                  */
9737                 for (j = 0; j < status->plane_count; j++)
9738                         dummy_updates[j].surface = status->plane_states[0];
9739
9740
9741                 mutex_lock(&dm->dc_lock);
9742                 dc_commit_updates_for_stream(dm->dc,
9743                                              dummy_updates,
9744                                              status->plane_count,
9745                                              dm_new_crtc_state->stream,
9746                                              &stream_update,
9747                                              dc_state);
9748                 mutex_unlock(&dm->dc_lock);
9749         }
9750
9751         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9752         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9753                                       new_crtc_state, i) {
9754                 if (old_crtc_state->active && !new_crtc_state->active)
9755                         crtc_disable_count++;
9756
9757                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9758                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9759
9760                 /* For freesync config update on crtc state and params for irq */
9761                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9762
9763                 /* Handle vrr on->off / off->on transitions */
9764                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9765                                                 dm_new_crtc_state);
9766         }
9767
9768         /**
9769          * Enable interrupts for CRTCs that are newly enabled or went through
9770          * a modeset. It was intentionally deferred until after the front end
9771          * state was modified to wait until the OTG was on and so the IRQ
9772          * handlers didn't access stale or invalid state.
9773          */
9774         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9775                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9776 #ifdef CONFIG_DEBUG_FS
9777                 bool configure_crc = false;
9778                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9779 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9780                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9781 #endif
9782                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9783                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9784                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9785 #endif
9786                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9787
9788                 if (new_crtc_state->active &&
9789                     (!old_crtc_state->active ||
9790                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9791                         dc_stream_retain(dm_new_crtc_state->stream);
9792                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9793                         manage_dm_interrupts(adev, acrtc, true);
9794
9795 #ifdef CONFIG_DEBUG_FS
9796                         /**
9797                          * Frontend may have changed so reapply the CRC capture
9798                          * settings for the stream.
9799                          */
9800                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9801
9802                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9803                                 configure_crc = true;
9804 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9805                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9806                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9807                                         acrtc->dm_irq_params.crc_window.update_win = true;
9808                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9809                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9810                                         crc_rd_wrk->crtc = crtc;
9811                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9812                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9813                                 }
9814 #endif
9815                         }
9816
9817                         if (configure_crc)
9818                                 if (amdgpu_dm_crtc_configure_crc_source(
9819                                         crtc, dm_new_crtc_state, cur_crc_src))
9820                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9821 #endif
9822                 }
9823         }
9824
9825         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9826                 if (new_crtc_state->async_flip)
9827                         wait_for_vblank = false;
9828
9829         /* update planes when needed per crtc*/
9830         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9831                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9832
9833                 if (dm_new_crtc_state->stream)
9834                         amdgpu_dm_commit_planes(state, dc_state, dev,
9835                                                 dm, crtc, wait_for_vblank);
9836         }
9837
9838         /* Update audio instances for each connector. */
9839         amdgpu_dm_commit_audio(dev, state);
9840
9841 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9842         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9843         /* restore the backlight level */
9844         for (i = 0; i < dm->num_of_edps; i++) {
9845                 if (dm->backlight_dev[i] &&
9846                     (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9847                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9848         }
9849 #endif
9850         /*
9851          * send vblank event on all events not handled in flip and
9852          * mark consumed event for drm_atomic_helper_commit_hw_done
9853          */
9854         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9855         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9856
9857                 if (new_crtc_state->event)
9858                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9859
9860                 new_crtc_state->event = NULL;
9861         }
9862         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9863
9864         /* Signal HW programming completion */
9865         drm_atomic_helper_commit_hw_done(state);
9866
9867         if (wait_for_vblank)
9868                 drm_atomic_helper_wait_for_flip_done(dev, state);
9869
9870         drm_atomic_helper_cleanup_planes(dev, state);
9871
9872         /* return the stolen vga memory back to VRAM */
9873         if (!adev->mman.keep_stolen_vga_memory)
9874                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9875         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9876
9877         /*
9878          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9879          * so we can put the GPU into runtime suspend if we're not driving any
9880          * displays anymore
9881          */
9882         for (i = 0; i < crtc_disable_count; i++)
9883                 pm_runtime_put_autosuspend(dev->dev);
9884         pm_runtime_mark_last_busy(dev->dev);
9885
9886         if (dc_state_temp)
9887                 dc_release_state(dc_state_temp);
9888 }
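
/*
 * Readability note (added commentary, derived from the code above): the
 * ordering in amdgpu_dm_atomic_commit_tail() is roughly
 *  1. disable interrupts and release streams for CRTCs that are turning off,
 *  2. program mode sets/resets and commit the global DC state,
 *  3. handle HDCP and non-modeset connector-level stream updates,
 *  4. re-enable interrupts once the OTG is running, then commit planes,
 *  5. send leftover vblank events, signal hw_done and wait for flip_done,
 *  6. drop one runtime PM reference per newly disabled CRTC.
 */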
9889
9890
9891 static int dm_force_atomic_commit(struct drm_connector *connector)
9892 {
9893         int ret = 0;
9894         struct drm_device *ddev = connector->dev;
9895         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9896         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9897         struct drm_plane *plane = disconnected_acrtc->base.primary;
9898         struct drm_connector_state *conn_state;
9899         struct drm_crtc_state *crtc_state;
9900         struct drm_plane_state *plane_state;
9901
9902         if (!state)
9903                 return -ENOMEM;
9904
9905         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9906
9907         /* Construct an atomic state to restore previous display setting */
9908
9909         /*
9910          * Attach connectors to drm_atomic_state
9911          */
9912         conn_state = drm_atomic_get_connector_state(state, connector);
9913
9914         ret = PTR_ERR_OR_ZERO(conn_state);
9915         if (ret)
9916                 goto out;
9917
9918         /* Attach crtc to drm_atomic_state*/
9919         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9920
9921         ret = PTR_ERR_OR_ZERO(crtc_state);
9922         if (ret)
9923                 goto out;
9924
9925         /* force a restore */
9926         crtc_state->mode_changed = true;
9927
9928         /* Attach plane to drm_atomic_state */
9929         plane_state = drm_atomic_get_plane_state(state, plane);
9930
9931         ret = PTR_ERR_OR_ZERO(plane_state);
9932         if (ret)
9933                 goto out;
9934
9935         /* Call commit internally with the state we just constructed */
9936         ret = drm_atomic_commit(state);
9937
9938 out:
9939         drm_atomic_state_put(state);
9940         if (ret)
9941                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9942
9943         return ret;
9944 }
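
/*
 * Note (added commentary): dm_force_atomic_commit() rebuilds a minimal
 * atomic state (connector + CRTC + primary plane) with mode_changed forced
 * to true, so committing it restores the previous display configuration.
 * It is used by dm_restore_drm_connector_state() below.
 */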
9945
9946 /*
9947  * This function handles all cases when a set mode does not come upon hotplug.
9948  * This includes when a display is unplugged and then plugged back into the
9949  * same port, and when running without usermode desktop manager support.
9950  */
9951 void dm_restore_drm_connector_state(struct drm_device *dev,
9952                                     struct drm_connector *connector)
9953 {
9954         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9955         struct amdgpu_crtc *disconnected_acrtc;
9956         struct dm_crtc_state *acrtc_state;
9957
9958         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9959                 return;
9960
9961         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9962         if (!disconnected_acrtc)
9963                 return;
9964
9965         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9966         if (!acrtc_state->stream)
9967                 return;
9968
9969         /*
9970          * If the previous sink is not released and differs from the current
9971          * one, we deduce that we are in a state where we cannot rely on a
9972          * usermode call to turn on the display, so we do it here.
9973          */
9974         if (acrtc_state->stream->sink != aconnector->dc_sink)
9975                 dm_force_atomic_commit(&aconnector->base);
9976 }
9977
9978 /*
9979  * Grabs all modesetting locks to serialize against any blocking commits,
9980  * then waits for completion of all non-blocking commits.
9981  */
9982 static int do_aquire_global_lock(struct drm_device *dev,
9983                                  struct drm_atomic_state *state)
9984 {
9985         struct drm_crtc *crtc;
9986         struct drm_crtc_commit *commit;
9987         long ret;
9988
9989         /*
9990          * Adding all modeset locks to acquire_ctx will
9991          * ensure that when the framework releases it, the
9992          * extra locks we are taking here will get released too.
9993          */
9994         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9995         if (ret)
9996                 return ret;
9997
9998         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9999                 spin_lock(&crtc->commit_lock);
10000                 commit = list_first_entry_or_null(&crtc->commit_list,
10001                                 struct drm_crtc_commit, commit_entry);
10002                 if (commit)
10003                         drm_crtc_commit_get(commit);
10004                 spin_unlock(&crtc->commit_lock);
10005
10006                 if (!commit)
10007                         continue;
10008
10009                 /*
10010                  * Make sure all pending HW programming has completed and
10011                  * page flips are done
10012                  */
10013                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10014
10015                 if (ret > 0)
10016                         ret = wait_for_completion_interruptible_timeout(
10017                                         &commit->flip_done, 10*HZ);
10018
10019                 if (ret == 0)
10020                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
10021                                   crtc->base.id, crtc->name);
10022
10023                 drm_crtc_commit_put(commit);
10024         }
10025
10026         return ret < 0 ? ret : 0;
10027 }
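
/*
 * Note (added commentary): the drm_crtc_commit_get() above takes a reference
 * so that the commit object stays valid while we sleep in
 * wait_for_completion_interruptible_timeout() outside of commit_lock;
 * drm_crtc_commit_put() drops it once both hw_done and flip_done have
 * signalled or the 10 second timeout has fired.
 */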
10028
10029 static void get_freesync_config_for_crtc(
10030         struct dm_crtc_state *new_crtc_state,
10031         struct dm_connector_state *new_con_state)
10032 {
10033         struct mod_freesync_config config = {0};
10034         struct amdgpu_dm_connector *aconnector =
10035                         to_amdgpu_dm_connector(new_con_state->base.connector);
10036         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10037         int vrefresh = drm_mode_vrefresh(mode);
10038         bool fs_vid_mode = false;
10039
10040         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10041                                         vrefresh >= aconnector->min_vfreq &&
10042                                         vrefresh <= aconnector->max_vfreq;
10043
10044         if (new_crtc_state->vrr_supported) {
10045                 new_crtc_state->stream->ignore_msa_timing_param = true;
10046                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10047
10048                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10049                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10050                 config.vsif_supported = true;
10051                 config.btr = true;
10052
10053                 if (fs_vid_mode) {
10054                         config.state = VRR_STATE_ACTIVE_FIXED;
10055                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10056                         goto out;
10057                 } else if (new_crtc_state->base.vrr_enabled) {
10058                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10059                 } else {
10060                         config.state = VRR_STATE_INACTIVE;
10061                 }
10062         }
10063 out:
10064         new_crtc_state->freesync_config = config;
10065 }
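
/*
 * Illustrative helper, not part of the driver: the Hz to micro-Hz scaling
 * used above when filling mod_freesync_config. For example, a 48-144 Hz
 * panel yields min/max values of 48,000,000 and 144,000,000 uhz.
 */
static inline unsigned int example_hz_to_uhz(unsigned int hz)
{
        return hz * 1000000;    /* 1 Hz = 10^6 micro-Hz */
}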
10066
10067 static void reset_freesync_config_for_crtc(
10068         struct dm_crtc_state *new_crtc_state)
10069 {
10070         new_crtc_state->vrr_supported = false;
10071
10072         memset(&new_crtc_state->vrr_infopacket, 0,
10073                sizeof(new_crtc_state->vrr_infopacket));
10074 }
10075
10076 static bool
10077 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10078                                  struct drm_crtc_state *new_crtc_state)
10079 {
10080         struct drm_display_mode old_mode, new_mode;
10081
10082         if (!old_crtc_state || !new_crtc_state)
10083                 return false;
10084
10085         old_mode = old_crtc_state->mode;
10086         new_mode = new_crtc_state->mode;
10087
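        /*
         * Added commentary: a freesync video mode is expected to differ from
         * the base mode only in its vertical front porch. The pixel clock and
         * all horizontal timings must match exactly, while vtotal, vsync_start
         * and vsync_end must differ (hence the != checks below), with the
         * vsync pulse width itself preserved.
         */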
10088         if (old_mode.clock       == new_mode.clock &&
10089             old_mode.hdisplay    == new_mode.hdisplay &&
10090             old_mode.vdisplay    == new_mode.vdisplay &&
10091             old_mode.htotal      == new_mode.htotal &&
10092             old_mode.vtotal      != new_mode.vtotal &&
10093             old_mode.hsync_start == new_mode.hsync_start &&
10094             old_mode.vsync_start != new_mode.vsync_start &&
10095             old_mode.hsync_end   == new_mode.hsync_end &&
10096             old_mode.vsync_end   != new_mode.vsync_end &&
10097             old_mode.hskew       == new_mode.hskew &&
10098             old_mode.vscan       == new_mode.vscan &&
10099             (old_mode.vsync_end - old_mode.vsync_start) ==
10100             (new_mode.vsync_end - new_mode.vsync_start))
10101                 return true;
10102
10103         return false;
10104 }
10105
10106 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10107         uint64_t num, den, res;
10108         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10109
10110         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10111
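        /*
         * Added commentary: fixed_refresh_in_uhz = pixel clock / (htotal *
         * vtotal), scaled to micro-Hz; mode.clock is in kHz, hence the extra
         * factor of 1000. Worked example: 1920x1080@60 with clock = 148500
         * kHz, htotal = 2200 and vtotal = 1125 gives
         * 148500 * 1000 * 10^6 / (2200 * 1125) = 60,000,000 uhz, i.e. 60 Hz.
         */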
10112         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10113         den = (unsigned long long)new_crtc_state->mode.htotal *
10114               (unsigned long long)new_crtc_state->mode.vtotal;
10115
10116         res = div_u64(num, den);
10117         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10118 }
10119
10120 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10121                                 struct drm_atomic_state *state,
10122                                 struct drm_crtc *crtc,
10123                                 struct drm_crtc_state *old_crtc_state,
10124                                 struct drm_crtc_state *new_crtc_state,
10125                                 bool enable,
10126                                 bool *lock_and_validation_needed)
10127 {
10128         struct dm_atomic_state *dm_state = NULL;
10129         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10130         struct dc_stream_state *new_stream;
10131         int ret = 0;
10132
10133         /*
10134          * TODO: Move this code into dm_crtc_atomic_check once we get rid
10135          * of dc_validation_set, and update changed items there.
10136          */
10137         struct amdgpu_crtc *acrtc = NULL;
10138         struct amdgpu_dm_connector *aconnector = NULL;
10139         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10140         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10141
10142         new_stream = NULL;
10143
10144         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10145         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10146         acrtc = to_amdgpu_crtc(crtc);
10147         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10148
10149         /* TODO This hack should go away */
10150         if (aconnector && enable) {
10151                 /* Make sure fake sink is created in plug-in scenario */
10152                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10153                                                             &aconnector->base);
10154                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10155                                                             &aconnector->base);
10156
10157                 if (IS_ERR(drm_new_conn_state)) {
10158                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10159                         goto fail;
10160                 }
10161
10162                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10163                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10164
10165                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10166                         goto skip_modeset;
10167
10168                 new_stream = create_validate_stream_for_sink(aconnector,
10169                                                              &new_crtc_state->mode,
10170                                                              dm_new_conn_state,
10171                                                              dm_old_crtc_state->stream);
10172
10173                 /*
10174                  * We can have no stream on ACTION_SET if a display
10175                  * was disconnected during S3. In this case it is not an
10176                  * error: the OS will be updated after detection, and
10177                  * will do the right thing on the next atomic commit.
10178                  */
10179
10180                 if (!new_stream) {
10181                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10182                                         __func__, acrtc->base.base.id);
10183                         ret = -ENOMEM;
10184                         goto fail;
10185                 }
10186
10187                 /*
10188                  * TODO: Check VSDB bits to decide whether this should
10189                  * be enabled or not.
10190                  */
10191                 new_stream->triggered_crtc_reset.enabled =
10192                         dm->force_timing_sync;
10193
10194                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10195
10196                 ret = fill_hdr_info_packet(drm_new_conn_state,
10197                                            &new_stream->hdr_static_metadata);
10198                 if (ret)
10199                         goto fail;
10200
10201                 /*
10202                  * If we already removed the old stream from the context
10203                  * (and set the new stream to NULL) then we can't reuse
10204                  * the old stream even if the stream and scaling are unchanged.
10205                  * We'll hit the BUG_ON and black screen.
10206                  *
10207                  * TODO: Refactor this function to allow this check to work
10208                  * in all conditions.
10209                  */
10210                 if (amdgpu_freesync_vid_mode &&
10211                     dm_new_crtc_state->stream &&
10212                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10213                         goto skip_modeset;
10214
10215                 if (dm_new_crtc_state->stream &&
10216                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10217                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10218                         new_crtc_state->mode_changed = false;
10219                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10220                                          new_crtc_state->mode_changed);
10221                 }
10222         }
10223
10224         /* mode_changed flag may get updated above, need to check again */
10225         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10226                 goto skip_modeset;
10227
10228         DRM_DEBUG_ATOMIC(
10229                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10230                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10231                 "connectors_changed:%d\n",
10232                 acrtc->crtc_id,
10233                 new_crtc_state->enable,
10234                 new_crtc_state->active,
10235                 new_crtc_state->planes_changed,
10236                 new_crtc_state->mode_changed,
10237                 new_crtc_state->active_changed,
10238                 new_crtc_state->connectors_changed);
10239
10240         /* Remove stream for any changed/disabled CRTC */
10241         if (!enable) {
10242
10243                 if (!dm_old_crtc_state->stream)
10244                         goto skip_modeset;
10245
10246                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10247                     is_timing_unchanged_for_freesync(new_crtc_state,
10248                                                      old_crtc_state)) {
10249                         new_crtc_state->mode_changed = false;
10250                         DRM_DEBUG_DRIVER(
10251                                 "Mode change not required for front porch change, "
10252                                 "setting mode_changed to %d\n",
10253                                 new_crtc_state->mode_changed);
10254
10255                         set_freesync_fixed_config(dm_new_crtc_state);
10256
10257                         goto skip_modeset;
10258                 } else if (amdgpu_freesync_vid_mode && aconnector &&
10259                            is_freesync_video_mode(&new_crtc_state->mode,
10260                                                   aconnector)) {
10261                         struct drm_display_mode *high_mode;
10262
10263                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10264                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10265                                 set_freesync_fixed_config(dm_new_crtc_state);
10266                         }
10267                 }
10268
10269                 ret = dm_atomic_get_state(state, &dm_state);
10270                 if (ret)
10271                         goto fail;
10272
10273                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10274                                 crtc->base.id);
10275
10276                 /* i.e. reset mode */
10277                 if (dc_remove_stream_from_ctx(
10278                                 dm->dc,
10279                                 dm_state->context,
10280                                 dm_old_crtc_state->stream) != DC_OK) {
10281                         ret = -EINVAL;
10282                         goto fail;
10283                 }
10284
10285                 dc_stream_release(dm_old_crtc_state->stream);
10286                 dm_new_crtc_state->stream = NULL;
10287
10288                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10289
10290                 *lock_and_validation_needed = true;
10291
10292         } else { /* Add stream for any updated/enabled CRTC */
10293                 /*
10294                  * Quick fix to prevent a NULL pointer dereference on new_stream
10295                  * when added MST connectors are not found in the existing crtc_state in chained mode.
10296                  * TODO: dig out the root cause of this.
10297                  */
10298                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10299                         goto skip_modeset;
10300
10301                 if (modereset_required(new_crtc_state))
10302                         goto skip_modeset;
10303
10304                 if (modeset_required(new_crtc_state, new_stream,
10305                                      dm_old_crtc_state->stream)) {
10306
10307                         WARN_ON(dm_new_crtc_state->stream);
10308
10309                         ret = dm_atomic_get_state(state, &dm_state);
10310                         if (ret)
10311                                 goto fail;
10312
10313                         dm_new_crtc_state->stream = new_stream;
10314
10315                         dc_stream_retain(new_stream);
10316
10317                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10318                                          crtc->base.id);
10319
10320                         if (dc_add_stream_to_ctx(
10321                                         dm->dc,
10322                                         dm_state->context,
10323                                         dm_new_crtc_state->stream) != DC_OK) {
10324                                 ret = -EINVAL;
10325                                 goto fail;
10326                         }
10327
10328                         *lock_and_validation_needed = true;
10329                 }
10330         }
10331
10332 skip_modeset:
10333         /* Release extra reference */
10334         if (new_stream)
10335                 dc_stream_release(new_stream);
10336
10337         /*
10338          * We want to do dc stream updates that do not require a
10339          * full modeset below.
10340          */
10341         if (!(enable && aconnector && new_crtc_state->active))
10342                 return 0;
10343         /*
10344          * Given above conditions, the dc state cannot be NULL because:
10345          * 1. We're in the process of enabling CRTCs (the stream has just
10346          *    been added to the dc context, or is already in it)
10347          * 2. Has a valid connector attached, and
10348          * 3. Is currently active and enabled.
10349          * => The dc stream state currently exists.
10350          */
10351         BUG_ON(dm_new_crtc_state->stream == NULL);
10352
10353         /* Scaling or underscan settings */
10354         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10355                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10356                 update_stream_scaling_settings(
10357                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10358
10359         /* ABM settings */
10360         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10361
10362         /*
10363          * Color management settings. We also update color properties
10364          * when a modeset is needed, to ensure it gets reprogrammed.
10365          */
10366         if (dm_new_crtc_state->base.color_mgmt_changed ||
10367             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10368                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10369                 if (ret)
10370                         goto fail;
10371         }
10372
10373         /* Update Freesync settings. */
10374         get_freesync_config_for_crtc(dm_new_crtc_state,
10375                                      dm_new_conn_state);
10376
10377         return ret;
10378
10379 fail:
10380         if (new_stream)
10381                 dc_stream_release(new_stream);
10382         return ret;
10383 }
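
/*
 * Readability note (added commentary): on the disable path above, the old
 * stream is removed from the DC context and released; on the enable path, a
 * new stream is validated, retained and added to the context. Either
 * structural change sets *lock_and_validation_needed so that the caller runs
 * a full DC validation before committing.
 */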
10384
10385 static bool should_reset_plane(struct drm_atomic_state *state,
10386                                struct drm_plane *plane,
10387                                struct drm_plane_state *old_plane_state,
10388                                struct drm_plane_state *new_plane_state)
10389 {
10390         struct drm_plane *other;
10391         struct drm_plane_state *old_other_state, *new_other_state;
10392         struct drm_crtc_state *new_crtc_state;
10393         int i;
10394
10395         /*
10396          * TODO: Remove this hack once the checks below are sufficient
10397          * to determine when we need to reset all the planes on
10398          * the stream.
10399          */
10400         if (state->allow_modeset)
10401                 return true;
10402
10403         /* Exit early if we know that we're adding or removing the plane. */
10404         if (old_plane_state->crtc != new_plane_state->crtc)
10405                 return true;
10406
10407         /* old crtc == new_crtc == NULL, plane not in context. */
10408         if (!new_plane_state->crtc)
10409                 return false;
10410
10411         new_crtc_state =
10412                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10413
10414         if (!new_crtc_state)
10415                 return true;
10416
10417         /* CRTC Degamma changes currently require us to recreate planes. */
10418         if (new_crtc_state->color_mgmt_changed)
10419                 return true;
10420
10421         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10422                 return true;
10423
10424         /*
10425          * If there are any new primary or overlay planes being added or
10426          * removed then the z-order can potentially change. To ensure
10427          * correct z-order and pipe acquisition the current DC architecture
10428          * requires us to remove and recreate all existing planes.
10429          *
10430          * TODO: Come up with a more elegant solution for this.
10431          */
10432         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10433                 struct amdgpu_framebuffer *old_afb, *new_afb;

10434                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10435                         continue;
10436
10437                 if (old_other_state->crtc != new_plane_state->crtc &&
10438                     new_other_state->crtc != new_plane_state->crtc)
10439                         continue;
10440
10441                 if (old_other_state->crtc != new_other_state->crtc)
10442                         return true;
10443
10444                 /* Src/dst size and scaling updates. */
10445                 if (old_other_state->src_w != new_other_state->src_w ||
10446                     old_other_state->src_h != new_other_state->src_h ||
10447                     old_other_state->crtc_w != new_other_state->crtc_w ||
10448                     old_other_state->crtc_h != new_other_state->crtc_h)
10449                         return true;
10450
10451                 /* Rotation / mirroring updates. */
10452                 if (old_other_state->rotation != new_other_state->rotation)
10453                         return true;
10454
10455                 /* Blending updates. */
10456                 if (old_other_state->pixel_blend_mode !=
10457                     new_other_state->pixel_blend_mode)
10458                         return true;
10459
10460                 /* Alpha updates. */
10461                 if (old_other_state->alpha != new_other_state->alpha)
10462                         return true;
10463
10464                 /* Colorspace changes. */
10465                 if (old_other_state->color_range != new_other_state->color_range ||
10466                     old_other_state->color_encoding != new_other_state->color_encoding)
10467                         return true;
10468
10469                 /* Framebuffer checks fall at the end. */
10470                 if (!old_other_state->fb || !new_other_state->fb)
10471                         continue;
10472
10473                 /* Pixel format changes can require bandwidth updates. */
10474                 if (old_other_state->fb->format != new_other_state->fb->format)
10475                         return true;
10476
10477                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10478                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10479
10480                 /* Tiling and DCC changes also require bandwidth updates. */
10481                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10482                     old_afb->base.modifier != new_afb->base.modifier)
10483                         return true;
10484         }
10485
10486         return false;
10487 }
10488
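      /*
       * Validate a framebuffer that is about to be bound to the hardware
       * cursor plane: it must fit the maximum cursor dimensions, be unscaled
       * and uncropped, use a pitch the cursor hardware accepts, and be
       * linear, since DCE/DCN cursors cannot scan out tiled surfaces.
       */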
10489 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10490                               struct drm_plane_state *new_plane_state,
10491                               struct drm_framebuffer *fb)
10492 {
10493         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10494         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10495         unsigned int pitch;
10496         bool linear;
10497
10498         if (fb->width > new_acrtc->max_cursor_width ||
10499             fb->height > new_acrtc->max_cursor_height) {
10500                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10501                                  fb->width,
10502                                  fb->height);
10503                 return -EINVAL;
10504         }
10505         if (new_plane_state->src_w != fb->width << 16 ||
10506             new_plane_state->src_h != fb->height << 16) {
10507                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10508                 return -EINVAL;
10509         }
10510
10511         /* Pitch in pixels */
10512         pitch = fb->pitches[0] / fb->format->cpp[0];
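              /* e.g. a 256-byte pitch with 4-byte XRGB8888 pixels is 64 px */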
10513
10514         if (fb->width != pitch) {
10515                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10516                                  fb->width, pitch);
10517                 return -EINVAL;
10518         }
10519
10520         switch (pitch) {
10521         case 64:
10522         case 128:
10523         case 256:
10524                 /* FB pitch is supported by cursor plane */
10525                 break;
10526         default:
10527                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10528                 return -EINVAL;
10529         }
10530
10531         /* Core DRM takes care of checking FB modifiers, so we only need to
10532          * check tiling flags when the FB doesn't have a modifier. */
10533         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10534                 if (adev->family < AMDGPU_FAMILY_AI) {
10535                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10536                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10537                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10538                 } else {
10539                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10540                 }
10541                 if (!linear) {
10542                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10543                         return -EINVAL;
10544                 }
10545         }
10546
10547         return 0;
10548 }
10549
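      /*
       * Add or remove a plane in the DC context for the given atomic state.
       * Atomic check calls this twice for every plane: once with
       * enable == false to remove changed planes from their old stream, then
       * once with enable == true to re-add them against the new stream.
       * *lock_and_validation_needed is set when the change requires DC global
       * validation before commit.
       */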
10550 static int dm_update_plane_state(struct dc *dc,
10551                                  struct drm_atomic_state *state,
10552                                  struct drm_plane *plane,
10553                                  struct drm_plane_state *old_plane_state,
10554                                  struct drm_plane_state *new_plane_state,
10555                                  bool enable,
10556                                  bool *lock_and_validation_needed)
10557 {
10559         struct dm_atomic_state *dm_state = NULL;
10560         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10561         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10562         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10563         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10564         struct amdgpu_crtc *new_acrtc;
10565         bool needs_reset;
10566         int ret = 0;
10567
10569         new_plane_crtc = new_plane_state->crtc;
10570         old_plane_crtc = old_plane_state->crtc;
10571         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10572         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10573
10574         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10575                 if (!enable || !new_plane_crtc ||
10576                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10577                         return 0;
10578
10579                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10580
10581                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10582                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10583                         return -EINVAL;
10584                 }
10585
10586                 if (new_plane_state->fb) {
10587                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10588                                                  new_plane_state->fb);
10589                         if (ret)
10590                                 return ret;
10591                 }
10592
10593                 return 0;
10594         }
10595
10596         needs_reset = should_reset_plane(state, plane, old_plane_state,
10597                                          new_plane_state);
10598
10599         /* Remove any changed/removed planes */
10600         if (!enable) {
10601                 if (!needs_reset)
10602                         return 0;
10603
10604                 if (!old_plane_crtc)
10605                         return 0;
10606
10607                 old_crtc_state = drm_atomic_get_old_crtc_state(
10608                                 state, old_plane_crtc);
10609                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10610
10611                 if (!dm_old_crtc_state->stream)
10612                         return 0;
10613
10614                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10615                                 plane->base.id, old_plane_crtc->base.id);
10616
10617                 ret = dm_atomic_get_state(state, &dm_state);
10618                 if (ret)
10619                         return ret;
10620
10621                 if (!dc_remove_plane_from_context(
10622                                 dc,
10623                                 dm_old_crtc_state->stream,
10624                                 dm_old_plane_state->dc_state,
10625                                 dm_state->context)) {
10627                         return -EINVAL;
10628                 }
10629
10631                 dc_plane_state_release(dm_old_plane_state->dc_state);
10632                 dm_new_plane_state->dc_state = NULL;
10633
10634                 *lock_and_validation_needed = true;
10635
10636         } else { /* Add new planes */
10637                 struct dc_plane_state *dc_new_plane_state;
10638
10639                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10640                         return 0;
10641
10642                 if (!new_plane_crtc)
10643                         return 0;
10644
10645                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10646                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10647
10648                 if (!dm_new_crtc_state->stream)
10649                         return 0;
10650
10651                 if (!needs_reset)
10652                         return 0;
10653
10654                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10655                 if (ret)
10656                         return ret;
10657
10658                 WARN_ON(dm_new_plane_state->dc_state);
10659
10660                 dc_new_plane_state = dc_create_plane_state(dc);
10661                 if (!dc_new_plane_state)
10662                         return -ENOMEM;
10663
10664                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10665                                  plane->base.id, new_plane_crtc->base.id);
10666
10667                 ret = fill_dc_plane_attributes(
10668                         drm_to_adev(new_plane_crtc->dev),
10669                         dc_new_plane_state,
10670                         new_plane_state,
10671                         new_crtc_state);
10672                 if (ret) {
10673                         dc_plane_state_release(dc_new_plane_state);
10674                         return ret;
10675                 }
10676
10677                 ret = dm_atomic_get_state(state, &dm_state);
10678                 if (ret) {
10679                         dc_plane_state_release(dc_new_plane_state);
10680                         return ret;
10681                 }
10682
10683                 /*
10684                  * Any atomic check errors that occur after this will
10685                  * not need a release. The plane state will be attached
10686                  * to the stream, and therefore part of the atomic
10687                  * state. It'll be released when the atomic state is
10688                  * cleaned.
10689                  */
10690                 if (!dc_add_plane_to_context(
10691                                 dc,
10692                                 dm_new_crtc_state->stream,
10693                                 dc_new_plane_state,
10694                                 dm_state->context)) {
10696                         dc_plane_state_release(dc_new_plane_state);
10697                         return -EINVAL;
10698                 }
10699
10700                 dm_new_plane_state->dc_state = dc_new_plane_state;
10701
10702                 /* Tell DC to do a full surface update every time there
10703                  * is a plane change. Inefficient, but works for now.
10704                  */
10705                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10706
10707                 *lock_and_validation_needed = true;
10708         }
10709
10711         return ret;
10712 }
10713
10714 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10715                                 struct drm_crtc *crtc,
10716                                 struct drm_crtc_state *new_crtc_state)
10717 {
10718         struct drm_plane *cursor = crtc->cursor, *underlying;
10719         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10720         int i;
10721         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10722
10723         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10724          * cursor per pipe, but it's going to inherit the scaling and
10725          * positioning from the underlying pipe. Check that the cursor plane's
10726          * scaling matches the underlying planes'. */
10727
10728         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10729         if (!new_cursor_state || !new_cursor_state->fb) {
10730                 return 0;
10731         }
10732
10733         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10734                          (new_cursor_state->src_w >> 16);
10735         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10736                          (new_cursor_state->src_h >> 16);
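              /* Scales are in thousandths: 1000 is 1:1, 2000 is a 2x upscale. */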
10737
10738         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10739                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10740                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10741                         continue;
10742
10743                 /* Ignore disabled planes */
10744                 if (!new_underlying_state->fb)
10745                         continue;
10746
10747                 underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10748                                      (new_underlying_state->src_w >> 16);
10749                 underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10750                                      (new_underlying_state->src_h >> 16);
10751
10752                 if (cursor_scale_w != underlying_scale_w ||
10753                     cursor_scale_h != underlying_scale_h) {
10754                         drm_dbg_atomic(crtc->dev,
10755                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10756                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10757                         return -EINVAL;
10758                 }
10759
10760                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10761                 if (new_underlying_state->crtc_x <= 0 &&
10762                     new_underlying_state->crtc_y <= 0 &&
10763                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10764                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10765                         break;
10766         }
10767
10768         return 0;
10769 }
10770
10771 #if defined(CONFIG_DRM_AMD_DC_DCN)
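      /*
       * If this CRTC is driven by an MST connector, pull every CRTC sharing
       * the same MST topology into the atomic state: recomputing DSC for one
       * stream can change the configuration of the others on that link.
       */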
10772 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10773 {
10774         struct drm_connector *connector;
10775         struct drm_connector_state *conn_state;
10776         struct amdgpu_dm_connector *aconnector = NULL;
10777         int i;
10778         for_each_new_connector_in_state(state, connector, conn_state, i) {
10779                 if (conn_state->crtc != crtc)
10780                         continue;
10781
10782                 aconnector = to_amdgpu_dm_connector(connector);
10783                 if (!aconnector->port || !aconnector->mst_port)
10784                         aconnector = NULL;
10785                 else
10786                         break;
10787         }
10788
10789         if (!aconnector)
10790                 return 0;
10791
10792         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10793 }
10794 #endif
10795
10796 /**
10797  * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
10798  * @dev: The DRM device
10799  * @state: The atomic state to validate
10800  *
10801  * Validate that the given atomic state is programmable by DC into hardware.
10802  * This involves constructing a &struct dc_state reflecting the new hardware
10803  * state we wish to commit, then querying DC to see if it is programmable. It's
10804  * important not to modify the existing DC state; otherwise, atomic_check
10805  * may unexpectedly commit hardware changes.
10806  *
10807  * When validating the DC state, it's important that the right locks are
10808  * acquired. For the full update case, which removes/adds/updates streams on
10809  * one CRTC while flipping on another, acquiring the global lock guarantees
10810  * that any such full update commit will wait for completion of any outstanding
10811  * flip using DRM's synchronization events.
10812  *
10813  * Note that DM adds the affected connectors for all CRTCs in state, even when
10814  * that might not seem necessary. This is because DC stream creation requires
10815  * the DC sink, which is tied to the DRM connector state. Cleaning this up
10816  * should be possible but non-trivial - a possible TODO item.
10817  *
10818  * Return: 0 on success, or a negative error code if validation failed.
10819  */
10820 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10821                                   struct drm_atomic_state *state)
10822 {
10823         struct amdgpu_device *adev = drm_to_adev(dev);
10824         struct dm_atomic_state *dm_state = NULL;
10825         struct dc *dc = adev->dm.dc;
10826         struct drm_connector *connector;
10827         struct drm_connector_state *old_con_state, *new_con_state;
10828         struct drm_crtc *crtc;
10829         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10830         struct drm_plane *plane;
10831         struct drm_plane_state *old_plane_state, *new_plane_state;
10832         enum dc_status status;
10833         int ret, i;
10834         bool lock_and_validation_needed = false;
10835         struct dm_crtc_state *dm_old_crtc_state;
10836 #if defined(CONFIG_DRM_AMD_DC_DCN)
10837         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10838         struct drm_dp_mst_topology_state *mst_state;
10839         struct drm_dp_mst_topology_mgr *mgr;
10840 #endif
10841
10842         trace_amdgpu_dm_atomic_check_begin(state);
10843
10844         ret = drm_atomic_helper_check_modeset(dev, state);
10845         if (ret) {
10846                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10847                 goto fail;
10848         }
10849
10850         /* Check connector changes */
10851         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10852                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10853                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10854
10855                 /* Skip connectors that are disabled or part of modeset already. */
10856                 if (!old_con_state->crtc && !new_con_state->crtc)
10857                         continue;
10858
10859                 if (!new_con_state->crtc)
10860                         continue;
10861
10862                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10863                 if (IS_ERR(new_crtc_state)) {
10864                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10865                         ret = PTR_ERR(new_crtc_state);
10866                         goto fail;
10867                 }
10868
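                      /*
                       * The ABM level is programmed as part of the stream, so
                       * force the CRTC through the full (non-fast) commit path
                       * when it changes.
                       */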
10869                 if (dm_old_con_state->abm_level !=
10870                     dm_new_con_state->abm_level)
10871                         new_crtc_state->connectors_changed = true;
10872         }
10873
10874 #if defined(CONFIG_DRM_AMD_DC_DCN)
10875         if (dc_resource_is_dsc_encoding_supported(dc)) {
10876                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10877                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10878                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10879                                 if (ret) {
10880                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10881                                         goto fail;
10882                                 }
10883                         }
10884                 }
10885         }
10886 #endif
10887         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10888                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10889
10890                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10891                     !new_crtc_state->color_mgmt_changed &&
10892                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10893                     !dm_old_crtc_state->dsc_force_changed)
10894                         continue;
10895
10896                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10897                 if (ret) {
10898                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10899                         goto fail;
10900                 }
10901
10902                 if (!new_crtc_state->enable)
10903                         continue;
10904
10905                 ret = drm_atomic_add_affected_connectors(state, crtc);
10906                 if (ret) {
10907                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10908                         goto fail;
10909                 }
10910
10911                 ret = drm_atomic_add_affected_planes(state, crtc);
10912                 if (ret) {
10913                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10914                         goto fail;
10915                 }
10916
10917                 if (dm_old_crtc_state->dsc_force_changed)
10918                         new_crtc_state->mode_changed = true;
10919         }
10920
10921         /*
10922          * Add all primary and overlay planes on the CRTC to the state
10923          * whenever a plane is enabled to maintain correct z-ordering
10924          * and to enable fast surface updates.
10925          */
10926         drm_for_each_crtc(crtc, dev) {
10927                 bool modified = false;
10928
10929                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10930                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10931                                 continue;
10932
10933                         if (new_plane_state->crtc == crtc ||
10934                             old_plane_state->crtc == crtc) {
10935                                 modified = true;
10936                                 break;
10937                         }
10938                 }
10939
10940                 if (!modified)
10941                         continue;
10942
10943                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10944                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10945                                 continue;
10946
10947                         new_plane_state =
10948                                 drm_atomic_get_plane_state(state, plane);
10949
10950                         if (IS_ERR(new_plane_state)) {
10951                                 ret = PTR_ERR(new_plane_state);
10952                                 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
10953                                 goto fail;
10954                         }
10955                 }
10956         }
10957
10958         /* Remove existing planes if they are modified */
10959         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10960                 ret = dm_update_plane_state(dc, state, plane,
10961                                             old_plane_state,
10962                                             new_plane_state,
10963                                             false,
10964                                             &lock_and_validation_needed);
10965                 if (ret) {
10966                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10967                         goto fail;
10968                 }
10969         }
10970
10971         /* Disable all crtcs which require disable */
10972         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10973                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10974                                            old_crtc_state,
10975                                            new_crtc_state,
10976                                            false,
10977                                            &lock_and_validation_needed);
10978                 if (ret) {
10979                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
10980                         goto fail;
10981                 }
10982         }
10983
10984         /* Enable all crtcs which require enable */
10985         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10986                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10987                                            old_crtc_state,
10988                                            new_crtc_state,
10989                                            true,
10990                                            &lock_and_validation_needed);
10991                 if (ret) {
10992                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
10993                         goto fail;
10994                 }
10995         }
10996
10997         /* Add new/modified planes */
10998         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10999                 ret = dm_update_plane_state(dc, state, plane,
11000                                             old_plane_state,
11001                                             new_plane_state,
11002                                             true,
11003                                             &lock_and_validation_needed);
11004                 if (ret) {
11005                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11006                         goto fail;
11007                 }
11008         }
11009
11010         /* Run this here since we want to validate the streams we created */
11011         ret = drm_atomic_helper_check_planes(dev, state);
11012         if (ret) {
11013                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11014                 goto fail;
11015         }
11016
11017         /* Check cursor planes scaling */
11018         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11019                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11020                 if (ret) {
11021                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11022                         goto fail;
11023                 }
11024         }
11025
11026         if (state->legacy_cursor_update) {
11027                 /*
11028                  * This is a fast cursor update coming from the plane update
11029                  * helper, check if it can be done asynchronously for better
11030                  * performance.
11031                  */
11032                 state->async_update =
11033                         !drm_atomic_helper_async_check(dev, state);
11034
11035                 /*
11036                  * Skip the remaining global validation if this is an async
11037                  * update. Cursor updates can be done without affecting
11038                  * state or bandwidth calcs and this avoids the performance
11039                  * penalty of locking the private state object and
11040                  * allocating a new dc_state.
11041                  */
11042                 if (state->async_update)
11043                         return 0;
11044         }
11045
11046         /* Check scaling and underscan changes */
11047         /* TODO: Scaling-change validation was removed due to the inability to
11048          * commit a new stream into the context w/o causing a full reset. Need
11049          * to decide how to handle this.
11050          */
11051         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11052                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11053                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11054                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11055
11056                 /* Skip any modesets/resets */
11057                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11058                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11059                         continue;
11060
11061                 /* Skip anything that is not a scaling or underscan change */
11062                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11063                         continue;
11064
11065                 lock_and_validation_needed = true;
11066         }
11067
11068 #if defined(CONFIG_DRM_AMD_DC_DCN)
11069         /* set the slot info for each mst_state based on the link encoding format */
11070         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11071                 struct amdgpu_dm_connector *aconnector;
11072                 struct drm_connector *connector;
11073                 struct drm_connector_list_iter iter;
11074                 u8 link_coding_cap;
11075
11076                 if (!mgr->mst_state)
11077                         continue;
11078
11079                 drm_connector_list_iter_begin(dev, &iter);
11080                 drm_for_each_connector_iter(connector, &iter) {
11081                         int id = connector->index;
11082
11083                         if (id == mst_state->mgr->conn_base_id) {
11084                                 aconnector = to_amdgpu_dm_connector(connector);
11085                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11086                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11087
11088                                 break;
11089                         }
11090                 }
11091                 drm_connector_list_iter_end(&iter);
11093         }
11094 #endif
11095         /*
11096          * Streams and planes are reset when there are changes that affect
11097          * bandwidth. Anything that affects bandwidth needs to go through
11098          * DC global validation to ensure that the configuration can be applied
11099          * to hardware.
11100          *
11101          * We currently have to stall out here in atomic_check for outstanding
11102          * commits to finish in this case because our IRQ handlers reference
11103          * DRM state directly - we can end up disabling interrupts too early
11104          * if we don't.
11105          *
11106          * TODO: Remove this stall and drop DM state private objects.
11107          */
11108         if (lock_and_validation_needed) {
11109                 ret = dm_atomic_get_state(state, &dm_state);
11110                 if (ret) {
11111                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11112                         goto fail;
11113                 }
11114
11115                 ret = do_aquire_global_lock(dev, state);
11116                 if (ret) {
11117                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11118                         goto fail;
11119                 }
11120
11121 #if defined(CONFIG_DRM_AMD_DC_DCN)
11122                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11123                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
                              ret = -EINVAL;
11124                         goto fail;
11125                 }
11126
11127                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11128                 if (ret) {
11129                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11130                         goto fail;
11131                 }
11132 #endif
11133
11134                 /*
11135                  * Perform validation of the MST topology in the state:
11136                  * we need to perform the MST atomic check before calling
11137                  * dc_validate_global_state(), or there is a chance
11138                  * of getting stuck in an infinite loop and eventually hanging.
11139                  */
11140                 ret = drm_dp_mst_atomic_check(state);
11141                 if (ret) {
11142                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11143                         goto fail;
11144                 }
11145                 status = dc_validate_global_state(dc, dm_state->context, true);
11146                 if (status != DC_OK) {
11147                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11148                                          dc_status_to_str(status), status);
11149                         ret = -EINVAL;
11150                         goto fail;
11151                 }
11152         } else {
11153                 /*
11154                  * The commit is a fast update. Fast updates shouldn't change
11155                  * the DC context or affect global validation, and they can
11156                  * have their commit work done in parallel with other commits
11157                  * not touching the same resource. If we have a new DC context
11158                  * as part of the DM atomic state from validation, we need to
11159                  * free it and retain the existing one instead.
11160                  *
11161                  * Furthermore, since the DM atomic state only contains the DC
11162                  * context and can safely be annulled, we can free the state
11163                  * and clear the associated private object now to free
11164                  * some memory and avoid a possible use-after-free later.
11165                  */
11166
11167                 for (i = 0; i < state->num_private_objs; i++) {
11168                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11169
11170                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11171                                 int j = state->num_private_objs-1;
11172
11173                                 dm_atomic_destroy_state(obj,
11174                                                 state->private_objs[i].state);
11175
11176                                 /* If i is not at the end of the array then the
11177                                  * last element needs to be moved to where i was
11178                                  * before the array can safely be truncated.
11179                                  */
11180                                 if (i != j)
11181                                         state->private_objs[i] =
11182                                                 state->private_objs[j];
11183
11184                                 state->private_objs[j].ptr = NULL;
11185                                 state->private_objs[j].state = NULL;
11186                                 state->private_objs[j].old_state = NULL;
11187                                 state->private_objs[j].new_state = NULL;
11188
11189                                 state->num_private_objs = j;
11190                                 break;
11191                         }
11192                 }
11193         }
11194
11195         /* Store the overall update type for use later in atomic check. */
11196         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11197                 struct dm_crtc_state *dm_new_crtc_state =
11198                         to_dm_crtc_state(new_crtc_state);
11199
11200                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11201                                                          UPDATE_TYPE_FULL :
11202                                                          UPDATE_TYPE_FAST;
11203         }
11204
11205         /* Must be success (ret == 0) at this point */
11206         WARN_ON(ret);
11207
11208         trace_amdgpu_dm_atomic_check_finish(state, ret);
11209
11210         return ret;
11211
11212 fail:
11213         if (ret == -EDEADLK)
11214                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11215         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11216                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11217         else
11218                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11219
11220         trace_amdgpu_dm_atomic_check_finish(state, ret);
11221
11222         return ret;
11223 }
11224
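      /*
       * Read DPCD DP_DOWN_STREAM_PORT_COUNT and test the MSA-timing-par-
       * ignored bit: a sink that ignores MSA timing parameters can be driven
       * with varying vertical totals, which FreeSync over DP relies on.
       */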
11225 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11226                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11227 {
11228         uint8_t dpcd_data;
11229         bool capable = false;
11230
11231         if (amdgpu_dm_connector->dc_link &&
11232                 dm_helpers_dp_read_dpcd(
11233                                 NULL,
11234                                 amdgpu_dm_connector->dc_link,
11235                                 DP_DOWN_STREAM_PORT_COUNT,
11236                                 &dpcd_data,
11237                                 sizeof(dpcd_data))) {
11238                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11239         }
11240
11241         return capable;
11242 }
11243
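      /*
       * Send one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA
       * extension block to DMUB for parsing. The reply is either an ack for
       * the chunk or, once the block is complete, the parsed AMD VSDB
       * contents with the supported FreeSync refresh-rate range.
       */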
11244 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11245                 unsigned int offset,
11246                 unsigned int total_length,
11247                 uint8_t *data,
11248                 unsigned int length,
11249                 struct amdgpu_hdmi_vsdb_info *vsdb)
11250 {
11251         bool res;
11252         union dmub_rb_cmd cmd;
11253         struct dmub_cmd_send_edid_cea *input;
11254         struct dmub_cmd_edid_cea_output *output;
11255
11256         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11257                 return false;
11258
11259         memset(&cmd, 0, sizeof(cmd));
11260
11261         input = &cmd.edid_cea.data.input;
11262
11263         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11264         cmd.edid_cea.header.sub_type = 0;
11265         cmd.edid_cea.header.payload_bytes =
11266                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11267         input->offset = offset;
11268         input->length = length;
11269         input->total_length = total_length;
11270         memcpy(input->payload, data, length);
11271
11272         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11273         if (!res) {
11274                 DRM_ERROR("EDID CEA parser failed\n");
11275                 return false;
11276         }
11277
11278         output = &cmd.edid_cea.data.output;
11279
11280         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11281                 if (!output->ack.success) {
11282                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11283                                         output->ack.offset);
11284                 }
11285         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11286                 if (!output->amd_vsdb.vsdb_found)
11287                         return false;
11288
11289                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11290                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11291                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11292                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11293         } else {
11294                 DRM_WARN("Unknown EDID CEA parser results\n");
11295                 return false;
11296         }
11297
11298         return true;
11299 }
11300
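      /*
       * DMCU consumes the CEA extension block in 8-byte chunks; each chunk
       * is acked, and after the final chunk the firmware reports whether an
       * AMD VSDB with a FreeSync range was found.
       */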
11301 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11302                 uint8_t *edid_ext, int len,
11303                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11304 {
11305         int i;
11306
11307         /* send extension block to DMCU for parsing */
11308         for (i = 0; i < len; i += 8) {
11309                 bool res;
11310                 int offset;
11311
11312                 /* send 8 bytes at a time */
11313                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11314                         return false;
11315
11316                 if (i + 8 == len) {
11317                         /* EDID block send completed; expect the result */
11318                         int version, min_rate, max_rate;
11319
11320                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11321                         if (res) {
11322                                 /* amd vsdb found */
11323                                 vsdb_info->freesync_supported = 1;
11324                                 vsdb_info->amd_vsdb_version = version;
11325                                 vsdb_info->min_refresh_rate_hz = min_rate;
11326                                 vsdb_info->max_refresh_rate_hz = max_rate;
11327                                 return true;
11328                         }
11329                         /* not amd vsdb */
11330                         return false;
11331                 }
11332
11333                 /* check for ack */
11334                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11335                 if (!res)
11336                         return false;
11337         }
11338
11339         return false;
11340 }
11341
11342 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11343                 uint8_t *edid_ext, int len,
11344                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11345 {
11346         int i;
11347
11348         /* send extension block to DMUB for parsing */
11349         for (i = 0; i < len; i += 8) {
11350                 /* send 8 bytes at a time */
11351                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11352                         return false;
11353         }
11354
11355         return vsdb_info->freesync_supported;
11356 }
11357
11358 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11359                 uint8_t *edid_ext, int len,
11360                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11361 {
11362         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11363
11364         if (adev->dm.dmub_srv)
11365                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11366         else
11367                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11368 }
11369
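      /*
       * Find the CEA extension block in the EDID and have the display
       * firmware scan it for the AMD vendor-specific data block. Returns the
       * index of the CEA extension on success, -ENODEV otherwise.
       */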
11370 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11371                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11372 {
11373         uint8_t *edid_ext = NULL;
11374         int i;
11375         bool valid_vsdb_found = false;
11376
11377         /*----- drm_find_cea_extension() -----*/
11378         /* No EDID or EDID extensions */
11379         if (edid == NULL || edid->extensions == 0)
11380                 return -ENODEV;
11381
11382         /* Find CEA extension */
11383         for (i = 0; i < edid->extensions; i++) {
11384                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11385                 if (edid_ext[0] == CEA_EXT)
11386                         break;
11387         }
11388
11389         if (i == edid->extensions)
11390                 return -ENODEV;
11391
11392         /*----- cea_db_offsets() -----*/
11393         if (edid_ext[0] != CEA_EXT)
11394                 return -ENODEV;
11395
11396         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11397
11398         return valid_vsdb_found ? i : -ENODEV;
11399 }
11400
11401 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11402                                         struct edid *edid)
11403 {
11404         int i = 0;
11405         struct detailed_timing *timing;
11406         struct detailed_non_pixel *data;
11407         struct detailed_data_monitor_range *range;
11408         struct amdgpu_dm_connector *amdgpu_dm_connector =
11409                         to_amdgpu_dm_connector(connector);
11410         struct dm_connector_state *dm_con_state = NULL;
11411         struct dc_sink *sink;
11412
11413         struct drm_device *dev = connector->dev;
11414         struct amdgpu_device *adev = drm_to_adev(dev);
11415         bool freesync_capable = false;
11416         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11417
11418         if (!connector->state) {
11419                 DRM_ERROR("%s - Connector has no state\n", __func__);
11420                 goto update;
11421         }
11422
11423         sink = amdgpu_dm_connector->dc_sink ?
11424                 amdgpu_dm_connector->dc_sink :
11425                 amdgpu_dm_connector->dc_em_sink;
11426
11427         if (!edid || !sink) {
11428                 dm_con_state = to_dm_connector_state(connector->state);
11429
11430                 amdgpu_dm_connector->min_vfreq = 0;
11431                 amdgpu_dm_connector->max_vfreq = 0;
11432                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11433                 connector->display_info.monitor_range.min_vfreq = 0;
11434                 connector->display_info.monitor_range.max_vfreq = 0;
11435                 freesync_capable = false;
11436
11437                 goto update;
11438         }
11439
11440         dm_con_state = to_dm_connector_state(connector->state);
11441
11442         if (!adev->dm.freesync_module)
11443                 goto update;
11444
11446         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11447                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11448                 bool edid_check_required = false;
11449
11450                 if (edid) {
11451                         edid_check_required = is_dp_capable_without_timing_msa(
11452                                                 adev->dm.dc,
11453                                                 amdgpu_dm_connector);
11454                 }
11455
11456                 if (edid_check_required && (edid->version > 1 ||
11457                     (edid->version == 1 && edid->revision > 1))) {
11458                         for (i = 0; i < 4; i++) {
11459
11460                                 timing  = &edid->detailed_timings[i];
11461                                 data    = &timing->data.other_data;
11462                                 range   = &data->data.range;
11463                                 /*
11464                                  * Check if monitor has continuous frequency mode
11465                                  */
11466                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11467                                         continue;
11468                                 /*
11469                                  * Check for flag range limits only. If flag == 1 then
11470                                  * no additional timing information provided.
11471                                  * Default GTF, GTF Secondary curve and CVT are not
11472                                  * supported
11473                                  */
11474                                 if (range->flags != 1)
11475                                         continue;
11476
11477                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11478                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11479                                 amdgpu_dm_connector->pixel_clock_mhz =
11480                                         range->pixel_clock_mhz * 10;
11481
11482                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11483                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11484
11485                                 break;
11486                         }
11487
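                              /* Require a usable VRR window wider than 10 Hz
                               * before advertising FreeSync. */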
11488                         if (amdgpu_dm_connector->max_vfreq -
11489                             amdgpu_dm_connector->min_vfreq > 10) {
11490                                 freesync_capable = true;
11491                         }
11493                 }
11494         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11495                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11496                 if (i >= 0 && vsdb_info.freesync_supported) {
11497                         timing  = &edid->detailed_timings[i];
11498                         data    = &timing->data.other_data;
11499
11500                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11501                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11502                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11503                                 freesync_capable = true;
11504
11505                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11506                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11507                 }
11508         }
11509
11510 update:
11511         if (dm_con_state)
11512                 dm_con_state->freesync_capable = freesync_capable;
11513
11514         if (connector->vrr_capable_property)
11515                 drm_connector_set_vrr_capable_property(connector,
11516                                                        freesync_capable);
11517 }
11518
11519 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11520 {
11521         struct amdgpu_device *adev = drm_to_adev(dev);
11522         struct dc *dc = adev->dm.dc;
11523         int i;
11524
11525         mutex_lock(&adev->dm.dc_lock);
11526         if (dc->current_state) {
11527                 for (i = 0; i < dc->current_state->stream_count; ++i)
11528                         dc->current_state->streams[i]
11529                                 ->triggered_crtc_reset.enabled =
11530                                 adev->dm.force_timing_sync;
11531
11532                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11533                 dc_trigger_sync(dc, dc->current_state);
11534         }
11535         mutex_unlock(&adev->dm.dc_lock);
11536 }
11537
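      /*
       * Register access helpers handed to DC; every write and read is also
       * logged through the amdgpu_dc_wreg/amdgpu_dc_rreg tracepoints.
       */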
11538 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11539                        uint32_t value, const char *func_name)
11540 {
11541 #ifdef DM_CHECK_ADDR_0
11542         if (address == 0) {
11543                 DC_ERR("invalid register write. address = 0\n");
11544                 return;
11545         }
11546 #endif
11547         cgs_write_register(ctx->cgs_device, address, value);
11548         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11549 }
11550
11551 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11552                           const char *func_name)
11553 {
11554         uint32_t value;
11555 #ifdef DM_CHECK_ADDR_0
11556         if (address == 0) {
11557                 DC_ERR("invalid register read; address = 0\n");
11558                 return 0;
11559         }
11560 #endif
11561
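              /* Reads cannot be serviced while a DMUB register-write gather is
               * in progress; warn and return a dummy value instead. */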
11562         if (ctx->dmub_srv &&
11563             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11564             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11565                 ASSERT(false);
11566                 return 0;
11567         }
11568
11569         value = cgs_read_register(ctx->cgs_device, address);
11570
11571         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11572
11573         return value;
11574 }
11575
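      /*
       * Translate a DMUB async-to-sync status into what the AUX / SET_CONFIG
       * callers expect: on success AUX returns the reply length and
       * SET_CONFIG returns 0, while any failure returns -1 with a more
       * specific error code stored in *operation_result.
       */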
11576 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11577         uint8_t status_type, uint32_t *operation_result)
11578 {
11579         struct amdgpu_device *adev = ctx->driver_context;
11580         int return_status = -1;
11581         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11582
11583         if (is_cmd_aux) {
11584                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11585                         return_status = p_notify->aux_reply.length;
11586                         *operation_result = p_notify->result;
11587                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11588                         *operation_result = AUX_RET_ERROR_TIMEOUT;
11589                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11590                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11591                 } else {
11592                         *operation_result = AUX_RET_ERROR_UNKNOWN;
11593                 }
11594         } else {
11595                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11596                         return_status = 0;
11597                         *operation_result = p_notify->sc_status;
11598                 } else {
11599                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11600                 }
11601         }
11602
11603         return return_status;
11604 }
11605
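      /*
       * Issue an AUX or SET_CONFIG request to DMUB asynchronously, then block
       * (for up to 10 s) on dmub_aux_transfer_done, which is completed when
       * the corresponding DMUB notification arrives - giving callers a
       * synchronous interface on top of the asynchronous firmware one.
       */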
11606 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11607         unsigned int link_index, void *cmd_payload, void *operation_result)
11608 {
11609         struct amdgpu_device *adev = ctx->driver_context;
11610         int ret = 0;
11611
11612         if (is_cmd_aux) {
11613                 dc_process_dmub_aux_transfer_async(ctx->dc,
11614                         link_index, (struct aux_payload *)cmd_payload);
11615         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11616                                         (struct set_config_cmd_payload *)cmd_payload,
11617                                         adev->dm.dmub_notify)) {
11618                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11619                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11620                                         (uint32_t *)operation_result);
11621         }
11622
11623         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11624         if (ret == 0) {
11625                 DRM_ERROR("wait_for_completion_timeout() timed out!\n");
11626                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11627                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11628                                 (uint32_t *)operation_result);
11629         }
11630
11631         if (is_cmd_aux) {
11632                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11633                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11634
11635                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11636                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11637                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11638                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11639                                        adev->dm.dmub_notify->aux_reply.length);
11640                         }
11641                 }
11642         }
11643
11644         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11645                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11646                         (uint32_t *)operation_result);
11647 }
11648
11649 /*
11650  * Check whether seamless boot is supported.
11651  *
11652  * So far we only support seamless boot on CHIP_VANGOGH.
11653  * If everything goes well, we may consider expanding
11654  * seamless boot to other ASICs.
11655  */
11656 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11657 {
11658         switch (adev->asic_type) {
11659         case CHIP_VANGOGH:
11660                 if (!adev->mman.keep_stolen_vga_memory)
11661                         return true;
11662                 break;
11663         default:
11664                 break;
11665         }
11666
11667         return false;
11668 }