/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"

#include "dc_link_dp.h"

#include "dce/dce_i2c.h"

#define DC_LOGGER \
        dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct.  One per driver.  Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints).  Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display.  Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
 * (the display directly attached).  It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver.  Represents the hw blocks not in the
 * main pipeline.  Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed.  There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display.  Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool.  Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context.  Represents the
 * internal hardware pipeline components.  Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */

/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
        if (new > *original)
                *original = new;
}

static void destroy_links(struct dc *dc)
{
        uint32_t i;

        for (i = 0; i < dc->link_count; i++) {
                if (dc->links[i] != NULL)
                        link_destroy(&dc->links[i]);
        }
}

static bool create_links(
                struct dc *dc,
                uint32_t num_virtual_links)
{
        int i;
        int connectors_num;
        struct dc_bios *bios = dc->ctx->dc_bios;

        dc->link_count = 0;

        connectors_num = bios->funcs->get_connectors_number(bios);

        if (connectors_num > ENUM_ID_COUNT) {
                dm_error(
                        "DC: Number of connectors %d exceeds maximum of %d!\n",
                        connectors_num,
                        ENUM_ID_COUNT);
                return false;
        }

        if (connectors_num == 0 && num_virtual_links == 0) {
                dm_error("DC: Number of connectors is zero!\n");
        }

        dm_output_to_console(
                "DC: %s: connectors_num: physical:%d, virtual:%d\n",
                __func__,
                connectors_num,
                num_virtual_links);

        for (i = 0; i < connectors_num; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                link_init_params.ctx = dc->ctx;
                /* next BIOS object table connector */
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link = link_create(&link_init_params);

                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        for (i = 0; i < num_virtual_links; i++) {
                struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
                struct encoder_init_data enc_init = {0};

                if (link == NULL) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_index = dc->link_count;
                dc->links[dc->link_count] = link;
                dc->link_count++;

                link->ctx = dc->ctx;
                link->dc = dc;
                link->connector_signal = SIGNAL_TYPE_VIRTUAL;
                link->link_id.type = OBJECT_TYPE_CONNECTOR;
                link->link_id.id = CONNECTOR_ID_VIRTUAL;
                link->link_id.enum_id = ENUM_ID_1;
                link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

                if (!link->link_enc) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_status.dpcd_caps = &link->dpcd_caps;

                enc_init.ctx = dc->ctx;
                enc_init.channel = CHANNEL_ID_UNKNOWN;
                enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
                enc_init.transmitter = TRANSMITTER_UNKNOWN;
                enc_init.connector = link->link_id;
                enc_init.encoder.type = OBJECT_TYPE_ENCODER;
                enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
                enc_init.encoder.enum_id = ENUM_ID_1;
                virtual_link_encoder_construct(link->link_enc, &enc_init);
        }

        return true;

failed_alloc:
        return false;
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
        return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
        kfree(*perf_trace);
        *perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax() - Adjust the DRR vertical total range for a stream.
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of DRR (Dynamic Refresh Rate),
 * a power-saving feature that reduces the panel refresh rate while the
 * on-screen content is static.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_crtc_timing_adjust *adjust)
{
        int i = 0;
        bool ret = false;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.stream_enc) {
                        pipe->stream->adjust = *adjust;
                        dc->hwss.set_drr(&pipe,
                                        1,
                                        adjust->v_total_min,
                                        adjust->v_total_max);

                        ret = true;
                }
        }
        return ret;
}
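
/*
 * Illustrative usage sketch (not part of the driver): a display manager that
 * wants to enable DRR over a refresh range could call the function above
 * roughly as follows. The stream pointer and extended_v_total value are
 * hypothetical inputs supplied by the caller; only v_total_min/v_total_max
 * are fields actually consumed above.
 *
 *	struct dc_crtc_timing_adjust adjust = { 0 };
 *
 *	adjust.v_total_min = stream->timing.v_total;	// nominal (highest) refresh rate
 *	adjust.v_total_max = extended_v_total;		// stretched total, lowest refresh rate
 *	if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *		DC_LOG_WARNING("stream not found, DRR range not applied\n");
 */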

bool dc_stream_get_crtc_position(struct dc *dc,
                struct dc_stream_state **streams, int num_streams,
                unsigned int *v_pos, unsigned int *nom_v_pos)
{
        /* TODO: Support multiple streams */
        const struct dc_stream_state *stream = streams[0];
        int i = 0;
        bool ret = false;
        struct crtc_position position;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe =
                                &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.stream_enc) {
                        dc->hwss.get_position(&pipe, 1, &position);

                        *v_pos = position.vertical_count;
                        *nom_v_pos = position.nominal_vcount;
                        ret = true;
                }
        }
        return ret;
}

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the crc.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
                             bool enable, bool continuous)
{
        int i;
        struct pipe_ctx *pipe;
        struct crc_params param;
        struct timing_generator *tg;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream)
                        break;
        }
        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        /* Always capture the full frame */
        param.windowa_x_start = 0;
        param.windowa_y_start = 0;
        param.windowa_x_end = pipe->stream->timing.h_addressable;
        param.windowa_y_end = pipe->stream->timing.v_addressable;
        param.windowb_x_start = 0;
        param.windowb_y_start = 0;
        param.windowb_x_end = pipe->stream->timing.h_addressable;
        param.windowb_y_end = pipe->stream->timing.v_addressable;

        /* Default to the union of both windows */
        param.selection = UNION_WINDOW_A_B;
        param.continuous_mode = continuous;
        param.enable = enable;

        tg = pipe->stream_res.tg;

        /* Only call if supported */
        if (tg->funcs->configure_crc)
                return tg->funcs->configure_crc(tg, &param);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the R/Cr channel is stored here.
 * @g_y: CRC value for the G/Y channel is stored here.
 * @b_cb: CRC value for the B/Cb channel is stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
                       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
        int i;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream)
                        break;
        }
        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        tg = pipe->stream_res.tg;

        if (tg->funcs->get_crc)
                return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}
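
/*
 * Illustrative usage sketch (not part of the driver): continuous CRC capture
 * on a stream, with one read-back. The surrounding control flow is
 * hypothetical; only the two calls below are the API shown above.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (!dc_stream_configure_crc(dc, stream, true, true))
 *		return;		// timing generator has no CRC support
 *
 *	// later, e.g. once per frame:
 *	if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		DC_LOG_DC("CRC: %08x %08x %08x\n", r_cr, g_y, b_cb);
 */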

void dc_stream_set_dither_option(struct dc_stream_state *stream,
                enum dc_dither_option option)
{
        struct bit_depth_reduction_params params;
        struct dc_link *link = stream->sink->link;
        struct pipe_ctx *pipes = NULL;
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
                if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
                                stream) {
                        pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        break;
                }
        }

        if (!pipes)
                return;
        if (option > DITHER_OPTION_MAX)
                return;

        stream->dither_option = option;

        memset(&params, 0, sizeof(params));
        resource_build_bit_depth_reduction_params(stream, &params);
        stream->bit_depth_params = params;

        if (pipes->plane_res.xfm &&
            pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
                pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
                        pipes->plane_res.xfm,
                        pipes->plane_res.scl_data.lb_params.depth,
                        &stream->bit_depth_params);
        }

        pipes->stream_res.opp->funcs->
                opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
        int i = 0;
        bool ret = false;
        struct pipe_ctx *pipes;

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_gamut_remap(pipes);
                        ret = true;
                }
        }

        return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
        int i = 0;
        bool ret = false;
        struct pipe_ctx *pipes;

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {

                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_output_csc(dc,
                                        pipes,
                                        stream->output_color_space,
                                        stream->csc_color_matrix.matrix,
                                        pipes->plane_res.hubp->opp_id);
                        ret = true;
                }
        }

        return ret;
}

void dc_stream_set_static_screen_events(struct dc *dc,
                struct dc_stream_state **streams,
                int num_streams,
                const struct dc_static_screen_events *events)
{
        int i = 0;
        int j = 0;
        struct pipe_ctx *pipes_affected[MAX_PIPES];
        int num_pipes_affected = 0;

        for (i = 0; i < num_streams; i++) {
                struct dc_stream_state *stream = streams[i];

                for (j = 0; j < MAX_PIPES; j++) {
                        if (dc->current_state->res_ctx.pipe_ctx[j].stream
                                        == stream) {
                                pipes_affected[num_pipes_affected++] =
                                                &dc->current_state->res_ctx.pipe_ctx[j];
                        }
                }
        }

        dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}

void dc_link_set_drive_settings(struct dc *dc,
                                struct link_training_settings *lt_settings,
                                const struct dc_link *link)
{
        int i;

        for (i = 0; i < dc->link_count; i++) {
                if (dc->links[i] == link)
                        break;
        }

        if (i >= dc->link_count)
                ASSERT_CRITICAL(false);

        dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
}

void dc_link_perform_link_training(struct dc *dc,
                                   struct dc_link_settings *link_setting,
                                   bool skip_video_pattern)
{
        int i;

        for (i = 0; i < dc->link_count; i++)
                dc_link_dp_perform_link_training(
                        dc->links[i],
                        link_setting,
                        skip_video_pattern);
}

void dc_link_set_preferred_link_settings(struct dc *dc,
                                         struct dc_link_settings *link_setting,
                                         struct dc_link *link)
{
        int i;
        struct pipe_ctx *pipe;
        struct dc_stream_state *link_stream;
        struct dc_link_settings store_settings = *link_setting;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream && pipe->stream->sink
                        && pipe->stream->sink->link) {
                        if (pipe->stream->sink->link == link)
                                break;
                }
        }

        /* Stream not found */
        if (i == MAX_PIPES)
                return;

        link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;

        link->preferred_link_setting = store_settings;
        if (link_stream)
                decide_link_settings(link_stream, &store_settings);

        if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
                (store_settings.link_rate != LINK_RATE_UNKNOWN))
                dp_retrain_link_dp_test(link, &store_settings, false);
}

void dc_link_enable_hpd(const struct dc_link *link)
{
        dc_link_dp_enable_hpd(link);
}

void dc_link_disable_hpd(const struct dc_link *link)
{
        dc_link_dp_disable_hpd(link);
}

void dc_link_set_test_pattern(struct dc_link *link,
                              enum dp_test_pattern test_pattern,
                              const struct link_training_settings *p_link_settings,
                              const unsigned char *p_custom_pattern,
                              unsigned int cust_pattern_size)
{
        if (link != NULL)
                dc_link_dp_set_test_pattern(
                        link,
                        test_pattern,
                        p_link_settings,
                        p_custom_pattern,
                        cust_pattern_size);
}

static void destruct(struct dc *dc)
{
        dc_release_state(dc->current_state);
        dc->current_state = NULL;

        destroy_links(dc);

        dc_destroy_resource_pool(dc);

        if (dc->ctx->gpio_service)
                dal_gpio_service_destroy(&dc->ctx->gpio_service);

        if (dc->ctx->i2caux)
                dal_i2caux_destroy(&dc->ctx->i2caux);

        if (dc->ctx->created_bios)
                dal_bios_parser_destroy(&dc->ctx->dc_bios);

        dc_perf_trace_destroy(&dc->ctx->perf_trace);

        kfree(dc->ctx);
        dc->ctx = NULL;

        kfree(dc->bw_vbios);
        dc->bw_vbios = NULL;

        kfree(dc->bw_dceip);
        dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
        kfree(dc->dcn_soc);
        dc->dcn_soc = NULL;

        kfree(dc->dcn_ip);
        dc->dcn_ip = NULL;

#endif
}

static bool construct(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;
        struct bw_calcs_dceip *dc_dceip;
        struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
        struct dcn_soc_bounding_box *dcn_soc;
        struct dcn_ip_params *dcn_ip;
#endif

        enum dce_version dc_version = DCE_VERSION_UNKNOWN;

        dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
        if (!dc_dceip) {
                dm_error("%s: failed to create dceip\n", __func__);
                goto fail;
        }

        dc->bw_dceip = dc_dceip;

        dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
        if (!dc_vbios) {
                dm_error("%s: failed to create vbios\n", __func__);
                goto fail;
        }

        dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
        dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
        if (!dcn_soc) {
                dm_error("%s: failed to create dcn_soc\n", __func__);
                goto fail;
        }

        dc->dcn_soc = dcn_soc;

        dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
        if (!dcn_ip) {
                dm_error("%s: failed to create dcn_ip\n", __func__);
                goto fail;
        }

        dc->dcn_ip = dcn_ip;
#endif

        dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
        if (!dc_ctx) {
                dm_error("%s: failed to create ctx\n", __func__);
                goto fail;
        }

        dc_ctx->cgs_device = init_params->cgs_device;
        dc_ctx->driver_context = init_params->driver;
        dc_ctx->dc = dc;
        dc_ctx->asic_id = init_params->asic_id;
        dc_ctx->dc_sink_id_count = 0;
        dc->ctx = dc_ctx;

        dc->current_state = dc_create_state();

        if (!dc->current_state) {
                dm_error("%s: failed to create validate ctx\n", __func__);
                goto fail;
        }

        /* Create logger */

        dc_ctx->dce_environment = init_params->dce_environment;

        dc_version = resource_parse_asic_id(init_params->asic_id);
        dc_ctx->dce_version = dc_version;

        /* Resource should construct all asic specific resources.
         * This should be the only place where we need to parse the asic id
         */
        if (init_params->vbios_override)
                dc_ctx->dc_bios = init_params->vbios_override;
        else {
                /* Create BIOS parser */
                struct bp_init_data bp_init_data;

                bp_init_data.ctx = dc_ctx;
                bp_init_data.bios = init_params->asic_id.atombios_base_address;

                dc_ctx->dc_bios = dal_bios_parser_create(
                                &bp_init_data, dc_version);

                if (!dc_ctx->dc_bios) {
                        ASSERT_CRITICAL(false);
                        goto fail;
                }

                dc_ctx->created_bios = true;
        }

        /* Create I2C AUX */
        dc_ctx->i2caux = dal_i2caux_create(dc_ctx);

        if (!dc_ctx->i2caux) {
                ASSERT_CRITICAL(false);
                goto fail;
        }

        dc_ctx->perf_trace = dc_perf_trace_create();
        if (!dc_ctx->perf_trace) {
                ASSERT_CRITICAL(false);
                goto fail;
        }

        /* Create GPIO service */
        dc_ctx->gpio_service = dal_gpio_service_create(
                        dc_version,
                        dc_ctx->dce_environment,
                        dc_ctx);

        if (!dc_ctx->gpio_service) {
                ASSERT_CRITICAL(false);
                goto fail;
        }

        dc->res_pool = dc_create_resource_pool(
                        dc,
                        init_params->num_virtual_links,
                        dc_version,
                        init_params->asic_id);
        if (!dc->res_pool)
                goto fail;

        dc_resource_state_construct(dc, dc->current_state);

        if (!create_links(dc, init_params->num_virtual_links))
                goto fail;

        return true;

fail:
        destruct(dc);
        return false;
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
        int i, j;
        struct dc_state *dangling_context = dc_create_state();
        struct dc_state *current_ctx;

        if (dangling_context == NULL)
                return;

        dc_resource_state_copy_construct(dc->current_state, dangling_context);

        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *old_stream =
                                dc->current_state->res_ctx.pipe_ctx[i].stream;
                bool should_disable = true;

                for (j = 0; j < context->stream_count; j++) {
                        if (old_stream == context->streams[j]) {
                                should_disable = false;
                                break;
                        }
                }
                if (should_disable && old_stream) {
                        dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
                        dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
                }
        }

        current_ctx = dc->current_state;
        dc->current_state = dangling_context;
        dc_release_state(current_ctx);
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
        struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
        unsigned int full_pipe_count;

        if (!dc)
                goto alloc_fail;

        if (!construct(dc, init_params))
                goto construct_fail;

        /* TODO: separate HW and SW initialization */
        dc->hwss.init_hw(dc);

        full_pipe_count = dc->res_pool->pipe_count;
        if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
                full_pipe_count--;
        dc->caps.max_streams = min(
                        full_pipe_count,
                        dc->res_pool->stream_enc_count);

        dc->caps.max_links = dc->link_count;
        dc->caps.max_audios = dc->res_pool->audio_count;
        dc->caps.linear_pitch_alignment = 64;

        /* Populate versioning information */
        dc->versions.dc_ver = DC_VER;

        if (dc->res_pool->dmcu != NULL)
                dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;

        dc->config = init_params->flags;

        dc->build_id = DC_BUILD_ID;

        DC_LOG_DC("Display Core initialized\n");

        return dc;

construct_fail:
        kfree(dc);

alloc_fail:
        return NULL;
}

void dc_destroy(struct dc **dc)
{
        destruct(*dc);
        kfree(*dc);
        *dc = NULL;
}
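
/*
 * Illustrative lifecycle sketch (not part of the driver): how a display
 * manager typically brings DC up and tears it down with the two calls above.
 * The fields populated here mirror what construct() consumes; the values
 * (driver_ctx, cgs_device, asic_id, env) are hypothetical placeholders
 * provided by the DM.
 *
 *	struct dc_init_data init_params = { 0 };
 *	struct dc *dc;
 *
 *	init_params.driver = driver_ctx;
 *	init_params.cgs_device = cgs_device;
 *	init_params.asic_id = asic_id;
 *	init_params.dce_environment = env;
 *	init_params.num_virtual_links = 0;
 *
 *	dc = dc_create(&init_params);
 *	if (!dc)
 *		return -ENOMEM;
 *	...
 *	dc_destroy(&dc);
 */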

static void enable_timing_multisync(
                struct dc *dc,
                struct dc_state *ctx)
{
        int i = 0, multisync_count = 0;
        int pipe_count = dc->res_pool->pipe_count;
        struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

        for (i = 0; i < pipe_count; i++) {
                if (!ctx->res_ctx.pipe_ctx[i].stream ||
                                !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
                        continue;
                if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
                        continue;
                multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
                multisync_count++;
        }

        if (multisync_count > 0) {
                dc->hwss.enable_per_frame_crtc_position_reset(
                        dc, multisync_count, multisync_pipes);
        }
}

static void program_timing_sync(
                struct dc *dc,
                struct dc_state *ctx)
{
        int i, j;
        int group_index = 0;
        int pipe_count = dc->res_pool->pipe_count;
        struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

        for (i = 0; i < pipe_count; i++) {
                if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
                        continue;

                unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
        }

        for (i = 0; i < pipe_count; i++) {
                int group_size = 1;
                struct pipe_ctx *pipe_set[MAX_PIPES];

                if (!unsynced_pipes[i])
                        continue;

                pipe_set[0] = unsynced_pipes[i];
                unsynced_pipes[i] = NULL;

                /* Add tg to the set, search rest of the tg's for ones with
                 * same timing, add all tgs with same timing to the group
                 */
                for (j = i + 1; j < pipe_count; j++) {
                        if (!unsynced_pipes[j])
                                continue;

                        if (resource_are_streams_timing_synchronizable(
                                        unsynced_pipes[j]->stream,
                                        pipe_set[0]->stream)) {
                                pipe_set[group_size] = unsynced_pipes[j];
                                unsynced_pipes[j] = NULL;
                                group_size++;
                        }
                }

                /* set first unblanked pipe as master */
                for (j = 0; j < group_size; j++) {
                        struct pipe_ctx *temp;

                        if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
                                if (j == 0)
                                        break;

                                temp = pipe_set[0];
                                pipe_set[0] = pipe_set[j];
                                pipe_set[j] = temp;
                                break;
                        }
                }

                /* remove any other unblanked pipes as they have already been synced */
                for (j = j + 1; j < group_size; j++) {
                        if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
                                group_size--;
                                pipe_set[j] = pipe_set[group_size];
                                j--;
                        }
                }

                if (group_size > 1) {
                        dc->hwss.enable_timing_synchronization(
                                dc, group_index, group_size, pipe_set);
                        group_index++;
                }
        }
}

static bool context_changed(
                struct dc *dc,
                struct dc_state *context)
{
        uint8_t i;

        if (context->stream_count != dc->current_state->stream_count)
                return true;

        for (i = 0; i < dc->current_state->stream_count; i++) {
                if (dc->current_state->streams[i] != context->streams[i])
                        return true;
        }

        return false;
}

bool dc_enable_stereo(
        struct dc *dc,
        struct dc_state *context,
        struct dc_stream_state *streams[],
        uint8_t stream_count)
{
        bool ret = true;
        int i, j;
        struct pipe_ctx *pipe;

        for (i = 0; i < MAX_PIPES; i++) {
                if (context != NULL)
                        pipe = &context->res_ctx.pipe_ctx[i];
                else
                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                for (j = 0; pipe && j < stream_count; j++) {
                        if (streams[j] && streams[j] == pipe->stream &&
                                dc->hwss.setup_stereo)
                                dc->hwss.setup_stereo(pipe, dc);
                }
        }

        return ret;
}

/*
 * Applies the given context to HW and copies it into the current context.
 * It is up to the caller to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
        struct dc_bios *dcb = dc->ctx->dc_bios;
        enum dc_status result = DC_ERROR_UNEXPECTED;
        struct pipe_ctx *pipe;
        int i, k, l;
        struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

        disable_dangling_plane(dc, context);

        for (i = 0; i < context->stream_count; i++)
                dc_streams[i] = context->streams[i];

        if (!dcb->funcs->is_accelerated_mode(dcb))
                dc->hwss.enable_accelerated_mode(dc, context);

        dc->hwss.prepare_bandwidth(dc, context);

        /* re-program planes for existing stream, in case we need to
         * free up plane resource for later use
         */
        for (i = 0; i < context->stream_count; i++) {
                if (context->streams[i]->mode_changed)
                        continue;

                dc->hwss.apply_ctx_for_surface(
                        dc, context->streams[i],
                        context->stream_status[i].plane_count,
                        context); /* use new pipe config in new context */
        }

        /* Program hardware */
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                pipe = &context->res_ctx.pipe_ctx[i];
                dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
        }

        result = dc->hwss.apply_ctx_to_hw(dc, context);

        if (result != DC_OK)
                return result;

        if (context->stream_count > 1) {
                enable_timing_multisync(dc, context);
                program_timing_sync(dc, context);
        }

        /* Program all planes within new context */
        for (i = 0; i < context->stream_count; i++) {
                const struct dc_sink *sink = context->streams[i]->sink;

                if (!context->streams[i]->mode_changed)
                        continue;

                dc->hwss.apply_ctx_for_surface(
                                dc, context->streams[i],
                                context->stream_status[i].plane_count,
                                context);

                /*
                 * enable stereo
                 * TODO rework dc_enable_stereo call to work with validation sets?
                 */
                for (k = 0; k < MAX_PIPES; k++) {
                        pipe = &context->res_ctx.pipe_ctx[k];

                        for (l = 0; pipe && l < context->stream_count; l++) {
                                if (context->streams[l] &&
                                        context->streams[l] == pipe->stream &&
                                        dc->hwss.setup_stereo)
                                        dc->hwss.setup_stereo(pipe, dc);
                        }
                }

                CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
                                context->streams[i]->timing.h_addressable,
                                context->streams[i]->timing.v_addressable,
                                context->streams[i]->timing.h_total,
                                context->streams[i]->timing.v_total,
                                context->streams[i]->timing.pix_clk_khz);
        }

        dc_enable_stereo(dc, context, dc_streams, context->stream_count);

        /* pplib is notified if disp_num changed */
        dc->hwss.optimize_bandwidth(dc, context);

        dc_release_state(dc->current_state);

        dc->current_state = context;

        dc_retain_state(dc->current_state);

        return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
        enum dc_status result = DC_ERROR_UNEXPECTED;
        int i;

        if (!context_changed(dc, context))
                return DC_OK;

        DC_LOG_DC("%s: %d streams\n",
                                __func__, context->stream_count);

        for (i = 0; i < context->stream_count; i++) {
                struct dc_stream_state *stream = context->streams[i];

                dc_stream_log(dc, stream);
        }

        result = dc_commit_state_no_check(dc, context);

        return (result == DC_OK);
}

bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
        int i;
        struct dc_state *context = dc->current_state;

        post_surface_trace(dc);

        for (i = 0; i < dc->res_pool->pipe_count; i++)
                if (context->res_ctx.pipe_ctx[i].stream == NULL ||
                    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
                        context->res_ctx.pipe_ctx[i].pipe_idx = i;
                        dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
                }

        dc->optimized_required = false;

        dc->hwss.optimize_bandwidth(dc, context);
        return true;
}

struct dc_state *dc_create_state(void)
{
        struct dc_state *context = kzalloc(sizeof(struct dc_state),
                                           GFP_KERNEL);

        if (!context)
                return NULL;

        kref_init(&context->refcount);
        return context;
}

void dc_retain_state(struct dc_state *context)
{
        kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
        struct dc_state *context = container_of(kref, struct dc_state, refcount);

        dc_resource_state_destruct(context);
        kfree(context);
}

void dc_release_state(struct dc_state *context)
{
        kref_put(&context->refcount, dc_state_free);
}
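
/*
 * Illustrative refcount sketch (not part of the driver): dc_state objects are
 * reference counted by the three helpers above. A caller that builds a
 * private context owns the reference returned by dc_create_state() and drops
 * it with dc_release_state(); anything that keeps an extra pointer (as
 * dc_commit_state_no_check() does for dc->current_state) takes its own
 * reference with dc_retain_state() first.
 *
 *	struct dc_state *context = dc_create_state();
 *
 *	if (!context)
 *		return false;
 *	dc_resource_state_copy_construct(dc->current_state, context);
 *	...				// mutate and commit the context
 *	dc_release_state(context);	// drop the creator's reference
 */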

static bool is_surface_in_context(
                const struct dc_state *context,
                const struct dc_plane_state *plane_state)
{
        int j;

        for (j = 0; j < MAX_PIPES; j++) {
                const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

                if (plane_state == pipe_ctx->plane_state) {
                        return true;
                }
        }

        return false;
}

static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
        union surface_update_flags *update_flags = &u->surface->update_flags;

        if (!u->plane_info)
                return UPDATE_TYPE_FAST;

        if (u->plane_info->color_space != u->surface->color_space)
                update_flags->bits.color_space_change = 1;

        if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
                update_flags->bits.horizontal_mirror_change = 1;

        if (u->plane_info->rotation != u->surface->rotation)
                update_flags->bits.rotation_change = 1;

        if (u->plane_info->format != u->surface->format)
                update_flags->bits.pixel_format_change = 1;

        if (u->plane_info->stereo_format != u->surface->stereo_format)
                update_flags->bits.stereo_format_change = 1;

        if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
                update_flags->bits.per_pixel_alpha_change = 1;

        if (u->plane_info->global_alpha_value != u->surface->global_alpha_value)
                update_flags->bits.global_alpha_change = 1;

        if (u->plane_info->dcc.enable != u->surface->dcc.enable
                        || u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
                        || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
                update_flags->bits.dcc_change = 1;

        if (resource_pixel_format_to_bpp(u->plane_info->format) !=
                        resource_pixel_format_to_bpp(u->surface->format))
                /* different bytes per element will require full bandwidth
                 * and DML calculation
                 */
                update_flags->bits.bpp_change = 1;

        if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
                        sizeof(union dc_tiling_info)) != 0) {
                update_flags->bits.swizzle_change = 1;
                /* todo: below are HW dependent, we should add a hook to
                 * DCE/N resource and validated there.
                 */
                if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
                        /* swizzled mode requires RQ to be setup properly,
                         * thus need to run DML to calculate RQ settings
                         */
                        update_flags->bits.bandwidth_change = 1;
        }

        if (update_flags->bits.rotation_change
                        || update_flags->bits.stereo_format_change
                        || update_flags->bits.pixel_format_change
                        || update_flags->bits.bpp_change
                        || update_flags->bits.bandwidth_change
                        || update_flags->bits.output_tf_change)
                return UPDATE_TYPE_FULL;

        return UPDATE_TYPE_MED;
}

static enum surface_update_type get_scaling_info_update_type(
                const struct dc_surface_update *u)
{
        union surface_update_flags *update_flags = &u->surface->update_flags;

        if (!u->scaling_info)
                return UPDATE_TYPE_FAST;

        if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
                        || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
                        || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
                        || u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
                update_flags->bits.scaling_change = 1;

                if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
                        || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
                                && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
                                        || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
                        /* Making dst rect smaller requires a bandwidth change */
                        update_flags->bits.bandwidth_change = 1;
        }

        if (u->scaling_info->src_rect.width != u->surface->src_rect.width
                || u->scaling_info->src_rect.height != u->surface->src_rect.height) {

                update_flags->bits.scaling_change = 1;
                if (u->scaling_info->src_rect.width > u->surface->src_rect.width
                                && u->scaling_info->src_rect.height > u->surface->src_rect.height)
                        /* Making src rect bigger requires a bandwidth change */
                        update_flags->bits.clock_change = 1;
        }

        if (u->scaling_info->src_rect.x != u->surface->src_rect.x
                        || u->scaling_info->src_rect.y != u->surface->src_rect.y
                        || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
                        || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
                        || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
                        || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
                update_flags->bits.position_change = 1;

        if (update_flags->bits.clock_change
                        || update_flags->bits.bandwidth_change)
                return UPDATE_TYPE_FULL;

        if (update_flags->bits.scaling_change
                        || update_flags->bits.position_change)
                return UPDATE_TYPE_MED;

        return UPDATE_TYPE_FAST;
}

static enum surface_update_type det_surface_update(const struct dc *dc,
                const struct dc_surface_update *u)
{
        const struct dc_state *context = dc->current_state;
        enum surface_update_type type;
        enum surface_update_type overall_type = UPDATE_TYPE_FAST;
        union surface_update_flags *update_flags = &u->surface->update_flags;

        update_flags->raw = 0; // Reset all flags

        if (!is_surface_in_context(context, u->surface)) {
                update_flags->bits.new_plane = 1;
                return UPDATE_TYPE_FULL;
        }

        type = get_plane_info_update_type(u);
        elevate_update_type(&overall_type, type);

        type = get_scaling_info_update_type(u);
        elevate_update_type(&overall_type, type);

        if (u->in_transfer_func)
                update_flags->bits.in_transfer_func_change = 1;

        if (u->input_csc_color_matrix)
                update_flags->bits.input_csc_change = 1;

        if (u->coeff_reduction_factor)
                update_flags->bits.coeff_reduction_change = 1;

        if (u->gamma) {
                enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

                if (u->plane_info)
                        format = u->plane_info->format;
                else if (u->surface)
                        format = u->surface->format;

                if (dce_use_lut(format))
                        update_flags->bits.gamma_change = 1;
        }

        if (update_flags->bits.in_transfer_func_change) {
                type = UPDATE_TYPE_MED;
                elevate_update_type(&overall_type, type);
        }

        if (update_flags->bits.input_csc_change
                        || update_flags->bits.coeff_reduction_change
                        || update_flags->bits.gamma_change) {
                type = UPDATE_TYPE_FULL;
                elevate_update_type(&overall_type, type);
        }

        return overall_type;
}

static enum surface_update_type check_update_surfaces_for_stream(
                struct dc *dc,
                struct dc_surface_update *updates,
                int surface_count,
                struct dc_stream_update *stream_update,
                const struct dc_stream_status *stream_status)
{
        int i;
        enum surface_update_type overall_type = UPDATE_TYPE_FAST;

        if (stream_status == NULL || stream_status->plane_count != surface_count)
                return UPDATE_TYPE_FULL;

        /* some stream updates require passive update */
        if (stream_update) {
                if ((stream_update->src.height != 0) &&
                                (stream_update->src.width != 0))
                        return UPDATE_TYPE_FULL;

                if ((stream_update->dst.height != 0) &&
                                (stream_update->dst.width != 0))
                        return UPDATE_TYPE_FULL;

                if (stream_update->out_transfer_func)
                        return UPDATE_TYPE_FULL;

                if (stream_update->abm_level)
                        return UPDATE_TYPE_FULL;

                if (stream_update->dpms_off)
                        return UPDATE_TYPE_FULL;
        }

        for (i = 0; i < surface_count; i++) {
                enum surface_update_type type =
                                det_surface_update(dc, &updates[i]);

                if (type == UPDATE_TYPE_FULL)
                        return type;

                elevate_update_type(&overall_type, type);
        }

        return overall_type;
}

/**
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
                struct dc *dc,
                struct dc_surface_update *updates,
                int surface_count,
                struct dc_stream_update *stream_update,
                const struct dc_stream_status *stream_status)
{
        int i;
        enum surface_update_type type;

        for (i = 0; i < surface_count; i++)
                updates[i].surface->update_flags.raw = 0;

        type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
        if (type == UPDATE_TYPE_FULL)
                for (i = 0; i < surface_count; i++)
                        updates[i].surface->update_flags.raw = 0xFFFFFFFF;

        return type;
}
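
/*
 * Illustrative usage sketch (not part of the driver): a caller classifies a
 * set of surface updates before deciding how to program them. stream_status
 * describes the currently committed planes for the stream (see
 * stream_get_status() below); the branch bodies are placeholders.
 *
 *	enum surface_update_type type;
 *
 *	type = dc_check_update_surfaces_for_stream(dc, srf_updates,
 *			surface_count, stream_update, stream_status);
 *	if (type == UPDATE_TYPE_FULL) {
 *		// full update: global validation and bandwidth reprogramming
 *	} else {
 *		// fast/medium update: per-surface programming only
 *	}
 */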
1425
1426 static struct dc_stream_status *stream_get_status(
1427         struct dc_state *ctx,
1428         struct dc_stream_state *stream)
1429 {
1430         uint8_t i;
1431
1432         for (i = 0; i < ctx->stream_count; i++) {
1433                 if (stream == ctx->streams[i]) {
1434                         return &ctx->stream_status[i];
1435                 }
1436         }
1437
1438         return NULL;
1439 }
1440
1441 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1442
1443 static void commit_planes_do_stream_update(struct dc *dc,
1444                 struct dc_stream_state *stream,
1445                 struct dc_stream_update *stream_update,
1446                 enum surface_update_type update_type,
1447                 struct dc_state *context)
1448 {
1449         int j;
1450
1451         // Stream updates
1452         for (j = 0; j < dc->res_pool->pipe_count; j++) {
1453                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1454
1455                 if (!pipe_ctx->top_pipe &&
1456                         pipe_ctx->stream &&
1457                         pipe_ctx->stream == stream) {
1458
1459                         /* Fast update*/
1460                         // VRR program can be done as part of FAST UPDATE
1461                         if (stream_update->adjust)
1462                                 dc->hwss.set_drr(&pipe_ctx, 1,
1463                                         stream_update->adjust->v_total_min,
1464                                         stream_update->adjust->v_total_max);
1465
1466                         if (stream_update->periodic_fn_vsync_delta &&
1467                                         pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
1468                                 pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
1469                                         pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
1470                                         pipe_ctx->stream->periodic_fn_vsync_delta);
1471
1472                         if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
1473                                         stream_update->vrr_infopacket ||
1474                                         stream_update->vsc_infopacket) {
1475                                 resource_build_info_frame(pipe_ctx);
1476                                 dc->hwss.update_info_frame(pipe_ctx);
1477                         }
1478
1479                         if (stream_update->gamut_remap)
1480                                 dc_stream_set_gamut_remap(dc, stream);
1481
1482                         if (stream_update->output_csc_transform)
1483                                 dc_stream_program_csc_matrix(dc, stream);
1484
1485                         if (stream_update->dither_option) {
1486                                 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
1487                                                                         &pipe_ctx->stream->bit_depth_params);
1488                                 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
1489                                                 &stream->bit_depth_params,
1490                                                 &stream->clamping);
1491                         }
1492
1493                         /* Full (front end) update only beyond this point */
1494                         if (update_type == UPDATE_TYPE_FAST)
1495                                 continue;
1496
1497                         if (stream_update->dpms_off) {
1498                                 if (*stream_update->dpms_off) {
1499                                         core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
1500                                         dc->hwss.optimize_bandwidth(dc, dc->current_state);
1501                                 } else {
1502                                         dc->hwss.prepare_bandwidth(dc, dc->current_state);
1503                                         core_link_enable_stream(dc->current_state, pipe_ctx);
1504                                 }
1505                         }
1506
1507                         if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
1508                                 if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
1509                                         // if the OTG provides is_blanked(), only program ABM when the CRTC is not blanked
1510                                         if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
1511                                                 pipe_ctx->stream_res.abm->funcs->set_abm_level(
1512                                                         pipe_ctx->stream_res.abm, stream->abm_level);
1513                                 } else
1514                                         pipe_ctx->stream_res.abm->funcs->set_abm_level(
1515                                                 pipe_ctx->stream_res.abm, stream->abm_level);
1516                         }
1517                 }
1518         }
1519 }
1520
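/*
 * Program @srf_updates for @stream against @context.  Full updates prepare
 * bandwidth first; anything above a fast update reprograms the front end via
 * apply_ctx_for_surface(); fast updates only flip plane addresses, with the
 * top pipe locked so the address updates latch together.  With no surfaces
 * the stream is simply blanked.
 */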
1521 static void commit_planes_for_stream(struct dc *dc,
1522                 struct dc_surface_update *srf_updates,
1523                 int surface_count,
1524                 struct dc_stream_state *stream,
1525                 struct dc_stream_update *stream_update,
1526                 enum surface_update_type update_type,
1527                 struct dc_state *context)
1528 {
1529         int i, j;
1530         struct pipe_ctx *top_pipe_to_program = NULL;
1531
1532         if (update_type == UPDATE_TYPE_FULL) {
1533                 dc->hwss.prepare_bandwidth(dc, context);
1534                 context_clock_trace(dc, context);
1535         }
1536
1537         // Stream updates
1538         if (stream_update)
1539                 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
1540
1541         if (surface_count == 0) {
1542                 /*
1543                  * When turning off the screen there is no need to program the front end a
1544                  * second time; just return after programming blank.
1545                  */
1546                 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
1547                 return;
1548         }
1549
1550         // Update Type FULL, Surface updates
1551         for (j = 0; j < dc->res_pool->pipe_count; j++) {
1552                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1553
1554                 if (!pipe_ctx->top_pipe &&
1555                         pipe_ctx->stream &&
1556                         pipe_ctx->stream == stream) {
1557                         struct dc_stream_status *stream_status = NULL;
1558
1559                         top_pipe_to_program = pipe_ctx;
1560
1561                         if (!pipe_ctx->plane_state)
1562                                 continue;
1563
1564                         /* Full (front end) update only beyond this point */
1565                         if (update_type == UPDATE_TYPE_FAST)
1566                                 continue;
1567
1568                         stream_status =
1569                                 stream_get_status(context, pipe_ctx->stream);
1570
1571                         dc->hwss.apply_ctx_for_surface(
1572                                         dc, pipe_ctx->stream, stream_status->plane_count, context);
1573                 }
1574         }
1575
1576         if (update_type == UPDATE_TYPE_FULL)
1577                 context_timing_trace(dc, &context->res_ctx);
1578
1579         // Update Type FAST, Surface updates
1580         if (update_type == UPDATE_TYPE_FAST) {
1581                 /* Lock the top pipe while updating plane addrs, since freesync requires
1582                  *  plane addr update event triggers to be synchronized.
1583                  *  top_pipe_to_program is expected to never be NULL
1584                  */
1585                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
1586
1587                 /* Perform requested Updates */
1588                 for (i = 0; i < surface_count; i++) {
1589                         struct dc_plane_state *plane_state = srf_updates[i].surface;
1590
1591                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
1592                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1593
1594                                 if (pipe_ctx->stream != stream)
1595                                         continue;
1596
1597                                 if (pipe_ctx->plane_state != plane_state)
1598                                         continue;
1599
1600                                 if (srf_updates[i].flip_addr)
1601                                         dc->hwss.update_plane_addr(dc, pipe_ctx);
1602                         }
1603                 }
1604
1605                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
1606         }
1607 }
1608
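/**
 * dc_commit_updates_for_stream() - Apply surface and stream updates for a stream
 *
 * Determines the update type, copies @state into a freshly allocated context
 * for full updates, applies new flip addresses, rebuilds scaling parameters
 * for medium and full updates, programs the hardware through
 * commit_planes_for_stream() and, if a new context was built, swaps it into
 * dc->current_state.
 *
 * Illustrative page-flip call, sketched only from the fields this file
 * touches (dc, plane, stream and flip are assumed to come from the caller):
 *
 *	struct dc_surface_update upd = { 0 };
 *
 *	upd.surface = plane;
 *	upd.flip_addr = flip;		// new address + flip_immediate
 *	dc_commit_updates_for_stream(dc, &upd, 1, stream,
 *				     NULL,	// assuming no stream-level changes
 *				     &plane, dc->current_state);
 */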
1609 void dc_commit_updates_for_stream(struct dc *dc,
1610                 struct dc_surface_update *srf_updates,
1611                 int surface_count,
1612                 struct dc_stream_state *stream,
1613                 struct dc_stream_update *stream_update,
1614                 struct dc_plane_state **plane_states,
1615                 struct dc_state *state)
1616 {
1617         const struct dc_stream_status *stream_status;
1618         enum surface_update_type update_type;
1619         struct dc_state *context;
1620         struct dc_context *dc_ctx = dc->ctx;
1621         int i, j;
1622
1623         stream_status = dc_stream_get_status(stream);
1624         context = dc->current_state;
1625
1626         update_type = dc_check_update_surfaces_for_stream(
1627                                 dc, srf_updates, surface_count, stream_update, stream_status);
1628
1629         if (update_type >= update_surface_trace_level)
1630                 update_surface_trace(dc, srf_updates, surface_count);
1631
1632
1633         if (update_type >= UPDATE_TYPE_FULL) {
1634
1635                 /* initialize scratch memory for building context */
1636                 context = dc_create_state();
1637                 if (context == NULL) {
1638                         DC_ERROR("Failed to allocate new validate context!\n");
1639                         return;
1640                 }
1641
1642                 dc_resource_state_copy_construct(state, context);
1643         }
1644
1645
1646         for (i = 0; i < surface_count; i++) {
1647                 struct dc_plane_state *surface = srf_updates[i].surface;
1648
1649                 /* TODO: On flip we don't build the state, so it still has the
1650                  * old address, which is why we update the address here.
1651                  */
1652                 if (srf_updates[i].flip_addr) {
1653                         surface->address = srf_updates[i].flip_addr->address;
1654                         surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;
1655
1656                 }
1657
1658                 if (update_type >= UPDATE_TYPE_MED) {
1659                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
1660                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1661
1662                                 if (pipe_ctx->plane_state != surface)
1663                                         continue;
1664
1665                                 resource_build_scaling_params(pipe_ctx);
1666                         }
1667                 }
1668         }
1669
1670         commit_planes_for_stream(
1671                                 dc,
1672                                 srf_updates,
1673                                 surface_count,
1674                                 stream,
1675                                 stream_update,
1676                                 update_type,
1677                                 context);
1678         /* Update current_state */
1679         if (dc->current_state != context) {
1680
1681                 struct dc_state *old = dc->current_state;
1682
1683                 dc->current_state = context;
1684                 dc_release_state(old);
1685
1686         }
1687         /* Use current_state to update watermarks etc. */
1688         if (update_type >= UPDATE_TYPE_FULL)
1689                 dc_post_update_surfaces_to_stream(dc);
1693 }
1694
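/* Accessors into the currently committed state (dc->current_state). */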
1695 uint8_t dc_get_current_stream_count(struct dc *dc)
1696 {
1697         return dc->current_state->stream_count;
1698 }
1699
1700 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
1701 {
1702         if (i < dc->current_state->stream_count)
1703                 return dc->current_state->streams[i];
1704         return NULL;
1705 }
1706
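/**
 * dc_interrupt_to_irq_source() - Translate a hw src_id/ext_id pair into a
 * dc_irq_source via the resource pool's irq service.
 */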
1707 enum dc_irq_source dc_interrupt_to_irq_source(
1708                 struct dc *dc,
1709                 uint32_t src_id,
1710                 uint32_t ext_id)
1711 {
1712         return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
1713 }
1714
1715 /**
1716  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
1717  */
1718 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
1719 {
1720
1721         if (dc == NULL)
1722                 return false;
1723
1724         return dal_irq_service_set(dc->res_pool->irqs, src, enable);
1725 }
1726
1727 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1728 {
1729         dal_irq_service_ack(dc->res_pool->irqs, src);
1730 }
1731
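/**
 * dc_set_power_state() - Handle an ACPI power state transition
 *
 * For D0 the resource state is reconstructed and the hardware re-initialized.
 * For any other state the current context is destructed and zeroed (with its
 * refcount preserved) so that resume starts from a clean state and the hw
 * programming optimizations are not tripped up by stale state.
 */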
1732 void dc_set_power_state(
1733         struct dc *dc,
1734         enum dc_acpi_cm_power_state power_state)
1735 {
1736         struct kref refcount;
1737
1738         switch (power_state) {
1739         case DC_ACPI_CM_POWER_STATE_D0:
1740                 dc_resource_state_construct(dc, dc->current_state);
1741
1742                 dc->hwss.init_hw(dc);
1743                 break;
1744         default:
1745                 ASSERT(dc->current_state->stream_count == 0);
1746                 /* Zero out the current context so that on resume we start with
1747                  * clean state, and dc hw programming optimizations will not
1748                  * cause any trouble.
1749                  */
1750
1751                 /* Preserve refcount */
1752                 refcount = dc->current_state->refcount;
1753                 dc_resource_state_destruct(dc->current_state);
1754                 memset(dc->current_state, 0,
1755                                 sizeof(*dc->current_state));
1756
1757                 dc->current_state->refcount = refcount;
1758
1759                 break;
1760         }
1761
1762 }
1763
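/**
 * dc_resume() - Resume every link owned by the driver
 */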
1764 void dc_resume(struct dc *dc)
1765 {
1766
1767         uint32_t i;
1768
1769         for (i = 0; i < dc->link_count; i++)
1770                 core_link_resume(dc->links[i]);
1771 }
1772
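/* True if a DMCU is present and reports itself as initialized. */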
1773 bool dc_is_dmcu_initialized(struct dc *dc)
1774 {
1775         struct dmcu *dmcu = dc->res_pool->dmcu;
1776
1777         if (dmcu)
1778                 return dmcu->funcs->is_dmcu_initialized(dmcu);
1779         return false;
1780 }
1781
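/**
 * dc_submit_i2c() - Submit an i2c command on the DDC channel of the link at @link_index
 */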
1782 bool dc_submit_i2c(
1783                 struct dc *dc,
1784                 uint32_t link_index,
1785                 struct i2c_command *cmd)
1786 {
1787
1788         struct dc_link *link = dc->links[link_index];
1789         struct ddc_service *ddc = link->ddc;
1790         return dce_i2c_submit_command(
1791                 dc->res_pool,
1792                 ddc->ddc_pin,
1793                 cmd);
1794 }
1795
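/*
 * Retain @sink and append it to @dc_link's remote sink array; fails (and
 * breaks to the debugger) once MAX_SINKS_PER_LINK sinks are attached.
 */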
1796 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
1797 {
1798         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1799                 BREAK_TO_DEBUGGER();
1800                 return false;
1801         }
1802
1803         dc_sink_retain(sink);
1804
1805         dc_link->remote_sinks[dc_link->sink_count] = sink;
1806         dc_link->sink_count++;
1807
1808         return true;
1809 }
1810
1811 /**
1812  * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
1813  *
1814  * EDID length is in bytes
1815  */
1816 struct dc_sink *dc_link_add_remote_sink(
1817                 struct dc_link *link,
1818                 const uint8_t *edid,
1819                 int len,
1820                 struct dc_sink_init_data *init_data)
1821 {
1822         struct dc_sink *dc_sink;
1823         enum dc_edid_status edid_status;
1824
1825         if (len > DC_MAX_EDID_BUFFER_SIZE) {
1826                 dm_error("Max EDID buffer size breached!\n");
1827                 return NULL;
1828         }
1829
1830         if (!init_data) {
1831                 BREAK_TO_DEBUGGER();
1832                 return NULL;
1833         }
1834
1835         if (!init_data->link) {
1836                 BREAK_TO_DEBUGGER();
1837                 return NULL;
1838         }
1839
1840         dc_sink = dc_sink_create(init_data);
1841
1842         if (!dc_sink)
1843                 return NULL;
1844
1845         memmove(dc_sink->dc_edid.raw_edid, edid, len);
1846         dc_sink->dc_edid.length = len;
1847
1848         if (!link_add_remote_sink_helper(
1849                         link,
1850                         dc_sink))
1851                 goto fail_add_sink;
1852
1853         edid_status = dm_helpers_parse_edid_caps(
1854                         link->ctx,
1855                         &dc_sink->dc_edid,
1856                         &dc_sink->edid_caps);
1857
1858         /*
1859          * Treat the device as having no EDID if EDID
1860          * parsing fails.
1861          */
1862         if (edid_status != EDID_OK) {
1863                 dc_sink->dc_edid.length = 0;
1864                 dm_error("Bad EDID, status %d!\n", edid_status);
1865         }
1866
1867         return dc_sink;
1868
1869 fail_add_sink:
1870         dc_sink_release(dc_sink);
1871         return NULL;
1872 }
1873
1874 /**
1875  * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
1876  *
1877  * Note that this just removes the struct dc_sink - it doesn't
1878  * program hardware or alter other members of dc_link
1879  */
1880 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
1881 {
1882         int i;
1883
1884         if (!link->sink_count) {
1885                 BREAK_TO_DEBUGGER();
1886                 return;
1887         }
1888
1889         for (i = 0; i < link->sink_count; i++) {
1890                 if (link->remote_sinks[i] == sink) {
1891                         dc_sink_release(sink);
1892                         link->remote_sinks[i] = NULL;
1893
1894                         /* shrink the array to remove the empty slot */
1895                         while (i < link->sink_count - 1) {
1896                                 link->remote_sinks[i] = link->remote_sinks[i+1];
1897                                 i++;
1898                         }
1899                         link->remote_sinks[i] = NULL;
1900                         link->sink_count--;
1901                         return;
1902                 }
1903         }
1904 }
1905
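/* Copy the DCN clock values (in kHz) computed for @state into @info. */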
1906 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
1907 {
1908         info->displayClock                              = (unsigned int)state->bw.dcn.clk.dispclk_khz;
1909         info->engineClock                               = (unsigned int)state->bw.dcn.clk.dcfclk_khz;
1910         info->memoryClock                               = (unsigned int)state->bw.dcn.clk.dramclk_khz;
1911         info->maxSupportedDppClock              = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz;
1912         info->dppClock                                  = (unsigned int)state->bw.dcn.clk.dppclk_khz;
1913         info->socClock                                  = (unsigned int)state->bw.dcn.clk.socclk_khz;
1914         info->dcfClockDeepSleep                 = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
1915         info->fClock                                    = (unsigned int)state->bw.dcn.clk.fclk_khz;
1916         info->phyClock                                  = (unsigned int)state->bw.dcn.clk.phyclk_khz;
1917 }