2 * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
15 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
16 #include "sde_encoder_phys.h"
17 #include "sde_hw_interrupts.h"
18 #include "sde_core_irq.h"
19 #include "sde_formats.h"
/*
 * Logging helpers that prefix messages with the parent DRM encoder id
 * and this encoder's interface index (relative to INTF_0); -1 is printed
 * when the encoder or its parent pointer is NULL.
 */
21 #define SDE_DEBUG_CMDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
22 (e) && (e)->base.parent ? \
23 (e)->base.parent->base.id : -1, \
24 (e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
26 #define SDE_ERROR_CMDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
27 (e) && (e)->base.parent ? \
28 (e)->base.parent->base.id : -1, \
29 (e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
/* Recover the command-mode wrapper from the embedded base encoder. */
31 #define to_sde_encoder_phys_cmd(x) \
32 container_of(x, struct sde_encoder_phys_cmd, base)
35 * Tearcheck sync start and continue thresholds are empirically found
36 * based on common panels In the future, may want to allow panels to override
37 * these default values
39 #define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
40 #define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
42 static inline bool sde_encoder_phys_cmd_is_master(
43 struct sde_encoder_phys *phys_enc)
45 return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
/*
 * sde_encoder_phys_cmd_mode_fixup - display mode validation hook.
 * The visible body only logs entry; no adjustment is applied to
 * @adj_mode for command mode.
 * NOTE(review): the return statement is not visible in this view --
 * presumably it reports success unconditionally; confirm upstream.
 */
48 static bool sde_encoder_phys_cmd_mode_fixup(
49 struct sde_encoder_phys *phys_enc,
50 const struct drm_display_mode *mode,
51 struct drm_display_mode *adj_mode)
54 SDE_DEBUG_CMDENC(to_sde_encoder_phys_cmd(phys_enc), "\n");
58 static void sde_encoder_phys_cmd_mode_set(
59 struct sde_encoder_phys *phys_enc,
60 struct drm_display_mode *mode,
61 struct drm_display_mode *adj_mode)
63 struct sde_encoder_phys_cmd *cmd_enc =
64 to_sde_encoder_phys_cmd(phys_enc);
65 struct sde_rm *rm = &phys_enc->sde_kms->rm;
66 struct sde_rm_hw_iter iter;
69 if (!phys_enc || !mode || !adj_mode) {
70 SDE_ERROR("invalid arg(s), enc %d mode %d adj_mode %d\n",
71 phys_enc != 0, mode != 0, adj_mode != 0);
74 phys_enc->cached_mode = *adj_mode;
75 SDE_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
76 drm_mode_debug_printmodeline(adj_mode);
78 instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
80 /* Retrieve previously allocated HW Resources. Shouldn't fail */
81 sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
82 for (i = 0; i <= instance; i++) {
83 if (sde_rm_get_hw(rm, &iter))
84 phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
87 if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
88 SDE_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
89 PTR_ERR(phys_enc->hw_ctl));
90 phys_enc->hw_ctl = NULL;
/*
 * sde_encoder_phys_cmd_pp_tx_done_irq - pingpong TX-done interrupt
 * handler. Notifies the parent of frame completion, decrements the
 * pending kickoff count (not below zero) under the encoder spinlock,
 * logs an event, and wakes any thread blocked on pending_kickoff_wq.
 * @arg is the sde_encoder_phys_cmd registered as the irq callback arg.
 */
95 static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
97 struct sde_encoder_phys_cmd *cmd_enc = arg;
98 struct sde_encoder_phys *phys_enc;
99 unsigned long lock_flags;
105 phys_enc = &cmd_enc->base;
107 /* notify all synchronous clients first, then asynchronous clients */
108 if (phys_enc->parent_ops.handle_frame_done)
109 phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
110 phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
/* atomic_add_unless(-1, 0) keeps the count saturated at zero */
112 spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
113 new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
114 spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
116 SDE_EVT32_IRQ(DRMID(phys_enc->parent),
117 phys_enc->hw_pp->idx - PINGPONG_0, new_cnt);
119 /* Signal any waiting atomic commit thread */
120 wake_up_all(&phys_enc->pending_kickoff_wq);
/*
 * sde_encoder_phys_cmd_pp_rd_ptr_irq - pingpong read-pointer interrupt
 * handler; forwards a vblank notification to the parent virtual
 * encoder when a handler is installed.
 */
123 static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
125 struct sde_encoder_phys_cmd *cmd_enc = arg;
126 struct sde_encoder_phys *phys_enc = &cmd_enc->base;
131 if (phys_enc->parent_ops.handle_vblank_virt)
132 phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
/*
 * _sde_encoder_phys_is_ppsplit_slave - return whether this encoder is
 * the slave half of a pingpong-split (PPSPLIT) topology, based on the
 * connector's topology name and this encoder's split role.
 */
136 static bool _sde_encoder_phys_is_ppsplit_slave(
137 struct sde_encoder_phys *phys_enc)
139 enum sde_rm_topology_name topology;
144 topology = sde_connector_get_topology_name(phys_enc->connector);
145 if (topology == SDE_RM_TOPOLOGY_PPSPLIT &&
146 phys_enc->split_role == ENC_ROLE_SLAVE)
/*
 * _sde_encoder_phys_cmd_wait_for_idle - wait for any outstanding
 * kickoff to complete, i.e. for pending_kickoff_cnt to drain via the
 * pp_tx_done interrupt.
 *
 * Fast-outs: the ppsplit slave never enables the done irq, and a
 * disabled encoder has nothing to wait on (logged as an error).
 *
 * On wait timeout the pingpong irq status is re-read directly: if the
 * hardware actually signalled completion but the interrupt was missed,
 * the tx_done handler is invoked manually to recover; otherwise the
 * timeout is reported to the parent as a frame-done ERROR event.
 */
152 static int _sde_encoder_phys_cmd_wait_for_idle(
153 struct sde_encoder_phys *phys_enc)
155 struct sde_encoder_phys_cmd *cmd_enc =
156 to_sde_encoder_phys_cmd(phys_enc);
161 SDE_ERROR("invalid encoder\n");
165 /* slave encoder doesn't enable for ppsplit */
166 if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
169 /* return EWOULDBLOCK since we know the wait isn't necessary */
170 if (phys_enc->enable_state == SDE_ENC_DISABLED) {
171 SDE_ERROR_CMDENC(cmd_enc, "encoder is disabled\n");
175 /* wait for previous kickoff to complete */
176 ret = sde_encoder_helper_wait_event_timeout(
177 DRMID(phys_enc->parent),
178 phys_enc->hw_pp->idx - PINGPONG_0,
179 &phys_enc->pending_kickoff_wq,
180 &phys_enc->pending_kickoff_cnt,
/* timeout path: check whether the irq fired but was not serviced */
183 irq_status = sde_core_irq_read(phys_enc->sde_kms,
184 INTR_IDX_PINGPONG, true);
186 SDE_EVT32(DRMID(phys_enc->parent),
187 phys_enc->hw_pp->idx - PINGPONG_0);
188 SDE_DEBUG_CMDENC(cmd_enc,
189 "pp:%d done but irq not triggered\n",
190 phys_enc->hw_pp->idx - PINGPONG_0);
/* recover by running the done handler by hand */
191 sde_encoder_phys_cmd_pp_tx_done_irq(cmd_enc,
195 SDE_EVT32(DRMID(phys_enc->parent),
196 phys_enc->hw_pp->idx - PINGPONG_0);
197 SDE_ERROR_CMDENC(cmd_enc, "pp:%d kickoff timed out\n",
198 phys_enc->hw_pp->idx - PINGPONG_0);
199 if (phys_enc->parent_ops.handle_frame_done)
200 phys_enc->parent_ops.handle_frame_done(
201 phys_enc->parent, phys_enc,
202 SDE_ENCODER_FRAME_EVENT_ERROR);
/*
 * sde_encoder_phys_cmd_underrun_irq - interface underrun interrupt
 * handler; forwards the underrun notification to the parent virtual
 * encoder when a handler is installed.
 */
212 static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
214 struct sde_encoder_phys_cmd *cmd_enc = arg;
215 struct sde_encoder_phys *phys_enc;
220 phys_enc = &cmd_enc->base;
221 if (phys_enc->parent_ops.handle_underrun_virt)
222 phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
/*
 * sde_encoder_phys_cmd_register_irq - hook up one interrupt for this
 * encoder: look up the core irq index for (@intr_type, pingpong idx),
 * register @irq_func as the callback (with cmd_enc as its arg) in
 * slot @idx of irq_idx[]/irq_cb[], then enable the interrupt.
 * On enable failure the callback is unregistered and the cached index
 * reset to -EINVAL so the slot is left clean.
 */
226 static int sde_encoder_phys_cmd_register_irq(struct sde_encoder_phys *phys_enc,
227 enum sde_intr_type intr_type, int idx,
228 void (*irq_func)(void *, int), const char *irq_name)
230 struct sde_encoder_phys_cmd *cmd_enc =
231 to_sde_encoder_phys_cmd(phys_enc);
235 SDE_ERROR("invalid encoder\n");
239 cmd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
240 intr_type, phys_enc->hw_pp->idx);
241 if (cmd_enc->irq_idx[idx] < 0) {
242 SDE_ERROR_CMDENC(cmd_enc,
243 "failed to lookup IRQ index for %s with pp=%d\n",
245 phys_enc->hw_pp->idx - PINGPONG_0);
249 cmd_enc->irq_cb[idx].func = irq_func;
250 cmd_enc->irq_cb[idx].arg = cmd_enc;
251 ret = sde_core_irq_register_callback(phys_enc->sde_kms,
252 cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
254 SDE_ERROR_CMDENC(cmd_enc,
255 "failed to register IRQ callback %s\n",
260 ret = sde_core_irq_enable(phys_enc->sde_kms, &cmd_enc->irq_idx[idx], 1);
262 SDE_ERROR_CMDENC(cmd_enc,
263 "failed to enable IRQ for %s, pp %d, irq_idx %d\n",
265 phys_enc->hw_pp->idx - PINGPONG_0,
266 cmd_enc->irq_idx[idx]);
267 cmd_enc->irq_idx[idx] = -EINVAL;
269 /* Unregister callback on IRQ enable failure */
270 sde_core_irq_unregister_callback(phys_enc->sde_kms,
271 cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
275 SDE_DEBUG_CMDENC(cmd_enc, "registered IRQ %s for pp %d, irq_idx %d\n",
277 phys_enc->hw_pp->idx - PINGPONG_0,
278 cmd_enc->irq_idx[idx]);
/*
 * sde_encoder_phys_cmd_unregister_irq - reverse of register_irq for
 * slot @idx: disable the interrupt, then remove its callback from the
 * core irq layer.
 */
283 static int sde_encoder_phys_cmd_unregister_irq(
284 struct sde_encoder_phys *phys_enc, int idx)
286 struct sde_encoder_phys_cmd *cmd_enc =
287 to_sde_encoder_phys_cmd(phys_enc);
290 SDE_ERROR("invalid encoder\n");
294 sde_core_irq_disable(phys_enc->sde_kms, &cmd_enc->irq_idx[idx], 1);
295 sde_core_irq_unregister_callback(phys_enc->sde_kms,
296 cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
298 SDE_DEBUG_CMDENC(cmd_enc, "unregistered IRQ for pp %d, irq_idx %d\n",
299 phys_enc->hw_pp->idx - PINGPONG_0,
300 cmd_enc->irq_idx[idx]);
/*
 * sde_encoder_phys_cmd_tearcheck_config - program the pingpong
 * tearcheck block from the cached mode: derive vsync_count from the
 * vsync clock rate and the panel's vtotal * vrefresh, use panel
 * hardware vsync, and set start/rd_ptr positions just past the active
 * region (vdisplay). Bails out silently when the pingpong ops do not
 * provide setup/enable_tearcheck.
 */
305 static void sde_encoder_phys_cmd_tearcheck_config(
306 struct sde_encoder_phys *phys_enc)
308 struct sde_encoder_phys_cmd *cmd_enc =
309 to_sde_encoder_phys_cmd(phys_enc);
310 struct sde_hw_tear_check tc_cfg = { 0 };
311 struct drm_display_mode *mode = &phys_enc->cached_mode;
312 bool tc_enable = true;
314 struct msm_drm_private *priv;
315 struct sde_kms *sde_kms;
318 SDE_ERROR("invalid encoder\n");
322 SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
324 if (!phys_enc->hw_pp->ops.setup_tearcheck ||
325 !phys_enc->hw_pp->ops.enable_tearcheck) {
326 SDE_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
330 sde_kms = phys_enc->sde_kms;
331 priv = sde_kms->dev->dev_private;
333 * TE default: dsi byte clock calculated base on 70 fps;
334 * around 14 ms to complete a kickoff cycle if te disabled;
335 * vclk_line base on 60 fps; write is faster than read;
336 * init == start == rdptr;
338 * vsync_count is ratio of MDP VSYNC clock frequency to LCD panel
339 * frequency divided by the no. of rows (lines) in the LCDpanel.
341 vsync_hz = sde_power_clk_get_rate(&priv->phandle, "vsync_clk");
343 SDE_DEBUG_CMDENC(cmd_enc, "invalid vsync clock rate\n");
347 tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
348 tc_cfg.hw_vsync_mode = 1;
351 * By setting sync_cfg_height to near max register value, we essentially
352 * disable sde hw generated TE signal, since hw TE will arrive first.
353 * Only caveat is if due to error, we hit wrap-around.
355 tc_cfg.sync_cfg_height = 0xFFF0;
356 tc_cfg.vsync_init_val = mode->vdisplay;
357 tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
358 tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
359 tc_cfg.start_pos = mode->vdisplay;
360 tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
362 SDE_DEBUG_CMDENC(cmd_enc,
363 "tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
364 phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
365 mode->vtotal, mode->vrefresh);
366 SDE_DEBUG_CMDENC(cmd_enc,
367 "tc %d enable %u start_pos %u rd_ptr_irq %u\n",
368 phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
370 SDE_DEBUG_CMDENC(cmd_enc,
371 "tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
372 phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
373 tc_cfg.vsync_count, tc_cfg.vsync_init_val);
374 SDE_DEBUG_CMDENC(cmd_enc,
375 "tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
376 phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
377 tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
/* commit the computed config, then turn tearcheck on */
379 phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
380 phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
/*
 * sde_encoder_phys_cmd_pingpong_config - program the CTL interface
 * configuration (intf index, command-mode select, stream select, 3D
 * blend mode) for this encoder, then configure tearcheck. Requires a
 * valid hw_ctl with a setup_intf_cfg op.
 */
383 static void sde_encoder_phys_cmd_pingpong_config(
384 struct sde_encoder_phys *phys_enc)
386 struct sde_encoder_phys_cmd *cmd_enc =
387 to_sde_encoder_phys_cmd(phys_enc);
388 struct sde_hw_intf_cfg intf_cfg = { 0 };
390 if (!phys_enc || !phys_enc->hw_ctl ||
391 !phys_enc->hw_ctl->ops.setup_intf_cfg) {
392 SDE_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
396 SDE_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
397 phys_enc->hw_pp->idx - PINGPONG_0);
398 drm_mode_debug_printmodeline(&phys_enc->cached_mode);
400 intf_cfg.intf = cmd_enc->intf_idx;
401 intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
402 intf_cfg.stream_sel = cmd_enc->stream_sel;
403 intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
405 phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
407 sde_encoder_phys_cmd_tearcheck_config(phys_enc);
/*
 * sde_encoder_phys_cmd_needs_single_flush - only the pingpong-split
 * topology requires a single combined flush across both encoders.
 */
410 static bool sde_encoder_phys_cmd_needs_single_flush(
411 struct sde_encoder_phys *phys_enc)
413 enum sde_rm_topology_name topology;
418 topology = sde_connector_get_topology_name(phys_enc->connector);
419 return topology == SDE_RM_TOPOLOGY_PPSPLIT;
/*
 * sde_encoder_phys_cmd_control_vblank_irq - refcounted vblank enable:
 * registers the pingpong read-pointer irq on the first enable
 * (refcount 0 -> 1) and unregisters it on the last disable (1 -> 0).
 * Slave encoders never report vblank, so they bail out early.
 */
422 static int sde_encoder_phys_cmd_control_vblank_irq(
423 struct sde_encoder_phys *phys_enc,
426 struct sde_encoder_phys_cmd *cmd_enc =
427 to_sde_encoder_phys_cmd(phys_enc);
431 SDE_ERROR("invalid encoder\n");
435 /* Slave encoders don't report vblank */
436 if (!sde_encoder_phys_cmd_is_master(phys_enc))
439 SDE_DEBUG_CMDENC(cmd_enc, "[%pS] enable=%d/%d\n",
440 __builtin_return_address(0),
441 enable, atomic_read(&phys_enc->vblank_refcount));
443 SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
444 enable, atomic_read(&phys_enc->vblank_refcount));
446 if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
447 ret = sde_encoder_phys_cmd_register_irq(phys_enc,
448 SDE_IRQ_TYPE_PING_PONG_RD_PTR,
450 sde_encoder_phys_cmd_pp_rd_ptr_irq,
452 else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
453 ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
458 SDE_ERROR_CMDENC(cmd_enc,
459 "control vblank irq error %d, enable %d\n",
/*
 * sde_encoder_phys_cmd_enable - bring the command-mode encoder up:
 * apply split config, program pingpong/intf/tearcheck, register the
 * pp_tx_done, vblank (master only path below) and underrun interrupts
 * -- unwinding already-registered irqs on failure -- then stage the
 * interface flush bits and mark the encoder enabled.
 * The ppsplit slave skips irq registration entirely.
 */
465 static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
467 struct sde_encoder_phys_cmd *cmd_enc =
468 to_sde_encoder_phys_cmd(phys_enc);
469 struct sde_hw_ctl *ctl;
473 if (!phys_enc || !phys_enc->hw_ctl) {
474 SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
477 SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
479 if (phys_enc->enable_state == SDE_ENC_ENABLED) {
480 SDE_ERROR("already enabled\n");
484 sde_encoder_helper_split_config(phys_enc, cmd_enc->intf_idx);
486 sde_encoder_phys_cmd_pingpong_config(phys_enc);
488 if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
491 /* Both master and slave need to register for pp_tx_done */
492 ret = sde_encoder_phys_cmd_register_irq(phys_enc,
493 SDE_IRQ_TYPE_PING_PONG_COMP,
495 sde_encoder_phys_cmd_pp_tx_done_irq,
500 ret = sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
/* vblank enable failed: roll back the pp_tx_done registration */
502 sde_encoder_phys_cmd_unregister_irq(phys_enc,
507 ret = sde_encoder_phys_cmd_register_irq(phys_enc,
508 SDE_IRQ_TYPE_INTF_UNDER_RUN,
510 sde_encoder_phys_cmd_underrun_irq,
/* underrun registration failed: roll back vblank and pp_tx_done */
513 sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
514 sde_encoder_phys_cmd_unregister_irq(phys_enc,
/* stage the interface bits into the pending flush mask */
520 ctl = phys_enc->hw_ctl;
521 ctl->ops.get_bitmask_intf(ctl, &flush_mask, cmd_enc->intf_idx);
522 ctl->ops.update_pending_flush(ctl, flush_mask);
523 phys_enc->enable_state = SDE_ENC_ENABLED;
525 SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d flush_mask %x\n",
526 ctl->idx - CTL_0, flush_mask);
/*
 * sde_encoder_phys_cmd_disable - tear the encoder down: wait for any
 * in-flight kickoff to finish (forcing the pending count to 0 on wait
 * failure), unregister the underrun/vblank/pingpong interrupts (skipped
 * for the ppsplit slave, which never registered them), mark the encoder
 * disabled, and warn if a vblank refcount is still outstanding.
 */
529 static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
531 struct sde_encoder_phys_cmd *cmd_enc =
532 to_sde_encoder_phys_cmd(phys_enc);
536 SDE_ERROR("invalid encoder\n");
539 SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
541 if (phys_enc->enable_state == SDE_ENC_DISABLED) {
542 SDE_ERROR_CMDENC(cmd_enc, "already disabled\n");
546 SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0);
548 if (!_sde_encoder_phys_is_ppsplit_slave(phys_enc)) {
549 ret = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
/* discard the failed kickoff so later waits don't block */
551 atomic_set(&phys_enc->pending_kickoff_cnt, 0);
552 SDE_ERROR_CMDENC(cmd_enc,
553 "pp %d failed wait for idle, %d\n",
554 phys_enc->hw_pp->idx - PINGPONG_0, ret);
555 SDE_EVT32(DRMID(phys_enc->parent),
556 phys_enc->hw_pp->idx - PINGPONG_0, ret);
559 sde_encoder_phys_cmd_unregister_irq(
560 phys_enc, INTR_IDX_UNDERRUN);
561 sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
562 sde_encoder_phys_cmd_unregister_irq(
563 phys_enc, INTR_IDX_PINGPONG);
566 phys_enc->enable_state = SDE_ENC_DISABLED;
/* a non-zero refcount here means an enable/disable imbalance */
568 if (atomic_read(&phys_enc->vblank_refcount))
569 SDE_ERROR("enc:%d role:%d invalid vblank refcount %d\n",
570 phys_enc->parent->base.id,
571 phys_enc->split_role,
572 atomic_read(&phys_enc->vblank_refcount));
/*
 * sde_encoder_phys_cmd_post_disable - late disable step: clear the CTL
 * interface configuration, except on the ppsplit slave (whose master
 * owns the shared config) or when the ctl op is absent.
 */
575 static void sde_encoder_phys_cmd_post_disable(
576 struct sde_encoder_phys *phys_enc)
578 if (!phys_enc || !phys_enc->hw_ctl) {
579 SDE_ERROR("invalid encoder %d\n", phys_enc != NULL);
583 if (!_sde_encoder_phys_is_ppsplit_slave(phys_enc) &&
584 phys_enc->hw_ctl->ops.clear_intf_cfg)
585 phys_enc->hw_ctl->ops.clear_intf_cfg(phys_enc->hw_ctl);
/*
 * sde_encoder_phys_cmd_destroy - destructor hook. Only the NULL check
 * is visible here; presumably the cmd_enc allocation made in _init is
 * freed below this view -- confirm against the full source.
 */
588 static void sde_encoder_phys_cmd_destroy(struct sde_encoder_phys *phys_enc)
590 struct sde_encoder_phys_cmd *cmd_enc =
591 to_sde_encoder_phys_cmd(phys_enc);
594 SDE_ERROR("invalid encoder\n");
/*
 * sde_encoder_phys_cmd_get_hw_resources - report to the caller that
 * this encoder drives its interface in command mode by marking the
 * corresponding intfs[] slot.
 */
600 static void sde_encoder_phys_cmd_get_hw_resources(
601 struct sde_encoder_phys *phys_enc,
602 struct sde_encoder_hw_resources *hw_res,
603 struct drm_connector_state *conn_state)
605 struct sde_encoder_phys_cmd *cmd_enc =
606 to_sde_encoder_phys_cmd(phys_enc);
609 SDE_ERROR("invalid encoder\n");
612 SDE_DEBUG_CMDENC(cmd_enc, "\n");
613 hw_res->intfs[cmd_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
/*
 * sde_encoder_phys_cmd_wait_for_commit_done - intentionally a no-op
 * wait for command mode; see the rationale in the body comment.
 * NOTE(review): the return statement is outside this view --
 * presumably returns 0 (success); confirm upstream.
 */
616 static int sde_encoder_phys_cmd_wait_for_commit_done(
617 struct sde_encoder_phys *phys_enc)
620 * Since ctl_start "commits" the transaction to hardware, and the
621 * tearcheck block takes it from there, there is no need to have a
622 * separate wait for committed, a la wait-for-vsync in video mode
/*
 * sde_encoder_phys_cmd_prepare_for_kickoff - serialize kickoffs: wait
 * for the previous kickoff to drain before the new one is marked
 * outstanding; on wait failure the pending count is reset to 0 so the
 * failed kickoff is discarded rather than wedging later waits.
 */
628 static void sde_encoder_phys_cmd_prepare_for_kickoff(
629 struct sde_encoder_phys *phys_enc)
631 struct sde_encoder_phys_cmd *cmd_enc =
632 to_sde_encoder_phys_cmd(phys_enc);
636 SDE_ERROR("invalid encoder\n");
639 SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
640 SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0);
643 * Mark kickoff request as outstanding. If there are more than one,
644 * outstanding, then we have to wait for the previous one to complete
646 ret = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
648 /* force pending_kickoff_cnt 0 to discard failed kickoff */
649 atomic_set(&phys_enc->pending_kickoff_cnt, 0);
650 SDE_EVT32(DRMID(phys_enc->parent),
651 phys_enc->hw_pp->idx - PINGPONG_0);
652 SDE_ERROR("failed wait_for_idle: %d\n", ret);
/*
 * sde_encoder_phys_cmd_init_ops - populate the physical-encoder ops
 * table with the command-mode implementations defined in this file
 * (plus the shared trigger_start helper).
 */
656 static void sde_encoder_phys_cmd_init_ops(
657 struct sde_encoder_phys_ops *ops)
659 ops->is_master = sde_encoder_phys_cmd_is_master;
660 ops->mode_set = sde_encoder_phys_cmd_mode_set;
661 ops->mode_fixup = sde_encoder_phys_cmd_mode_fixup;
662 ops->enable = sde_encoder_phys_cmd_enable;
663 ops->disable = sde_encoder_phys_cmd_disable;
664 ops->post_disable = sde_encoder_phys_cmd_post_disable;
665 ops->destroy = sde_encoder_phys_cmd_destroy;
666 ops->get_hw_resources = sde_encoder_phys_cmd_get_hw_resources;
667 ops->control_vblank_irq = sde_encoder_phys_cmd_control_vblank_irq;
668 ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done;
669 ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff;
670 ops->trigger_start = sde_encoder_helper_trigger_start;
671 ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
674 struct sde_encoder_phys *sde_encoder_phys_cmd_init(
675 struct sde_enc_phys_init_params *p)
677 struct sde_encoder_phys *phys_enc = NULL;
678 struct sde_encoder_phys_cmd *cmd_enc = NULL;
679 struct sde_hw_mdp *hw_mdp;
682 SDE_DEBUG("intf %d\n", p->intf_idx - INTF_0);
684 cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
687 SDE_ERROR("failed to allocate\n");
690 phys_enc = &cmd_enc->base;
692 hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
693 if (IS_ERR_OR_NULL(hw_mdp)) {
694 ret = PTR_ERR(hw_mdp);
695 SDE_ERROR("failed to get mdptop\n");
698 phys_enc->hw_mdptop = hw_mdp;
700 cmd_enc->intf_idx = p->intf_idx;
701 phys_enc->intf_idx = p->intf_idx;
703 sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
704 phys_enc->parent = p->parent;
705 phys_enc->parent_ops = p->parent_ops;
706 phys_enc->sde_kms = p->sde_kms;
707 phys_enc->split_role = p->split_role;
708 phys_enc->intf_mode = INTF_MODE_CMD;
709 phys_enc->enc_spinlock = p->enc_spinlock;
710 cmd_enc->stream_sel = 0;
711 phys_enc->enable_state = SDE_ENC_DISABLED;
712 for (i = 0; i < INTR_IDX_MAX; i++)
713 INIT_LIST_HEAD(&cmd_enc->irq_cb[i].list);
714 atomic_set(&phys_enc->vblank_refcount, 0);
715 atomic_set(&phys_enc->pending_kickoff_cnt, 0);
716 init_waitqueue_head(&phys_enc->pending_kickoff_wq);
718 SDE_DEBUG_CMDENC(cmd_enc, "created\n");