--- /dev/null
+QTI Snapdragon Display Engine (SDE) shared display
+
+Required properties:
+- compatible: "qcom,shared-display"
+- qcom,shared-display-base: node handle of qcom,shared-display-base, see below.
+- qcom,blend-stage-range: blend stage range that are not shared in one display
+- qcom,shared-display-src-mode: source resolution of the shared display, contains
+ two properties:
+ qcom,mode-h-active: horizontal resolution
+ qcom,mode-v-active: vertical resolution
+- qcom,shared-display-dst-mode: destination rectangle in the shared display,
+ contains 4 properties:
+ qcom,mode-x-offset: x offset inside the shared display
+ qcom,mode-y-offset: y offset inside the shared display
+ qcom,mode-width: width inside the shared display
+ qcom,mode-height: height inside the shared display
+
+
+qcom,shared-display-base properties:
+- qcom,shared-display-base-intf: intf index of the base display
+- qcom,shared-display-base-mst: if display is DP MST type, optional
+- qcom,shared-display-base-mode: timing of the base display, contains the
+ following properties:
+ qcom,mode-h-active: H active size
+ qcom,mode-h-front-porch: H front porch
+ qcom,mode-h-pulse-width: H pulse width
+ qcom,mode-h-back-porch: H back porch
+ qcom,mode-h-active-high: if H active polarity is high
+ qcom,mode-v-active: V active size
+ qcom,mode-v-front-porch: V front porch
+ qcom,mode-v-pulse-width: V pulse width
+ qcom,mode-v-back-porch: V back porch
+ qcom,mode-v-active-high: if V active polarity is high
+ qcom,mode-refresh-rate: vertical refresh rate
+ qcom,mode-clock-in-khz: clock in kHz
+
+Example:
+
+/ {
+ ...
+
+ sde_sh_base0: qcom,shared-display-base@0 {
+ qcom,shared-display-base-intf = <0>;
+ qcom,shared-display-base-mst;
+ qcom,shared-display-base-mode {
+ qcom,mode-h-active = <3840>;
+ qcom,mode-h-front-porch = <176>;
+ qcom,mode-h-pulse-width = <88>;
+ qcom,mode-h-back-porch = <296>;
+ qcom,mode-h-active-high;
+ qcom,mode-v-active = <2160>;
+ qcom,mode-v-front-porch = <8>;
+ qcom,mode-v-pulse-width = <10>;
+ qcom,mode-v-back-porch = <72>;
+ qcom,mode-v-active-high;
+ qcom,mode-refresh-rate = <30>;
+ qcom,mode-clock-in-khz = <297000>;
+ };
+ };
+
+ sde_sh0: qcom,shared-display@0 {
+ compatible = "qcom,shared-display";
+ qcom,shared-display-base = <&sde_sh_base0>;
+ qcom,blend-stage-range = <0 5>;
+ qcom,shared-display-src-mode {
+ qcom,mode-h-active = <1920>;
+ qcom,mode-v-active = <1080>;
+ };
+ qcom,shared-display-dst-mode {
+ qcom,mode-x-offset = <0>;
+ qcom,mode-y-offset = <0>;
+ qcom,mode-width = <1920>;
+ qcom,mode-height = <1080>;
+ };
+ };
+
+};
help
Choose this option for writeback connector support.
+config DRM_SDE_SHD
+ bool "Enable Shared display support in SDE DRM"
+ depends on DRM_MSM
+ help
+ Choose this option for shared display support.
+ This option enables multiple logical displays
+ to share one base physical encoder/connector.
+ Each logical display will appear as a different
+ connector and be reported back to userspace.
+
config DRM_SDE_HDMI
bool "Enable HDMI driver support in DRM SDE driver"
depends on DRM_MSM
msm_drm-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \
sde/sde_encoder_phys_wb.o
+msm_drm-$(CONFIG_DRM_SDE_SHD) += sde/sde_shd.o \
+ sde/sde_encoder_phys_shd.o
+
msm_drm-$(CONFIG_DRM_MSM) += \
msm_atomic.o \
msm_drv.o \
* @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
* @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
* @MSM_DISPLAY_CAP_EDID: EDID supported
+ * @MSM_DISPLAY_CAP_SHARED: Display is shared
*/
enum msm_display_caps {
MSM_DISPLAY_CAP_VID_MODE = BIT(0),
MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
MSM_DISPLAY_CAP_HOT_PLUG = BIT(2),
MSM_DISPLAY_CAP_EDID = BIT(3),
+ MSM_DISPLAY_CAP_SHARED = BIT(4),
};
/**
* @property_data: Array of private data for generic property handling
* @blob_caps: Pointer to blob structure for 'capabilities' property
* @blob_hdr: Pointer to blob structure for 'hdr_properties' property
+ * @is_shared: connector is shared
+ * @shared_roi: roi of the shared display
*/
struct sde_connector {
struct drm_connector base;
struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
struct drm_property_blob *blob_caps;
struct drm_property_blob *blob_hdr;
+ bool is_shared;
+ struct sde_rect shared_roi;
};
/**
struct sde_format *format;
struct sde_hw_ctl *ctl = mixer->hw_ctl;
struct sde_hw_stage_cfg *stage_cfg = &sde_crtc->stage_cfg;
+ struct sde_crtc_state *cstate;
u32 flush_mask = 0, crtc_split_width;
uint32_t lm_idx = LEFT_MIXER, idx;
drm_atomic_crtc_for_each_plane(plane, crtc) {
pstate = to_sde_plane_state(plane->state);
+ cstate = to_sde_crtc_state(crtc->state);
- /* always stage plane on either left or right lm */
- if (plane->state->crtc_x >= crtc_split_width) {
- lm_idx = RIGHT_MIXER;
- idx = right_crtc_zpos_cnt[pstate->stage]++;
- } else {
+ /* shared dual mixer mode will always enable both LM */
+ if (cstate->is_shared &&
+ sde_crtc->num_mixers == CRTC_DUAL_MIXERS) {
lm_idx = LEFT_MIXER;
idx = left_crtc_zpos_cnt[pstate->stage]++;
- }
+ lm_right = true;
+ } else {
+ /* always stage plane on either left or right lm */
+ if (plane->state->crtc_x >= crtc_split_width) {
+ lm_idx = RIGHT_MIXER;
+ idx = right_crtc_zpos_cnt[pstate->stage]++;
+ } else {
+ lm_idx = LEFT_MIXER;
+ idx = left_crtc_zpos_cnt[pstate->stage]++;
+ }
- /* stage plane on right LM if it crosses the boundary */
- lm_right = (lm_idx == LEFT_MIXER) &&
- (plane->state->crtc_x + plane->state->crtc_w >
+ /* stage plane on right LM if it crosses the
+ * boundary.
+ */
+ lm_right = (lm_idx == LEFT_MIXER) &&
+ (plane->state->crtc_x + plane->state->crtc_w >
crtc_split_width);
+ }
/*
* program each mixer with two hw pipes in dual mixer mode,
mutex_unlock(&sde_crtc->crtc_lock);
}
+/*
+ * _sde_crtc_setup_is_shared - propagate shared-display state from the
+ * connector into the CRTC state.
+ * @state: CRTC state to update
+ *
+ * Copies is_shared/shared_roi from the first attached sde_connector.
+ * NOTE(review): only connectors[0] is inspected — this assumes a shared
+ * CRTC has exactly one connector; confirm against connector attach logic.
+ */
+static void _sde_crtc_setup_is_shared(struct drm_crtc_state *state)
+{
+	struct sde_crtc_state *cstate;
+
+	cstate = to_sde_crtc_state(state);
+
+	cstate->is_shared = false;
+	if (cstate->num_connectors) {
+		struct drm_connector *conn = cstate->connectors[0];
+		struct sde_connector *sde_conn = to_sde_connector(conn);
+
+		if (sde_conn->is_shared) {
+			cstate->is_shared = true;
+			cstate->shared_roi = sde_conn->shared_roi;
+		}
+	}
+}
+
static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
sde_crtc = to_sde_crtc(crtc);
dev = crtc->dev;
- if (!sde_crtc->num_mixers)
+ if (!sde_crtc->num_mixers) {
+ _sde_crtc_setup_is_shared(crtc->state);
_sde_crtc_setup_mixers(crtc);
+ }
/* Reset flush mask from previous commit */
for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
state->mode_changed = true;
mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
+ _sde_crtc_setup_is_shared(state);
/* get plane state for all drm planes associated with crtc state */
drm_atomic_crtc_state_for_each_plane(plane, state) {
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @property_blobs: Reference pointers for blob properties
* @new_perf: new performance state being requested
+ * @is_shared: connector is shared
+ * @shared_roi: roi of the shared display
*/
struct sde_crtc_state {
struct drm_crtc_state base;
struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];
struct sde_core_perf_params new_perf;
+ bool is_shared;
+ struct sde_rect shared_roi;
};
#define to_sde_crtc_state(x) \
atomic_t last_underrun_ts;
atomic_t underrun_cnt_dwork;
struct delayed_work dwork;
+
+ bool is_shared;
};
#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
if (phys) {
- if (!sde_enc->hw_pp[i]) {
+ if (!sde_enc->hw_pp[i] && !sde_enc->is_shared) {
SDE_ERROR_ENC(sde_enc,
"invalid pingpong block for the encoder\n");
return;
return 0;
}
+/*
+ * sde_encoder_virt_add_phys_enc_shd - create a shared-display physical
+ * encoder and attach it to the virtual encoder.
+ * @sde_enc: virtual encoder to attach to
+ * @params: physical encoder initialization parameters
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int sde_encoder_virt_add_phys_enc_shd(struct sde_encoder_virt *sde_enc,
+	struct sde_enc_phys_init_params *params)
+{
+	struct sde_encoder_phys *enc = NULL;
+
+	/*
+	 * Exactly one encoder is added here, so only reject when the array
+	 * is already full ("num_phys_encs + 1 >=" would waste the last slot).
+	 */
+	if (sde_enc->num_phys_encs >= ARRAY_SIZE(sde_enc->phys_encs)) {
+		SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
+			sde_enc->num_phys_encs);
+		return -EINVAL;
+	}
+
+	enc = sde_encoder_phys_shd_init(params);
+
+	/*
+	 * The !CONFIG_DRM_SDE_SHD stub returns NULL rather than ERR_PTR();
+	 * catch both so a NULL encoder is never stored in phys_encs[].
+	 */
+	if (IS_ERR_OR_NULL(enc)) {
+		SDE_ERROR_ENC(sde_enc, "failed to init shd enc: %ld\n",
+			enc ? PTR_ERR(enc) : -ENODEV);
+		return enc ? PTR_ERR(enc) : -ENODEV;
+	}
+
+	sde_enc->is_shared = true;
+
+	sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+	++sde_enc->num_phys_encs;
+
+	return 0;
+}
+
static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
struct sde_kms *sde_kms,
struct msm_display_info *disp_info,
SDE_DEBUG("h_tile_instance %d = %d, split_role %d\n",
i, controller_id, phys_params.split_role);
- if (intf_type == INTF_WB) {
+ if (disp_info->capabilities & MSM_DISPLAY_CAP_SHARED) {
+ phys_params.wb_idx = WB_MAX;
+ phys_params.intf_idx = controller_id + INTF_0;
+ } else if (intf_type == INTF_WB) {
phys_params.intf_idx = INTF_MAX;
phys_params.wb_idx = sde_encoder_get_wb(
sde_kms->catalog,
}
if (!ret) {
- if (intf_type == INTF_WB)
+ if (disp_info->capabilities & MSM_DISPLAY_CAP_SHARED) {
+ ret = sde_encoder_virt_add_phys_enc_shd(sde_enc,
+ &phys_params);
+ } else if (intf_type == INTF_WB)
ret = sde_encoder_virt_add_phys_enc_wb(sde_enc,
&phys_params);
else
};
/**
+ * struct sde_encoder_phys_shd - sub-class of sde_encoder_phys to handle shared
+ * display
+ * @base: Baseclass physical encoder structure
+ * @hw_lm: mixer hw block to overwrite base encoder
+ * @hw_ctl: ctl hw block to overwrite base encoder
+ * @irq_idx: IRQ interface lookup index
+ * @irq_cb: interrupt callback
+ * @num_mixers: Number of mixers available in base encoder
+ * @num_ctls: Number of ctls available in base encoder
+ */
+struct sde_encoder_phys_shd {
+ struct sde_encoder_phys base;
+ struct sde_hw_mixer *hw_lm[CRTC_DUAL_MIXERS];
+ struct sde_hw_ctl *hw_ctl[CRTC_DUAL_MIXERS];
+ int irq_idx[INTR_IDX_MAX];
+ struct sde_irq_callback irq_cb[INTR_IDX_MAX];
+ u32 num_mixers;
+ u32 num_ctls;
+};
+
+/**
* struct sde_enc_phys_init_params - initialization parameters for phys encs
* @sde_kms: Pointer to the sde_kms top level
* @parent: Pointer to the containing virtual encoder
}
#endif
+/**
+ * sde_encoder_phys_shd_init - Construct a new shared physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ *
+ * NOTE(review): the !CONFIG_DRM_SDE_SHD stub returns NULL, not ERR_PTR(),
+ * so callers checking only IS_ERR() would treat NULL as success — verify
+ * every caller uses IS_ERR_OR_NULL().
+ */
+#ifdef CONFIG_DRM_SDE_SHD
+struct sde_encoder_phys *sde_encoder_phys_shd_init(
+	struct sde_enc_phys_init_params *p);
+#else
+static inline
+struct sde_encoder_phys *sde_encoder_phys_shd_init(
+	struct sde_enc_phys_init_params *p)
+{
+	return NULL;
+}
+#endif
+
void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
const struct sde_format *format, u32 output_type,
struct sde_rect *roi);
--- /dev/null
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm-shd:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <uapi/drm/sde_drm.h>
+
+#include "sde_encoder_phys.h"
+#include "sde_formats.h"
+#include "sde_hw_top.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
+#include "sde_crtc.h"
+#include "sde_trace.h"
+#include "sde_shd.h"
+#include "sde_plane.h"
+
+#define SHD_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
+
+#define SDE_ERROR_PHYS(p, fmt, ...) SDE_ERROR("enc%d intf%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ ##__VA_ARGS__)
+
+#define SDE_DEBUG_PHYS(p, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ ##__VA_ARGS__)
+
+#define CTL_SSPP_FLUSH_MASK 0xCC183F
+#define CTL_MIXER_FLUSH_MASK 0x1007C0
+
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT(lm) \
+ (0x40 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT2(lm) \
+ (0x70 + (((lm) - LM_0) * 0x004))
+
+#define CTL_MIXER_BORDER_OUT BIT(24)
+
+#define LM_BLEND0_OP 0x00
+
+/* Return the shd sub-class wrapper of a base physical encoder. */
+static inline struct sde_encoder_phys_shd *to_sde_encoder_phys_shd(
+	struct sde_encoder_phys *phys_enc)
+{
+	return container_of(phys_enc, struct sde_encoder_phys_shd, base);
+}
+
+/* Serializes CTL/LM programming across all shared displays on one base. */
+static DEFINE_SPINLOCK(hw_ctl_lock);
+
+/* Cached CTL_LAYER register values and clear-masks for one layer mixer. */
+struct sde_shd_ctl_mixer_cfg {
+	u32 mixercfg;
+	u32 mixercfg_ext;
+	u32 mixercfg_ext2;
+
+	u32 mixercfg_mask;
+	u32 mixercfg_ext_mask;
+	u32 mixercfg_ext2_mask;
+};
+
+/*
+ * Shared-display CTL wrapper: overrides selected ops of the original
+ * hardware CTL (@orig) so that only the blend stages inside @range are
+ * touched, leaving other displays' stages intact.
+ */
+struct sde_shd_hw_ctl {
+	struct sde_hw_ctl base;
+	struct shd_stage_range range;
+	struct sde_hw_ctl *orig;
+	u32 flush_mask;
+	struct sde_shd_ctl_mixer_cfg mixer_cfg[MAX_BLOCKS];
+	struct sde_encoder_phys_shd *shd_enc;
+};
+
+/* Pending blend parameters for one stage; written to HW at flush time. */
+struct sde_shd_mixer_cfg {
+	uint32_t fg_alpha;
+	uint32_t bg_alpha;
+	uint32_t blend_op;
+	bool dirty;
+};
+
+/* Shared-display LM wrapper mirroring sde_shd_hw_ctl for layer mixers. */
+struct sde_shd_hw_mixer {
+	struct sde_hw_mixer base;
+	struct shd_stage_range range;
+	struct sde_hw_mixer *orig;
+	struct sde_shd_mixer_cfg cfg[SDE_STAGE_MAX];
+};
+
+/* A shared display has a single phys encoder, so it is always the master. */
+static bool sde_encoder_phys_shd_is_master(struct sde_encoder_phys *phys_enc)
+{
+	return true;
+}
+
+/*
+ * sde_encoder_phys_shd_vblank_irq - vsync interrupt handler.
+ * @arg: physical encoder (registered as the irq callback argument)
+ * @irq_idx: interrupt lookup index (unused here)
+ *
+ * Forwards the vblank to the virtual encoder and decrements the pending
+ * kickoff count once hardware has consumed this display's flush bits.
+ */
+static void sde_encoder_phys_shd_vblank_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys *phys_enc = arg;
+	struct sde_hw_ctl *hw_ctl;
+	struct sde_shd_hw_ctl *shd_ctl;
+	unsigned long lock_flags;
+	u32 flush_register = ~0;
+	int new_cnt = -1, old_cnt = -1;
+
+	if (!phys_enc)
+		return;
+
+	hw_ctl = phys_enc->hw_ctl;
+	if (!hw_ctl)
+		return;
+
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+
+	old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+	/*
+	 * only decrement the pending flush count if we've actually flushed
+	 * hardware. due to sw irq latency, vblank may have already happened
+	 * so we need to double-check with hw that it accepted the flush bits
+	 */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+
+	/* hw_ctl is non-NULL here; the early return above guarantees it */
+	if (hw_ctl->ops.get_flush_register)
+		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+	shd_ctl = container_of(hw_ctl, struct sde_shd_hw_ctl, base);
+
+	/* only this display's flush bits matter; the CTL is shared */
+	if ((flush_register & shd_ctl->flush_mask) == 0)
+		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+				-1, 0);
+
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+			old_cnt, new_cnt, flush_register);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+/*
+ * _sde_encoder_phys_shd_register_irq - look up and register an interrupt
+ * callback for this encoder's interface.
+ * @phys_enc: physical encoder
+ * @intr_type: hardware interrupt type to look up
+ * @idx: local slot (INTR_IDX_*) for storing the lookup result/callback
+ * @irq_func: callback to invoke from the core irq layer
+ * @irq_name: human-readable name for logging
+ *
+ * On failure the slot's irq_idx is left negative (-EINVAL) so later
+ * unregister calls can detect that registration never succeeded.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int _sde_encoder_phys_shd_register_irq(
+	struct sde_encoder_phys *phys_enc,
+	enum sde_intr_type intr_type, int idx,
+	void (*irq_func)(void *, int), const char *irq_name)
+{
+	struct sde_encoder_phys_shd *shd_enc;
+	int ret = 0;
+
+	shd_enc = to_sde_encoder_phys_shd(phys_enc);
+	shd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, phys_enc->intf_idx);
+
+	if (shd_enc->irq_idx[idx] < 0) {
+		SDE_DEBUG_PHYS(phys_enc,
+			"failed to lookup IRQ index for %s type:%d\n", irq_name,
+			intr_type);
+		return -EINVAL;
+	}
+
+	shd_enc->irq_cb[idx].func = irq_func;
+	shd_enc->irq_cb[idx].arg = phys_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			shd_enc->irq_idx[idx], &shd_enc->irq_cb[idx]);
+	if (ret) {
+		SDE_ERROR_PHYS(phys_enc,
+			"failed to register IRQ callback for %s\n", irq_name);
+		shd_enc->irq_idx[idx] = -EINVAL;
+		return ret;
+	}
+
+	SDE_DEBUG_PHYS(phys_enc, "registered irq %s idx: %d\n",
+			irq_name, shd_enc->irq_idx[idx]);
+
+	return ret;
+}
+
+/*
+ * _sde_encoder_phys_shd_unregister_irq - unregister a previously
+ * registered interrupt callback.
+ * @phys_enc: physical encoder
+ * @idx: local interrupt slot (INTR_IDX_*)
+ *
+ * Return: 0 on success, -EINVAL if the slot was never registered.
+ */
+static int _sde_encoder_phys_shd_unregister_irq(
+	struct sde_encoder_phys *phys_enc, int idx)
+{
+	struct sde_encoder_phys_shd *shd_enc;
+
+	shd_enc = to_sde_encoder_phys_shd(phys_enc);
+
+	/*
+	 * Registration may have failed and left a negative lookup index;
+	 * don't pass it to the core irq layer.
+	 */
+	if (shd_enc->irq_idx[idx] < 0)
+		return -EINVAL;
+
+	sde_core_irq_unregister_callback(phys_enc->sde_kms,
+			shd_enc->irq_idx[idx], &shd_enc->irq_cb[idx]);
+
+	SDE_DEBUG_PHYS(phys_enc, "unregistered %d\n", shd_enc->irq_idx[idx]);
+
+	return 0;
+}
+
+/*
+ * _sde_shd_hw_ctl_clear_blendstages_in_range - compute register masks that
+ * clear only the blend stages owned by this shared display.
+ * @hw_ctl: shared CTL wrapper holding the display's stage range
+ * @lm: layer mixer whose CTL_LAYER registers are inspected
+ *
+ * Reads back CTL_LAYER/_EXT/_EXT2 for @lm and, for every SSPP whose staged
+ * value falls inside (start, end], adds that SSPP's bit-field to the clear
+ * masks stored in hw_ctl->mixer_cfg[lm].  The masks are applied in
+ * _sde_shd_hw_ctl_trigger_flush() so that other displays' stages survive.
+ * NOTE(review): the register encodes stage+1 (see setup_blendstage, which
+ * writes "i + stage_offset + 1"), so the (i > start, i <= end) window is
+ * intentional with that bias — confirm the two stay in sync.
+ */
+static void _sde_shd_hw_ctl_clear_blendstages_in_range(
+	struct sde_shd_hw_ctl *hw_ctl, enum sde_lm lm)
+{
+	struct sde_hw_blk_reg_map *c = &hw_ctl->base.hw;
+	u32 mixercfg, mixercfg_ext;
+	u32 mixercfg_ext2;
+	u32 mask = 0, ext_mask = 0, ext2_mask = 0;
+	u32 start = hw_ctl->range.start + SDE_STAGE_0;
+	u32 end = start + hw_ctl->range.size;
+	u32 i;
+
+	mixercfg = SDE_REG_READ(c, CTL_LAYER(lm));
+	mixercfg_ext = SDE_REG_READ(c, CTL_LAYER_EXT(lm));
+	mixercfg_ext2 = SDE_REG_READ(c, CTL_LAYER_EXT2(lm));
+
+	/* nothing staged on this LM; leave all clear-masks empty */
+	if (!mixercfg && !mixercfg_ext && !mixercfg_ext2)
+		goto end;
+
+	/*
+	 * Per-SSPP stage values are split across CTL_LAYER (3 low bits) and
+	 * CTL_LAYER_EXT (4th bit); cursors and DMA2/3 use 4-bit fields in
+	 * the EXT/EXT2 registers.
+	 */
+	/* SSPP_VIG0 */
+	i = (mixercfg & 0x7) | ((mixercfg_ext & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= 0x7;
+		ext_mask |= 0x1;
+	}
+
+	/* SSPP_VIG1 */
+	i = ((mixercfg >> 3) & 0x7) | (((mixercfg_ext >> 2) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 3);
+		ext_mask |= (0x1 << 2);
+	}
+
+	/* SSPP_VIG2 */
+	i = ((mixercfg >> 6) & 0x7) | (((mixercfg_ext >> 4) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 6);
+		ext_mask |= (0x1 << 4);
+	}
+
+	/* SSPP_RGB0 */
+	i = ((mixercfg >> 9) & 0x7) | (((mixercfg_ext >> 8) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 9);
+		ext_mask |= (0x1 << 8);
+	}
+
+	/* SSPP_RGB1 */
+	i = ((mixercfg >> 12) & 0x7) | (((mixercfg_ext >> 10) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 12);
+		ext_mask |= (0x1 << 10);
+	}
+
+	/* SSPP_RGB2 */
+	i = ((mixercfg >> 15) & 0x7) | (((mixercfg_ext >> 12) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 15);
+		ext_mask |= (0x1 << 12);
+	}
+
+	/* SSPP_DMA0 */
+	i = ((mixercfg >> 18) & 0x7) | (((mixercfg_ext >> 16) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 18);
+		ext_mask |= (0x1 << 16);
+	}
+
+	/* SSPP_DMA1 */
+	i = ((mixercfg >> 21) & 0x7) | (((mixercfg_ext >> 18) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 21);
+		ext_mask |= (0x1 << 18);
+	}
+
+	/* SSPP_VIG3 */
+	i = ((mixercfg >> 26) & 0x7) | (((mixercfg_ext >> 6) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 26);
+		ext_mask |= (0x1 << 6);
+	}
+
+	/* SSPP_RGB3 */
+	i = ((mixercfg >> 29) & 0x7) | (((mixercfg_ext >> 14) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 29);
+		ext_mask |= (0x1 << 14);
+	}
+
+	/* SSPP_CURSOR_0 */
+	i = (mixercfg_ext >> 20) & 0xF;
+	if (i > start && i <= end)
+		ext_mask |= (0xF << 20);
+
+	/* SSPP_CURSOR_1 */
+	i = (mixercfg_ext >> 26) & 0xF;
+	if (i > start && i <= end)
+		ext_mask |= (0xF << 26);
+
+	/* SSPP_DMA2 */
+	i = (mixercfg_ext2 >> 0) & 0xF;
+	if (i > start && i <= end)
+		ext2_mask |= (0xF << 0);
+
+	/* SSPP_DMA3 */
+	i = (mixercfg_ext2 >> 4) & 0xF;
+	if (i > start && i <= end)
+		ext2_mask |= (0xF << 4);
+
+end:
+	hw_ctl->mixer_cfg[lm].mixercfg_mask = mask;
+	hw_ctl->mixer_cfg[lm].mixercfg_ext_mask = ext_mask;
+	hw_ctl->mixer_cfg[lm].mixercfg_ext2_mask = ext2_mask;
+}
+
+/*
+ * _sde_shd_hw_ctl_clear_all_blendstages - CTL op override: clear this
+ * display's blend stages on every mixer owned by the CTL.
+ * @ctx: shared CTL (sde_shd_hw_ctl.base)
+ * @handoff: unused here; kept to match the sde_hw_ctl op signature
+ * @resv_pipes: unused here; kept to match the sde_hw_ctl op signature
+ * @resv_pipes_length: unused here; kept to match the op signature
+ *
+ * Unlike the base implementation this only computes clear-masks for the
+ * display's own stage range; hardware is untouched until flush.
+ */
+static void _sde_shd_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx,
+	bool handoff, const u32 *resv_pipes, u32 resv_pipes_length)
+{
+	struct sde_shd_hw_ctl *hw_ctl;
+	int i;
+
+	if (!ctx)
+		return;
+
+	hw_ctl = container_of(ctx, struct sde_shd_hw_ctl, base);
+
+	for (i = 0; i < ctx->mixer_count; i++) {
+		int mixer_id = ctx->mixer_hw_caps[i].id;
+
+		_sde_shd_hw_ctl_clear_blendstages_in_range(hw_ctl, mixer_id);
+	}
+}
+
+/*
+ * _stage_offset - map a blend stage to its LM_BLENDn_OP register offset.
+ * @ctx: layer mixer
+ * @stage: blend stage index
+ *
+ * Return: register offset from the mixer's sub-block table, or -EINVAL for
+ * SDE_STAGE_BASE or a stage beyond the mixer's capability.
+ */
+static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
+{
+	const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
+
+	if (stage == SDE_STAGE_BASE || stage > sblk->maxblendstages)
+		return -EINVAL;
+
+	return sblk->blendstage_base[stage - SDE_STAGE_0];
+}
+
+/*
+ * _sde_shd_hw_ctl_setup_blendstage - CTL op override: cache CTL_LAYER
+ * values for this display's stage range.
+ * @ctx: shared CTL (sde_shd_hw_ctl.base)
+ * @lm: layer mixer being configured
+ * @stage_cfg: staged pipes per (mixer index, stage); NULL clears the range
+ * @index: mixer index into @stage_cfg
+ * @handoff/@resv_pipes/@resv_pipes_length: unused; kept for op signature
+ *
+ * Translates stage_cfg into CTL_LAYER/_EXT/_EXT2 values offset by the
+ * display's range.start, together with the masks needed to merge them into
+ * the shared registers at flush time.  Nothing is written to hardware here.
+ */
+static void _sde_shd_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
+	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index,
+	bool handoff, const u32 *resv_pipes, u32 resv_pipes_length)
+{
+	struct sde_shd_hw_ctl *hw_ctl;
+	/*
+	 * BUGFIX: mixercfg_ext2 was previously uninitialized; it is only
+	 * OR-ed into (DMA2/DMA3 cases) and then stored unconditionally at
+	 * exit, so stack garbage could reach the register cache.
+	 */
+	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext, full, mixercfg_ext2 = 0;
+	u32 mask = 0, ext_mask = 0, ext2_mask = 0;
+	int i, j;
+	int stages;
+	int stage_offset = 0;
+	int pipes_per_stage;
+
+	if (!ctx)
+		return;
+
+	hw_ctl = container_of(ctx, struct sde_shd_hw_ctl, base);
+
+	if (test_bit(SDE_MIXER_SOURCESPLIT,
+		&ctx->mixer_hw_caps->features))
+		pipes_per_stage = PIPES_PER_STAGE;
+	else
+		pipes_per_stage = 1;
+
+	/* recompute which of this display's stages must be cleared first */
+	_sde_shd_hw_ctl_clear_blendstages_in_range(hw_ctl, lm);
+
+	if (!stage_cfg)
+		goto exit;
+
+	mixercfg = CTL_MIXER_BORDER_OUT;
+	stage_offset = hw_ctl->range.start;
+	stages = hw_ctl->range.size;
+
+	for (i = SDE_STAGE_0; i <= stages; i++) {
+		/* overflow to ext register if 'i + 1 > 7' */
+		mix = (i + stage_offset + 1) & 0x7;
+		ext = (i + stage_offset) >= 7;
+		full = (i + stage_offset + 1) & 0xF;
+
+		for (j = 0 ; j < pipes_per_stage; j++) {
+			switch (stage_cfg->stage[index][i][j]) {
+			case SSPP_VIG0:
+				mixercfg |= mix << 0;
+				mixercfg_ext |= ext << 0;
+				mask |= 0x7 << 0;
+				ext_mask |= 0x1 << 0;
+				break;
+			case SSPP_VIG1:
+				mixercfg |= mix << 3;
+				mixercfg_ext |= ext << 2;
+				mask |= 0x7 << 3;
+				ext_mask |= 0x1 << 2;
+				break;
+			case SSPP_VIG2:
+				mixercfg |= mix << 6;
+				mixercfg_ext |= ext << 4;
+				mask |= 0x7 << 6;
+				ext_mask |= 0x1 << 4;
+				break;
+			case SSPP_VIG3:
+				mixercfg |= mix << 26;
+				mixercfg_ext |= ext << 6;
+				mask |= 0x7 << 26;
+				ext_mask |= 0x1 << 6;
+				break;
+			case SSPP_RGB0:
+				mixercfg |= mix << 9;
+				mixercfg_ext |= ext << 8;
+				mask |= 0x7 << 9;
+				ext_mask |= 0x1 << 8;
+				break;
+			case SSPP_RGB1:
+				mixercfg |= mix << 12;
+				mixercfg_ext |= ext << 10;
+				mask |= 0x7 << 12;
+				ext_mask |= 0x1 << 10;
+				break;
+			case SSPP_RGB2:
+				mixercfg |= mix << 15;
+				mixercfg_ext |= ext << 12;
+				mask |= 0x7 << 15;
+				ext_mask |= 0x1 << 12;
+				break;
+			case SSPP_RGB3:
+				mixercfg |= mix << 29;
+				mixercfg_ext |= ext << 14;
+				mask |= 0x7 << 29;
+				ext_mask |= 0x1 << 14;
+				break;
+			case SSPP_DMA0:
+				mixercfg |= mix << 18;
+				mixercfg_ext |= ext << 16;
+				mask |= 0x7 << 18;
+				ext_mask |= 0x1 << 16;
+				break;
+			case SSPP_DMA1:
+				mixercfg |= mix << 21;
+				mixercfg_ext |= ext << 18;
+				mask |= 0x7 << 21;
+				ext_mask |= 0x1 << 18;
+				break;
+			case SSPP_DMA2:
+				/* (mix | full) == full: 4-bit field in EXT2 */
+				mix |= full;
+				mixercfg_ext2 |= mix << 0;
+				ext2_mask |= 0xF << 0;
+				break;
+			case SSPP_DMA3:
+				mix |= full;
+				mixercfg_ext2 |= mix << 4;
+				ext2_mask |= 0xF << 4;
+				break;
+			case SSPP_CURSOR0:
+				mixercfg_ext |= full << 20;
+				ext_mask |= 0xF << 20;
+				break;
+			case SSPP_CURSOR1:
+				mixercfg_ext |= full << 26;
+				ext_mask |= 0xF << 26;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	hw_ctl->mixer_cfg[lm].mixercfg_mask |= mask;
+	hw_ctl->mixer_cfg[lm].mixercfg_ext_mask |= ext_mask;
+	hw_ctl->mixer_cfg[lm].mixercfg_ext2_mask |= ext2_mask;
+exit:
+	hw_ctl->mixer_cfg[lm].mixercfg = mixercfg;
+	hw_ctl->mixer_cfg[lm].mixercfg_ext = mixercfg_ext;
+	hw_ctl->mixer_cfg[lm].mixercfg_ext2 = mixercfg_ext2;
+}
+
+/*
+ * _sde_shd_hw_ctl_trigger_flush - merge this display's cached mixer config
+ * into the shared CTL_LAYER registers.
+ * @ctx: shared CTL (sde_shd_hw_ctl.base)
+ *
+ * Read-modify-write: clears only the bits in this display's masks and ORs
+ * in its cached values, preserving stages owned by other displays.  Also
+ * latches the SSPP portion of the pending flush mask for the vblank
+ * handler's flush-consumed check.  Caller must hold hw_ctl_lock (see
+ * _sde_shd_trigger_flush).
+ */
+static void _sde_shd_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx)
+{
+	struct sde_shd_hw_ctl *hw_ctl;
+	struct sde_hw_blk_reg_map *c;
+	u32 mixercfg, mixercfg_ext;
+	u32 mixercfg_ext2;
+	int i;
+
+	hw_ctl = container_of(ctx, struct sde_shd_hw_ctl, base);
+
+	hw_ctl->flush_mask = ctx->pending_flush_mask;
+
+	/* only SSPP flush bits are relevant for a shared display */
+	hw_ctl->flush_mask &= CTL_SSPP_FLUSH_MASK;
+
+	c = &ctx->hw;
+
+	for (i = 0; i < ctx->mixer_count; i++) {
+		int lm = ctx->mixer_hw_caps[i].id;
+
+		mixercfg = SDE_REG_READ(c, CTL_LAYER(lm));
+		mixercfg_ext = SDE_REG_READ(c, CTL_LAYER_EXT(lm));
+		mixercfg_ext2 = SDE_REG_READ(c, CTL_LAYER_EXT2(lm));
+
+		mixercfg &= ~hw_ctl->mixer_cfg[lm].mixercfg_mask;
+		mixercfg_ext &= ~hw_ctl->mixer_cfg[lm].mixercfg_ext_mask;
+		mixercfg_ext2 &= ~hw_ctl->mixer_cfg[lm].mixercfg_ext2_mask;
+
+		mixercfg |= hw_ctl->mixer_cfg[lm].mixercfg;
+		mixercfg_ext |= hw_ctl->mixer_cfg[lm].mixercfg_ext;
+		mixercfg_ext2 |= hw_ctl->mixer_cfg[lm].mixercfg_ext2;
+
+		SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+		SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+		SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+	}
+}
+
+/*
+ * _sde_shd_setup_blend_config - LM op override: cache a blend config for
+ * deferred programming at flush time.
+ * @ctx: shared mixer (sde_shd_hw_mixer.base)
+ * @stage: blend stage relative to this display (offset by range.start)
+ * @fg_alpha: foreground alpha
+ * @bg_alpha: background alpha
+ * @blend_op: blend operation
+ *
+ * The write to hardware happens under hw_ctl_lock in _sde_shd_flush_hw_lm.
+ * NOTE(review): assumes stage + range.start < SDE_STAGE_MAX (size of the
+ * cfg[] array) — confirm the caller clamps the stage range.
+ */
+static void _sde_shd_setup_blend_config(struct sde_hw_mixer *ctx,
+	uint32_t stage,
+	uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op)
+{
+	struct sde_shd_hw_mixer *hw_lm;
+	struct sde_shd_mixer_cfg *cfg;
+
+	if (!ctx)
+		return;
+
+	hw_lm = container_of(ctx, struct sde_shd_hw_mixer, base);
+
+	cfg = &hw_lm->cfg[stage + hw_lm->range.start];
+
+	cfg->fg_alpha = fg_alpha;
+	cfg->bg_alpha = bg_alpha;
+	cfg->blend_op = blend_op;
+	cfg->dirty = true;
+}
+
+/*
+ * _sde_shd_setup_mixer_out - LM op override: intentionally a no-op.
+ * The mixer output dimensions belong to the base display and must not be
+ * reprogrammed by a shared (logical) display.
+ */
+static void _sde_shd_setup_mixer_out(struct sde_hw_mixer *ctx,
+	struct sde_hw_mixer_cfg *cfg)
+{
+	/* do nothing */
+}
+
+/*
+ * _sde_shd_flush_hw_lm - program this display's blend stages into the
+ * layer mixer hardware.
+ * @ctx: shared mixer (sde_shd_hw_mixer.base)
+ *
+ * For each stage in the display's range: clear the blend-enable bit
+ * (BIT(16)) of LM_BLENDn_OP, then apply any pending blend config through
+ * the original mixer ops.  Called under hw_ctl_lock.
+ */
+static void _sde_shd_flush_hw_lm(struct sde_hw_mixer *ctx)
+{
+	struct sde_shd_hw_mixer *hw_lm;
+	struct sde_hw_blk_reg_map *c;
+	int stage_off, i;
+	u32 val;
+	int start, end;
+
+	if (!ctx)
+		return;
+
+	/* only take the register map after the NULL check above
+	 * (previously &ctx->hw was computed before validating ctx)
+	 */
+	c = &ctx->hw;
+	hw_lm = container_of(ctx, struct sde_shd_hw_mixer, base);
+
+	start = SDE_STAGE_0 + hw_lm->range.start;
+	end = start + hw_lm->range.size;
+	for (i = start; i < end; i++) {
+		stage_off = _stage_offset(ctx, i);
+		if (WARN_ON(stage_off < 0))
+			return;
+
+		/* clear the blend-enable bit for this stage */
+		val = SDE_REG_READ(c, LM_BLEND0_OP + stage_off);
+		val &= ~BIT(16);
+		SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
+
+		if (hw_lm->cfg[i].dirty) {
+			hw_lm->orig->ops.setup_blend_config(ctx, i,
+				hw_lm->cfg[i].fg_alpha,
+				hw_lm->cfg[i].bg_alpha,
+				hw_lm->cfg[i].blend_op);
+			hw_lm->cfg[i].dirty = false;
+		}
+	}
+}
+
+/*
+ * _sde_shd_trigger_flush - serialized flush for a shared CTL.
+ * @ctx: shared CTL (sde_shd_hw_ctl.base)
+ *
+ * Merges this display's mixer config into the shared CTL_LAYER registers,
+ * programs its layer mixers, then triggers the real flush via the original
+ * CTL ops — all under a global spinlock so concurrent shared displays do
+ * not interleave their read-modify-write sequences.
+ */
+static void _sde_shd_trigger_flush(struct sde_hw_ctl *ctx)
+{
+	struct sde_shd_hw_ctl *hw_ctl;
+	struct sde_encoder_phys_shd *shd_enc;
+	unsigned long lock_flags;
+	int i;
+
+	/* note: the unused local register-map pointer was removed */
+	hw_ctl = container_of(ctx, struct sde_shd_hw_ctl, base);
+	shd_enc = hw_ctl->shd_enc;
+
+	spin_lock_irqsave(&hw_ctl_lock, lock_flags);
+
+	_sde_shd_hw_ctl_trigger_flush(ctx);
+
+	for (i = 0; i < shd_enc->num_mixers; i++)
+		_sde_shd_flush_hw_lm(shd_enc->hw_lm[i]);
+
+	hw_ctl->orig->ops.trigger_flush(ctx);
+
+	spin_unlock_irqrestore(&hw_ctl_lock, lock_flags);
+}
+
+/*
+ * _sde_encoder_phys_shd_rm_reserve - borrow the base display's CTL and LM
+ * blocks for this shared encoder.
+ * @phys_enc: shared physical encoder
+ * @display: shared display descriptor (holds base display and stage range)
+ *
+ * Iterates the base encoder's reserved LM/CTL blocks, copies each into the
+ * local wrapper, overrides the ops that must respect the display's stage
+ * range, and registers the wrapper with the resource manager under this
+ * encoder's id.
+ * NOTE(review): sde_rm_get_hw() failure just breaks the loop, possibly
+ * leaving num_ctls < num_mixers; callers appear to index hw_ctl/hw_lm by
+ * these counts — confirm the mismatch case is handled.
+ */
+static void _sde_encoder_phys_shd_rm_reserve(
+	struct sde_encoder_phys *phys_enc,
+	struct shd_display *display)
+{
+	struct sde_encoder_phys_shd *shd_enc;
+	struct sde_rm *rm;
+	struct sde_rm_hw_iter ctl_iter, lm_iter;
+	struct drm_encoder *encoder;
+	struct sde_shd_hw_ctl *hw_ctl;
+	struct sde_shd_hw_mixer *hw_lm;
+	int i;
+
+	encoder = display->base->encoder;
+	rm = &phys_enc->sde_kms->rm;
+	shd_enc = to_sde_encoder_phys_shd(phys_enc);
+
+	sde_rm_init_hw_iter(&ctl_iter, encoder->base.id, SDE_HW_BLK_CTL);
+	sde_rm_init_hw_iter(&lm_iter, encoder->base.id, SDE_HW_BLK_LM);
+
+	shd_enc->num_mixers = 0;
+	shd_enc->num_ctls = 0;
+
+	for (i = 0; i < CRTC_DUAL_MIXERS; i++) {
+		/* reserve layer mixer */
+		if (!sde_rm_get_hw(rm, &lm_iter))
+			break;
+		hw_lm = container_of(shd_enc->hw_lm[i],
+				struct sde_shd_hw_mixer, base);
+		/* shallow-copy the base LM, then override range-aware ops */
+		hw_lm->base = *(struct sde_hw_mixer *)lm_iter.hw;
+		hw_lm->range = display->stage_range;
+		hw_lm->orig = lm_iter.hw;
+		hw_lm->base.ops.setup_blend_config =
+			_sde_shd_setup_blend_config;
+		hw_lm->base.ops.setup_mixer_out =
+			_sde_shd_setup_mixer_out;
+
+		SHD_DEBUG("reserve LM%d %pK from enc %d to %d\n",
+			hw_lm->base.idx, hw_lm,
+			DRMID(encoder),
+			DRMID(phys_enc->parent));
+
+		sde_rm_ext_blk_create_reserve(rm,
+			SDE_HW_BLK_LM, 0,
+			&hw_lm->base, phys_enc->parent);
+		shd_enc->num_mixers++;
+
+		/* reserve ctl */
+		if (!sde_rm_get_hw(rm, &ctl_iter))
+			break;
+		hw_ctl = container_of(shd_enc->hw_ctl[i],
+				struct sde_shd_hw_ctl, base);
+		hw_ctl->base = *(struct sde_hw_ctl *)ctl_iter.hw;
+		hw_ctl->shd_enc = shd_enc;
+		hw_ctl->range = display->stage_range;
+		hw_ctl->orig = ctl_iter.hw;
+		hw_ctl->base.ops.clear_all_blendstages =
+			_sde_shd_hw_ctl_clear_all_blendstages;
+		hw_ctl->base.ops.setup_blendstage =
+			_sde_shd_hw_ctl_setup_blendstage;
+		hw_ctl->base.ops.trigger_flush =
+			_sde_shd_trigger_flush;
+
+		SHD_DEBUG("reserve CTL%d %pK from enc %d to %d\n",
+			hw_ctl->base.idx, hw_ctl,
+			DRMID(encoder),
+			DRMID(phys_enc->parent));
+
+		sde_rm_ext_blk_create_reserve(rm,
+			SDE_HW_BLK_CTL, 0,
+			&hw_ctl->base, phys_enc->parent);
+		shd_enc->num_ctls++;
+	}
+}
+
+/*
+ * _sde_encoder_phys_shd_rm_release - return the borrowed CTL/LM wrapper
+ * blocks to the resource manager.
+ * @phys_enc: shared physical encoder
+ * @display: shared display descriptor (currently unused here)
+ */
+static void _sde_encoder_phys_shd_rm_release(
+	struct sde_encoder_phys *phys_enc,
+	struct shd_display *display)
+{
+	struct sde_rm *rm;
+
+	rm = &phys_enc->sde_kms->rm;
+
+	sde_rm_ext_blk_destroy(rm, phys_enc->parent);
+}
+
+/*
+ * sde_encoder_phys_shd_mode_set - mode-set hook for the shared encoder.
+ * @phys_enc: physical encoder
+ * @mode: requested display mode
+ * @adj_mode: adjusted mode, cached for later use
+ *
+ * Reserves wrapper CTL/LM blocks from the base display, then looks up the
+ * CTL now registered under this encoder's id.
+ * NOTE(review): if sde_rm_get_hw() returns false, phys_enc->hw_ctl keeps
+ * whatever value it had from a previous mode-set before the
+ * IS_ERR_OR_NULL() check — confirm a stale pointer cannot slip through.
+ */
+static void sde_encoder_phys_shd_mode_set(
+	struct sde_encoder_phys *phys_enc,
+	struct drm_display_mode *mode,
+	struct drm_display_mode *adj_mode)
+{
+	struct drm_connector *connector;
+	struct sde_connector *sde_conn;
+	struct shd_display *display;
+	struct drm_encoder *encoder;
+	struct sde_rm_hw_iter iter;
+	struct sde_rm *rm;
+
+	SHD_DEBUG("%d\n", phys_enc->parent->base.id);
+
+	phys_enc->cached_mode = *adj_mode;
+
+	connector = phys_enc->connector;
+	if (!connector || connector->encoder != phys_enc->parent) {
+		SDE_ERROR("failed to find connector\n");
+		return;
+	}
+
+	sde_conn = to_sde_connector(connector);
+	display = sde_conn->display;
+	encoder = display->base->encoder;
+
+	_sde_encoder_phys_shd_rm_reserve(phys_enc, display);
+
+	rm = &phys_enc->sde_kms->rm;
+
+	sde_rm_init_hw_iter(&iter, DRMID(phys_enc->parent), SDE_HW_BLK_CTL);
+	if (sde_rm_get_hw(rm, &iter))
+		phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		SHD_DEBUG("failed to init ctl, %ld\n",
+			PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+}
+
+/*
+ * _sde_encoder_phys_shd_wait_for_vblank - wait for the pending kickoff to
+ * be consumed by hardware.
+ * @phys_enc: physical encoder
+ * @notify: if true, report frame-done (or frame-error) to the parent
+ *
+ * On timeout, the raw vsync irq status is checked: if it fired but the sw
+ * handler lagged, the vblank handler is invoked manually and the wait is
+ * treated as success; otherwise a real timeout is reported.
+ *
+ * Return: 0 on success, -EWOULDBLOCK if not enabled, -ETIMEDOUT on timeout.
+ */
+static int _sde_encoder_phys_shd_wait_for_vblank(
+	struct sde_encoder_phys *phys_enc, bool notify)
+{
+	struct sde_encoder_phys_shd *shd_enc;
+	u32 irq_status;
+	int ret = 0;
+
+	if (!phys_enc) {
+		pr_err("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	if (phys_enc->enable_state != SDE_ENC_ENABLED) {
+		SDE_ERROR("encoder not enabled\n");
+		return -EWOULDBLOCK;
+	}
+
+	shd_enc = to_sde_encoder_phys_shd(phys_enc);
+
+	/* Wait for kickoff to complete */
+	ret = sde_encoder_helper_wait_event_timeout(
+			DRMID(phys_enc->parent),
+			phys_enc->intf_idx - INTF_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_kickoff_cnt,
+			KICKOFF_TIMEOUT_MS);
+
+	if (ret <= 0) {
+		/* distinguish "irq fired but handler lagged" from timeout */
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				INTR_IDX_VSYNC, true);
+		if (irq_status) {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->intf_idx - INTF_0);
+			SDE_DEBUG_PHYS(phys_enc, "done, irq not triggered\n");
+			if (notify && phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc,
+					SDE_ENCODER_FRAME_EVENT_DONE);
+			sde_encoder_phys_shd_vblank_irq(phys_enc,
+					INTR_IDX_VSYNC);
+			ret = 0;
+		} else {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->intf_idx - INTF_0);
+			SDE_ERROR_PHYS(phys_enc, "kickoff timed out\n");
+			if (notify && phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc,
+					SDE_ENCODER_FRAME_EVENT_ERROR);
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		if (notify && phys_enc->parent_ops.handle_frame_done)
+			phys_enc->parent_ops.handle_frame_done(
+				phys_enc->parent, phys_enc,
+				SDE_ENCODER_FRAME_EVENT_DONE);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/* Op hook: wait for vblank and always notify frame-done to the parent. */
+static int sde_encoder_phys_shd_wait_for_vblank(
+	struct sde_encoder_phys *phys_enc)
+{
+	return _sde_encoder_phys_shd_wait_for_vblank(phys_enc, true);
+}
+
+/*
+ * sde_encoder_phys_shd_handle_post_kickoff - complete the ENABLING ->
+ * ENABLED transition after the first kickoff.
+ * @phys_enc: physical encoder
+ *
+ * NOTE(review): every sibling op in this file is static; this one is not —
+ * confirm whether external linkage is intended or an oversight.
+ */
+void sde_encoder_phys_shd_handle_post_kickoff(
+	struct sde_encoder_phys *phys_enc)
+{
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	if (phys_enc->enable_state == SDE_ENC_ENABLING) {
+		SDE_EVT32(DRMID(phys_enc->parent));
+		phys_enc->enable_state = SDE_ENC_ENABLED;
+	}
+}
+
+/*
+ * sde_encoder_phys_shd_control_vblank_irq - refcounted vsync irq control.
+ * @phys_enc: physical encoder
+ * @enable: true to take a vblank reference, false to drop one
+ *
+ * Registers the vsync callback on the first reference and unregisters it
+ * when the last reference is dropped.
+ * NOTE(review): on registration failure the refcount is not rolled back —
+ * confirm whether a failed enable should decrement vblank_refcount.
+ *
+ * Return: 0 on success, negative error code on register/unregister failure.
+ */
+static int sde_encoder_phys_shd_control_vblank_irq(
+	struct sde_encoder_phys *phys_enc,
+	bool enable)
+{
+	int ret = 0;
+	struct sde_encoder_phys_shd *shd_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	shd_enc = to_sde_encoder_phys_shd(phys_enc);
+
+	SHD_DEBUG("[%pS] %d enable=%d/%d\n",
+		__builtin_return_address(0), DRMID(phys_enc->parent),
+		enable, atomic_read(&phys_enc->vblank_refcount));
+
+	SDE_EVT32(DRMID(phys_enc->parent), enable,
+			atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = _sde_encoder_phys_shd_register_irq(phys_enc,
+			SDE_IRQ_TYPE_INTF_VSYNC,
+			INTR_IDX_VSYNC,
+			sde_encoder_phys_shd_vblank_irq, "vsync_irq");
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = _sde_encoder_phys_shd_unregister_irq(phys_enc,
+			INTR_IDX_VSYNC);
+
+	if (ret)
+		SHD_DEBUG("control vblank irq error %d, enable %d\n",
+			ret, enable);
+
+	return ret;
+}
+
+/*
+ * enable callback: attach the vblank IRQ and move the encoder to ENABLING.
+ * Fix: validate phys_enc/parent before any dereference - the debug print
+ * previously read phys_enc->parent->base.id ahead of the NULL checks.
+ */
+static void sde_encoder_phys_shd_enable(struct sde_encoder_phys *phys_enc)
+{
+ struct drm_connector *connector;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) {
+  SDE_ERROR("invalid drm device\n");
+  return;
+ }
+
+ SHD_DEBUG("%d\n", phys_enc->parent->base.id);
+
+ connector = phys_enc->connector;
+ if (!connector || connector->encoder != phys_enc->parent) {
+  SDE_ERROR("failed to find connector\n");
+  return;
+ }
+
+ sde_encoder_phys_shd_control_vblank_irq(phys_enc, true);
+
+ if (phys_enc->enable_state == SDE_ENC_DISABLED)
+  phys_enc->enable_state = SDE_ENC_ENABLING;
+}
+
+/*
+ * disable callback: clear this display's blend stages from the shared CTL,
+ * flush, wait one vblank, then drop the vblank IRQ and release RM blocks.
+ * Fix: validate phys_enc before any dereference - the debug print
+ * previously read phys_enc->parent->base.id ahead of the NULL checks.
+ */
+static void sde_encoder_phys_shd_disable(struct sde_encoder_phys *phys_enc)
+{
+ struct sde_connector *sde_conn;
+ struct shd_display *display;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+   !phys_enc->parent->dev->dev_private) {
+  SDE_ERROR("invalid encoder/device\n");
+  return;
+ }
+
+ SHD_DEBUG("%d\n", phys_enc->parent->base.id);
+
+ if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+  SDE_ERROR("already disabled\n");
+  return;
+ }
+
+ _sde_shd_hw_ctl_clear_all_blendstages(phys_enc->hw_ctl,
+  phys_enc->sde_kms->splash_info.handoff,
+  phys_enc->sde_kms->splash_info.reserved_pipe_info,
+  MAX_BLOCKS);
+
+ _sde_shd_trigger_flush(phys_enc->hw_ctl);
+
+ /* wait without notifying frame-done to the parent encoder */
+ _sde_encoder_phys_shd_wait_for_vblank(phys_enc, false);
+
+ sde_encoder_phys_shd_control_vblank_irq(phys_enc, false);
+
+ phys_enc->enable_state = SDE_ENC_DISABLED;
+
+ if (!phys_enc->connector)
+  return;
+
+ sde_conn = to_sde_connector(phys_enc->connector);
+ display = sde_conn->display;
+
+ _sde_encoder_phys_shd_rm_release(phys_enc, display);
+}
+
+/*
+ * destroy callback: free the encoder and its per-mixer CTL/LM wrappers.
+ * Fixes: container_of was computed before the NULL check, and the
+ * hw_ctl[]/hw_lm[] allocations from sde_encoder_phys_shd_init() were
+ * leaked (only the fail_ctl path freed them).
+ */
+static void sde_encoder_phys_shd_destroy(struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_shd *shd_enc;
+ int i;
+
+ if (!phys_enc)
+  return;
+
+ shd_enc = to_sde_encoder_phys_shd(phys_enc);
+
+ /* mirrors the fail_ctl cleanup in sde_encoder_phys_shd_init() */
+ for (i = 0; i < CRTC_DUAL_MIXERS; i++) {
+  kfree(shd_enc->hw_ctl[i]);
+  kfree(shd_enc->hw_lm[i]);
+ }
+
+ kfree(shd_enc);
+}
+
+/**
+ * sde_encoder_phys_shd_init_ops - initialize shared display encoder ops
+ * @ops: Pointer to encoder operation table
+ */
+static void sde_encoder_phys_shd_init_ops(struct sde_encoder_phys_ops *ops)
+{
+ ops->is_master = sde_encoder_phys_shd_is_master;
+ ops->mode_set = sde_encoder_phys_shd_mode_set;
+ ops->enable = sde_encoder_phys_shd_enable;
+ ops->disable = sde_encoder_phys_shd_disable;
+ ops->destroy = sde_encoder_phys_shd_destroy;
+ ops->control_vblank_irq = sde_encoder_phys_shd_control_vblank_irq;
+ ops->wait_for_commit_done = sde_encoder_phys_shd_wait_for_vblank;
+ ops->handle_post_kickoff = sde_encoder_phys_shd_handle_post_kickoff;
+}
+
+/**
+ * sde_encoder_phys_shd_init - initialize a shared display physical encoder
+ * @p: Pointer to init params
+ *
+ * Allocates the phys encoder plus one shadow CTL/LM wrapper pair per
+ * possible mixer. Returns ERR_PTR() on failure.
+ */
+struct sde_encoder_phys *sde_encoder_phys_shd_init(
+ struct sde_enc_phys_init_params *p)
+{
+ struct sde_encoder_phys *phys_enc;
+ struct sde_encoder_phys_shd *shd_enc;
+ struct sde_shd_hw_ctl *hw_ctl;
+ struct sde_shd_hw_mixer *hw_lm;
+ int ret = 0, i;
+
+ SHD_DEBUG("\n");
+
+ if (!p || !p->parent) {
+  SDE_ERROR("invalid params\n");
+  ret = -EINVAL;
+  goto fail_alloc;
+ }
+
+ shd_enc = kzalloc(sizeof(*shd_enc), GFP_KERNEL);
+ if (!shd_enc) {
+  ret = -ENOMEM;
+  goto fail_alloc;
+ }
+
+ /* shadow CTL/LM wrappers, one pair per possible mixer */
+ for (i = 0; i < CRTC_DUAL_MIXERS; i++) {
+  hw_ctl = kzalloc(sizeof(*hw_ctl), GFP_KERNEL);
+  if (!hw_ctl) {
+   ret = -ENOMEM;
+   goto fail_ctl;
+  }
+  shd_enc->hw_ctl[i] = &hw_ctl->base;
+
+  hw_lm = kzalloc(sizeof(*hw_lm), GFP_KERNEL);
+  if (!hw_lm) {
+   ret = -ENOMEM;
+   goto fail_ctl;
+  }
+  shd_enc->hw_lm[i] = &hw_lm->base;
+ }
+
+ phys_enc = &shd_enc->base;
+
+ sde_encoder_phys_shd_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->sde_kms = p->sde_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_NONE;
+ phys_enc->intf_idx = p->intf_idx;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ for (i = 0; i < INTR_IDX_MAX; i++)
+  INIT_LIST_HEAD(&shd_enc->irq_cb[i].list);
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ phys_enc->enable_state = SDE_ENC_DISABLED;
+
+ return phys_enc;
+
+fail_ctl:
+ /* shd_enc is zeroed, so unallocated slots are NULL; kfree(NULL) is ok */
+ for (i = 0; i < CRTC_DUAL_MIXERS; i++) {
+  kfree(shd_enc->hw_ctl[i]);
+  kfree(shd_enc->hw_lm[i]);
+ }
+ kfree(shd_enc);
+fail_alloc:
+ return ERR_PTR(ret);
+}
+
#include "dsi_drm.h"
#include "sde_wb.h"
#include "sde_hdmi.h"
+#include "sde_shd.h"
#include "sde_kms.h"
#include "sde_core_irq.h"
sde_power_resource_enable(&priv->phandle,
sde_kms->core_client, true);
+
+ shd_display_prepare_commit(sde_kms, state);
}
static void sde_kms_commit(struct msm_kms *kms,
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
sde_crtc_complete_commit(crtc, old_crtc_state);
+
+ shd_display_complete_commit(sde_kms, old_state);
+
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
sde_kms->hdmi_display_count);
}
+ /* shd */
+ sde_kms->shd_displays = NULL;
+ sde_kms->shd_display_count = shd_display_get_num_of_displays();
+ if (sde_kms->shd_display_count) {
+ sde_kms->shd_displays = kcalloc(sde_kms->shd_display_count,
+ sizeof(void *), GFP_KERNEL);
+ if (!sde_kms->shd_displays)
+ goto exit_deinit_shd;
+ sde_kms->shd_display_count =
+ shd_display_get_displays(sde_kms->shd_displays,
+ sde_kms->shd_display_count);
+ }
+
return 0;
+exit_deinit_shd:
+ kfree(sde_kms->shd_displays);
+ sde_kms->shd_display_count = 0;
+ sde_kms->shd_displays = NULL;
exit_deinit_hdmi:
sde_kms->hdmi_display_count = 0;
sde_kms->hdmi_displays = NULL;
.get_csc_type = sde_hdmi_get_csc_type,
.set_topology_ctl = sde_hdmi_set_top_ctl,
};
+ static const struct sde_connector_ops shd_ops = {
+ .post_init = shd_connector_post_init,
+ .detect = shd_connector_detect,
+ .get_modes = shd_connector_get_modes,
+ .mode_valid = shd_connector_mode_valid,
+ .get_info = shd_connector_get_info,
+ };
struct msm_display_info info = {0};
struct drm_encoder *encoder;
void *display, *connector;
max_encoders = sde_kms->dsi_display_count +
sde_kms->wb_display_count +
- sde_kms->hdmi_display_count;
+ sde_kms->hdmi_display_count +
+ sde_kms->shd_display_count;
if (max_encoders > ARRAY_SIZE(priv->encoders)) {
max_encoders = ARRAY_SIZE(priv->encoders);
}
}
+ /* shd */
+ for (i = 0; i < sde_kms->shd_display_count &&
+ priv->num_encoders < max_encoders; ++i) {
+ display = sde_kms->shd_displays[i];
+ encoder = NULL;
+
+ memset(&info, 0x0, sizeof(info));
+ rc = shd_connector_get_info(&info, display);
+ if (rc) {
+ SDE_ERROR("shd get_info %d failed\n", i);
+ continue;
+ }
+
+ encoder = sde_encoder_init(dev, &info);
+ if (IS_ERR_OR_NULL(encoder)) {
+ SDE_ERROR("shd encoder init failed %d\n", i);
+ continue;
+ }
+
+ rc = shd_drm_bridge_init(display, encoder);
+ if (rc) {
+ SDE_ERROR("shd bridge %d init failed, %d\n", i, rc);
+ sde_encoder_destroy(encoder);
+ continue;
+ }
+
+ connector = sde_connector_init(dev,
+ encoder,
+ NULL,
+ display,
+ &shd_ops,
+ DRM_CONNECTOR_POLL_HPD,
+ info.intf_type);
+ if (connector) {
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
+ } else {
+ SDE_ERROR("shd %d connector init failed\n", i);
+ shd_drm_bridge_deinit(display);
+ sde_encoder_destroy(encoder);
+ }
+ }
return 0;
}
*/
dev->vblank_disable_allowed = true;
+ shd_display_post_init(sde_kms);
+
return 0;
}
bool has_danger_ctrl;
void **hdmi_displays;
int hdmi_display_count;
+ int shd_display_count;
+ void **shd_displays;
/* splash handoff structure */
struct sde_splash_info splash_info;
uint32_t nplanes, src_flags = 0x0;
struct sde_plane *psde;
struct sde_plane_state *pstate;
+ struct sde_crtc_state *cstate;
const struct sde_format *fmt;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
src.x += src.w * pp->index;
dst.x += dst.w * pp->index;
}
+
+ /* add extra offset for shared display */
+ if (crtc->state) {
+ cstate = to_sde_crtc_state(crtc->state);
+ if (cstate->is_shared) {
+ dst.x += cstate->shared_roi.x;
+ dst.y += cstate->shared_roi.y;
+ }
+ }
+
pp->pipe_cfg.src_rect = src;
pp->pipe_cfg.dst_rect = dst;
int i, rc = 0;
if (!reqs->num_lm) {
- SDE_ERROR("invalid number of lm: %d\n", reqs->num_lm);
+ SDE_DEBUG("invalid no of lm %d\n", reqs->num_lm);
return -EINVAL;
}
struct sde_rm_requirements reqs;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
+ struct sde_connector *sde_conn;
int ret;
if (!rm || !enc || !crtc_state || !conn_state) {
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
+ sde_conn = to_sde_connector(conn_state->connector);
+ if (sde_conn->is_shared)
+ return 0;
+
SDE_DEBUG("reserving hw for conn %d enc %d crtc %d test_only %d\n",
conn_state->connector->base.id, enc->base.id,
crtc_state->crtc->base.id, test_only);
return ret;
}
+/*
+ * Register an externally-created HW block with the resource manager and
+ * attach it to the encoder's reservation (creating one if needed).
+ * NOTE(review): hw ownership is asymmetric - it is destroyed here only on
+ * blk allocation failure; on the -EINVAL paths the caller keeps ownership.
+ */
+int sde_rm_ext_blk_create_reserve(struct sde_rm *rm,
+ enum sde_hw_blk_type type,
+ uint32_t id,
+ void *hw,
+ struct drm_encoder *enc)
+{
+ struct sde_rm_hw_blk *blk;
+ struct sde_rm_rsvp *rsvp;
+ int ret = 0;
+
+ if (!rm || !hw || !enc) {
+  SDE_ERROR("invalid parameters\n");
+  return -EINVAL;
+ }
+
+ if (type >= SDE_HW_BLK_MAX) {
+  SDE_ERROR("invalid HW type\n");
+  return -EINVAL;
+ }
+
+ blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+ if (!blk) {
+  _sde_rm_hw_destroy(type, hw);
+  return -ENOMEM;
+ }
+
+ mutex_lock(&rm->rm_lock);
+
+ /* reuse an existing reservation for this encoder when present */
+ rsvp = _sde_rm_get_rsvp(rm, enc);
+ if (!rsvp) {
+  rsvp = kzalloc(sizeof(*rsvp), GFP_KERNEL);
+  if (!rsvp) {
+   ret = -ENOMEM;
+   kfree(blk);
+   goto end;
+  }
+
+  rsvp->seq = ++rm->rsvp_next_seq;
+  rsvp->enc_id = enc->base.id;
+  list_add_tail(&rsvp->list, &rm->rsvps);
+
+  SDE_DEBUG("create rsvp %d for enc %d\n",
+   rsvp->seq, rsvp->enc_id);
+ }
+
+ blk->type = type;
+ blk->id = id;
+ blk->hw = hw;
+ blk->rsvp = rsvp;
+ list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+ SDE_DEBUG("create blk %d %d for rsvp %d enc %d\n", blk->type, blk->id,
+  rsvp->seq, rsvp->enc_id);
+
+end:
+ mutex_unlock(&rm->rm_lock);
+ return ret;
+}
+
+/*
+ * Release every external HW block attached to the encoder's reservation,
+ * then the reservation itself. The blk wrappers are freed; blk->hw is not
+ * freed here - confirm the caller owns and releases it.
+ */
+int sde_rm_ext_blk_destroy(struct sde_rm *rm,
+ struct drm_encoder *enc)
+{
+ struct sde_rm_hw_blk *blk = NULL, *p;
+ struct sde_rm_rsvp *rsvp;
+ enum sde_hw_blk_type type;
+ int ret = 0;
+
+ if (!rm || !enc) {
+  SDE_ERROR("invalid parameters\n");
+  return -EINVAL;
+ }
+
+ mutex_lock(&rm->rm_lock);
+
+ rsvp = _sde_rm_get_rsvp(rm, enc);
+ if (!rsvp) {
+  ret = -ENOENT;
+  SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+  goto end;
+ }
+
+ for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+  list_for_each_entry_safe(blk, p, &rm->hw_blks[type], list) {
+   if (blk->rsvp == rsvp) {
+    list_del(&blk->list);
+    SDE_DEBUG("del blk %d %d from rsvp %d enc %d\n",
+     blk->type, blk->id,
+     rsvp->seq, rsvp->enc_id);
+    kfree(blk);
+   }
+  }
+ }
+
+ SDE_DEBUG("del rsvp %d\n", rsvp->seq);
+ list_del(&rsvp->list);
+ kfree(rsvp);
+end:
+ mutex_unlock(&rm->rm_lock);
+ return ret;
+}
+
static int _sde_rm_get_ctl_lm_for_splash(struct sde_hw_ctl *ctl,
int max_lm_cnt, u8 lm_cnt, u8 *lm_ids,
struct splash_ctl_top *top, int index)
void *sinfo,
struct sde_mdss_cfg *cat);
+/**
+ * sde_rm_ext_blk_create_reserve - Create external HW blocks
+ * in resource manager and reserve for specific encoder.
+ * @rm: SDE Resource Manager handle
+ * @type: external HW block type
+ * @id: external HW block id
+ * @hw: external HW block
+ * @enc: DRM Encoder handle
+ * Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_ext_blk_create_reserve(struct sde_rm *rm,
+ enum sde_hw_blk_type type,
+ uint32_t id,
+ void *hw,
+ struct drm_encoder *enc);
+
+/**
+ * sde_rm_ext_blk_destroy - Given the encoder for the display chain, release
+ * external HW blocks created for that.
+ * @rm: SDE Resource Manager handle
+ * @enc: DRM Encoder handle
+ * Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_ext_blk_destroy(struct sde_rm *rm,
+ struct drm_encoder *enc);
+
#endif /* __SDE_RM_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm-shd] %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/component.h>
+#include <linux/of_irq.h>
+#include "sde_connector.h"
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "sde_connector.h"
+#include "sde_encoder.h"
+#include "sde_crtc.h"
+#include "sde_shd.h"
+
+/* debug logging helper for the shared display driver */
+#define SHD_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
+
+/* global list of all shared-display base devices */
+static LIST_HEAD(g_base_list);
+
+static const struct of_device_id shd_dt_match[] = {
+ {.compatible = "qcom,shared-display"},
+ {}
+};
+
+/* drm_bridge wrapper linking a bridge back to its shared display */
+struct shd_bridge {
+ struct drm_bridge base;
+ struct shd_display *display;
+};
+
+/* Count all shared displays registered under every base device. */
+int shd_display_get_num_of_displays(void)
+{
+ struct shd_display_base *base;
+ struct shd_display *disp;
+ int count = 0;
+
+ list_for_each_entry(base, &g_base_list, head)
+  list_for_each_entry(disp, &base->disp_list, head)
+   count++;
+
+ return count;
+}
+
+/**
+ * shd_display_get_displays - fill an array with all shared displays
+ * @displays: output array of shd_display pointers
+ * @count: capacity of @displays
+ *
+ * Fix: the @count parameter was ignored, so the caller's array could be
+ * overrun if the display list grew after it was sized.
+ *
+ * Return: number of entries written, never more than @count.
+ */
+int shd_display_get_displays(void **displays, int count)
+{
+ int display_num = 0;
+ struct shd_display *disp;
+ struct shd_display_base *base;
+
+ if (!displays || count <= 0)
+  return 0;
+
+ list_for_each_entry(base, &g_base_list, head) {
+  list_for_each_entry(disp, &base->disp_list, head) {
+   if (display_num >= count)
+    return display_num;
+   displays[display_num++] = disp;
+  }
+ }
+
+ return display_num;
+}
+
+/* detect override for the base connector: always report disconnected so
+ * userspace only sees the shared (virtual) connectors.
+ */
+static enum drm_connector_status shd_display_base_detect(
+ struct drm_connector *connector,
+ bool force,
+ void *disp)
+{
+ return connector_status_disconnected;
+}
+
+/*
+ * Find the drm_connector driving the base encoder and hide it: the
+ * original ops are saved in base->ops and detect is overridden.
+ */
+static int shd_display_init_base_connector(struct drm_device *dev,
+ struct shd_display_base *base)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct sde_connector *sde_conn;
+ int rc = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+  sde_conn = to_sde_connector(connector);
+  encoder = sde_conn->encoder;
+  if (encoder == base->encoder) {
+   base->connector = connector;
+   break;
+  }
+ }
+
+ if (!base->connector) {
+  SDE_ERROR("failed to find connector\n");
+  rc = -ENOENT;
+  goto error;
+ }
+
+ /* save original ops, then force the base connector disconnected */
+ sde_conn = to_sde_connector(base->connector);
+ base->ops = sde_conn->ops;
+ sde_conn->ops.detect = shd_display_base_detect;
+
+ SHD_DEBUG("found base connector %d\n", base->connector->base.id);
+
+error:
+ return rc;
+}
+
+/*
+ * Locate the drm_encoder whose HW resources claim base->intf_idx and
+ * derive the base connector type from its encoder type.
+ */
+static int shd_display_init_base_encoder(struct drm_device *dev,
+ struct shd_display_base *base)
+{
+ struct drm_encoder *encoder;
+ struct sde_encoder_hw_resources hw_res;
+ struct sde_connector_state conn_state = {};
+ int i, rc = 0;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+  sde_encoder_get_hw_resources(encoder,
+   &hw_res, &conn_state.base);
+  for (i = 0; i < INTF_MAX; i++) {
+   if (hw_res.intfs[i] != INTF_MODE_NONE &&
+     base->intf_idx == i) {
+    base->encoder = encoder;
+    goto found;
+   }
+  }
+ }
+
+ if (!base->encoder) {
+  pr_err("can't find base encoder for intf %d\n",
+   base->intf_idx);
+  rc = -ENOENT;
+  goto error;
+ }
+
+found:
+ switch (base->encoder->encoder_type) {
+ case DRM_MODE_ENCODER_DSI:
+  base->connector_type = DRM_MODE_CONNECTOR_DSI;
+  break;
+ case DRM_MODE_ENCODER_TMDS:
+  base->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+  break;
+ default:
+  base->connector_type = DRM_MODE_CONNECTOR_Unknown;
+  break;
+ }
+
+ SHD_DEBUG("found base encoder %d, type %d, connect type %d\n",
+  base->encoder->base.id,
+  base->encoder->encoder_type,
+  base->connector_type);
+
+error:
+ return rc;
+}
+
+/*
+ * Claim the last registered CRTC for the base display, remove it from the
+ * user-visible list, and program the fixed base mode into its state.
+ * NOTE(review): assumes the last crtc in mode_config.crtc_list is free for
+ * the base display, and that crtc->state is already allocated - confirm
+ * against the KMS init ordering.
+ */
+static int shd_display_init_base_crtc(struct drm_device *dev,
+ struct shd_display_base *base)
+{
+ struct drm_crtc *crtc;
+ struct drm_display_mode *drm_mode;
+ int rc = 0;
+
+ crtc = list_last_entry(&dev->mode_config.crtc_list,
+  struct drm_crtc, head);
+
+ base->crtc = crtc;
+ base->encoder->crtc = crtc;
+ SHD_DEBUG("found base crtc %d\n", crtc->base.id);
+
+ /* hide crtc from user */
+ list_del_init(&crtc->head);
+
+ /* fixed mode is used */
+ drm_mode = &base->mode;
+
+ /* update crtc drm structure */
+ crtc->state->active = true;
+ rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
+ if (rc) {
+  SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
+  goto error;
+ }
+ drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
+ drm_mode_copy(&crtc->mode, drm_mode);
+
+ crtc->state->active_changed = true;
+ crtc->state->mode_changed = true;
+ crtc->state->connectors_changed = true;
+
+ if (base->connector) {
+  base->connector->state->crtc = crtc;
+  base->connector->state->best_encoder = base->encoder;
+  base->connector->encoder = base->encoder;
+ }
+
+error:
+ return rc;
+}
+
+/*
+ * Bring up the base display pipeline by replaying the atomic enable
+ * sequence (bridge fixup, encoder mode_set/enable, mixer setup, kickoff)
+ * outside a normal commit. Caller holds base->base_mutex.
+ */
+static void shd_display_enable_base(struct drm_device *dev,
+ struct shd_display_base *base)
+{
+ const struct drm_encoder_helper_funcs *enc_funcs;
+ const struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_display_mode *adjusted_mode;
+ struct sde_crtc *sde_crtc;
+ struct sde_hw_mixer_cfg lm_cfg;
+ struct sde_hw_mixer *hw_lm;
+ int rc, i;
+
+ SHD_DEBUG("enable base display %d\n", base->intf_idx);
+
+ enc_funcs = base->encoder->helper_private;
+ if (!enc_funcs) {
+  SDE_ERROR("failed to find encoder helper\n");
+  return;
+ }
+
+ crtc_funcs = base->crtc->helper_private;
+ if (!crtc_funcs) {
+  SDE_ERROR("failed to find crtc helper\n");
+  return;
+ }
+
+ if (!base->connector) {
+  SDE_ERROR("failed to find base connector\n");
+  return;
+ }
+
+ adjusted_mode = drm_mode_duplicate(dev, &base->mode);
+ if (!adjusted_mode) {
+  SDE_ERROR("failed to create adjusted mode\n");
+  return;
+ }
+
+ drm_bridge_mode_fixup(base->encoder->bridge,
+  &base->mode,
+  adjusted_mode);
+
+ if (enc_funcs->atomic_check) {
+  rc = enc_funcs->atomic_check(base->encoder,
+   base->crtc->state,
+   base->connector->state);
+  if (rc) {
+   SDE_ERROR("encoder atomic check failed\n");
+   goto state_fail;
+  }
+ }
+
+ if (enc_funcs->mode_fixup) {
+  enc_funcs->mode_fixup(base->encoder,
+   &base->mode,
+   adjusted_mode);
+ }
+
+ if (enc_funcs->mode_set) {
+  enc_funcs->mode_set(base->encoder,
+   &base->mode,
+   adjusted_mode);
+ }
+
+ if (crtc_funcs->atomic_begin) {
+  crtc_funcs->atomic_begin(base->crtc,
+   base->crtc->state);
+ }
+
+ sde_crtc = to_sde_crtc(base->crtc);
+ if (!sde_crtc->num_mixers) {
+  SDE_ERROR("no layer mixer found\n");
+  goto state_fail;
+ }
+
+ /* split the base mode width evenly across the active mixers */
+ lm_cfg.out_width = base->mode.hdisplay / sde_crtc->num_mixers;
+ lm_cfg.out_height = base->mode.vdisplay;
+ lm_cfg.flags = 0;
+ for (i = 0; i < sde_crtc->num_mixers; i++) {
+  lm_cfg.right_mixer = i;
+  hw_lm = sde_crtc->mixers[i].hw_lm;
+  hw_lm->ops.setup_mixer_out(hw_lm, &lm_cfg);
+ }
+
+ drm_bridge_mode_set(base->encoder->bridge,
+  &base->mode,
+  adjusted_mode);
+
+ drm_bridge_pre_enable(base->encoder->bridge);
+
+ if (enc_funcs->enable)
+  enc_funcs->enable(base->encoder);
+
+ sde_encoder_kickoff(base->encoder);
+
+ drm_bridge_enable(base->encoder->bridge);
+
+ base->enabled = true;
+
+state_fail:
+ drm_mode_destroy(dev, adjusted_mode);
+}
+
+/*
+ * Tear down the base display pipeline (bridge and encoder disable).
+ * Caller holds base->base_mutex.
+ */
+static void shd_display_disable_base(struct drm_device *dev,
+ struct shd_display_base *base)
+{
+ const struct drm_encoder_helper_funcs *enc_funcs;
+
+ SHD_DEBUG("disable base display %d\n", base->intf_idx);
+
+ enc_funcs = base->encoder->helper_private;
+ if (!enc_funcs) {
+  SDE_ERROR("failed to find encoder helper\n");
+  return;
+ }
+
+ drm_bridge_disable(base->encoder->bridge);
+
+ if (enc_funcs->disable)
+  enc_funcs->disable(base->encoder);
+
+ drm_bridge_post_disable(base->encoder->bridge);
+
+ base->enabled = false;
+}
+
+/* Mark this shared display enabled; power up the base on first use. */
+static void shd_display_enable(struct shd_display *display)
+{
+ struct drm_device *dev = display->drm_dev;
+ struct shd_display_base *base = display->base;
+
+ SHD_DEBUG("enable %s conn %d\n", display->name,
+  DRMID(display->connector));
+
+ mutex_lock(&base->base_mutex);
+
+ display->enabled = true;
+
+ if (!base->enabled)
+  shd_display_enable_base(dev, base);
+
+ mutex_unlock(&base->base_mutex);
+}
+
+/*
+ * Mark this shared display disabled; power down the base only when no
+ * sibling display on the same base is still enabled.
+ */
+static void shd_display_disable(struct shd_display *display)
+{
+ struct drm_device *dev = display->drm_dev;
+ struct shd_display_base *base = display->base;
+ struct shd_display *p;
+ bool enabled = false;
+
+ SHD_DEBUG("disable %s conn %d\n", display->name,
+  DRMID(display->connector));
+
+ mutex_lock(&base->base_mutex);
+
+ display->enabled = false;
+
+ if (!base->enabled)
+  goto end;
+
+ list_for_each_entry(p, &base->disp_list, head) {
+  if (p->enabled) {
+   enabled = true;
+   break;
+  }
+ }
+
+ if (!enabled)
+  shd_display_disable_base(dev, base);
+
+end:
+ mutex_unlock(&base->base_mutex);
+}
+
+/**
+ * shd_display_prepare_commit - power up base displays needed by a commit
+ * @sde_kms: Pointer to sde kms
+ * @state: Atomic state being committed
+ *
+ * For every shared connector that becomes active with a modeset, enable
+ * its underlying base display first.
+ * Fix: connector->state->crtc was dereferenced without a NULL check; a
+ * shared connector detached from any crtc would oops here.
+ */
+void shd_display_prepare_commit(struct sde_kms *sde_kms,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ int i;
+
+ if (!sde_kms->shd_display_count)
+  return;
+
+ for_each_connector_in_state(state, connector, old_conn_state, i) {
+  struct sde_connector *sde_conn;
+
+  sde_conn = to_sde_connector(connector);
+  if (!sde_conn->is_shared)
+   continue;
+
+  if (!connector->state->best_encoder)
+   continue;
+
+  /* the connector may not be bound to any crtc in the new state */
+  if (!connector->state->crtc)
+   continue;
+
+  if (!connector->state->crtc->state->active ||
+    !drm_atomic_crtc_needs_modeset(
+    connector->state->crtc->state))
+   continue;
+
+  shd_display_enable(sde_conn->display);
+ }
+}
+
+/*
+ * After a commit completes, disable shared displays whose crtc went from
+ * active to inactive; the base is powered down once its last shared
+ * display is off (handled inside shd_display_disable()).
+ */
+void shd_display_complete_commit(struct sde_kms *sde_kms,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ int i;
+
+ if (!sde_kms->shd_display_count)
+  return;
+
+ for_each_connector_in_state(state, connector, old_conn_state, i) {
+  struct sde_connector *sde_conn;
+  struct drm_crtc_state *old_crtc_state;
+  unsigned int crtc_idx;
+
+  sde_conn = to_sde_connector(connector);
+  if (!sde_conn->is_shared)
+   continue;
+
+  if (!old_conn_state->crtc)
+   continue;
+
+  /* old crtc state lives in the swapped atomic state array */
+  crtc_idx = drm_crtc_index(old_conn_state->crtc);
+  old_crtc_state = state->crtc_states[crtc_idx];
+
+  if (!old_crtc_state->active ||
+   !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
+   continue;
+
+  /* still active after the commit: nothing to disable */
+  if (old_conn_state->crtc->state->active)
+   continue;
+
+  shd_display_disable(sde_conn->display);
+ }
+}
+
+/*
+ * Late init hook: bind a base crtc to every shared display base that does
+ * not have one yet (runs after all crtcs are registered).
+ */
+int shd_display_post_init(struct sde_kms *sde_kms)
+{
+ struct shd_display *disp;
+ struct shd_display_base *base;
+ int rc = 0, i;
+
+ for (i = 0; i < sde_kms->shd_display_count; i++) {
+  disp = sde_kms->shd_displays[i];
+  base = disp->base;
+
+  if (base->crtc)
+   continue;
+
+  rc = shd_display_init_base_crtc(disp->drm_dev, base);
+  if (rc) {
+   SDE_ERROR("failed initialize base crtc\n");
+   break;
+  }
+ }
+
+ return rc;
+}
+
+/*
+ * Connector get_info callback: lazily resolves the base encoder and
+ * connector on first call, then reports the shared display capabilities.
+ */
+int shd_connector_get_info(struct msm_display_info *info, void *data)
+{
+ struct shd_display *display = data;
+ int rc;
+
+ if (!info || !data || !display->base) {
+  pr_err("invalid params\n");
+  return -EINVAL;
+ }
+
+ /* first caller resolves the base encoder/connector pair */
+ if (!display->base->encoder) {
+  rc = shd_display_init_base_encoder(display->drm_dev,
+   display->base);
+  if (rc) {
+   SDE_ERROR("failed to find base encoder\n");
+   return rc;
+  }
+
+  rc = shd_display_init_base_connector(display->drm_dev,
+   display->base);
+  if (rc) {
+   SDE_ERROR("failed to find base connector\n");
+   return rc;
+  }
+ }
+
+ info->intf_type = display->base->connector_type;
+ info->capabilities = MSM_DISPLAY_CAP_VID_MODE |
+  MSM_DISPLAY_CAP_HOT_PLUG;
+ info->is_connected = true;
+ info->num_of_h_tiles = 1;
+ info->h_tile_instance[0] = display->base->intf_idx;
+ info->capabilities |= MSM_DISPLAY_CAP_SHARED;
+
+ return 0;
+}
+
+/*
+ * Connector post_init callback: mark the sde_connector as shared and
+ * publish shared display properties to userspace.
+ * Fix: validate parameters like the sibling shd_connector_* callbacks do;
+ * previously a NULL connector/display would be dereferenced.
+ */
+int shd_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display)
+{
+ struct shd_display *disp = display;
+ struct sde_connector *conn;
+
+ if (!connector || !info || !display) {
+  pr_err("invalid params\n");
+  return -EINVAL;
+ }
+
+ disp->connector = connector;
+ conn = to_sde_connector(connector);
+ conn->is_shared = true;
+ conn->shared_roi = disp->roi;
+
+ sde_kms_info_add_keyint(info, "max_blendstages",
+  disp->stage_range.size);
+
+ sde_kms_info_add_keystr(info, "display type",
+  disp->display_type);
+
+ /* expose padding when the source mode is taller than the dest roi */
+ if (disp->src.h != disp->roi.h) {
+  sde_kms_info_add_keyint(info, "padding height",
+   disp->roi.h);
+ }
+
+ return 0;
+}
+
+/*
+ * Connector detect callback: forward to the base connector's original
+ * detect so all shared displays mirror the base hotplug state.
+ * Fix: guard against a missing saved ops.detect before calling through it.
+ */
+enum drm_connector_status shd_connector_detect(struct drm_connector *conn,
+ bool force,
+ void *display)
+{
+ struct shd_display *disp = display;
+ struct sde_connector *sde_conn;
+ enum drm_connector_status status = connector_status_disconnected;
+
+ if (!conn || !display || !disp->base) {
+  pr_err("invalid params\n");
+  goto end;
+ }
+
+ mutex_lock(&disp->base->base_mutex);
+ if (disp->base->connector && disp->base->ops.detect) {
+  sde_conn = to_sde_connector(disp->base->connector);
+  status = disp->base->ops.detect(disp->base->connector,
+   force, sde_conn->display);
+ }
+ mutex_unlock(&disp->base->base_mutex);
+
+end:
+ return status;
+}
+
+/*
+ * Connector get_modes callback: derive one fixed mode from the base
+ * display timing, shrunk to this display's source size.
+ * Fix: drm_mode_duplicate() can return NULL (allocation failure) and was
+ * dereferenced unchecked; parameters are now validated as well.
+ */
+int shd_connector_get_modes(struct drm_connector *connector,
+ void *display)
+{
+ struct drm_display_mode drm_mode;
+ struct shd_display *disp = display;
+ struct drm_display_mode *m;
+
+ if (!connector || !disp || !disp->base)
+  return 0;
+
+ memcpy(&drm_mode, &disp->base->mode, sizeof(drm_mode));
+
+ drm_mode.hdisplay = disp->src.w;
+ drm_mode.hsync_start = drm_mode.hdisplay;
+ drm_mode.hsync_end = drm_mode.hsync_start;
+ drm_mode.htotal = drm_mode.hsync_end;
+
+ drm_mode.vdisplay = disp->src.h;
+ drm_mode.vsync_start = drm_mode.vdisplay;
+ drm_mode.vsync_end = drm_mode.vsync_start;
+ drm_mode.vtotal = drm_mode.vsync_end;
+
+ m = drm_mode_duplicate(disp->drm_dev, &drm_mode);
+ if (!m)
+  return 0;
+
+ drm_mode_set_name(m);
+ drm_mode_probed_add(connector, m);
+
+ return 1;
+}
+
+/* mode_valid callback: the single derived mode is always acceptable */
+enum drm_mode_status shd_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display)
+{
+ return MODE_OK;
+}
+
+/*
+ * drm_bridge stubs: the real enable/disable sequencing happens on the
+ * base display (shd_display_enable/disable), so the per-display bridge
+ * intentionally does nothing.
+ */
+static int shd_bridge_attach(struct drm_bridge *shd_bridge)
+{
+ return 0;
+}
+
+static void shd_bridge_pre_enable(struct drm_bridge *drm_bridge)
+{
+}
+
+static void shd_bridge_enable(struct drm_bridge *drm_bridge)
+{
+}
+
+static void shd_bridge_disable(struct drm_bridge *drm_bridge)
+{
+}
+
+static void shd_bridge_post_disable(struct drm_bridge *drm_bridge)
+{
+}
+
+
+static void shd_bridge_mode_set(struct drm_bridge *drm_bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static bool shd_bridge_mode_fixup(struct drm_bridge *drm_bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_bridge_funcs shd_bridge_ops = {
+ .attach = shd_bridge_attach,
+ .mode_fixup = shd_bridge_mode_fixup,
+ .pre_enable = shd_bridge_pre_enable,
+ .enable = shd_bridge_enable,
+ .disable = shd_bridge_disable,
+ .post_disable = shd_bridge_post_disable,
+ .mode_set = shd_bridge_mode_set,
+};
+
+/*
+ * Create and attach the no-op shd bridge for one shared display encoder.
+ * NOTE(review): priv->bridges[priv->num_bridges++] has no bounds check
+ * against the bridges array capacity, and dev->dev_private is not
+ * validated - confirm against msm_drv limits.
+ */
+int shd_drm_bridge_init(void *data, struct drm_encoder *encoder)
+{
+ int rc = 0;
+ struct shd_bridge *bridge;
+ struct drm_device *dev;
+ struct shd_display *display = data;
+ struct msm_drm_private *priv = NULL;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge) {
+  rc = -ENOMEM;
+  goto error;
+ }
+
+ dev = display->drm_dev;
+ bridge->display = display;
+ bridge->base.funcs = &shd_bridge_ops;
+ bridge->base.encoder = encoder;
+
+ priv = dev->dev_private;
+
+ rc = drm_bridge_attach(dev, &bridge->base);
+ if (rc) {
+  SDE_ERROR("failed to attach bridge, rc=%d\n", rc);
+  goto error_free_bridge;
+ }
+
+ encoder->bridge = &bridge->base;
+ priv->bridges[priv->num_bridges++] = &bridge->base;
+ display->bridge = &bridge->base;
+
+ return 0;
+
+error_free_bridge:
+ kfree(bridge);
+error:
+ return rc;
+}
+
+/*
+ * Detach and free the shd bridge for one shared display.
+ * Fixes: container_of was computed before the NULL check, and
+ * display->bridge was left dangling after kfree (use-after-free risk on
+ * a repeated deinit or later access).
+ */
+void shd_drm_bridge_deinit(void *data)
+{
+ struct shd_display *display = data;
+ struct shd_bridge *bridge;
+
+ if (!display || !display->bridge)
+  return;
+
+ bridge = container_of(display->bridge, struct shd_bridge, base);
+
+ if (bridge->base.encoder)
+  bridge->base.encoder->bridge = NULL;
+
+ display->bridge = NULL;
+ kfree(bridge);
+}
+
+/**
+ * sde_shd_bind - bind the shared display device with the master drm device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ * Returns: Zero on success
+ */
+static int sde_shd_bind(struct device *dev, struct device *master, void *data)
+{
+ struct shd_display *shd_dev;
+
+ shd_dev = platform_get_drvdata(to_platform_device(dev));
+ if (!shd_dev) {
+  SDE_ERROR("invalid shd device\n");
+  return -EINVAL;
+ }
+
+ shd_dev->drm_dev = dev_get_drvdata(master);
+
+ return 0;
+}
+
+/**
+ * sde_shd_unbind - unbind the shared display from the master drm device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ */
+static void sde_shd_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ struct shd_display *shd_dev;
+
+ shd_dev = platform_get_drvdata(to_platform_device(dev));
+ if (!shd_dev) {
+  SDE_ERROR("invalid shd device\n");
+  return;
+ }
+
+ shd_dev->drm_dev = NULL;
+}
+
+/* component ops hooking this device into the master drm device lifecycle */
+static const struct component_ops sde_shd_comp_ops = {
+ .bind = sde_shd_bind,
+ .unbind = sde_shd_unbind,
+};
+
+/*
+ * Parse per-display DT properties: source mode size, destination roi
+ * inside the base display, and the reserved blend stage range.
+ * Fix: range[] was read uninitialized when the optional
+ * "qcom,blend-stage-range" property failed to parse (undefined behavior);
+ * it now defaults to {0, 0}. The parse error still propagates via rc,
+ * matching the original return behavior.
+ */
+static int sde_shd_parse_display(struct shd_display *display)
+{
+ struct device_node *of_node = display->pdev->dev.of_node;
+ struct device_node *of_src, *of_roi;
+ u32 src_w, src_h, dst_x, dst_y, dst_w, dst_h;
+ u32 range[2] = {0, 0};
+ int rc;
+
+ display->name = of_node->full_name;
+
+ display->display_type = of_get_property(of_node,
+  "qcom,display-type", NULL);
+ if (!display->display_type)
+  display->display_type = "unknown";
+
+ display->base_of = of_parse_phandle(of_node,
+  "qcom,shared-display-base", 0);
+ if (!display->base_of) {
+  pr_err("No base device present\n");
+  rc = -ENODEV;
+  goto error;
+ }
+
+ of_src = of_get_child_by_name(of_node, "qcom,shared-display-src-mode");
+ if (!of_src) {
+  pr_err("No src mode present\n");
+  rc = -ENODEV;
+  goto error;
+ }
+
+ rc = of_property_read_u32(of_src, "qcom,mode-h-active",
+  &src_w);
+ if (rc) {
+  pr_err("Failed to parse h active\n");
+  goto error;
+ }
+
+ rc = of_property_read_u32(of_src, "qcom,mode-v-active",
+  &src_h);
+ if (rc) {
+  pr_err("Failed to parse v active\n");
+  goto error;
+ }
+
+ of_roi = of_get_child_by_name(of_node, "qcom,shared-display-dst-mode");
+ if (!of_roi) {
+  pr_err("No roi mode present\n");
+  rc = -ENODEV;
+  goto error;
+ }
+
+ rc = of_property_read_u32(of_roi, "qcom,mode-x-offset",
+  &dst_x);
+ if (rc) {
+  pr_err("Failed to parse x offset\n");
+  goto error;
+ }
+
+ rc = of_property_read_u32(of_roi, "qcom,mode-y-offset",
+  &dst_y);
+ if (rc) {
+  pr_err("Failed to parse y offset\n");
+  goto error;
+ }
+
+ rc = of_property_read_u32(of_roi, "qcom,mode-width",
+  &dst_w);
+ if (rc) {
+  pr_err("Failed to parse roi width\n");
+  goto error;
+ }
+
+ rc = of_property_read_u32(of_roi, "qcom,mode-height",
+  &dst_h);
+ if (rc) {
+  pr_err("Failed to parse roi height\n");
+  goto error;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,blend-stage-range",
+  range, 2);
+ if (rc)
+  pr_err("Failed to parse blend stage range\n");
+
+ display->src.w = src_w;
+ display->src.h = src_h;
+ display->roi.x = dst_x;
+ display->roi.y = dst_y;
+ display->roi.w = dst_w;
+ display->roi.h = dst_h;
+ display->stage_range.start = range[0];
+ display->stage_range.size = range[1];
+
+ SHD_DEBUG("%s src %dx%d dst %d,%d %dx%d range %d-%d\n", display->name,
+  display->src.w, display->src.h,
+  display->roi.x, display->roi.y,
+  display->roi.w, display->roi.h,
+  display->stage_range.start,
+  display->stage_range.size);
+
+error:
+ return rc;
+}
+
+/**
+ * sde_shd_parse_base - parse base display timing from device tree
+ * @base: Pointer to the base display to populate
+ *
+ * Reads the intf index and the fixed mode timing from the
+ * "qcom,shared-display-base-mode" child node and fills in
+ * @base->intf_idx and @base->mode.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int sde_shd_parse_base(struct shd_display_base *base)
+{
+	struct device_node *of_node = base->of_node;
+	struct device_node *node;
+	struct drm_display_mode *mode = &base->mode;
+	u32 intf_idx, h_active, v_active, refresh_rate, clock_khz;
+	u32 h_front_porch, h_pulse_width, h_back_porch;
+	u32 v_front_porch, v_pulse_width, v_back_porch;
+	bool h_active_high, v_active_high;
+	u32 flags = 0;
+	int rc;
+
+	/*
+	 * Read all u32 properties into u32 temporaries:
+	 * of_property_read_u32() requires a u32 *, while the
+	 * drm_display_mode / intf_idx fields are plain ints.
+	 */
+	rc = of_property_read_u32(of_node, "qcom,shared-display-base-intf",
+			&intf_idx);
+	if (rc) {
+		SDE_ERROR("failed to read base intf, rc=%d\n", rc);
+		goto fail;
+	}
+	base->intf_idx = intf_idx;
+
+	node = of_get_child_by_name(of_node, "qcom,shared-display-base-mode");
+	if (!node) {
+		SDE_ERROR("No base mode present\n");
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-h-active", &h_active);
+	if (rc) {
+		SDE_ERROR("failed to read h-active, rc=%d\n", rc);
+		goto put_node;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-h-front-porch",
+			&h_front_porch);
+	if (rc) {
+		SDE_ERROR("failed to read h-front-porch, rc=%d\n", rc);
+		goto put_node;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-h-pulse-width",
+			&h_pulse_width);
+	if (rc) {
+		SDE_ERROR("failed to read h-pulse-width, rc=%d\n", rc);
+		goto put_node;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-h-back-porch",
+			&h_back_porch);
+	if (rc) {
+		SDE_ERROR("failed to read h-back-porch, rc=%d\n", rc);
+		goto put_node;
+	}
+
+	h_active_high = of_property_read_bool(node,
+			"qcom,mode-h-active-high");
+
+	rc = of_property_read_u32(node, "qcom,mode-v-active", &v_active);
+	if (rc) {
+		SDE_ERROR("failed to read v-active, rc=%d\n", rc);
+		goto put_node;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-v-front-porch",
+			&v_front_porch);
+	if (rc) {
+		SDE_ERROR("failed to read v-front-porch, rc=%d\n", rc);
+		goto put_node;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-v-pulse-width",
+			&v_pulse_width);
+	if (rc) {
+		SDE_ERROR("failed to read v-pulse-width, rc=%d\n", rc);
+		goto put_node;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-v-back-porch",
+			&v_back_porch);
+	if (rc) {
+		SDE_ERROR("failed to read v-back-porch, rc=%d\n", rc);
+		goto put_node;
+	}
+
+	v_active_high = of_property_read_bool(node,
+			"qcom,mode-v-active-high");
+
+	rc = of_property_read_u32(node, "qcom,mode-refresh-rate",
+			&refresh_rate);
+	if (rc) {
+		SDE_ERROR("failed to read refresh-rate, rc=%d\n", rc);
+		goto put_node;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-clock-in-khz",
+			&clock_khz);
+	if (rc) {
+		SDE_ERROR("failed to read clock, rc=%d\n", rc);
+		goto put_node;
+	}
+
+	/* derive the sync/total values from active size plus porches */
+	mode->hdisplay = h_active;
+	mode->hsync_start = mode->hdisplay + h_front_porch;
+	mode->hsync_end = mode->hsync_start + h_pulse_width;
+	mode->htotal = mode->hsync_end + h_back_porch;
+	mode->vdisplay = v_active;
+	mode->vsync_start = mode->vdisplay + v_front_porch;
+	mode->vsync_end = mode->vsync_start + v_pulse_width;
+	mode->vtotal = mode->vsync_end + v_back_porch;
+	mode->vrefresh = refresh_rate;
+	mode->clock = clock_khz;
+	if (h_active_high)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	if (v_active_high)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+	mode->flags = flags;
+
+	SHD_DEBUG("base mode h[%d,%d,%d,%d] v[%d,%d,%d,%d] %d %xH %d\n",
+			mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal, mode->vdisplay,
+			mode->vsync_start, mode->vsync_end, mode->vtotal,
+			mode->vrefresh, mode->flags, mode->clock);
+
+put_node:
+	/* drop the reference taken by of_get_child_by_name() */
+	of_node_put(node);
+fail:
+	return rc;
+}
+
+/**
+ * sde_shd_probe - load shared display module
+ * @pdev: Pointer to platform device
+ *
+ * Parses the shared display DT node, attaches the display to its base
+ * (creating the base on first use), and registers with the component
+ * framework.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int sde_shd_probe(struct platform_device *pdev)
+{
+	struct shd_display *shd_dev;
+	struct shd_display_base *base;
+	bool new_base = false;
+	int ret;
+
+	shd_dev = devm_kzalloc(&pdev->dev, sizeof(*shd_dev), GFP_KERNEL);
+	if (!shd_dev)
+		return -ENOMEM;
+
+	shd_dev->pdev = pdev;
+
+	ret = sde_shd_parse_display(shd_dev);
+	if (ret) {
+		SDE_ERROR("failed to parse shared display\n");
+		goto error;
+	}
+
+	platform_set_drvdata(pdev, shd_dev);
+
+	/* reuse an existing base created for the same DT node, if any */
+	list_for_each_entry(base, &g_base_list, head) {
+		if (base->of_node == shd_dev->base_of)
+			goto next;
+	}
+
+	base = devm_kzalloc(&pdev->dev, sizeof(*base), GFP_KERNEL);
+	if (!base) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	new_base = true;
+
+	mutex_init(&base->base_mutex);
+	INIT_LIST_HEAD(&base->disp_list);
+	base->of_node = shd_dev->base_of;
+
+	ret = sde_shd_parse_base(base);
+	if (ret) {
+		SDE_ERROR("failed to parse shared display base\n");
+		goto base_error;
+	}
+
+	list_add_tail(&base->head, &g_base_list);
+
+next:
+	shd_dev->base = base;
+	list_add_tail(&shd_dev->head, &base->disp_list);
+	SHD_DEBUG("add shd to intf %d\n", base->intf_idx);
+
+	ret = component_add(&pdev->dev, &sde_shd_comp_ops);
+	if (ret) {
+		/* was unreachable before: goto preceded the log */
+		pr_err("component add failed\n");
+		goto comp_error;
+	}
+
+	return 0;
+
+comp_error:
+	/* undo the list insertions done above before freeing */
+	list_del_init(&shd_dev->head);
+	if (!new_base)
+		/* base is shared with other displays; must not free it */
+		goto error;
+	list_del_init(&base->head);
+base_error:
+	devm_kfree(&pdev->dev, base);
+error:
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, shd_dev);
+	return ret;
+}
+
+/**
+ * sde_shd_remove - unload shared display module
+ * @pdev: Pointer to platform device
+ *
+ * Detaches this display from its base display; when it was the last
+ * display attached, the base is removed from the global list and freed.
+ *
+ * Return: always 0.
+ */
+static int sde_shd_remove(struct platform_device *pdev)
+{
+	struct shd_display *shd_dev = platform_get_drvdata(pdev);
+	struct shd_display_base *base;
+	bool last_disp;
+
+	if (!shd_dev)
+		return 0;
+
+	SHD_DEBUG("\n");
+
+	base = shd_dev->base;
+	mutex_lock(&base->base_mutex);
+	list_del_init(&shd_dev->head);
+	last_disp = list_empty(&base->disp_list);
+	if (last_disp)
+		list_del_init(&base->head);
+	mutex_unlock(&base->base_mutex);
+
+	/* free outside the lock, exactly as the lock no longer guards it */
+	if (last_disp)
+		devm_kfree(&pdev->dev, base);
+
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, shd_dev);
+
+	return 0;
+}
+
+/* Device tree compatible strings matched by this driver. */
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,shared-display"},
+	{}
+};
+
+/*
+ * Platform driver for shared display instances.
+ * suppress_bind_attrs disables manual bind/unbind via sysfs.
+ */
+static struct platform_driver sde_shd_driver = {
+	.probe = sde_shd_probe,
+	.remove = sde_shd_remove,
+	.driver = {
+		.name = "sde_shd",
+		.of_match_table = dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+/* Register the shared-display platform driver at module load. */
+static int __init sde_shd_register(void)
+{
+	return platform_driver_register(&sde_shd_driver);
+}
+
+/* Unregister the shared-display platform driver at module unload. */
+static void __exit sde_shd_unregister(void)
+{
+	platform_driver_unregister(&sde_shd_driver);
+}
+
+module_init(sde_shd_register);
+module_exit(sde_shd_unregister);
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_SHD_H_
+#define _SDE_SHD_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "msm_drv.h"
+
+/**
+ * struct shd_mode_info - destination rectangle inside the base display
+ * @x_offset: x offset inside the shared display
+ * @y_offset: y offset inside the shared display
+ * @width: width inside the shared display
+ * @height: height inside the shared display
+ */
+struct shd_mode_info {
+	int x_offset;
+	int y_offset;
+	int width;
+	int height;
+};
+
+/**
+ * struct shd_stage_range - blend stages reserved for one shared display
+ * @start: first blend stage index owned by this display
+ * @size: number of consecutive blend stages owned
+ */
+struct shd_stage_range {
+	u32 start;
+	u32 size;
+};
+
+/**
+ * struct shd_display_base - physical base display shared by displays
+ * @base_mutex: protects @disp_list and this base's list membership
+ * @mode: fixed timing of the base display, parsed from device tree
+ * @crtc: DRM CRTC bound to the base display (set elsewhere)
+ * @encoder: DRM encoder bound to the base display (set elsewhere)
+ * @connector: DRM connector bound to the base display (set elsewhere)
+ * @head: entry in the global list of base displays
+ * @disp_list: list of shd_display instances carved out of this base
+ * @of_node: device tree node of qcom,shared-display-base
+ * @ops: connector callbacks for the base display
+ * @intf_idx: hardware interface index of the base display
+ * @connector_type: DRM connector type of the base display
+ * @enabled: whether the base display is currently enabled
+ */
+struct shd_display_base {
+	struct mutex base_mutex;
+	struct drm_display_mode mode;
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct list_head head;
+	struct list_head disp_list;
+	struct device_node *of_node;
+	struct sde_connector_ops ops;
+
+	int intf_idx;
+	int connector_type;
+	bool enabled;
+};
+
+/**
+ * struct shd_display - virtual display carved out of a base display
+ * @drm_dev: DRM device this display belongs to
+ * @name: display name (from device tree)
+ * @display_type: display type string
+ * @base: base display this instance is attached to
+ * @bridge: DRM bridge for this display
+ * @connector: DRM connector exposed for this display
+ * @base_of: device tree handle of the base display node
+ * @src: source resolution of the shared display
+ * @roi: destination rectangle inside the base display
+ * @stage_range: blend stages reserved for this display
+ * @pdev: backing platform device
+ * @vsync_comp: completion signalled on vsync
+ * @head: entry in base->disp_list
+ * @enabled: whether this display is currently enabled
+ */
+struct shd_display {
+	struct drm_device *drm_dev;
+	const char *name;
+	const char *display_type;
+
+	struct shd_display_base *base;
+	struct drm_bridge *bridge;
+	struct drm_connector *connector;
+
+	struct device_node *base_of;
+	struct sde_rect src;
+	struct sde_rect roi;
+	struct shd_stage_range stage_range;
+
+	struct platform_device *pdev;
+	struct completion vsync_comp;
+	struct list_head head;
+
+	bool enabled;
+};
+
+#ifdef CONFIG_DRM_SDE_SHD
+/* Returns the number of shared displays discovered. */
+int shd_display_get_num_of_displays(void);
+/* Fills @displays with up to @count display handles; returns count filled. */
+int shd_display_get_displays(void **displays, int count);
+/* Performs post-initialization of shared displays; returns 0 on success. */
+int shd_display_post_init(struct sde_kms *sde_kms);
+/* Hook invoked before an atomic commit touching shared displays. */
+void shd_display_prepare_commit(struct sde_kms *sde_kms,
+		struct drm_atomic_state *state);
+/* Hook invoked after an atomic commit touching shared displays. */
+void shd_display_complete_commit(struct sde_kms *sde_kms,
+		struct drm_atomic_state *state);
+
+/**
+ * shd_connector_post_init - callback to perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to connector info to populate
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+int shd_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display);
+
+/**
+ * shd_connector_detect - callback to determine if connector is connected
+ * @conn: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ * Returns: Connector 'is connected' status
+ */
+enum drm_connector_status shd_connector_detect(struct drm_connector *conn,
+		bool force,
+		void *display);
+
+/**
+ * shd_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Number of modes added
+ */
+int shd_connector_get_modes(struct drm_connector *connector,
+		void *display);
+
+/**
+ * shd_connector_mode_valid - callback to determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status shd_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display);
+
+/**
+ * shd_connector_get_info - retrieve connector display info
+ * @info: Out parameter. Information of the connected display
+ * @display: Pointer to private display structure
+ * Returns: zero on success
+ */
+int shd_connector_get_info(struct msm_display_info *info, void *display);
+
+/**
+ * shd_drm_bridge_init() - initializes DRM bridge object
+ *		for shared display
+ * @display: Handle to the display.
+ * @encoder: Pointer to the encoder object which is connected to the
+ *	     display.
+ * Return: error code.
+ */
+int shd_drm_bridge_init(void *display,
+		struct drm_encoder *encoder);
+
+/**
+ * shd_drm_bridge_deinit() - destroys DRM bridge for the display
+ * @display: Handle to the display.
+ * Return: error code.
+ */
+void shd_drm_bridge_deinit(void *display);
+#else
+/*
+ * Stub implementations used when CONFIG_DRM_SDE_SHD is disabled:
+ * queries report zero displays, detect reports unknown, mode_valid
+ * rejects all modes, get_info fails, and the hooks are no-ops.
+ */
+static inline
+int shd_display_get_num_of_displays(void)
+{
+	return 0;
+}
+
+static inline
+int shd_display_get_displays(void **displays, int count)
+{
+	return 0;
+}
+
+static inline
+int shd_display_post_init(struct sde_kms *sde_kms)
+{
+	return 0;
+}
+
+static inline
+void shd_display_prepare_commit(struct sde_kms *sde_kms,
+		struct drm_atomic_state *state)
+{
+}
+
+static inline
+void shd_display_complete_commit(struct sde_kms *sde_kms,
+		struct drm_atomic_state *state)
+{
+}
+
+static inline
+int shd_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	return 0;
+}
+
+static inline
+enum drm_connector_status shd_connector_detect(struct drm_connector *conn,
+		bool force,
+		void *display)
+{
+	return connector_status_unknown;
+}
+
+static inline
+int shd_connector_get_modes(struct drm_connector *connector,
+		void *display)
+{
+	return 0;
+}
+
+static inline
+enum drm_mode_status shd_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display)
+{
+	return MODE_ERROR;
+}
+
+static inline
+int shd_connector_get_info(struct msm_display_info *info, void *display)
+{
+	return -EINVAL;
+}
+
+static inline
+int shd_drm_bridge_init(void *display,
+		struct drm_encoder *encoder)
+{
+	return 0;
+}
+
+static inline
+void shd_drm_bridge_deinit(void *display)
+{
+}
+#endif
+
+#endif /* _SDE_SHD_H_ */